repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
abhikumar22/MYBLOG | blg/Lib/site-packages/social_core/backends/base.py | 1 | 10025 | import time
from requests import request, ConnectionError
from ..utils import SSLHttpAdapter, module_member, parse_qs, user_agent
from ..exceptions import AuthFailed
class BaseAuth(object):
"""A authentication backend that authenticates the user based on
the provider response"""
name = '' # provider name, it's stored in database
supports_inactive_user = False # Django auth
ID_KEY = None
EXTRA_DATA = None
GET_ALL_EXTRA_DATA = False
REQUIRES_EMAIL_VALIDATION = False
SEND_USER_AGENT = False
SSL_PROTOCOL = None
def __init__(self, strategy, redirect_uri=None):
self.strategy = strategy
self.redirect_uri = redirect_uri
self.data = self.strategy.request_data()
self.redirect_uri = self.strategy.absolute_uri(
self.redirect_uri
)
def setting(self, name, default=None):
"""Return setting value from strategy"""
return self.strategy.setting(name, default=default, backend=self)
def start(self):
if self.uses_redirect():
return self.strategy.redirect(self.auth_url())
else:
return self.strategy.html(self.auth_html())
def complete(self, *args, **kwargs):
return self.auth_complete(*args, **kwargs)
def auth_url(self):
"""Must return redirect URL to auth provider"""
raise NotImplementedError('Implement in subclass')
def auth_html(self):
"""Must return login HTML content returned by provider"""
raise NotImplementedError('Implement in subclass')
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
raise NotImplementedError('Implement in subclass')
def process_error(self, data):
"""Process data for errors, raise exception if needed.
Call this method on any override of auth_complete."""
pass
def authenticate(self, *args, **kwargs):
"""Authenticate user using social credentials
Authentication is made if this is the correct backend, backend
verification is made by kwargs inspection for current backend
name presence.
"""
# Validate backend and arguments. Require that the Social Auth
# response be passed in as a keyword argument, to make sure we
# don't match the username/password calling conventions of
# authenticate.
if 'backend' not in kwargs or kwargs['backend'].name != self.name or \
'strategy' not in kwargs or 'response' not in kwargs:
return None
self.strategy = kwargs.get('strategy') or self.strategy
self.redirect_uri = kwargs.get('redirect_uri') or self.redirect_uri
self.data = self.strategy.request_data()
kwargs.setdefault('is_new', False)
pipeline = self.strategy.get_pipeline(self)
args, kwargs = self.strategy.clean_authenticate_args(*args, **kwargs)
return self.pipeline(pipeline, *args, **kwargs)
def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
out = self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
if not isinstance(out, dict):
return out
user = out.get('user')
if user:
user.social_user = out.get('social')
user.is_new = out.get('is_new')
return user
def disconnect(self, *args, **kwargs):
pipeline = self.strategy.get_disconnect_pipeline(self)
kwargs['name'] = self.name
kwargs['user_storage'] = self.strategy.storage.user
return self.run_pipeline(pipeline, *args, **kwargs)
def run_pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
out = kwargs.copy()
out.setdefault('strategy', self.strategy)
out.setdefault('backend', out.pop(self.name, None) or self)
out.setdefault('request', self.strategy.request_data())
out.setdefault('details', {})
if not isinstance(pipeline_index, int) or \
pipeline_index < 0 or \
pipeline_index >= len(pipeline):
pipeline_index = 0
for idx, name in enumerate(pipeline[pipeline_index:]):
out['pipeline_index'] = pipeline_index + idx
func = module_member(name)
result = func(*args, **out) or {}
if not isinstance(result, dict):
return result
out.update(result)
return out
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
"""Return default extra data to store in extra_data field"""
data = {
# store the last time authentication took place
'auth_time': int(time.time())
}
extra_data_entries = []
if self.GET_ALL_EXTRA_DATA or self.setting('GET_ALL_EXTRA_DATA', False):
extra_data_entries = response.keys()
else:
extra_data_entries = (self.EXTRA_DATA or []) + self.setting('EXTRA_DATA', [])
for entry in extra_data_entries:
if not isinstance(entry, (list, tuple)):
entry = (entry,)
size = len(entry)
if size >= 1 and size <= 3:
if size == 3:
name, alias, discard = entry
elif size == 2:
(name, alias), discard = entry, False
elif size == 1:
name = alias = entry[0]
discard = False
value = response.get(name) or details.get(name)
if discard and not value:
continue
data[alias] = value
return data
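# Illustrative sketch, not part of the original source: the EXTRA_DATA
# entries handled above may be plain names, (name, alias) pairs, or
# (name, alias, discard) triples. Assuming a hypothetical backend with
#     EXTRA_DATA = ['expires', ('id', 'user_id'), ('locale', 'locale', True)]
# a response of {'expires': 3600, 'id': 42} and empty details would be
# stored roughly as {'auth_time': <now>, 'expires': 3600, 'user_id': 42};
# the 'locale' entry is skipped because it is marked discardable and has
# no value in the response.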
def auth_allowed(self, response, details):
"""Return True if the user should be allowed to authenticate, by
default check if email is whitelisted (if there's a whitelist)"""
emails = self.setting('WHITELISTED_EMAILS', [])
domains = self.setting('WHITELISTED_DOMAINS', [])
email = details.get('email')
allowed = True
if email and (emails or domains):
domain = email.split('@', 1)[1]
allowed = email in emails or domain in domains
return allowed
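# Illustrative note, not part of the original source: with the
# WHITELISTED_DOMAINS setting set to ['example.com'] and no
# WHITELISTED_EMAILS, a response whose details carry
# email='[email protected]' is allowed by auth_allowed() above, while
# '[email protected]' is rejected; when neither whitelist setting is
# defined, every email is accepted.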
def get_user_id(self, details, response):
"""Return a unique ID for the current user, by default from server
response."""
return response.get(self.ID_KEY)
def get_user_details(self, response):
"""Must return user details in a know internal struct:
{'username': <username if any>,
'email': <user email if any>,
'fullname': <user full name if any>,
'first_name': <user first name if any>,
'last_name': <user last name if any>}
"""
raise NotImplementedError('Implement in subclass')
def get_user_names(self, fullname='', first_name='', last_name=''):
# Avoid None values
fullname = fullname or ''
first_name = first_name or ''
last_name = last_name or ''
if fullname and not (first_name or last_name):
try:
first_name, last_name = fullname.split(' ', 1)
except ValueError:
first_name = first_name or fullname or ''
last_name = last_name or ''
fullname = fullname or ' '.join((first_name, last_name))
return fullname.strip(), first_name.strip(), last_name.strip()
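# Illustrative note, not part of the original source: get_user_names()
# splits only on the first space, so get_user_names('Ada Lovelace King')
# returns ('Ada Lovelace King', 'Ada', 'Lovelace King'), while passing
# first_name/last_name explicitly leaves them untouched and only fills
# in the missing fullname.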
def get_user(self, user_id):
"""
Return user with given ID from the User model used by this backend.
This is called by django.contrib.auth.middleware.
"""
return self.strategy.get_user(user_id)
def continue_pipeline(self, partial):
"""Continue previous halted pipeline"""
return self.strategy.authenticate(self,
pipeline_index=partial.next_step,
*partial.args,
**partial.kwargs)
def auth_extra_arguments(self):
"""Return extra arguments needed on auth process. The defaults can be
overridden by GET parameters."""
extra_arguments = self.setting('AUTH_EXTRA_ARGUMENTS', {}).copy()
extra_arguments.update((key, self.data[key]) for key in extra_arguments
if key in self.data)
return extra_arguments
def uses_redirect(self):
"""Return True if this provider uses redirect url method,
otherwise return False."""
return True
def request(self, url, method='GET', *args, **kwargs):
kwargs.setdefault('headers', {})
if self.setting('VERIFY_SSL') is not None:
kwargs.setdefault('verify', self.setting('VERIFY_SSL'))
kwargs.setdefault('timeout', self.setting('REQUESTS_TIMEOUT') or
self.setting('URLOPEN_TIMEOUT'))
if self.SEND_USER_AGENT and 'User-Agent' not in kwargs['headers']:
kwargs['headers']['User-Agent'] = self.setting('USER_AGENT') or \
user_agent()
try:
if self.SSL_PROTOCOL:
session = SSLHttpAdapter.ssl_adapter_session(self.SSL_PROTOCOL)
response = session.request(method, url, *args, **kwargs)
else:
response = request(method, url, *args, **kwargs)
except ConnectionError as err:
raise AuthFailed(self, str(err))
response.raise_for_status()
return response
def get_json(self, url, *args, **kwargs):
return self.request(url, *args, **kwargs).json()
def get_querystring(self, url, *args, **kwargs):
return parse_qs(self.request(url, *args, **kwargs).text)
def get_key_and_secret(self):
"""Return tuple with Consumer Key and Consumer Secret for current
service provider. Must return (key, secret), order *must* be respected.
"""
return self.setting('KEY'), self.setting('SECRET')
| gpl-3.0 | -677,159,543,657,564,700 | 39.587045 | 89 | 0.587731 | false |
Reflexe/doc_to_pdf | Windows/program/python-core-3.5.0/lib/crypt.py | 104 | 1879 | """Wrapper to the POSIX crypt library call and associated functionality."""
import _crypt
import string as _string
from random import SystemRandom as _SystemRandom
from collections import namedtuple as _namedtuple
_saltchars = _string.ascii_letters + _string.digits + './'
_sr = _SystemRandom()
class _Method(_namedtuple('_Method', 'name ident salt_chars total_size')):
"""Class representing a salt method per the Modular Crypt Format or the
legacy 2-character crypt method."""
def __repr__(self):
return '<crypt.METHOD_{}>'.format(self.name)
def mksalt(method=None):
"""Generate a salt for the specified method.
If not specified, the strongest available method will be used.
"""
if method is None:
method = methods[0]
s = '${}$'.format(method.ident) if method.ident else ''
s += ''.join(_sr.choice(_saltchars) for char in range(method.salt_chars))
return s
def crypt(word, salt=None):
"""Return a string representing the one-way hash of a password, with a salt
prepended.
If ``salt`` is not specified or is ``None``, the strongest
available method will be selected and a salt generated. Otherwise,
``salt`` may be one of the ``crypt.METHOD_*`` values, or a string as
returned by ``crypt.mksalt()``.
"""
if salt is None or isinstance(salt, _Method):
salt = mksalt(salt)
return _crypt.crypt(word, salt)
# available salting/crypto methods
METHOD_CRYPT = _Method('CRYPT', None, 2, 13)
METHOD_MD5 = _Method('MD5', '1', 8, 34)
METHOD_SHA256 = _Method('SHA256', '5', 16, 63)
METHOD_SHA512 = _Method('SHA512', '6', 16, 106)
methods = []
for _method in (METHOD_SHA512, METHOD_SHA256, METHOD_MD5):
_result = crypt('', _method)
if _result and len(_result) == _method.total_size:
methods.append(_method)
methods.append(METHOD_CRYPT)
del _result, _method
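# --- Illustrative usage sketch, not part of the original module ---
# (assumes a POSIX system where the _crypt extension is available; the
# concrete hash strings differ per run because mksalt() picks random salt)
if __name__ == '__main__':
    salt = mksalt(METHOD_SHA512)           # e.g. '$6$gz1SvI9sAIwdLh5A'
    hashed = crypt('correct horse', salt)
    # verification re-uses the stored hash as the salt argument
    assert crypt('correct horse', hashed) == hashed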
| mpl-2.0 | -7,258,003,073,911,361,000 | 29.306452 | 79 | 0.666844 | false |
brisad/grec | setup.py | 1 | 2031 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
user_options = [('tox-args=', 'a', 'Arguments to pass to tox')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ""
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
import shlex
errno = tox.cmdline(args=shlex.split(self.tox_args))
sys.exit(errno)
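# Illustrative note, not part of the original file: because the Tox class
# is wired in below via cmdclass={'test': Tox}, the test suite would
# typically be invoked with something like
#
#     python setup.py test --tox-args="-e py27,py34"
#
# where the --tox-args value is forwarded verbatim to tox.cmdline().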
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
'termcolor'
]
test_requirements = [
'pytest', 'tox'
]
setup(
name='grec',
version='0.2.0',
description='Colorize terminal text with regular expressions.',
long_description=readme + '\n\n' + history,
author='Michael Brennan',
author_email='[email protected]',
url='https://github.com/brisad/grec',
packages=[
'grec',
],
scripts=['scripts/grec'],
package_dir={'grec':
'grec'},
include_package_data=True,
install_requires=requirements,
license="GPL",
zip_safe=False,
keywords='grec',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'
],
test_suite='tests',
tests_require=test_requirements,
cmdclass={'test': Tox}
)
| gpl-3.0 | 6,247,863,193,786,951,000 | 25.723684 | 75 | 0.606598 | false |
resmo/ansible | test/units/module_utils/common/parameters/test_list_no_log_values.py | 22 | 1187 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.parameters import list_no_log_values
@pytest.fixture
def params():
return {
'secret': 'undercookwovennativity',
'other_secret': 'cautious-slate-makeshift',
'state': 'present',
'value': 5,
}
def test_list_no_log_values(params):
argument_spec = {
'secret': {'type': 'str', 'no_log': True},
'other_secret': {'type': 'str', 'no_log': True},
'state': {'type': 'str'},
'value': {'type': 'int'},
}
result = set(('undercookwovennativity', 'cautious-slate-makeshift'))
assert result == list_no_log_values(argument_spec, params)
def test_list_no_log_values_no_secrets(params):
argument_spec = {
'other_secret': {'type': 'str', 'no_log': False},
'state': {'type': 'str'},
'value': {'type': 'int'},
}
result = set()
assert result == list_no_log_values(argument_spec, params)
| gpl-3.0 | -5,025,738,247,655,301,000 | 27.95122 | 92 | 0.601516 | false |
Exa-Networks/exaproxy | lib/exaproxy/icap/parser.py | 1 | 5875 | #!/usr/bin/env python
# encoding: utf-8
from .request import ICAPRequestFactory
from .response import ICAPResponseFactory
from .header import ICAPResponseHeaderFactory
class ICAPParser (object):
ICAPResponseHeaderFactory = ICAPResponseHeaderFactory
ICAPRequestFactory = ICAPRequestFactory
ICAPResponseFactory = ICAPResponseFactory
VERSIONS = ('ICAP/1.0',)
METHODS = ('REQMOD', 'OPTIONS')
HEADERS = ('cache-control', 'connection', 'date', 'trailer', 'upgrade', 'via',
'authorization','allow','from','host','referer','user-agent', 'preview',
'encapsulated','proxy-authenticate','proxy-authorization', 'istag')
def __init__ (self, configuration):
self.configuration = configuration
self.header_factory = self.ICAPResponseHeaderFactory(configuration)
self.request_factory = self.ICAPRequestFactory(configuration)
self.response_factory = self.ICAPResponseFactory(configuration)
def parseRequestLine (self, request_line):
request_parts = request_line.split() if request_line else []
if len(request_parts) == 3:
method, url, version = request_parts
method = method.upper()
version = version.upper()
else:
method, url, version = None, None, None
return method, url, version
def parseResponseLine (self, response_line):
response_parts = response_line.split(' ', 2) if response_line else []
if len(response_parts) == 3:
version, code, status = response_parts
if code.isdigit():
code = int(code)
else:
version, code, status = None, None, None
else:
version, code, status = None, None, None
return version, code, status
def readHeaders (self, request_lines):
headers = {}
for line in request_lines:
if not line:
break
if ':' not in line:
headers = None
break
key, value = line.split(':', 1)
key = key.lower().strip()
value = value.strip()
if key in self.HEADERS or key.startswith('x-'):
headers[key] = value
if key == 'pragma' and ':' in value:
pkey, pvalue = value.split(':', 1)
pkey = pkey.lower().strip()
pvalue = pvalue.strip()
headers.setdefault(key, {})[pkey] = pvalue
return headers
def parseRequest (self, icap_string, http_string):
request_lines = (p for ss in icap_string.split('\r\n') for p in ss.split('\n'))
try:
request_line = request_lines.next()
except StopIteration:
request_line = None
method, url, version = self.parseRequestLine(request_line)
if method in self.METHODS and version in self.VERSIONS:
headers = self.readHeaders(request_lines)
site_name = url.rsplit(',',1)[-1] if ',' in url else 'default'
headers['x-customer-name'] = site_name
else:
headers = None
offsets = self.getOffsets(headers) if headers is not None else []
length, complete = self.getBodyLength(offsets)
if set(('res-hdr', 'res-body')).intersection(dict(offsets)):
headers = None
return self.request_factory.create(method, url, version, headers, icap_string, http_string, offsets, length, complete) if headers else None
def getOffsets (self, headers):
encapsulated_line = headers.get('encapsulated', '')
parts = (p.strip() for p in encapsulated_line.split(',') if '=' in p)
pairs = (p.split('=',1) for p in parts)
offsets = ((k,int(v)) for (k,v) in pairs if v.isdigit())
return sorted(offsets, lambda (_,a), (__,b): 1 if a >= b else -1)
def getBodyLength (self, offsets):
final, offset = offsets[-1] if offsets else ('null-body', 0)
return offset, final == 'null-body'
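# Illustrative sketch, not part of the original source: for an ICAP header
# 'Encapsulated: req-hdr=0, req-body=412', getOffsets() yields
# [('req-hdr', 0), ('req-body', 412)] and getBodyLength() then returns
# (412, False); with 'Encapsulated: req-hdr=0, null-body=170' it returns
# (170, True), i.e. the encapsulated message is already complete.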
def splitResponseParts (self, offsets, body_string):
final, offset = offsets[-1] if offsets else (None, None)
if final != 'null-body':
offsets = offsets + [('null-body', len(body_string))]
names = [name for name,offset in offsets]
positions = [offset for name,offset in offsets]
blocks = ((positions[i], positions[i+1]) for i in xrange(len(positions)-1))
strings = (body_string[start:end] for start,end in blocks)
return dict(zip(names, strings))
def parseResponseHeader (self, header_string):
response_lines = (p for ss in header_string.split('\r\n') for p in ss.split('\n'))
try:
response_line = response_lines.next()
except StopIteration:
response_line = None
version, code, status = self.parseResponseLine(response_line)
if version in self.VERSIONS:
headers = self.readHeaders(response_lines)
headers['server'] = 'EXA Proxy 1.0'
else:
headers = {}
offsets = self.getOffsets(headers) if headers is not None else []
length, complete = self.getBodyLength(offsets)
return self.header_factory.create(version, code, status, headers, header_string, offsets, length, complete)
def continueResponse (self, response_header, body_string):
version, code, status = response_header.info
headers = response_header.headers
header_string = response_header.header_string
# split the body string into components
parts = self.splitResponseParts(response_header.offsets, body_string)
response_string = parts.get('res-hdr', '')
request_string = parts.get('req-hdr', '')
if request_string.startswith('CONNECT'):
intercept_string, new_request_string = self.splitResponse(request_string)
if headers.get('x-intercept', '') != 'active' and not new_request_string:
intercept_string = None
else:
request_string = new_request_string
else:
intercept_string = None
body_string = parts.get('res-body', None) if response_string else parts.get('req-body', None)
return self.response_factory.create(version, code, status, headers, header_string, request_string, response_string, body_string, intercept_string)
def splitResponse (self, response_string):
for delimiter in ('\n\n', '\r\n\r\n'):
if delimiter in response_string:
header_string, subheader_string = response_string.split(delimiter, 1)
break
else:
header_string, subheader_string = response_string, ''
return header_string, subheader_string
| bsd-2-clause | -1,654,896,000,895,622,000 | 30.417112 | 148 | 0.69566 | false |
alvaroaleman/ansible | lib/ansible/modules/network/nxos/nxos_ntp_auth.py | 12 | 17744 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_ntp_auth
version_added: "2.2"
short_description: Manages NTP authentication.
description:
- Manages NTP authentication.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
notes:
- If C(state=absent), the module will attempt to remove the given key configuration.
If a matching key configuration isn't found on the device, the module will fail.
- If C(state=absent) and C(authentication=on), authentication will be turned off.
- If C(state=absent) and C(authentication=off), authentication will be turned on.
options:
key_id:
description:
- Authentication key identifier (numeric).
required: true
md5string:
description:
- MD5 String.
required: true
default: null
auth_type:
description:
- Whether the given md5string is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
required: false
default: text
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
required: false
default: false
choices: ['true', 'false']
authentication:
description:
- Turns NTP authentication on or off.
required: false
default: null
choices: ['on', 'off']
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP authentication configuration
- nxos_ntp_auth:
key_id: 32
md5string: hello
auth_type: text
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"auth_type": "text", "authentication": "off",
"key_id": "32", "md5string": "helloWorld",
"trusted_key": "true"}
existing:
description:
- k/v pairs of existing ntp authentication
type: dict
sample: {"authentication": "off", "trusted_key": "false"}
end_state:
description: k/v pairs of ntp authentication after module execution
returned: always
type: dict
sample: {"authentication": "off", "key_id": "32",
"md5string": "kapqgWjwdg", "trusted_key": "true"}
state:
description: state as sent in from the playbook
returned: always
type: string
sample: "present"
updates:
description: command sent to the device
returned: always
type: list
sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
def execute_config_command(commands, module):
try:
module.configure(commands)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
except AttributeError:
try:
commands.insert(0, 'configure')
module.cli.add_commands(commands, output='config')
module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
"""Get response for when transport=cli. This is kind of a hack and mainly
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
if | json returns an XML string, it is a valid command, but that the
resource doesn't exist yet. Instead, the output will be a raw string
when issuing commands containing 'show run'.
"""
if 'xml' in response[0]:
body = []
elif 'show run' in command:
body = response
else:
try:
body = [json.loads(response[0])]
except ValueError:
module.fail_json(msg='Command does not support JSON output',
command=command)
return body
def execute_show(cmds, module, command_type=None):
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
try:
if command_type:
command_type = command_type_map.get(command_type)
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
else:
module.cli.add_commands(cmds, raw=True)
response = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh(command, response, module)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_ntp_auth(module):
command = 'show ntp authentication-status'
body = execute_show_command(command, module)[0]
ntp_auth_str = body['authentication']
if 'enabled' in ntp_auth_str:
ntp_auth = True
else:
ntp_auth = False
return ntp_auth
def get_ntp_trusted_key(module):
trusted_key_list = []
command = 'show run | inc ntp.trusted-key'
trusted_key_str = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
if trusted_key_str:
trusted_keys = trusted_key_str.splitlines()
else:
trusted_keys = []
for line in trusted_keys:
if line:
trusted_key_list.append(str(line.split()[2]))
return trusted_key_list
def get_ntp_auth_key(key_id, module):
authentication_key = {}
command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
auth_regex = (".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
"md5\s(?P<md5string>\S+).*")
body = execute_show_command(command, module, command_type='cli_show_ascii')
try:
match_authentication = re.match(auth_regex, body[0], re.DOTALL)
group_authentication = match_authentication.groupdict()
key_id = group_authentication["key_id"]
md5string = group_authentication['md5string']
authentication_key['key_id'] = key_id
authentication_key['md5string'] = md5string
except (AttributeError, TypeError):
authentication_key = {}
return authentication_key
def get_ntp_auth_info(key_id, module):
auth_info = get_ntp_auth_key(key_id, module)
trusted_key_list = get_ntp_trusted_key(module)
auth_power = get_ntp_auth(module)
if key_id in trusted_key_list:
auth_info['trusted_key'] = 'true'
else:
auth_info['trusted_key'] = 'false'
if auth_power:
auth_info['authentication'] = 'on'
else:
auth_info['authentication'] = 'off'
return auth_info
def auth_type_to_num(auth_type):
if auth_type == 'encrypt':
return '7'
else:
return '0'
def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
ntp_auth_cmds = []
auth_type_num = auth_type_to_num(auth_type)
ntp_auth_cmds.append(
'ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if trusted_key == 'true':
ntp_auth_cmds.append(
'ntp trusted-key {0}'.format(key_id))
elif trusted_key == 'false':
ntp_auth_cmds.append(
'no ntp trusted-key {0}'.format(key_id))
if authentication == 'on':
ntp_auth_cmds.append(
'ntp authenticate')
elif authentication == 'off':
ntp_auth_cmds.append(
'no ntp authenticate')
return ntp_auth_cmds
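# Illustrative sketch, not part of the original module: a call such as
#   set_ntp_auth_key('32', 'helloWorld', 'text', 'true', 'on')
# would be expected to produce
#   ['ntp authentication-key 32 md5 helloWorld 0',
#    'ntp trusted-key 32',
#    'ntp authenticate']
# with the trailing '0' marking the md5 string as cleartext (auth_type
# 'encrypt' would emit '7' instead).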
def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
auth_remove_cmds = []
auth_type_num = auth_type_to_num(auth_type)
auth_remove_cmds.append(
'no ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if authentication == 'on':
auth_remove_cmds.append(
'no ntp authenticate')
elif authentication == 'off':
auth_remove_cmds.append(
'ntp authenticate')
return auth_remove_cmds
def main():
argument_spec = dict(
key_id=dict(required=True, type='str'),
md5string=dict(required=True, type='str'),
auth_type=dict(choices=['text', 'encrypt'], default='text'),
trusted_key=dict(choices=['true', 'false'], default='false'),
authentication=dict(choices=['on', 'off']),
state=dict(choices=['absent', 'present'], default='present'),
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
key_id = module.params['key_id']
md5string = module.params['md5string']
auth_type = module.params['auth_type']
trusted_key = module.params['trusted_key']
authentication = module.params['authentication']
state = module.params['state']
args = dict(key_id=key_id, md5string=md5string,
auth_type=auth_type, trusted_key=trusted_key,
authentication=authentication)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_ntp_auth_info(key_id, module)
end_state = existing
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'present':
if delta:
command = set_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, delta.get('authentication'))
if command:
commands.append(command)
elif state == 'absent':
if existing:
auth_toggle = None
if authentication == existing.get('authentication'):
auth_toggle = authentication
command = remove_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, auth_toggle)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
try:
execute_config_command(cmds, module)
except ShellError:
clie = get_exception()
module.fail_json(msg=str(clie) + ": " + cmds)
end_state = get_ntp_auth_info(key_id, module)
delta = dict(set(end_state.items()).difference(existing.items()))
if delta or (len(existing) != len(end_state)):
changed = True
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,630,164,256,637,387,000 | 30.020979 | 88 | 0.585606 | false |
hi2srihari/crab | scikits/crab/metrics/sampling.py | 10 | 3357 | """Utilities for sampling techniques"""
# Author: Marcel Caraciolo <[email protected]>
# License: BSD Style.
import numpy as np
from ..utils import check_random_state
from math import ceil
class SplitSampling(object):
""" Random Split Sampling the dataset into two sets.
Parameters
----------
n : int
Total number of elements in the dataset.
evaluation_fraction : float (default 0.7)
Should be between 0.0 and 1.0 and represent the proportion of
the dataset to include in the training set.
indices : boolean, optional (default False)
Return split with integer indices or boolean mask.
Integer indices are useful when dealing with sparse matrices
that cannot be indexed by boolean masks.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, n, evaluation_fraction=0.7, indices=False,
random_state=None):
self.n = n
self.evaluation_fraction = evaluation_fraction
self.random_state = random_state
self.indices = indices
def split(self, evaluation_fraction=None, indices=False,
random_state=None, permutation=True):
"""
Random Split Sampling the dataset into two sets.
Parameters
----------
evaluation_fraction : float (default None)
Should be between 0.0 and 1.0 and represent the proportion of
the dataset to include in the training set. If evaluation_fraction
is None, the value passed to the constructor will be used.
indices : boolean, optional (default False)
Return split with integer indices or boolean mask.
Integer indices are useful when dealing with sparse matrices
that cannot be indexed by boolean masks.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
permutation: boolean, optional (default True)
For testing purposes, to deactivate the permutation.
"""
if evaluation_fraction is not None:
self.evaluation_fraction = evaluation_fraction
if random_state is not None:
self.random_state = random_state
self.indices = indices
rng = self.random_state = check_random_state(self.random_state)
n_train = ceil(self.evaluation_fraction * self.n)
#random partition
permutation = rng.permutation(self.n) if permutation \
else np.arange(self.n)
ind_train = permutation[-n_train:]
ind_ignore = permutation[:-n_train]
if self.indices:
return ind_train, ind_ignore
else:
train_mask = np.zeros(self.n, dtype=np.bool)
train_mask[ind_train] = True
test_mask = np.zeros(self.n, dtype=np.bool)
test_mask[ind_ignore] = True
return train_mask, test_mask
def __repr__(self):
return ('%s(%d, evaluation_fraction=%s, indices=%s, '
'random_state=%d)' % (
self.__class__.__name__,
self.n,
str(self.evaluation_fraction),
self.indices,
self.random_state,
))
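# Illustrative usage sketch, not part of the original module (kept as a
# comment because the relative imports above keep this file from running
# stand-alone): with n=10 and evaluation_fraction=0.7,
#
#     sampler = SplitSampling(10, evaluation_fraction=0.7, random_state=0)
#     train_mask, ignore_mask = sampler.split()
#
# returns two boolean masks selecting 7 and 3 elements respectively;
# passing indices=True returns integer index arrays instead.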
| bsd-3-clause | -5,166,204,451,373,372,000 | 34.712766 | 78 | 0.60709 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/twitter.py | 1 | 18792 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
determine_ext,
dict_get,
ExtractorError,
float_or_none,
int_or_none,
remove_end,
try_get,
xpath_text,
)
from .periscope import PeriscopeIE
class TwitterBaseIE(InfoExtractor):
def _extract_formats_from_vmap_url(self, vmap_url, video_id):
vmap_data = self._download_xml(vmap_url, video_id)
video_url = xpath_text(vmap_data, './/MediaFile').strip()
if determine_ext(video_url) == 'm3u8':
return self._extract_m3u8_formats(
video_url, video_id, ext='mp4', m3u8_id='hls',
entry_protocol='m3u8_native')
return [{
'url': video_url,
}]
@staticmethod
def _search_dimensions_in_video_url(a_format, video_url):
m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url)
if m:
a_format.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
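# Illustrative note, not part of the original extractor: for a media URL
# such as 'https://video.twimg.com/ext_tw_video/1/pu/vid/640x360/a.mp4'
# (a hypothetical example), the helper above adds width=640 and
# height=360 to the format dict; URLs without a '<w>x<h>' path segment
# leave both fields unset.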
class TwitterCardIE(TwitterBaseIE):
IE_NAME = 'twitter:card'
_VALID_URL = r'https?://(?:www\.)?twitter\.com/i/(?P<path>cards/tfw/v1|videos(?:/tweet)?)/(?P<id>\d+)'
_TESTS = [
{
'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
# MD5 checksums are different in different places
'info_dict': {
'id': '560070183650213889',
'ext': 'mp4',
'title': 'Twitter web player',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 30.033,
},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
'md5': '7ee2a553b63d1bccba97fbed97d9e1c8',
'info_dict': {
'id': '623160978427936768',
'ext': 'mp4',
'title': 'Twitter web player',
'thumbnail': r're:^https?://.*$',
},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
'md5': 'b6d9683dd3f48e340ded81c0e917ad46',
'info_dict': {
'id': 'dq4Oj5quskI',
'ext': 'mp4',
'title': 'Ubuntu 11.10 Overview',
'description': 'md5:a831e97fa384863d6e26ce48d1c43376',
'upload_date': '20111013',
'uploader': 'OMG! Ubuntu!',
'uploader_id': 'omgubuntu',
},
'add_ie': ['Youtube'],
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568',
'md5': '6dabeaca9e68cbb71c99c322a4b42a11',
'info_dict': {
'id': 'iBb2x00UVlv',
'ext': 'mp4',
'upload_date': '20151113',
'uploader_id': '1189339351084113920',
'uploader': 'ArsenalTerje',
'title': 'Vine by ArsenalTerje',
'timestamp': 1447451307,
},
'add_ie': ['Vine'],
}, {
'url': 'https://twitter.com/i/videos/tweet/705235433198714880',
'md5': '884812a2adc8aaf6fe52b15ccbfa3b88',
'info_dict': {
'id': '705235433198714880',
'ext': 'mp4',
'title': 'Twitter web player',
'thumbnail': r're:^https?://.*',
},
}, {
'url': 'https://twitter.com/i/videos/752274308186120192',
'only_matching': True,
},
]
_API_BASE = 'https://api.twitter.com/1.1'
def _parse_media_info(self, media_info, video_id):
formats = []
for media_variant in media_info.get('variants', []):
media_url = media_variant['url']
if media_url.endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(media_url, video_id, ext='mp4', m3u8_id='hls'))
elif media_url.endswith('.mpd'):
formats.extend(self._extract_mpd_formats(media_url, video_id, mpd_id='dash'))
else:
tbr = int_or_none(dict_get(media_variant, ('bitRate', 'bitrate')), scale=1000)
a_format = {
'url': media_url,
'format_id': 'http-%d' % tbr if tbr else 'http',
'tbr': tbr,
}
# Reported bitRate may be zero
if not a_format['tbr']:
del a_format['tbr']
self._search_dimensions_in_video_url(a_format, media_url)
formats.append(a_format)
return formats
def _extract_mobile_formats(self, username, video_id):
webpage = self._download_webpage(
'https://mobile.twitter.com/%s/status/%s' % (username, video_id),
video_id, 'Downloading mobile webpage',
headers={
# A recent mobile UA is necessary for `gt` cookie
'User-Agent': 'Mozilla/5.0 (Android 6.0.1; Mobile; rv:54.0) Gecko/54.0 Firefox/54.0',
})
main_script_url = self._html_search_regex(
r'<script[^>]+src="([^"]+main\.[^"]+)"', webpage, 'main script URL')
main_script = self._download_webpage(
main_script_url, video_id, 'Downloading main script')
bearer_token = self._search_regex(
r'BEARER_TOKEN\s*:\s*"([^"]+)"',
main_script, 'bearer token')
# https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-show-id
api_data = self._download_json(
'%s/statuses/show/%s.json' % (self._API_BASE, video_id),
video_id, 'Downloading API data',
headers={
'Authorization': 'Bearer ' + bearer_token,
})
media_info = try_get(api_data, lambda o: o['extended_entities']['media'][0]['video_info']) or {}
return self._parse_media_info(media_info, video_id)
def _real_extract(self, url):
path, video_id = re.search(self._VALID_URL, url).groups()
config = None
formats = []
duration = None
urls = [url]
if path.startswith('cards/'):
urls.append('https://twitter.com/i/videos/' + video_id)
for u in urls:
webpage = self._download_webpage(
u, video_id, headers={'Referer': 'https://twitter.com/'})
iframe_url = self._html_search_regex(
r'<iframe[^>]+src="((?:https?:)?//(?:www\.youtube\.com/embed/[^"]+|(?:www\.)?vine\.co/v/\w+/card))"',
webpage, 'video iframe', default=None)
if iframe_url:
return self.url_result(iframe_url)
config = self._parse_json(self._html_search_regex(
r'data-(?:player-)?config="([^"]+)"', webpage,
'data player config', default='{}'),
video_id)
if config.get('source_type') == 'vine':
return self.url_result(config['player_url'], 'Vine')
periscope_url = PeriscopeIE._extract_url(webpage)
if periscope_url:
return self.url_result(periscope_url, PeriscopeIE.ie_key())
video_url = config.get('video_url') or config.get('playlist', [{}])[0].get('source')
if video_url:
if determine_ext(video_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(video_url, video_id, ext='mp4', m3u8_id='hls'))
else:
f = {
'url': video_url,
}
self._search_dimensions_in_video_url(f, video_url)
formats.append(f)
vmap_url = config.get('vmapUrl') or config.get('vmap_url')
if vmap_url:
formats.extend(
self._extract_formats_from_vmap_url(vmap_url, video_id))
media_info = None
for entity in config.get('status', {}).get('entities', []):
if 'mediaInfo' in entity:
media_info = entity['mediaInfo']
if media_info:
formats.extend(self._parse_media_info(media_info, video_id))
duration = float_or_none(media_info.get('duration', {}).get('nanos'), scale=1e9)
username = config.get('user', {}).get('screen_name')
if username:
formats.extend(self._extract_mobile_formats(username, video_id))
if formats:
title = self._search_regex(r'<title>([^<]+)</title>', webpage, 'title')
thumbnail = config.get('posterImageUrl') or config.get('image_src')
duration = float_or_none(config.get('duration'), scale=1000) or duration
break
if not formats:
headers = {
'Authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw',
'Referer': url,
}
ct0 = self._get_cookies(url).get('ct0')
if ct0:
headers['csrf_token'] = ct0.value
guest_token = self._download_json(
'%s/guest/activate.json' % self._API_BASE, video_id,
'Downloading guest token', data=b'',
headers=headers)['guest_token']
headers['x-guest-token'] = guest_token
self._set_cookie('api.twitter.com', 'gt', guest_token)
config = self._download_json(
'%s/videos/tweet/config/%s.json' % (self._API_BASE, video_id),
video_id, headers=headers)
track = config['track']
vmap_url = track.get('vmapUrl')
if vmap_url:
formats = self._extract_formats_from_vmap_url(vmap_url, video_id)
else:
playback_url = track['playbackUrl']
if determine_ext(playback_url) == 'm3u8':
formats = self._extract_m3u8_formats(
playback_url, video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='hls')
else:
formats = [{
'url': playback_url,
}]
title = 'Twitter web player'
thumbnail = config.get('posterImage')
duration = float_or_none(track.get('durationMs'), scale=1000)
self._remove_duplicate_formats(formats)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
class TwitterIE(InfoExtractor):
IE_NAME = 'twitter'
_VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?:i/web|(?P<user_id>[^/]+))/status/(?P<id>\d+)'
_TEMPLATE_URL = 'https://twitter.com/%s/status/%s'
_TEMPLATE_STATUSES_URL = 'https://twitter.com/statuses/%s'
_TESTS = [{
'url': 'https://twitter.com/freethenipple/status/643211948184596480',
'info_dict': {
'id': '643211948184596480',
'ext': 'mp4',
'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"',
'uploader': 'FREE THE NIPPLE',
'uploader_id': 'freethenipple',
'duration': 12.922,
},
}, {
'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1',
'md5': 'f36dcd5fb92bf7057f155e7d927eeb42',
'info_dict': {
'id': '657991469417025536',
'ext': 'mp4',
'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai',
'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"',
'thumbnail': r're:^https?://.*\.png',
'uploader': 'Gifs',
'uploader_id': 'giphz',
},
'expected_warnings': ['height', 'width'],
'skip': 'Account suspended',
}, {
'url': 'https://twitter.com/starwars/status/665052190608723968',
'info_dict': {
'id': '665052190608723968',
'ext': 'mp4',
'title': 'Star Wars - A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens.',
'description': 'Star Wars on Twitter: "A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens."',
'uploader_id': 'starwars',
'uploader': 'Star Wars',
},
}, {
'url': 'https://twitter.com/BTNBrentYarina/status/705235433198714880',
'info_dict': {
'id': '705235433198714880',
'ext': 'mp4',
'title': 'Brent Yarina - Khalil Iverson\'s missed highlight dunk. And made highlight dunk. In one highlight.',
'description': 'Brent Yarina on Twitter: "Khalil Iverson\'s missed highlight dunk. And made highlight dunk. In one highlight."',
'uploader_id': 'BTNBrentYarina',
'uploader': 'Brent Yarina',
},
'params': {
# The same video as https://twitter.com/i/videos/tweet/705235433198714880
# Test case of TwitterCardIE
'skip_download': True,
},
}, {
'url': 'https://twitter.com/jaydingeer/status/700207533655363584',
'info_dict': {
'id': '700207533655363584',
'ext': 'mp4',
'title': 'JG - BEAT PROD: @suhmeduh #Damndaniel',
'description': 'JG on Twitter: "BEAT PROD: @suhmeduh https://t.co/HBrQ4AfpvZ #Damndaniel https://t.co/byBooq2ejZ"',
'thumbnail': r're:^https?://.*\.jpg',
'uploader': 'JG',
'uploader_id': 'jaydingeer',
'duration': 30.0,
},
}, {
'url': 'https://twitter.com/Filmdrunk/status/713801302971588609',
'md5': '89a15ed345d13b86e9a5a5e051fa308a',
'info_dict': {
'id': 'MIOxnrUteUd',
'ext': 'mp4',
'title': 'Vince Mancini - Vine of the day',
'description': 'Vince Mancini on Twitter: "Vine of the day https://t.co/xmTvRdqxWf"',
'uploader': 'Vince Mancini',
'uploader_id': 'Filmdrunk',
'timestamp': 1402826626,
'upload_date': '20140615',
},
'add_ie': ['Vine'],
}, {
'url': 'https://twitter.com/captainamerica/status/719944021058060289',
'info_dict': {
'id': '719944021058060289',
'ext': 'mp4',
'title': 'Captain America - @King0fNerd Are you sure you made the right choice? Find out in theaters.',
'description': 'Captain America on Twitter: "@King0fNerd Are you sure you made the right choice? Find out in theaters. https://t.co/GpgYi9xMJI"',
'uploader_id': 'captainamerica',
'uploader': 'Captain America',
'duration': 3.17,
},
}, {
'url': 'https://twitter.com/OPP_HSD/status/779210622571536384',
'info_dict': {
'id': '1zqKVVlkqLaKB',
'ext': 'mp4',
'title': 'Sgt Kerry Schmidt - LIVE on #Periscope: Road rage, mischief, assault, rollover and fire in one occurrence',
'description': 'Sgt Kerry Schmidt on Twitter: "LIVE on #Periscope: Road rage, mischief, assault, rollover and fire in one occurrence https://t.co/EKrVgIXF3s"',
'upload_date': '20160923',
'uploader_id': 'OPP_HSD',
'uploader': 'Sgt Kerry Schmidt',
'timestamp': 1474613214,
},
'add_ie': ['Periscope'],
}, {
# has mp4 formats via mobile API
'url': 'https://twitter.com/news_al3alm/status/852138619213144067',
'info_dict': {
'id': '852138619213144067',
'ext': 'mp4',
'title': 'عالم الأخبار - كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة',
'description': 'عالم الأخبار on Twitter: "كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة https://t.co/xg6OhpyKfN"',
'uploader': 'عالم الأخبار',
'uploader_id': 'news_al3alm',
'duration': 277.4,
},
}, {
'url': 'https://twitter.com/i/web/status/910031516746514432',
'info_dict': {
'id': '910031516746514432',
'ext': 'mp4',
'title': 'Préfet de Guadeloupe - [Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre.',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'Préfet de Guadeloupe on Twitter: "[Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre. https://t.co/mwx01Rs4lo"',
'uploader': 'Préfet de Guadeloupe',
'uploader_id': 'Prefet971',
'duration': 47.48,
},
'params': {
'skip_download': True, # requires ffmpeg
},
}, {
# card via api.twitter.com/1.1/videos/tweet/config
'url': 'https://twitter.com/LisPower1/status/1001551623938805763',
'info_dict': {
'id': '1001551623938805763',
'ext': 'mp4',
'title': 're:.*?Shep is on a roll today.*?',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:63b036c228772523ae1924d5f8e5ed6b',
'uploader': 'Lis Power',
'uploader_id': 'LisPower1',
'duration': 111.278,
},
'params': {
'skip_download': True, # requires ffmpeg
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user_id = mobj.group('user_id')
twid = mobj.group('id')
webpage, urlh = self._download_webpage_handle(
self._TEMPLATE_STATUSES_URL % twid, twid)
if 'twitter.com/account/suspended' in urlh.geturl():
raise ExtractorError('Account suspended by Twitter.', expected=True)
if user_id is None:
mobj = re.match(self._VALID_URL, urlh.geturl())
user_id = mobj.group('user_id')
username = remove_end(self._og_search_title(webpage), ' on Twitter')
title = description = self._og_search_description(webpage).strip('').replace('\n', ' ').strip('“”')
# strip 'https -_t.co_BJYgOjSeGA' junk from filenames
title = re.sub(r'\s+(https?://[^ ]+)', '', title)
info = {
'uploader_id': user_id,
'uploader': username,
'webpage_url': url,
'description': '%s on Twitter: "%s"' % (username, description),
'title': username + ' - ' + title,
}
mobj = re.search(r'''(?x)
<video[^>]+class="animated-gif"(?P<more_info>[^>]+)>\s*
<source[^>]+video-src="(?P<url>[^"]+)"
''', webpage)
if mobj:
more_info = mobj.group('more_info')
height = int_or_none(self._search_regex(
r'data-height="(\d+)"', more_info, 'height', fatal=False))
width = int_or_none(self._search_regex(
r'data-width="(\d+)"', more_info, 'width', fatal=False))
thumbnail = self._search_regex(
r'poster="([^"]+)"', more_info, 'poster', fatal=False)
info.update({
'id': twid,
'url': mobj.group('url'),
'height': height,
'width': width,
'thumbnail': thumbnail,
})
return info
twitter_card_url = None
if 'class="PlayableMedia' in webpage:
twitter_card_url = '%s//twitter.com/i/videos/tweet/%s' % (self.http_scheme(), twid)
else:
twitter_card_iframe_url = self._search_regex(
r'data-full-card-iframe-url=([\'"])(?P<url>(?:(?!\1).)+)\1',
webpage, 'Twitter card iframe URL', default=None, group='url')
if twitter_card_iframe_url:
twitter_card_url = compat_urlparse.urljoin(url, twitter_card_iframe_url)
if twitter_card_url:
info.update({
'_type': 'url_transparent',
'ie_key': 'TwitterCard',
'url': twitter_card_url,
})
return info
raise ExtractorError('There\'s no video in this tweet.')
class TwitterAmplifyIE(TwitterBaseIE):
IE_NAME = 'twitter:amplify'
_VALID_URL = r'https?://amp\.twimg\.com/v/(?P<id>[0-9a-f\-]{36})'
_TEST = {
'url': 'https://amp.twimg.com/v/0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951',
'md5': '7df102d0b9fd7066b86f3159f8e81bf6',
'info_dict': {
'id': '0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951',
'ext': 'mp4',
'title': 'Twitter Video',
'thumbnail': 're:^https?://.*',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
vmap_url = self._html_search_meta(
'twitter:amplify:vmap', webpage, 'vmap url')
formats = self._extract_formats_from_vmap_url(vmap_url, video_id)
thumbnails = []
thumbnail = self._html_search_meta(
'twitter:image:src', webpage, 'thumbnail', fatal=False)
def _find_dimension(target):
w = int_or_none(self._html_search_meta(
'twitter:%s:width' % target, webpage, fatal=False))
h = int_or_none(self._html_search_meta(
'twitter:%s:height' % target, webpage, fatal=False))
return w, h
if thumbnail:
thumbnail_w, thumbnail_h = _find_dimension('image')
thumbnails.append({
'url': thumbnail,
'width': thumbnail_w,
'height': thumbnail_h,
})
video_w, video_h = _find_dimension('player')
formats[0].update({
'width': video_w,
'height': video_h,
})
return {
'id': video_id,
'title': 'Twitter Video',
'formats': formats,
'thumbnails': thumbnails,
}
| gpl-3.0 | 4,302,065,616,401,205,000 | 32.205725 | 207 | 0.635061 | false |
Lujeni/ansible | lib/ansible/modules/cloud/cloudstack/cs_network_acl_rule.py | 13 | 14343 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_network_acl_rule
short_description: Manages network access control list (ACL) rules on Apache CloudStack based clouds.
description:
- Add, update and remove network ACL rules.
version_added: '2.4'
author: René Moser (@resmo)
options:
network_acl:
description:
- Name of the network ACL.
type: str
required: true
aliases: [ acl ]
cidrs:
description:
- CIDRs of the rule.
type: list
default: [ 0.0.0.0/0 ]
aliases: [ cidr ]
rule_position:
description:
- The position of the network ACL rule.
type: int
required: true
aliases: [ number ]
protocol:
description:
- Protocol of the rule
choices: [ tcp, udp, icmp, all, by_number ]
type: str
default: tcp
protocol_number:
description:
- Protocol number from 1 to 256 required if I(protocol=by_number).
type: int
start_port:
description:
- Start port for this rule.
- Considered if I(protocol=tcp) or I(protocol=udp).
type: int
aliases: [ port ]
end_port:
description:
- End port for this rule.
- Considered if I(protocol=tcp) or I(protocol=udp).
- If not specified, equal I(start_port).
type: int
icmp_type:
description:
- Type of the icmp message being sent.
- Considered if I(protocol=icmp).
type: int
icmp_code:
description:
- Error code for this icmp message.
- Considered if I(protocol=icmp).
type: int
vpc:
description:
- VPC the network ACL is related to.
type: str
required: true
traffic_type:
description:
- Traffic type of the rule.
type: str
choices: [ ingress, egress ]
default: ingress
aliases: [ type ]
action_policy:
description:
- Action policy of the rule.
type: str
choices: [ allow, deny ]
default: allow
aliases: [ action ]
tags:
description:
- List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
- "If you want to delete all tags, set a empty list e.g. I(tags: [])."
type: list
aliases: [ tag ]
domain:
description:
- Domain the VPC is related to.
type: str
account:
description:
- Account the VPC is related to.
type: str
project:
description:
- Name of the project the VPC is related to.
type: str
zone:
description:
- Name of the zone the VPC related to.
- If not set, default zone is used.
type: str
state:
description:
- State of the network ACL rule.
type: str
default: present
choices: [ present, absent ]
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: create a network ACL rule, allow port 80 ingress
cs_network_acl_rule:
network_acl: web
rule_position: 1
vpc: my vpc
traffic_type: ingress
action_policy: allow
port: 80
cidr: 0.0.0.0/0
delegate_to: localhost
- name: create a network ACL rule, deny port range 8000-9000 ingress for 10.20.0.0/16 and 10.22.0.0/16
cs_network_acl_rule:
network_acl: web
rule_position: 1
vpc: my vpc
traffic_type: ingress
action_policy: deny
start_port: 8000
end_port: 9000
cidrs:
- 10.20.0.0/16
- 10.22.0.0/16
delegate_to: localhost
- name: remove a network ACL rule
cs_network_acl_rule:
network_acl: web
rule_position: 1
vpc: my vpc
state: absent
delegate_to: localhost
'''
RETURN = '''
---
network_acl:
description: Name of the network ACL.
returned: success
type: str
sample: customer acl
cidr:
description: CIDR of the network ACL rule.
returned: success
type: str
sample: 0.0.0.0/0
cidrs:
description: CIDRs of the network ACL rule.
returned: success
type: list
sample: [ 0.0.0.0/0 ]
version_added: '2.9'
rule_position:
description: Position of the network ACL rule.
returned: success
type: int
sample: 1
action_policy:
description: Action policy of the network ACL rule.
returned: success
type: str
sample: deny
traffic_type:
description: Traffic type of the network ACL rule.
returned: success
type: str
sample: ingress
protocol:
description: Protocol of the network ACL rule.
returned: success
type: str
sample: tcp
protocol_number:
description: Protocol number in case protocol is by number.
returned: success
type: int
sample: 8
start_port:
description: Start port of the network ACL rule.
returned: success
type: int
sample: 80
end_port:
description: End port of the network ACL rule.
returned: success
type: int
sample: 80
icmp_code:
description: ICMP code of the network ACL rule.
returned: success
type: int
sample: 8
icmp_type:
description: ICMP type of the network ACL rule.
returned: success
type: int
sample: 0
state:
description: State of the network ACL rule.
returned: success
type: str
sample: Active
vpc:
description: VPC of the network ACL.
returned: success
type: str
sample: customer vpc
tags:
description: List of resource tags associated with the network ACL rule.
returned: success
type: list
sample: '[ { "key": "foo", "value": "bar" } ]'
domain:
description: Domain the network ACL rule is related to.
returned: success
type: str
sample: example domain
account:
description: Account the network ACL rule is related to.
returned: success
type: str
sample: example account
project:
description: Name of project the network ACL rule is related to.
returned: success
type: str
sample: Production
zone:
description: Zone the VPC is related to.
returned: success
type: str
sample: ch-gva-2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackNetworkAclRule(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackNetworkAclRule, self).__init__(module)
self.returns = {
'cidrlist': 'cidr',
'action': 'action_policy',
'protocol': 'protocol',
'icmpcode': 'icmp_code',
'icmptype': 'icmp_type',
'number': 'rule_position',
'traffictype': 'traffic_type',
}
        # these values will be cast to int
self.returns_to_int = {
'startport': 'start_port',
'endport': 'end_port',
}
def get_network_acl_rule(self):
args = {
'aclid': self.get_network_acl(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
network_acl_rules = self.query_api('listNetworkACLs', **args)
for acl_rule in network_acl_rules.get('networkacl', []):
if acl_rule['number'] == self.module.params.get('rule_position'):
return acl_rule
return None
def present_network_acl_rule(self):
network_acl_rule = self.get_network_acl_rule()
protocol = self.module.params.get('protocol')
start_port = self.module.params.get('start_port')
end_port = self.get_or_fallback('end_port', 'start_port')
icmp_type = self.module.params.get('icmp_type')
icmp_code = self.module.params.get('icmp_code')
if protocol in ['tcp', 'udp'] and (start_port is None or end_port is None):
self.module.fail_json(msg="protocol is %s but the following are missing: start_port, end_port" % protocol)
elif protocol == 'icmp' and (icmp_type is None or icmp_code is None):
self.module.fail_json(msg="protocol is icmp but the following are missing: icmp_type, icmp_code")
elif protocol == 'by_number' and self.module.params.get('protocol_number') is None:
self.module.fail_json(msg="protocol is by_number but the following are missing: protocol_number")
if not network_acl_rule:
network_acl_rule = self._create_network_acl_rule(network_acl_rule)
else:
network_acl_rule = self._update_network_acl_rule(network_acl_rule)
if network_acl_rule:
network_acl_rule = self.ensure_tags(resource=network_acl_rule, resource_type='NetworkACL')
return network_acl_rule
def absent_network_acl_rule(self):
network_acl_rule = self.get_network_acl_rule()
if network_acl_rule:
self.result['changed'] = True
args = {
'id': network_acl_rule['id'],
}
if not self.module.check_mode:
res = self.query_api('deleteNetworkACL', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'networkacl')
return network_acl_rule
def _create_network_acl_rule(self, network_acl_rule):
self.result['changed'] = True
protocol = self.module.params.get('protocol')
args = {
'aclid': self.get_network_acl(key='id'),
'action': self.module.params.get('action_policy'),
'protocol': protocol if protocol != 'by_number' else self.module.params.get('protocol_number'),
'startport': self.module.params.get('start_port'),
'endport': self.get_or_fallback('end_port', 'start_port'),
'number': self.module.params.get('rule_position'),
'icmpcode': self.module.params.get('icmp_code'),
'icmptype': self.module.params.get('icmp_type'),
'traffictype': self.module.params.get('traffic_type'),
'cidrlist': self.module.params.get('cidrs'),
}
if not self.module.check_mode:
res = self.query_api('createNetworkACL', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
network_acl_rule = self.poll_job(res, 'networkacl')
return network_acl_rule
def _update_network_acl_rule(self, network_acl_rule):
protocol = self.module.params.get('protocol')
args = {
'id': network_acl_rule['id'],
'action': self.module.params.get('action_policy'),
'protocol': protocol if protocol != 'by_number' else str(self.module.params.get('protocol_number')),
'startport': self.module.params.get('start_port'),
'endport': self.get_or_fallback('end_port', 'start_port'),
'icmpcode': self.module.params.get('icmp_code'),
'icmptype': self.module.params.get('icmp_type'),
'traffictype': self.module.params.get('traffic_type'),
'cidrlist': ",".join(self.module.params.get('cidrs')),
}
if self.has_changed(args, network_acl_rule):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateNetworkACLItem', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
network_acl_rule = self.poll_job(res, 'networkacl')
return network_acl_rule
def get_result(self, network_acl_rule):
super(AnsibleCloudStackNetworkAclRule, self).get_result(network_acl_rule)
if network_acl_rule:
if 'cidrlist' in network_acl_rule:
self.result['cidrs'] = network_acl_rule['cidrlist'].split(',') or [network_acl_rule['cidrlist']]
if network_acl_rule['protocol'] not in ['tcp', 'udp', 'icmp', 'all']:
self.result['protocol_number'] = int(network_acl_rule['protocol'])
self.result['protocol'] = 'by_number'
self.result['action_policy'] = self.result['action_policy'].lower()
self.result['traffic_type'] = self.result['traffic_type'].lower()
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
network_acl=dict(required=True, aliases=['acl']),
rule_position=dict(required=True, type='int', aliases=['number']),
vpc=dict(required=True),
cidrs=dict(type='list', default=['0.0.0.0/0'], aliases=['cidr']),
protocol=dict(choices=['tcp', 'udp', 'icmp', 'all', 'by_number'], default='tcp'),
protocol_number=dict(type='int'),
traffic_type=dict(choices=['ingress', 'egress'], aliases=['type'], default='ingress'),
action_policy=dict(choices=['allow', 'deny'], aliases=['action'], default='allow'),
icmp_type=dict(type='int'),
icmp_code=dict(type='int'),
start_port=dict(type='int', aliases=['port']),
end_port=dict(type='int'),
state=dict(choices=['present', 'absent'], default='present'),
zone=dict(),
domain=dict(),
account=dict(),
project=dict(),
tags=dict(type='list', aliases=['tag']),
poll_async=dict(type='bool', default=True),
))
required_together = cs_required_together()
required_together.extend([
['icmp_type', 'icmp_code'],
])
module = AnsibleModule(
argument_spec=argument_spec,
        required_together=required_together,
mutually_exclusive=(
['icmp_type', 'start_port'],
['icmp_type', 'end_port'],
),
supports_check_mode=True
)
acs_network_acl_rule = AnsibleCloudStackNetworkAclRule(module)
state = module.params.get('state')
if state == 'absent':
network_acl_rule = acs_network_acl_rule.absent_network_acl_rule()
else:
network_acl_rule = acs_network_acl_rule.present_network_acl_rule()
result = acs_network_acl_rule.get_result(network_acl_rule)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,489,008,725,255,440,000 | 30.10846 | 118 | 0.616136 | false |
ApuliaSoftware/odoo | addons/account/wizard/account_move_line_unreconcile_select.py | 385 | 1864 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_move_line_unreconcile_select(osv.osv_memory):
_name = "account.move.line.unreconcile.select"
_description = "Unreconciliation"
_columns ={
'account_id': fields.many2one('account.account','Account',required=True),
}
def action_open_window(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
return {
'domain': "[('account_id','=',%d),('reconcile_id','<>',False),('state','<>','draft')]" % data['account_id'],
'name': 'Unreconciliation',
'view_type': 'form',
'view_mode': 'tree,form',
'view_id': False,
'res_model': 'account.move.line',
'type': 'ir.actions.act_window'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,393,558,162,073,278,000 | 42.348837 | 124 | 0.582082 | false |
Fusion-Rom/android_external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/inspector_console.py | 27 | 1896 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class InspectorConsole(object):
def __init__(self, inspector_backend):
self._inspector_backend = inspector_backend
self._inspector_backend.RegisterDomain(
'Console',
self._OnNotification,
self._OnClose)
self._message_output_stream = None
self._last_message = None
self._console_enabled = False
def _OnNotification(self, msg):
if msg['method'] == 'Console.messageAdded':
if msg['params']['message']['url'] == 'chrome://newtab/':
return
self._last_message = 'At %s:%i: %s' % (
msg['params']['message']['url'],
msg['params']['message']['line'],
msg['params']['message']['text'])
if self._message_output_stream:
self._message_output_stream.write(
'%s\n' % self._last_message)
elif msg['method'] == 'Console.messageRepeatCountUpdated':
if self._message_output_stream:
self._message_output_stream.write(
'%s\n' % self._last_message)
def _OnClose(self):
pass
# False positive in PyLint 0.25.1: http://www.logilab.org/89092
@property
def message_output_stream(self): # pylint: disable=E0202
return self._message_output_stream
@message_output_stream.setter
def message_output_stream(self, stream): # pylint: disable=E0202
self._message_output_stream = stream
self._UpdateConsoleEnabledState()
def _UpdateConsoleEnabledState(self):
enabled = self._message_output_stream != None
if enabled == self._console_enabled:
return
if enabled:
method_name = 'enable'
else:
method_name = 'disable'
self._inspector_backend.SyncRequest({
'method': 'Console.%s' % method_name
})
self._console_enabled = enabled
| bsd-3-clause | 5,397,747,633,040,719,000 | 31.135593 | 72 | 0.640823 | false |
vabs22/zulip | zerver/migrations/0053_emailchangestatus.py | 19 | 1109 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-23 05:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('zerver', '0052_auto_fix_realmalias_realm_nullable'),
]
operations = [
migrations.CreateModel(
name='EmailChangeStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_email', models.EmailField(max_length=254)),
('old_email', models.EmailField(max_length=254)),
('updated_at', models.DateTimeField(auto_now=True)),
('status', models.IntegerField(default=0)),
('realm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='zerver.Realm')),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| apache-2.0 | -2,320,656,404,644,722,700 | 37.241379 | 126 | 0.61587 | false |
junmin-zhu/chromium-rivertrail | chrome/test/pyautolib/chromeos/suid_actions.py | 70 | 5373 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to perform actions as a super-user on ChromeOS.
Needs to be run with superuser privileges, typically using the
suid_python binary.
Usage:
sudo python suid_actions.py --action=CleanFlimflamDirs
"""
import optparse
import os
import shutil
import subprocess
import sys
import time
sys.path.append('/usr/local') # to import autotest libs.
from autotest.cros import constants
from autotest.cros import cryptohome
TEMP_BACKCHANNEL_FILE = '/tmp/pyauto_network_backchannel_file'
class SuidAction(object):
"""Helper to perform some super-user actions on ChromeOS."""
def _ParseArgs(self):
parser = optparse.OptionParser()
parser.add_option(
'-a', '--action', help='Action to perform.')
self._options = parser.parse_args()[0]
if not self._options.action:
raise RuntimeError('No action specified.')
def Run(self):
self._ParseArgs()
assert os.geteuid() == 0, 'Needs superuser privileges.'
handler = getattr(self, self._options.action)
assert handler and callable(handler), \
'No handler for %s' % self._options.action
handler()
return 0
## Actions ##
def CleanFlimflamDirs(self):
"""Clean the contents of all connection manager (shill/flimflam) profiles.
"""
flimflam_dirs = ['/home/chronos/user/flimflam',
'/home/chronos/user/shill',
'/var/cache/flimflam',
'/var/cache/shill']
    # The stop/start flimflam command should stop/start shill respectively if
# enabled.
os.system('stop flimflam')
try:
for flimflam_dir in flimflam_dirs:
if not os.path.exists(flimflam_dir):
continue
for item in os.listdir(flimflam_dir):
path = os.path.join(flimflam_dir, item)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
finally:
os.system('start flimflam')
# TODO(stanleyw): crosbug.com/29421 This method should wait until
# flimflam/shill is fully initialized and accessible via DBus again.
# Otherwise, there is a race conditions and subsequent accesses to
# flimflam/shill may fail. Until this is fixed, waiting for the
# resolv.conf file to be created is better than nothing.
begin = time.time()
while not os.path.exists(constants.RESOLV_CONF_FILE):
if time.time() - begin > 10:
raise RuntimeError('Timeout while waiting for flimflam/shill start.')
time.sleep(.25)
def RemoveAllCryptohomeVaults(self):
"""Remove any existing cryptohome vaults."""
cryptohome.remove_all_vaults()
def _GetEthInterfaces(self):
"""Returns a list of the eth* interfaces detected by the device."""
# Assumes ethernet interfaces all have "eth" in the name.
import pyudev
return sorted([iface.sys_name for iface in
pyudev.Context().list_devices(subsystem='net')
if 'eth' in iface.sys_name])
def _Renameif(self, old_iface, new_iface, mac_address):
"""Renames the interface with mac_address from old_iface to new_iface.
Args:
old_iface: The name of the interface you want to change.
new_iface: The name of the interface you want to change to.
mac_address: The mac address of the interface being changed.
"""
subprocess.call(['stop', 'flimflam'])
subprocess.call(['ifconfig', old_iface, 'down'])
subprocess.call(['nameif', new_iface, mac_address])
subprocess.call(['ifconfig', new_iface, 'up'])
subprocess.call(['start', 'flimflam'])
# Check and make sure interfaces have been renamed
eth_ifaces = self._GetEthInterfaces()
if new_iface not in eth_ifaces:
raise RuntimeError('Interface %s was not renamed to %s' %
(old_iface, new_iface))
elif old_iface in eth_ifaces:
raise RuntimeError('Old iface %s is still present' % old_iface)
def SetupBackchannel(self):
"""Renames the connected ethernet interface to eth_test for offline mode
testing. Does nothing if no connected interface is found.
"""
    # Find the interface with ethernet connected, or return if none is found.
for iface in self._GetEthInterfaces():
with open('/sys/class/net/%s/operstate' % iface, 'r') as fp:
if 'up' in fp.read():
eth_iface = iface
break
else:
return
# Write backup file to be used by TeardownBackchannel to restore the
# interface names.
with open(TEMP_BACKCHANNEL_FILE, 'w') as fpw:
with open('/sys/class/net/%s/address' % eth_iface) as fp:
mac_address = fp.read().strip()
fpw.write('%s, %s' % (eth_iface, mac_address))
self._Renameif(eth_iface, 'eth_test', mac_address)
def TeardownBackchannel(self):
"""Restores the eth interface names if SetupBackchannel was called."""
if not os.path.isfile(TEMP_BACKCHANNEL_FILE):
return
with open(TEMP_BACKCHANNEL_FILE, 'r') as fp:
eth_iface, mac_address = fp.read().split(',')
self._Renameif('eth_test', eth_iface, mac_address)
os.remove(TEMP_BACKCHANNEL_FILE)
if __name__ == '__main__':
sys.exit(SuidAction().Run())
| bsd-3-clause | 9,137,824,891,599,362,000 | 34.117647 | 79 | 0.65643 | false |
liangxia/origin | vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate_test.py | 629 | 1362 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boilerplate
import unittest
import StringIO
import os
import sys
class TestBoilerplate(unittest.TestCase):
"""
Note: run this test from the hack/boilerplate directory.
$ python -m unittest boilerplate_test
"""
def test_boilerplate(self):
os.chdir("test/")
class Args(object):
def __init__(self):
self.filenames = []
self.rootdir = "."
self.boilerplate_dir = "../"
self.verbose = True
# capture stdout
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
boilerplate.args = Args()
ret = boilerplate.main()
output = sorted(sys.stdout.getvalue().split())
sys.stdout = old_stdout
self.assertEquals(
output, ['././fail.go', '././fail.py'])
| apache-2.0 | 8,347,876,149,462,143,000 | 25.192308 | 74 | 0.685022 | false |
pascalchevrel/bedrock | bedrock/pocketfeed/api.py | 4 | 1683 | import datetime
import re
import requests
from sentry_sdk import capture_exception
from django.conf import settings
from django.utils.timezone import make_aware, utc
def get_articles_data(count=8):
payload = {
'consumer_key': settings.POCKET_CONSUMER_KEY,
'access_token': settings.POCKET_ACCESS_TOKEN,
'count': count,
'detailType': 'complete',
}
try:
resp = requests.post(settings.POCKET_API_URL, json=payload, timeout=5)
resp.raise_for_status()
return resp.json()
except Exception:
capture_exception()
return None
def complete_articles_data(articles):
for _, article in articles:
# id from API should be moved to pocket_id to not conflict w/DB's id
article['pocket_id'] = article['id']
# convert time_shared from unix timestamp to datetime
article['time_shared'] = make_aware(datetime.datetime.fromtimestamp(int(article['time_shared'])), utc)
# remove data points we don't need
del article['comment']
del article['excerpt']
del article['id']
del article['quote']
check_article_image(article)
def check_article_image(article):
"""Determine if external image is available"""
# sanity check to make sure image provided by API actually exists and is https
if article['image_src'] and re.match(r'^https://', article['image_src'], flags=re.I):
try:
resp = requests.get(article['image_src'])
resp.raise_for_status()
except Exception:
capture_exception()
article['image_src'] = None
else:
article['image_src'] = None
| mpl-2.0 | -536,857,982,937,449,200 | 28.526316 | 110 | 0.633393 | false |
philanthropy-u/edx-platform | openedx/core/djangoapps/embargo/test_utils.py | 13 | 2864 | """Utilities for writing unit tests that involve course embargos. """
import contextlib
import mock
from django.core.cache import cache
from django.urls import reverse
import pygeoip
from .models import Country, CountryAccessRule, RestrictedCourse
@contextlib.contextmanager
def restrict_course(course_key, access_point="enrollment", disable_access_check=False):
"""Simulate that a course is restricted.
This does two things:
1) Configures country access rules so that the course is restricted.
2) Mocks the GeoIP call so the user appears to be coming
from a country that's blocked from the course.
This is useful for tests that need to verify
that restricted users won't be able to access
particular views.
Arguments:
course_key (CourseKey): The location of the course to block.
Keyword Arguments:
access_point (str): Either "courseware" or "enrollment"
Yields:
str: A URL to the page in the embargo app that explains
why the user was blocked.
Example Usage:
>>> with restrict_course(course_key) as redirect_url:
>>> # The client will appear to be coming from
>>> # an IP address that is blocked.
>>> resp = self.client.get(url)
>>> self.assertRedirects(resp, redirect_url)
"""
# Clear the cache to ensure that previous tests don't interfere
# with this test.
cache.clear()
with mock.patch.object(pygeoip.GeoIP, 'country_code_by_addr') as mock_ip:
# Remove all existing rules for the course
CountryAccessRule.objects.all().delete()
# Create the country object
# Ordinarily, we'd create models for every country,
# but that would slow down the test suite.
country, __ = Country.objects.get_or_create(country='IR')
# Create a model for the restricted course
restricted_course, __ = RestrictedCourse.objects.get_or_create(course_key=course_key)
restricted_course.enroll_msg_key = 'default'
restricted_course.access_msg_key = 'default'
restricted_course.disable_access_check = disable_access_check
restricted_course.save()
# Ensure that there is a blacklist rule for the country
CountryAccessRule.objects.get_or_create(
restricted_course=restricted_course,
country=country,
rule_type='blacklist'
)
# Simulate that the user is coming from the blacklisted country
mock_ip.return_value = 'IR'
# Yield the redirect url so the tests don't need to know
# the embargo messaging URL structure.
redirect_url = reverse(
'embargo:blocked_message',
kwargs={
'access_point': access_point,
'message_key': 'default'
}
)
yield redirect_url
| agpl-3.0 | 3,371,867,869,505,580,000 | 33.095238 | 93 | 0.656425 | false |
jswope00/GAI | lms/djangoapps/verify_student/migrations/0002_auto__add_field_softwaresecurephotoverification_window.py | 53 | 6710 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SoftwareSecurePhotoVerification.window'
db.add_column('verify_student_softwaresecurephotoverification', 'window',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['reverification.MidcourseReverificationWindow'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SoftwareSecurePhotoVerification.window'
db.delete_column('verify_student_softwaresecurephotoverification', 'window_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reverification.midcoursereverificationwindow': {
'Meta': {'object_name': 'MidcourseReverificationWindow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'<function uuid4 at 0x21d4398>'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'window': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reverification.MidcourseReverificationWindow']", 'null': 'True'})
}
}
complete_apps = ['verify_student'] | agpl-3.0 | 6,167,705,835,153,198,000 | 75.261364 | 194 | 0.572429 | false |
musically-ut/statsmodels | statsmodels/graphics/tests/test_regressionplots.py | 20 | 9978 | import numpy as np
import statsmodels.api as sm
from numpy.testing import dec
from statsmodels.graphics.regressionplots import (plot_fit, plot_ccpr,
plot_partregress, plot_regress_exog, abline_plot,
plot_partregress_grid, plot_ccpr_grid, add_lowess,
plot_added_variable, plot_partial_residuals,
plot_ceres_residuals)
from pandas import Series, DataFrame
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except:
have_matplotlib = False
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_regressionplots.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
plt.close(fig)
@dec.skipif(not have_matplotlib)
def teardown_module():
plt.close('all')
if pdf_output:
pdf.close()
class TestPlot(object):
def __init__(self):
self.setup() #temp: for testing without nose
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
res = sm.OLS(y, exog0).fit()
self.res = res
@dec.skipif(not have_matplotlib)
def test_plot_fit(self):
res = self.res
fig = plot_fit(res, 0, y_true=None)
x0 = res.model.exog[:, 0]
yf = res.fittedvalues
y = res.model.endog
px1, px2 = fig.axes[0].get_lines()[0].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(y, px2)
px1, px2 = fig.axes[0].get_lines()[1].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(yf, px2)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_plot_oth(self):
#just test that they run
res = self.res
plot_fit(res, 0, y_true=None)
plot_partregress_grid(res, exog_idx=[0,1])
plot_regress_exog(res, exog_idx=0)
plot_ccpr(res, exog_idx=0)
plot_ccpr_grid(res, exog_idx=[0])
fig = plot_ccpr_grid(res, exog_idx=[0,1])
for ax in fig.axes:
add_lowess(ax)
close_or_save(pdf, fig)
class TestPlotPandas(TestPlot):
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
exog0 = DataFrame(exog0, columns=["const", "var1", "var2"])
y = Series(y, name="outcome")
res = sm.OLS(y, exog0).fit()
self.res = res
data = DataFrame(exog0, columns=["const", "var1", "var2"])
data['y'] = y
self.data = data
class TestPlotFormula(TestPlotPandas):
@dec.skipif(not have_matplotlib)
def test_one_column_exog(self):
from statsmodels.formula.api import ols
res = ols("y~var1-1", data=self.data).fit()
plot_regress_exog(res, "var1")
res = ols("y~var1", data=self.data).fit()
plot_regress_exog(res, "var1")
class TestABLine(object):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
mod = sm.OLS(y,X).fit()
cls.X = X
cls.y = y
cls.mod = mod
@dec.skipif(not have_matplotlib)
def test_abline_model(self):
fig = abline_plot(model_results=self.mod)
ax = fig.axes[0]
ax.scatter(self.X[:,1], self.y)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_model_ax(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(model_results=self.mod, ax=ax)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_ab(self):
mod = self.mod
intercept, slope = mod.params
fig = abline_plot(intercept=intercept, slope=slope)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_ab_ax(self):
mod = self.mod
intercept, slope = mod.params
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(intercept=intercept, slope=slope, ax=ax)
close_or_save(pdf, fig)
class TestABLinePandas(TestABLine):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
cls.X = X
cls.y = y
X = DataFrame(X, columns=["const", "someX"])
y = Series(y, name="outcome")
mod = sm.OLS(y,X).fit()
cls.mod = mod
class TestAddedVariablePlot(object):
@dec.skipif(not have_matplotlib)
def test_added_variable_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
lin_pred = 4 + exog[:, 0] + 0.2*exog[:, 1]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 0, 1, 2:
for use_glm_weights in False, True:
for resid_type in "resid_deviance", "resid_response":
weight_str = ["Unweighted", "Weighted"][use_glm_weights]
# Run directly and called as a results method.
for j in 0,1:
if j == 0:
fig = plot_added_variable(results, focus_col,
use_glm_weights=use_glm_weights,
resid_type=resid_type)
ti = "Added variable plot"
else:
fig = results.plot_added_variable(focus_col,
use_glm_weights=use_glm_weights,
resid_type=resid_type)
ti = "Added variable plot (called as method)"
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.7])
effect_str = ["Linear effect, slope=1",
"Quadratic effect", "No effect"][focus_col]
ti += "\nPoisson regression\n"
ti += effect_str + "\n"
ti += weight_str + "\n"
ti += "Using '%s' residuals" % resid_type
ax.set_title(ti)
close_or_save(pdf, fig)
class TestPartialResidualPlot(object):
@dec.skipif(not have_matplotlib)
def test_partial_residual_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
lin_pred = 4 + exog[:, 1] + 0.2*exog[:, 2]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 1, 2:
for j in 0,1:
if j == 0:
fig = plot_partial_residuals(results, focus_col)
else:
fig = results.plot_partial_residuals(focus_col)
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.77])
effect_str = ["Intercept", "Linear effect, slope=1",
"Quadratic effect"][focus_col]
ti = "Partial residual plot"
if j == 1:
ti += " (called as method)"
ax.set_title(ti + "\nPoisson regression\n" +
effect_str)
close_or_save(pdf, fig)
class TestCERESPlot(object):
@dec.skipif(not have_matplotlib)
def test_ceres_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
lin_pred = 4 + exog[:, 1] + 0.2*exog[:, 2]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 1, 2:
for j in 0, 1:
if j == 0:
fig = plot_ceres_residuals(results, focus_col)
else:
fig = results.plot_ceres_residuals(focus_col)
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.77])
effect_str = ["Intercept", "Linear effect, slope=1",
"Quadratic effect"][focus_col]
ti = "CERES plot"
if j == 1:
ti += " (called as method)"
ax.set_title(ti + "\nPoisson regression\n" +
effect_str)
close_or_save(pdf, fig)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'], exit=False)
| bsd-3-clause | -1,023,910,245,204,352,100 | 32.149502 | 86 | 0.511826 | false |
HyperBaton/ansible | lib/ansible/modules/network/onyx/onyx_ospf.py | 21 | 8189 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_ospf
version_added: "2.5"
author: "Samer Deeb (@samerd)"
short_description: Manage OSPF protocol on Mellanox ONYX network devices
description:
- This module provides declarative management and configuration of OSPF
protocol on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.4000
options:
ospf:
description:
- "OSPF instance number 1-65535"
required: true
router_id:
description:
- OSPF router ID. Required if I(state=present).
interfaces:
description:
- List of interfaces and areas. Required if I(state=present).
suboptions:
name:
description:
- Interface name.
required: true
area:
description:
- OSPF area.
required: true
state:
description:
- OSPF state.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: add ospf router to interface
onyx_ospf:
ospf: 2
router_id: 192.168.8.2
interfaces:
      - name: Eth1/1
        area: 0.0.0.0
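# The following example is an added sketch, not part of the original module
# docs; it uses the documented state=absent option to remove the instance.
- name: remove ospf router 2
  onyx_ospf:
    ospf: 2
    state: absent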
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- router ospf 2
- router-id 192.168.8.2
- exit
- interface ethernet 1/1 ip ospf area 0.0.0.0
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
from ansible.module_utils.network.onyx.onyx import show_cmd
class OnyxOspfModule(BaseOnyxModule):
OSPF_IF_REGEX = re.compile(
r'^(Loopback\d+|Eth\d+\/\d+|Vlan\d+|Po\d+)\s+(\S+).*')
OSPF_ROUTER_REGEX = re.compile(r'^Routing Process (\d+).*ID\s+(\S+).*')
@classmethod
def _get_element_spec(cls):
interface_spec = dict(
name=dict(required=True),
area=dict(required=True),
)
element_spec = dict(
ospf=dict(type='int', required=True),
router_id=dict(),
interfaces=dict(type='list', elements='dict',
options=interface_spec),
state=dict(choices=['present', 'absent'], default='present'),
)
return element_spec
def init_module(self):
""" Ansible module initialization
"""
element_spec = self._get_element_spec()
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def validate_ospf(self, value):
if value and not 1 <= int(value) <= 65535:
self._module.fail_json(msg='ospf id must be between 1 and 65535')
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(
ospf=module_params['ospf'],
router_id=module_params['router_id'],
state=module_params['state'],
)
interfaces = module_params['interfaces'] or list()
req_interfaces = self._required_config['interfaces'] = dict()
for interface_data in interfaces:
req_interfaces[interface_data['name']] = interface_data['area']
self.validate_param_values(self._required_config)
def _update_ospf_data(self, ospf_data):
match = self.OSPF_ROUTER_REGEX.match(ospf_data)
if match:
ospf_id = int(match.group(1))
router_id = match.group(2)
self._current_config['ospf'] = ospf_id
self._current_config['router_id'] = router_id
def _update_ospf_interfaces(self, ospf_interfaces):
interfaces = self._current_config['interfaces'] = dict()
lines = ospf_interfaces.split('\n')
for line in lines:
line = line.strip()
match = self.OSPF_IF_REGEX.match(line)
if match:
name = match.group(1)
area = match.group(2)
for prefix in ("Vlan", "Loopback"):
if name.startswith(prefix):
name = name.replace(prefix, prefix + ' ')
interfaces[name] = area
def _get_ospf_config(self, ospf_id):
cmd = 'show ip ospf %s | include Process' % ospf_id
return show_cmd(self._module, cmd, json_fmt=False, fail_on_error=False)
def _get_ospf_interfaces_config(self, ospf_id):
cmd = 'show ip ospf interface %s brief' % ospf_id
return show_cmd(self._module, cmd, json_fmt=False, fail_on_error=False)
def load_current_config(self):
# called in base class in run function
ospf_id = self._required_config['ospf']
self._current_config = dict()
ospf_data = self._get_ospf_config(ospf_id)
if ospf_data:
self._update_ospf_data(ospf_data)
ospf_interfaces = self._get_ospf_interfaces_config(ospf_id)
if ospf_interfaces:
self._update_ospf_interfaces(ospf_interfaces)
def _generate_no_ospf_commands(self):
req_ospf_id = self._required_config['ospf']
curr_ospf_id = self._current_config.get('ospf')
if curr_ospf_id == req_ospf_id:
cmd = 'no router ospf %s' % req_ospf_id
self._commands.append(cmd)
def _get_interface_command_name(self, if_name):
if if_name.startswith('Eth'):
return if_name.replace("Eth", "ethernet ")
if if_name.startswith('Po'):
return if_name.replace("Po", "port-channel ")
if if_name.startswith('Vlan'):
return if_name.replace("Vlan", "vlan")
if if_name.startswith('Loopback'):
return if_name.replace("Loopback", "loopback")
self._module.fail_json(
msg='invalid interface name: %s' % if_name)
def _get_interface_area_cmd(self, if_name, area):
interface_prefix = self._get_interface_command_name(if_name)
if area:
area_cmd = 'ip ospf area %s' % area
else:
area_cmd = 'no ip ospf area'
cmd = 'interface %s %s' % (interface_prefix, area_cmd)
return cmd
def _generate_ospf_commands(self):
req_router_id = self._required_config['router_id']
req_ospf_id = self._required_config['ospf']
curr_router_id = self._current_config.get('router_id')
curr_ospf_id = self._current_config.get('ospf')
if curr_ospf_id != req_ospf_id or req_router_id != curr_router_id:
cmd = 'router ospf %s' % req_ospf_id
self._commands.append(cmd)
if req_router_id != curr_router_id:
if req_router_id:
cmd = 'router-id %s' % req_router_id
else:
cmd = 'no router-id'
self._commands.append(cmd)
self._commands.append('exit')
req_interfaces = self._required_config['interfaces']
curr_interfaces = self._current_config.get('interfaces', dict())
for if_name, area in iteritems(req_interfaces):
curr_area = curr_interfaces.get(if_name)
if curr_area != area:
cmd = self._get_interface_area_cmd(if_name, area)
self._commands.append(cmd)
for if_name in curr_interfaces:
if if_name not in req_interfaces:
cmd = self._get_interface_area_cmd(if_name, None)
self._commands.append(cmd)
def generate_commands(self):
req_state = self._required_config['state']
if req_state == 'absent':
return self._generate_no_ospf_commands()
return self._generate_ospf_commands()
def main():
""" main entry point for module execution
"""
OnyxOspfModule.main()
if __name__ == '__main__':
main()
| gpl-3.0 | 1,230,194,085,279,071,000 | 33.407563 | 92 | 0.587007 | false |
vladikoff/fxa-mochitest | tests/mozbase/mozdevice/sut_tests/dmunit.py | 5 | 1720 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import types
import unittest
import mozlog
from mozdevice import devicemanager
from mozdevice import devicemanagerSUT
ip = ''
port = 0
heartbeat_port = 0
log_level = mozlog.ERROR
class DeviceManagerTestCase(unittest.TestCase):
"""DeviceManager tests should subclass this.
"""
"""Set to False in your derived class if this test
should not be run on the Python agent.
"""
runs_on_test_device = True
def _setUp(self):
""" Override this if you want set-up code in your test."""
return
def setUp(self):
self.dm = devicemanagerSUT.DeviceManagerSUT(host=ip, port=port,
logLevel=log_level)
self.dmerror = devicemanager.DMError
self._setUp()
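# Example (an added sketch, not part of the original suite): a minimal test
# case subclassing DeviceManagerTestCase as described above. The local and
# remote file paths below are purely illustrative.
#
#   class DeviceManagerPushTestCase(DeviceManagerTestCase):
#       runs_on_test_device = False
#
#       def runTest(self):
#           self.dm.pushFile('test.txt', '/mnt/sdcard/tests/test.txt')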
class DeviceManagerTestLoader(unittest.TestLoader):
def __init__(self, isTestDevice=False):
self.isTestDevice = isTestDevice
def loadTestsFromModuleName(self, module_name):
"""Loads tests from modules unless the SUT is a test device and
the test case has runs_on_test_device set to False
"""
tests = []
module = __import__(module_name)
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, (type, types.ClassType)) and
issubclass(obj, unittest.TestCase)) and \
(not self.isTestDevice or obj.runs_on_test_device):
tests.append(self.loadTestsFromTestCase(obj))
return self.suiteClass(tests)
| mpl-2.0 | -657,658,439,314,412,700 | 30.272727 | 71 | 0.640116 | false |
glyph/E-Max | epywrap.py | 1 | 13586 | # Copyright (C) 2012
# See LICENSE.txt for details.
"""
Epytext (and general Python docstring) wrapper
==============================================
Utility for wrapping docstrings in Python; specifically, docstrings in U{Epytext
<http://epydoc.sourceforge.net/manual-epytext.html>} format, or those that are
close enough.
The wrapping herein generally adheres to all the conventions set forth by the
Twisted project U{http://twistedmatrix.com/}.
Currently (obviously) the only supported editor is U{Sublime Text 2
<http://www.sublimetext.com/>} but a sufficiently enterprising individual could
either use this file as a script (no dependencies!) by piping the contents of
the docstring to it, or call L{wrapPythonDocstring} and preserve point position.
"""
from __future__ import unicode_literals
import re
from uuid import uuid4
__all__ = [
"wrapPythonDocstring"
]
def isUnderline(expr):
return bool(re.match("[=]+$", expr) or re.match("[-]+$", expr))
def startslist(x):
return (x == '-' or (x.endswith(".") and x[:-1].isdigit()))
class RegularParagraph(object):
otherIndent = ""
def __init__(self, pointTracker, fixedIndent="", hangIndent="",
followIndent=""):
self.words = []
self.fixedIndent = fixedIndent
self.hangIndent = hangIndent
self.followIndent = followIndent
self.more = None
self.pointTracker = pointTracker
self._unwrappedLines = 0
self._headingType = None
self._headingPoints = []
def matchesTag(self, other):
return False
def __nonzero__(self):
return bool(self.words)
def all(self):
while self is not None:
#print self.__class__.__name__
if self:
yield self
self = self.more
def setIsHeading(self, headingType):
self._headingType = headingType
def isHeading(self):
return bool(self._headingType)
def add(self, line):
clean = self.pointTracker.peek(line)
stripped = clean.strip()
if stripped:
self._unwrappedLines += 1
active = self
firstword = list(self.pointTracker.filterWords(line.split()))[0]
if stripped.startswith("@"):
fp = FieldParagraph(pointTracker=self.pointTracker)
fp.words.extend(line.split())
active = self.more = fp
elif isUnderline(stripped) and self._unwrappedLines == 2:
# This paragraph is actually a section heading.
active.setIsHeading(stripped[0])
self._headingPoints = self.pointTracker.extractPoints(line)
# FIXME: should respect leading indentation.
active = self.nextRegular()
elif startslist(firstword):
# Aesthetically I prefer a 2-space indent here, but the
# convention in the codebase seems to be 4 spaces.
LIST_INDENT = 4
# FIXME: this also needs to respect leading indentation so it
# can properly represent nested lists.
hangIndent = self.pointTracker.lengthOf(firstword) + 1
fi = self.fixedIndent
if not (self.words and startslist(self.words[0])):
fi += (" " * LIST_INDENT)
fp = RegularParagraph(
pointTracker=self.pointTracker,
fixedIndent=fi,
hangIndent=" " * hangIndent,
followIndent=self.followIndent,
)
fp.words.extend(line.split())
active = self.more = fp
else:
self.words.extend(line.split())
if stripped.endswith("::"):
active.more = PreFormattedParagraph(
active,
indentBegins=len(clean) - len(clean.lstrip())
)
active = active.more
return active
else:
rawstrip = line.strip()
if rawstrip:
self.words.append(rawstrip)
if len(list(self.pointTracker.filterWords(self.words))):
return self.nextRegular()
return self
def wrap(self, output, indentation, width):
if not self.words:
return
thisLine = self.firstIndent(indentation)
first = True
prevWord = ''
for word in self.words:
if not self.pointTracker.isWord(word):
thisLine += word
continue
if ((prevWord.endswith(".") or prevWord.endswith("?") or
prevWord.endswith("!")) and not prevWord[:-1].isdigit()):
words = prevWord.split(".")[:-1]
if ( len(words) > 1 and
[self.pointTracker.lengthOf(x) for x in words] ==
[1] * len(words) ):
# acronym
spaces = 1
else:
spaces = 2
else:
spaces = 1
prevWord = word
if ( self.pointTracker.lengthOf(thisLine) +
self.pointTracker.lengthOf(word) + spaces <= width ):
if first:
first = not first
else:
thisLine += (" " * spaces)
thisLine += word
else:
output.write(self.pointTracker.scan(thisLine, output.tell()))
output.write("\n")
thisLine = self.restIndent(indentation) + word
output.write(self.pointTracker.scan(thisLine, output.tell()))
output.write("\n")
if self.isHeading():
indentText = self.firstIndent(indentation)
lineSize = self.pointTracker.lengthOf(thisLine) - len(indentText)
output.write(self.pointTracker.scan(
indentText + ''.join(self._headingPoints) +
(self._headingType * lineSize), output.tell()
))
output.write("\n")
def firstIndent(self, indentation):
return indentation + self.fixedIndent
def restIndent(self, indentation):
return (indentation + self.fixedIndent + self.hangIndent +
self.otherIndent)
def genRegular(self):
return RegularParagraph(pointTracker=self.pointTracker,
fixedIndent=self.nextIndent(),
followIndent=self.nextIndent())
def nextRegular(self):
self.more = self.genRegular()
return self.more
def nextIndent(self):
return self.followIndent
class FieldParagraph(RegularParagraph):
otherIndent = " "
def nextIndent(self):
return " "
def matchesTag(self, other):
if isinstance(other, FieldParagraph):
myWords = list(self.pointTracker.filterWords(self.words))
theirWords = list(self.pointTracker.filterWords(other.words))
if ( set([myWords[0], theirWords[0]]) ==
set(["@return:", "@rtype:"]) ):
# matching @return and @rtype fields.
return True
elif len(myWords) > 1 and len(theirWords) > 1:
# matching @param and @type fields.
return myWords[1] == theirWords[1]
return False
else:
return False
class PreFormattedParagraph(object):
def __init__(self, before, indentBegins):
self.lines = []
self.before = before
pointTracker = before.pointTracker
fixedIndent = (before.fixedIndent + before.hangIndent +
before.otherIndent)
self.indentBegins = indentBegins
self.fixedIndent = fixedIndent
self.more = None
self.pointTracker = pointTracker
def matchesTag(self, other):
return False
def add(self, line):
actualLine = self.pointTracker.peek(line)
if actualLine.strip():
if len(actualLine) - len(actualLine.lstrip()) <= self.indentBegins:
next = self.more = self.before.genRegular()
return next.add(line)
self.lines.append(line.rstrip())
else:
self.lines.append(line.strip())
return self
def fixIndentation(self):
while self.lines and not self.lines[0].strip():
self.lines.pop(0)
while self.lines and not self.lines[-1].strip():
self.lines.pop()
if not self.lines:
return
cleanLines = map(self.pointTracker.peek, self.lines)
commonLeadingIndent = min([len(x) - len(x.lstrip()) for x in cleanLines
if x.strip()])
newLines = []
for actualLine, line in zip(cleanLines, self.lines):
if actualLine != line and line[:commonLeadingIndent].strip():
# There's a marker, and it's in the leading whitespace.
# Explicitly reposition the marker at the beginning of the fixed
# indentation.
line = (self.pointTracker.marker +
actualLine[commonLeadingIndent:])
else:
line = line.rstrip()[commonLeadingIndent:]
newLines.append(line)
self.lines = newLines
def wrap(self, output, indentation, width):
# OK, now we know about all the lines we're going to know about.
self.fixIndentation()
for line in self.lines:
if self.pointTracker.peek(line):
output.write(indentation + " " + self.fixedIndent)
output.write(self.pointTracker.scan(line, output.tell()))
output.write("\n")
class PointTracker(object):
"""
Object for keeping track of where the insertion points are.
"""
def __init__(self, point):
self.point = point
self.marker = "{" + unicode(uuid4()) + "}"
self.outPoints = []
def annotate(self, text):
"""
Add point references to a block of text.
"""
return text[:self.point] + self.marker + text[self.point:]
def filterWords(self, words):
for word in words:
if self.isWord(word):
yield self.peek(word)
def isWord(self, text):
"""
Is the given word actually a word, or just an artifact of the
point-tracking process? If it's just the point marker by itself, then
no, it isn't, and don't insert additional whitespace after it.
"""
return not (text == self.marker)
def lengthOf(self, word):
"""
How long would this word be if it didn't have any point-markers in it?
"""
return len(self.peek(word))
def peek(self, word):
"""
What would this word look like if it didn't have any point-markers in
it?
"""
return word.replace(self.marker, "")
def extractPoints(self, text):
"""
Return a C{list} of all point markers contained in the text.
"""
if self.marker in text:
return [self.marker]
return []
def scan(self, text, offset):
"""
Scan some text for point markers, remember them, and remove them.
"""
idx = text.find(self.marker)
if idx == -1:
return text
self.outPoints.append(idx + offset)
return self.peek(text)
def wrapPythonDocstring(docstring, output, indentation=" ",
width=79, point=0):
"""
Wrap a given Python docstring.
@param docstring: the docstring itself (just the stuff between the quotes).
@type docstring: unicode
@param output: The unicode output file to write the wrapped docstring to.
@type output: L{file}-like (C{write} takes unicode.)
@param indentation: a string (consisting only of spaces) indicating the
amount of space to shift by. Don't adjust this. It's always 4 spaces.
PEP8 says so.
@type indentation: L{unicode}
@param width: The maximum number of characters allowed in a wrapped line.
@type width: L{int}
@param point: The location of the cursor in the text, as an offset from the
beginning of the docstring. If this function is being used from within
a graphical editor, this parameter can be used (in addition to the
return value of this function) to reposition the cursor at the relative
position which the user will expect.
@return: The new location of the cursor.
"""
# TODO: multiple points; usable, for example, for start and end of a
# currently active selection.
pt = PointTracker(point)
start = paragraph = RegularParagraph(pt)
docstring = pt.annotate(docstring)
for line in docstring.split("\n"):
paragraph = paragraph.add(line)
prevp = None
for paragraph in start.all():
if not paragraph.matchesTag(prevp):
output.write("\n")
prevp = paragraph
paragraph.wrap(output, indentation, width)
output.write(indentation)
return pt.outPoints[0]
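# The helper below is an added sketch, not part of the original module: it
# shows one way to call L{wrapPythonDocstring} programmatically rather than
# piping text through the script; the sample docstring text, indentation, and
# point value are illustrative.
def _exampleUsage():
    """
    Wrap a small docstring in memory and return the wrapped text along with
    the repositioned point.
    """
    from cStringIO import StringIO
    out = StringIO()
    newPoint = wrapPythonDocstring(u"Wrap this, please.", out,
                                   indentation=u"    ", point=0)
    return out.getvalue(), newPoint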
if __name__ == '__main__':
import sys
from cStringIO import StringIO
io = StringIO()
indata = sys.stdin.read()
firstline = [line for line in indata.split("\n") if line][0]
wrapPythonDocstring(indata, io,
indentation=" " * (len(firstline) - len(firstline.lstrip())))
sys.stdout.write(io.getvalue())
sys.stdout.flush()
| mit | 243,352,205,550,284,770 | 31.347619 | 85 | 0.568085 | false |
owlabs/incubator-airflow | tests/contrib/operators/test_dataflow_operator.py | 1 | 10028 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow.contrib.operators.dataflow_operator import \
DataFlowPythonOperator, DataFlowJavaOperator, \
DataflowTemplateOperator, GoogleCloudBucketHelper
from airflow.version import version
from tests.compat import mock
TASK_ID = 'test-dataflow-operator'
JOB_NAME = 'test-dataflow-pipeline'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output'
}
PY_FILE = 'gs://my-bucket/my-object.py'
JAR_FILE = 'example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar'}
}
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
EXPECTED_ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
POLL_SLEEP = 30
GCS_HOOK_STRING = 'airflow.contrib.operators.dataflow_operator.{}'
class DataFlowPythonOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataFlowPythonOperator(
task_id=TASK_ID,
py_file=PY_FILE,
job_name=JOB_NAME,
py_options=PY_OPTIONS,
dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataFlowPythonOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.py_file, PY_FILE)
self.assertEqual(self.dataflow.py_options, PY_OPTIONS)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_PYTHON)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_python_workflow.
"""
start_python_hook = dataflow_mock.return_value.start_python_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'staging_location': 'gs://test/staging',
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
gcs_download_hook.assert_called_once_with(PY_FILE)
start_python_hook.assert_called_once_with(JOB_NAME, expected_options, mock.ANY,
PY_OPTIONS)
self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
class DataFlowJavaOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataFlowJavaOperator(
task_id=TASK_ID,
jar=JAR_FILE,
job_name=JOB_NAME,
job_class=JOB_CLASS,
dataflow_default_options=DEFAULT_OPTIONS_JAVA,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_JAVA)
self.assertEqual(self.dataflow.job_class, JOB_CLASS)
self.assertEqual(self.dataflow.jar, JAR_FILE)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_java_workflow.
"""
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_download_hook.assert_called_once_with(JAR_FILE)
start_java_hook.assert_called_once_with(JOB_NAME, mock.ANY,
mock.ANY, JOB_CLASS)
class DataFlowTemplateOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowTemplateOperator(
task_id=TASK_ID,
template=TEMPLATE,
job_name=JOB_NAME,
parameters=PARAMETERS,
dataflow_default_options=DEFAULT_OPTIONS_TEMPLATE,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.job_name, JOB_NAME)
self.assertEqual(self.dataflow.template, TEMPLATE)
self.assertEqual(self.dataflow.parameters, PARAMETERS)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_TEMPLATE)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
def test_exec(self, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_template_workflow.
"""
start_template_hook = dataflow_mock.return_value.start_template_dataflow
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
start_template_hook.assert_called_once_with(JOB_NAME, expected_options,
PARAMETERS, TEMPLATE)
class GoogleCloudBucketHelperTest(unittest.TestCase):
@mock.patch(
'airflow.contrib.operators.dataflow_operator.GoogleCloudBucketHelper.__init__'
)
def test_invalid_object_path(self, mock_parent_init):
# This is just the path of a bucket hence invalid filename
file_name = 'gs://test-bucket'
mock_parent_init.return_value = None
gcs_bucket_helper = GoogleCloudBucketHelper()
gcs_bucket_helper._gcs_hook = mock.Mock()
with self.assertRaises(Exception) as context:
gcs_bucket_helper.google_cloud_to_local(file_name)
self.assertEqual(
'Invalid Google Cloud Storage (GCS) object path: {}'.format(file_name),
str(context.exception))
@mock.patch(
'airflow.contrib.operators.dataflow_operator.GoogleCloudBucketHelper.__init__'
)
def test_valid_object(self, mock_parent_init):
file_name = 'gs://test-bucket/path/to/obj.jar'
mock_parent_init.return_value = None
gcs_bucket_helper = GoogleCloudBucketHelper()
gcs_bucket_helper._gcs_hook = mock.Mock()
def _mock_download(bucket, object, filename=None):
text_file_contents = 'text file contents'
with open(filename, 'w') as text_file:
text_file.write(text_file_contents)
return text_file_contents
gcs_bucket_helper._gcs_hook.download.side_effect = _mock_download
local_file = gcs_bucket_helper.google_cloud_to_local(file_name)
self.assertIn('obj.jar', local_file)
@mock.patch(
'airflow.contrib.operators.dataflow_operator.GoogleCloudBucketHelper.__init__'
)
def test_empty_object(self, mock_parent_init):
file_name = 'gs://test-bucket/path/to/obj.jar'
mock_parent_init.return_value = None
gcs_bucket_helper = GoogleCloudBucketHelper()
gcs_bucket_helper._gcs_hook = mock.Mock()
def _mock_download(bucket, object, filename=None):
text_file_contents = ''
with open(filename, 'w') as text_file:
text_file.write(text_file_contents)
return text_file_contents
gcs_bucket_helper._gcs_hook.download.side_effect = _mock_download
with self.assertRaises(Exception) as context:
gcs_bucket_helper.google_cloud_to_local(file_name)
self.assertEqual(
'Failed to download Google Cloud Storage (GCS) object: {}'.format(file_name),
str(context.exception))
| apache-2.0 | -2,379,878,648,778,868,700 | 37.718147 | 89 | 0.64988 | false |
kasioumis/invenio | invenio/legacy/bibcirculation/utils.py | 13 | 30901 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibCirculation Utils: Auxiliary methods of BibCirculation """
__revision__ = "$Id$"
import datetime
import random
import re
import time
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.utils.url import make_invenio_opener
from invenio.legacy.search_engine import get_field_tags
from invenio.legacy.bibsched.bibtask import task_low_level_submission
from invenio.utils.text import encode_for_xml
from invenio.base.i18n import gettext_set_language
from invenio.config import CFG_SITE_URL, CFG_TMPDIR, CFG_SITE_LANG
import invenio.legacy.bibcirculation.db_layer as db
from invenio.legacy.bibcirculation.config import \
CFG_BIBCIRCULATION_WORKING_DAYS, \
CFG_BIBCIRCULATION_HOLIDAYS, \
CFG_CERN_SITE, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, \
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, \
CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING, \
CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING, \
CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED, \
CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED
DICC_REGEXP = re.compile("^\{('[^']*': ?('[^']*'|\"[^\"]+\"|[0-9]*|None)(, ?'[^']*': ?('[^']*'|\"[^\"]+\"|[0-9]*|None))*)?\}$")
BIBCIRCULATION_OPENER = make_invenio_opener('BibCirculation')
def search_user(column, string):
if string is not None:
string = string.strip()
if CFG_CERN_SITE == 1:
if column == 'name':
result = db.search_borrower_by_name(string)
else:
if column == 'email':
try:
result = db.search_borrower_by_email(string)
except:
result = ()
else:
try:
result = db.search_borrower_by_ccid(string)
except:
result = ()
if result == ():
from invenio.legacy.bibcirculation.cern_ldap \
import get_user_info_from_ldap
ldap_info = 'busy'
while ldap_info == 'busy':
time.sleep(1)
if column == 'id' or column == 'ccid':
ldap_info = get_user_info_from_ldap(ccid=string)
elif column == 'email':
ldap_info = get_user_info_from_ldap(email=string)
else:
ldap_info = get_user_info_from_ldap(nickname=string)
if len(ldap_info) == 0:
result = ()
else:
try:
name = ldap_info['displayName'][0]
except KeyError:
name = ""
try:
email = ldap_info['mail'][0]
except KeyError:
email = ""
try:
phone = ldap_info['telephoneNumber'][0]
except KeyError:
phone = ""
try:
address = ldap_info['physicalDeliveryOfficeName'][0]
except KeyError:
address = ""
try:
mailbox = ldap_info['postOfficeBox'][0]
except KeyError:
mailbox = ""
try:
ccid = ldap_info['employeeID'][0]
except KeyError:
ccid = ""
try:
db.new_borrower(ccid, name, email, phone,
address, mailbox, '')
except:
pass
result = db.search_borrower_by_ccid(int(ccid))
else:
if column == 'name':
result = db.search_borrower_by_name(string)
elif column == 'email':
result = db.search_borrower_by_email(string)
else:
result = db.search_borrower_by_id(string)
return result
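# Illustrative calls (added for clarity, not part of the original module); the
# search strings below are made up:
#
#     search_user('name', 'Ellis')
#     search_user('email', 'ellis@example.org')
#     search_user('ccid', '123456')   # on a CERN site this falls back to an
#                                     # LDAP lookup when the borrower is not
#                                     # yet in the local borrower table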
def update_user_info_from_ldap(user_id):
from invenio.legacy.bibcirculation.cern_ldap import get_user_info_from_ldap
ccid = db.get_borrower_ccid(user_id)
ldap_info = get_user_info_from_ldap(ccid=ccid)
if not ldap_info:
result = ()
else:
try:
name = ldap_info['displayName'][0]
except KeyError:
name = ""
try:
email = ldap_info['mail'][0]
except KeyError:
email = ""
try:
phone = ldap_info['telephoneNumber'][0]
except KeyError:
phone = ""
try:
address = ldap_info['physicalDeliveryOfficeName'][0]
except KeyError:
address = ""
try:
mailbox = ldap_info['postOfficeBox'][0]
except KeyError:
mailbox = ""
db.update_borrower(user_id, name, email, phone, address, mailbox)
result = db.search_borrower_by_ccid(int(ccid))
return result
def get_book_cover(isbn):
"""
Retrieve book cover using Amazon web services.
@param isbn: book's isbn
@type isbn: string
@return book cover
"""
from xml.dom import minidom
# connect to AWS
"""cover_xml = BIBCIRCULATION_OPENER.open('http://ecs.amazonaws.com/onca/xml' \
'?Service=AWSECommerceService&AWSAccessKeyId=' \
+ CFG_BIBCIRCULATION_AMAZON_ACCESS_KEY + \
'&Operation=ItemSearch&Condition=All&' \
'ResponseGroup=Images&SearchIndex=Books&' \
'Keywords=' + isbn)"""
cover_xml=""
# parse XML
try:
xml_img = minidom.parse(cover_xml)
retrieve_book_cover = xml_img.getElementsByTagName('MediumImage')
book_cover = retrieve_book_cover.item(0).firstChild.firstChild.data
except:
book_cover = "%s/img/book_cover_placeholder.gif" % (CFG_SITE_URL)
return book_cover
def book_information_from_MARC(recid):
"""
Retrieve book's information from MARC
@param recid: identify the record. Primary key of bibrec.
@type recid: int
@return tuple with title, year, author, isbn and editor.
"""
    # FIXME do the same as book_title_from_MARC
book_title = book_title_from_MARC(recid)
book_year = ''.join(get_fieldvalues(recid, "260__c"))
author_tags = ['100__a', '700__a', '721__a']
book_author = ''
for tag in author_tags:
l = get_fieldvalues(recid, tag)
for c in l:
book_author += c + '; '
book_author = book_author[:-2]
l = get_fieldvalues(recid, "020__a")
book_isbn = ''
for isbn in l:
book_isbn += isbn + ', '
book_isbn = book_isbn[:-2]
book_editor = ', '.join(get_fieldvalues(recid, "260__a") + \
get_fieldvalues(recid, "260__b"))
return (book_title, book_year, book_author, book_isbn, book_editor)
def book_title_from_MARC(recid):
"""
Retrieve book's title from MARC
@param recid: identify the record. Primary key of bibrec.
@type recid: int
@return book's title
"""
title_tags = get_field_tags('title')
book_title = ''
i = 0
while book_title == '' and i < len(title_tags):
l = get_fieldvalues(recid, title_tags[i])
for candidate in l:
book_title = book_title + candidate + ': '
i += 1
book_title = book_title[:-2]
return book_title
def update_status_if_expired(loan_id):
"""
Update the loan's status if status is 'expired'.
@param loan_id: identify the loan. Primary key of crcLOAN.
@type loan_id: int
"""
loan_status = db.get_loan_status(loan_id)
if loan_status == CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED:
db.update_loan_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, loan_id)
return
def get_next_day(date_string):
"""
Get the next day
@param date_string: date
@type date_string: string
    @return next day
"""
# add 1 day
more_1_day = datetime.timedelta(days=1)
# convert date_string to datetime format
tmp_date = time.strptime(date_string, '%Y-%m-%d')
# calculate the new date (next day)
next_day = datetime.datetime(tmp_date[0], tmp_date[1], tmp_date[2]) \
+ more_1_day
return next_day
def generate_new_due_date(days):
"""
Generate a new due date (today + X days = new due date).
@param days: number of days
@type days: string
@return new due date
"""
today = datetime.date.today()
more_X_days = datetime.timedelta(days=days)
tmp_date = today + more_X_days
week_day = tmp_date.strftime('%A')
due_date = tmp_date.strftime('%Y-%m-%d')
due_date_validated = False
while not due_date_validated:
if week_day in CFG_BIBCIRCULATION_WORKING_DAYS \
and due_date not in CFG_BIBCIRCULATION_HOLIDAYS:
due_date_validated = True
else:
next_day = get_next_day(due_date)
due_date = next_day.strftime('%Y-%m-%d')
week_day = next_day.strftime('%A')
return due_date
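# Illustrative sketch (added for clarity, not part of the original module) of
# how the two helpers above cooperate; the dates are hypothetical and the real
# output depends on "today", CFG_BIBCIRCULATION_WORKING_DAYS and
# CFG_BIBCIRCULATION_HOLIDAYS:
#
#     # Suppose today is Friday 2013-07-05 and Saturday/Sunday are not listed
#     # as working days:
#     generate_new_due_date(1)    # -> '2013-07-08' (the weekend is skipped)
#     get_next_day('2013-07-05')  # -> datetime.datetime(2013, 7, 6, 0, 0)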
def renew_loan_for_X_days(barcode):
"""
Renew a loan based on its loan period
@param barcode: identify the item. Primary key of crcITEM.
@type barcode: string
@return new due date
"""
loan_period = db.get_loan_period(barcode)
if loan_period == '4 weeks':
due_date = generate_new_due_date(30)
else:
due_date = generate_new_due_date(7)
return due_date
def make_copy_available(request_id):
"""
    Change the status of a copy to
    CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF when
    a hold request was cancelled.
@param request_id: identify the request: Primary key of crcLOANREQUEST
@type request_id: int
"""
barcode_requested = db.get_requested_barcode(request_id)
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, barcode_requested)
update_requests_statuses(barcode_requested)
def print_new_loan_information(req, ln=CFG_SITE_LANG):
"""
    Create a printable format with the information of the last
    loan that has been registered in the table crcLOAN.
"""
_ = gettext_set_language(ln)
# get the last loan from crcLOAN
(recid, borrower_id, due_date) = db.get_last_loan()
# get book's information
(book_title, book_year, book_author,
book_isbn, book_editor) = book_information_from_MARC(recid)
# get borrower's data/information (name, address, email)
(borrower_name, borrower_address,
borrower_mailbox, borrower_email) = db.get_borrower_data(borrower_id)
# Generate printable format
req.content_type = "text/html"
req.send_http_header()
out = """<table style='width:95%; margin:auto; max-width: 600px;'>"""
out += """
<tr>
<td><img src="%s/img/CERN_CDS_logo.png"></td>
</tr>
</table><br />""" % (CFG_SITE_URL)
out += """<table style='color: #79d; font-size: 82%; width:95%;
margin:auto; max-width: 400px;'>"""
out += """ <tr>
<td align="center">
<h2><strong>%s</strong></h2>
</td>
</tr>""" % (_("Loan information"))
out += """ <tr>
<td align="center"><strong>%s</strong></td>
</tr>""" % (_("This book has been sent to you:"))
out += """</table><br />"""
out += """<table style='color: #79d; font-size: 82%; width:95%;
margin:auto; max-width: 400px;'>"""
out += """ <tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
""" % (_("Title"), book_title,
_("Author"), book_author,
_("Editor"), book_editor,
_("ISBN"), book_isbn,
_("Year"), book_year)
out += """</table><br />"""
out += """<table style='color: #79d; font-size: 82%; width:95%;
margin:auto; max-width: 400px;'>"""
out += """ <tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td>
<td style='color: black;'>%s</td>
</tr>
""" % (_("Name"), borrower_name,
_("Mailbox"), borrower_mailbox,
_("Address"), borrower_address,
_("Email"), borrower_email)
out += """</table>
<br />"""
out += """<table style='color: #79d; font-size: 82%; width:95%;
margin:auto; max-width: 400px;'>"""
out += """ <tr>
<td align="center"><h2><strong>%s: %s</strong></h2></td>
</tr>""" % (_("Due date"), due_date)
out += """</table>"""
out += """<table style='color: #79d; font-size: 82%; width:95%;
margin:auto; max-width: 800px;'>
<tr>
<td>
<input type="button" onClick='window.print()'
value='Print' style='color: #fff;
background: #36c; font-weight: bold;'>
</td>
</tr>
</table>
"""
req.write("<html>")
req.write(out)
req.write("</html>")
return "\n"
def print_pending_hold_requests_information(req, ln):
"""
Create a printable format with all the information about all
pending hold requests.
"""
_ = gettext_set_language(ln)
requests = db.get_pdf_request_data(CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING)
req.content_type = "text/html"
req.send_http_header()
out = """<table style='width:100%; margin:auto; max-width: 1024px;'>"""
out += """
<tr>
<td><img src="%s/img/CERN_CDS_logo.png"></td>
</tr>
</table><br />""" % (CFG_SITE_URL)
out += """<table style='color: #79d; font-size: 82%;
width:95%; margin:auto; max-width: 1024px;'>"""
out += """ <tr>
<td align="center"><h2><strong>%s</strong></h2></td>
</tr>""" % (_("List of pending hold requests"))
out += """ <tr>
<td align="center"><strong>%s</strong></td>
</tr>""" % (time.ctime())
out += """</table><br/>"""
out += """<table style='color: #79d; font-size: 82%;
width:95%; margin:auto; max-width: 1024px;'>"""
out += """<tr>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
</tr>
""" % (_("Borrower"),
_("Item"),
_("Library"),
_("Location"),
_("From"),
_("To"),
_("Request date"))
for (recid, borrower_name, library_name, location,
date_from, date_to, request_date) in requests:
out += """<tr style='color: black;'>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
</tr>
""" % (borrower_name, book_title_from_MARC(recid),
library_name, location, date_from, date_to,
request_date)
out += """</table>
<br />
<br />
<table style='color: #79d; font-size: 82%;
width:95%; margin:auto; max-width: 1024px;'>
<tr>
<td>
<input type=button value='Back' onClick="history.go(-1)"
style='color: #fff; background: #36c;
font-weight: bold;'>
<input type="button" onClick='window.print()'
value='Print' style='color: #fff;
background: #36c; font-weight: bold;'>
</td>
</tr>
</table>"""
req.write("<html>")
req.write(out)
req.write("</html>")
return "\n"
def get_item_info_for_search_result(recid):
"""
Get the item's info from MARC in order to create a
search result with more details
@param recid: identify the record. Primary key of bibrec.
@type recid: int
    @return book's information (author, editor and number of copies)
"""
book_author = ' '.join(get_fieldvalues(recid, "100__a") + \
get_fieldvalues(recid, "100__u"))
book_editor = ' , '.join(get_fieldvalues(recid, "260__a") + \
get_fieldvalues(recid, "260__b") + \
get_fieldvalues(recid, "260__c"))
book_copies = ' '.join(get_fieldvalues(recid, "964__a"))
book_infos = (book_author, book_editor, book_copies)
return book_infos
def update_request_data(request_id):
"""
Update the status of a given request.
@param request_id: identify the request: Primary key of crcLOANREQUEST
@type request_id: int
"""
barcode = db.get_request_barcode(request_id)
is_on_loan = db.is_item_on_loan(barcode)
if is_on_loan is not None:
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, barcode)
else:
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, barcode)
update_requests_statuses(barcode)
return True
def compare_dates(date):
"""
Compare given date with today
@param date: given date
@type date: string
@return boolean
"""
if date < time.strftime("%Y-%m-%d"):
return False
else:
return True
def validate_date_format(date):
"""
Verify the date format
@param date: given date
@type date: string
@return boolean
"""
try:
if time.strptime(date, "%Y-%m-%d"):
if compare_dates(date):
return True
else:
return False
except ValueError:
return False
def create_ill_record(book_info):
"""
Create a new ILL record
@param book_info: book's information
@type book_info: tuple
@return MARC record
"""
(title, author, place, publisher, year, edition, isbn) = book_info
ill_record = """
<record>
<datafield tag="020" ind1=" " ind2=" ">
<subfield code="a">%(isbn)s</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">%(author)s</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">%(title)s</subfield>
</datafield>
<datafield tag="250" ind1=" " ind2=" ">
<subfield code="a">%(edition)s</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="a">%(place)s</subfield>
<subfield code="b">%(publisher)s</subfield>
<subfield code="c">%(year)s</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ILLBOOK</subfield>
</datafield>
</record>
""" % {'isbn': encode_for_xml(isbn),
'author': encode_for_xml(author),
'title': encode_for_xml(title),
'edition': encode_for_xml(edition),
'place': encode_for_xml(place),
'publisher': encode_for_xml(publisher),
'year': encode_for_xml(year)}
file_path = '%s/%s_%s.xml' % (CFG_TMPDIR, 'bibcirculation_ill_book',
time.strftime("%Y%m%d_%H%M%S"))
xml_file = open(file_path, 'w')
xml_file.write(ill_record)
xml_file.close()
# Pass XML file to BibUpload.
task_low_level_submission('bibupload', 'bibcirculation',
'-P', '5', '-i', file_path)
return ill_record
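# Illustrative call sketch (added for clarity, not part of the original
# module): create_ill_record() expects the book_info tuple in exactly the
# order unpacked above; the values below are made up:
#
#     create_ill_record(('An Example Title', 'A. Author', 'Geneva',
#                        'Example Press', '2008', '2nd ed.', '9780000000000'))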
def wash_recid_from_ILL_request(ill_request_id):
"""
    Get dictionary and wash recid values.
    @param ill_request_id: identify the ILL request. Primary key of crcILLREQUEST
@type ill_request_id: int
@return recid
"""
book_info = db.get_ill_book_info(ill_request_id)
if looks_like_dictionary(book_info):
book_info = eval(book_info)
else:
book_info = None
try:
recid = int(book_info['recid'])
except KeyError:
recid = None
return recid
def all_copies_are_missing(recid):
"""
Verify if all copies of an item are missing
@param recid: identify the record. Primary key of bibrec
@type recid: int
@return boolean
"""
copies_status = db.get_copies_status(recid)
number_of_missing = 0
    if copies_status is None:
return True
else:
for (status) in copies_status:
if status == 'missing':
number_of_missing += 1
if number_of_missing == len(copies_status):
return True
else:
return False
#def has_copies(recid):
# """
# Verify if a recid is item (has copies)
#
# @param recid: identify the record. Primary key of bibrec
# @type recid: int
#
# @return boolean
# """
#
# copies_status = db.get_copies_status(recid)
#
# if copies_status is None:
# return False
# else:
# if len(copies_status) == 0:
# return False
# else:
# return True
def generate_email_body(template, loan_id, ill=0):
"""
Generate the body of an email for loan recalls.
@param template: email template
@type template: string
@param loan_id: identify the loan. Primary key of crcLOAN.
@type loan_id: int
@return email(body)
"""
if ill:
# Inter library loan.
out = template
else:
recid = db.get_loan_recid(loan_id)
(book_title, book_year, book_author,
book_isbn, book_editor) = book_information_from_MARC(int(recid))
out = template % (book_title, book_year, book_author,
book_isbn, book_editor)
return out
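# Illustrative template sketch (added for clarity, not part of the original
# module): for the non-ILL branch, generate_email_body() fills five
# placeholders in the order title, year, author, isbn, editor, so a recall
# template (the name below is hypothetical) would look like:
#
#     RECALL_TEMPLATE = ("Please return '%s' (%s) by %s, ISBN %s, "
#                        "published by %s.")
#     body = generate_email_body(RECALL_TEMPLATE, loan_id)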
def create_item_details_url(recid, ln):
url = '/admin2/bibcirculation/get_item_details?ln=%s&recid=%s' % (ln,
str(recid))
return CFG_SITE_URL + url
def tag_all_requests_as_done(barcode, user_id):
recid = db.get_id_bibrec(barcode)
description = db.get_item_description(barcode)
list_of_barcodes = db.get_barcodes(recid, description)
for bc in list_of_barcodes:
db.tag_requests_as_done(user_id, bc)
def update_requests_statuses(barcode):
recid = db.get_id_bibrec(barcode)
description = db.get_item_description(barcode)
list_of_pending_requests = db.get_requests(recid, description,
CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING)
some_copy_available = False
copies_status = db.get_copies_status(recid, description)
if copies_status is not None:
for status in copies_status:
if status in (CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF,
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS):
some_copy_available = True
if len(list_of_pending_requests) == 1:
if not some_copy_available:
db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING,
list_of_pending_requests[0][0])
else:
return list_of_pending_requests[0][0]
elif len(list_of_pending_requests) == 0:
if some_copy_available:
list_of_waiting_requests = db.get_requests(recid, description,
CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING)
if len(list_of_waiting_requests) > 0:
db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING,
list_of_waiting_requests[0][0])
return list_of_waiting_requests[0][0]
elif len(list_of_pending_requests) > 1:
for request in list_of_pending_requests:
db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING,
request[0])
list_of_waiting_requests = db.get_requests(recid, description,
CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING)
if some_copy_available:
db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING,
list_of_waiting_requests[0][0])
return list_of_waiting_requests[0][0]
return None
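# Worked summary of the branches above (comments only, added for clarity):
#
#   * one PENDING request: if no copy is on shelf or in process it is
#     downgraded to WAITING, otherwise its request id is simply returned;
#   * no PENDING request but a copy available: the first WAITING request
#     returned by db.get_requests (if any) is promoted to PENDING and its id
#     is returned;
#   * several PENDING requests: all are reset to WAITING, then the first one
#     is promoted back to PENDING (and its id returned) if a copy is
#     available.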
def is_periodical(recid):
rec_type = get_fieldvalues(recid, "690C_a")
if len(rec_type) > 0:
for value in rec_type:
if value == 'PERI':
return True
return False
def has_date_format(date):
if type(date) is not str:
return False
date = date.strip()
    if len(date) != 10:
        return False
    elif date[4] != '-' or date[7] != '-':
        return False
else:
year = date[:4]
month = date[5:7]
day = date[8:]
return year.isdigit() and month.isdigit() and day.isdigit()
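# Illustrative behaviour sketch (added for clarity, not part of the original
# module):
#
#     has_date_format('2013-07-04')   # -> True
#     has_date_format('2013-7-4')     # -> False (length is not 10)
#     has_date_format('2013/07/04')   # -> False (separators are not '-')
#     has_date_format(20130704)       # -> False (not a string)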
def generate_tmp_barcode():
tmp_barcode = 'tmp-' + str(random.random())[-8:]
while(db.barcode_in_use(tmp_barcode)):
tmp_barcode = 'tmp-' + str(random.random())[-8:]
return tmp_barcode
def check_database():
from invenio.legacy.dbquery import run_sql
r1 = run_sql(""" SELECT it.barcode, it.status, ln.status
FROM crcITEM it, crcLOAN ln
WHERE ln.barcode=it.barcode
AND it.status=%s
AND ln.status!=%s
AND ln.status!=%s
AND ln.status!=%s
""", (CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN,
CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED,
CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED))
r2 = run_sql(""" SELECT it.barcode
FROM crcITEM it, crcLOAN ln
WHERE ln.barcode=it.barcode
AND it.status=%s
AND (ln.status=%s or ln.status=%s)
""", (CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF,
CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED))
r3 = run_sql(""" SELECT l1.barcode, l1.id,
DATE_FORMAT(l1.loaned_on,'%%Y-%%m-%%d %%H:%%i:%%s'),
DATE_FORMAT(l2.loaned_on,'%%Y-%%m-%%d %%H:%%i:%%s')
FROM crcLOAN l1,
crcLOAN l2
WHERE l1.id!=l2.id
AND l1.status!=%s
AND l1.status=l2.status
AND l1.barcode=l2.barcode
ORDER BY l1.loaned_on
""", (CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED, ))
r4 = run_sql(""" SELECT id, id_crcBORROWER, barcode,
due_date, number_of_renewals
FROM crcLOAN
WHERE status=%s
AND due_date>NOW()
""", (CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED, ))
return (len(r1), len(r2), len(r3), len(r4))
def looks_like_dictionary(candidate_string):
if re.match(DICC_REGEXP, candidate_string):
return True
else:
return False
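# Illustrative matches for DICC_REGEXP (added for clarity, not part of the
# original module):
#
#     looks_like_dictionary("{'recid': '123', 'title': 'Some Book'}")  # True
#     looks_like_dictionary("{}")                                      # True
#     looks_like_dictionary("just some text")                          # False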
| gpl-2.0 | -7,628,996,201,829,646,000 | 31.424974 | 127 | 0.507686 | false |
pchavanne/yatt | tests/test_ticker.py | 1 | 5056 | import datetime
from yatt import BASE_CURRENCY
from yatt.ticker import aapl, agg, amzn, goog, msft, spy, eurusd, eurgbp, eurchf
from yatt.ticker import Ticker, Stock, Index, Future, Fx
from yatt.ticker import Tickers
timestamp = datetime.datetime(2000, 0o1, 0o1)
def test_ticker():
ticker = Ticker(symbol='AAPL', currency='USD')
assert ticker.symbol == 'AAPL'
assert ticker.name == 'AAPL'
assert ticker.currency == 'USD'
assert ticker.slippage == 0
assert ticker.commission == 0
assert ticker.last_timestamp is None
assert ticker.last_value is None
ticker = Ticker(symbol='AAPL', currency='USD', name='apple', slippage=0.5, commission=1.5, last_timestamp=timestamp, last_value=130)
assert ticker.symbol == 'AAPL'
assert ticker.name == 'apple'
assert ticker.currency == 'USD'
assert ticker.slippage == 0.5
assert ticker.commission == 1.5
assert ticker.last_timestamp == timestamp
assert ticker.last_value == 130
assert str(ticker) == 'AAPL'
assert ticker.__repr__() == 'Ticker AAPL'
ticker2 = Ticker(symbol='AAPL', currency='USD', slippage=0.5, commission=1.5, last_timestamp=timestamp, last_value=150)
assert ticker == ticker2
ticker3 = Ticker(symbol='AAPL', currency='EUR')
assert ticker != ticker3
def test_stock():
assert issubclass(Stock, Ticker)
stock = Stock(symbol='TSLA', currency='USD')
assert stock.symbol == 'TSLA'
assert stock.currency == 'USD'
assert stock.repo == 0
assert stock.dividend is None
assert isinstance(aapl, Stock)
assert aapl.symbol == 'AAPL'
assert aapl.currency == 'USD'
assert isinstance(agg, Stock)
assert agg.symbol == 'AGG'
assert agg.currency == 'USD'
assert isinstance(amzn, Stock)
assert amzn.symbol == 'AMZN'
assert amzn.currency == 'USD'
assert isinstance(goog, Stock)
assert goog.symbol == 'GOOG'
assert goog.currency == 'USD'
assert isinstance(msft, Stock)
assert msft.symbol == 'MSFT'
assert msft.currency == 'USD'
def test_index():
assert issubclass(Index, Ticker)
index = Index(symbol='SX5E', currency='EUR')
assert index.symbol == 'SX5E'
assert index.currency == 'EUR'
assert isinstance(spy, Index)
assert spy.symbol == 'SPY'
assert spy.currency == 'USD'
def test_future():
assert issubclass(Future, Ticker)
future = Future(symbol='SX5E', currency='EUR', maturity=timestamp, multiplier=10)
assert future.symbol == 'SX5E'
assert future.currency == 'EUR'
assert future.maturity == timestamp
assert future.multiplier == 10
future2 = Future(symbol='SX5E', currency='EUR', maturity=timestamp, multiplier=10)
assert future2 == future
future.maturity = datetime.datetime(2001, 1, 1)
assert future2 != future
future2 = Future(symbol='SX5E', currency='EUR', maturity=timestamp, multiplier=20)
assert future2 != future
def test_fx():
assert issubclass(Fx, Ticker)
fx = Fx(symbol='USDJPY', currency='JPY')
assert fx.symbol == 'USDJPY'
assert fx.currency == 'JPY'
assert isinstance(eurusd, Fx)
assert eurusd.symbol == 'EURUSD'
assert eurusd.currency == 'USD'
assert isinstance(eurgbp, Fx)
assert eurgbp.symbol == 'EURGBP'
assert eurgbp.currency == 'GBP'
assert isinstance(eurchf, Fx)
assert eurchf.symbol == 'EURCHF'
assert eurchf.currency == 'CHF'
def test_tickers():
bnp = Stock(symbol='BNP', currency='EUR', last_timestamp=timestamp, last_value=60)
smi = Index(symbol='SMI', currency='CHF', last_timestamp=timestamp, last_value=9000)
aapl.last_timestamp = timestamp
aapl.last_value = 130
assert issubclass(Tickers, list)
tickers = Tickers()
assert tickers == []
assert tickers.base_currency == BASE_CURRENCY
assert tickers.fx == []
tickers.append(bnp)
assert tickers == [bnp]
assert tickers.fx == []
tickers.append(aapl)
assert tickers == [bnp, aapl]
assert tickers.fx == [eurusd]
assert tickers == [bnp, aapl]
tickers.append(smi)
assert tickers.fx == [eurusd, eurchf]
tickers = Tickers(tickers_list=aapl)
assert tickers == [aapl]
assert tickers.fx == [eurusd]
tickers = Tickers(tickers_list=[bnp, aapl, smi])
assert tickers == [bnp, aapl, smi]
assert tickers.fx == [eurusd, eurchf]
assert tickers.ticker_from_symbol('AAPL') == aapl
assert tickers.ticker_from_symbol('SMI') == smi
assert tickers.ticker_from_symbol('EURUSD') == eurusd
assert tickers.all_tickers == [bnp, aapl, smi, eurusd, eurchf]
assert not tickers.is_synchronized
eurusd2 = tickers.ticker_from_symbol('EURUSD')
eurusd2.last_timestamp = timestamp
eurusd2.last_value = 1.13
eurchf2 = tickers.ticker_from_symbol('EURCHF')
eurchf2.last_timestamp = timestamp
eurchf2.last_value = 1.09
assert tickers.is_synchronized
assert tickers.snapshot == {'AAPL': 130, 'BNP': 60, 'EURCHF': 1.09, 'EURUSD': 1.13, 'SMI': 9000,
'timestamp': datetime.datetime(2000, 1, 1, 0, 0)}
| mit | -4,341,416,197,623,288,300 | 35.374101 | 136 | 0.664755 | false |
nightpool/CORE-Scouting-Server | views/commit.py | 2 | 2391 | from werkzeug import exceptions as ex
import simplejson as json
import flask
import wtforms_me
import wtforms.fields
import model.commit
import config
"""api used for submitting commits"""
blueprint = flask.Blueprint("commits", __name__, url_prefix="/commit")
MatchForm = wtforms_me.model_form(model.commit.MatchCommit)
MatchForm.event = wtforms.fields.HiddenField(**MatchForm.event.kwargs)
@blueprint.route('/submit', methods=["GET","POST"])
def submit_commit():
form = MatchForm(flask.request.form)
if flask.request.method == "POST" and form.validate():
form.save()
flask.flash('Thanks for your submission! <a href="/commit/{}">Edit it —></a>'
.format(form.instance.key))
return flask.redirect("/commit/submit")
return flask.render_template('commit_submit.html', form=form, type="match")
@blueprint.route('/', methods=["GET","POST"])
def commit_search():
query = {"event":config.event, "match_type":'q'}
errors = []
if flask.request.method == "POST" and flask.request.form.get("query", None):
try:
query = json.loads(flask.request.form.get("query"))
except Exception, e:
print e
errors.append(e.message)
objects = []
try:
objects = list(model.commit.MatchCommit.objects(**query).order_by("-time"))
except Exception, e:
print e
errors.append(e.message)
return flask.render_template("commit_search.html", objects=objects,
query=json.dumps(query), errors=errors)
@blueprint.route('/<cid>', methods=["GET","POST"])
def get_commit(cid):
try:
c = model.commit.get_commit(cid)
e_key, match_type, match_num, team = model.commit.parse_cid(cid)
obj = {"event":e_key, "match_type":match_type, "match_num": match_num, "team": team}
print c
form = MatchForm(flask.request.form, instance=c) if c else MatchForm(flask.request.form, **obj)
except ValueError:
raise ex.BadRequest("Commit id %s malformatted." % cid)
if flask.request.method == "POST" and form.validate():
if c: print c; c.delete()
form = MatchForm(flask.request.form)
form.save()
flask.flash("Thanks for your submission!")
return flask.redirect("/commit/submit")
return flask.render_template("commit_submit.html", form=form, type="match", furl=flask.request.url)
| gpl-3.0 | 6,203,396,403,978,926,000 | 37.564516 | 103 | 0.651192 | false |
giavac/tadhack_paris_2015 | send_email.py | 1 | 4640 | #!/usr/bin/env python
import speech_recognition as sr
import sys
import smtplib
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import subprocess
import syslog
BASE_URL = 'https:// WEB PAGE IP ADDRESS HERE :8090'
#debug = True
debug = False
disable_transcription = False
#disable_transcription = True
send_from = ' FROM EMAIL ADDRESS HERE '
send_to = [' TO EMAIL ADDRESS HERE ']
syslog.syslog('Processing started')
def send_mail(send_from, send_to, subject, text, files=None, server="127.0.0.1"):
assert isinstance(send_to, list)
print("Subject: " + subject)
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg.attach(MIMEText(text, 'html'))
for f in files or []:
with open(f, "rb") as fil:
msg.attach(MIMEApplication(
fil.read(),
Content_Disposition='attachment; filename="%s"' % basename(f),
Name=basename(f)
))
smtp = smtplib.SMTP(server)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
#########################################
def transcript(WAV_FILE):
r = sr.Recognizer()
with sr.WavFile(WAV_FILE) as source:
audio = r.record(source) # read the entire WAV file
try:
return r.recognize_google(audio)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
return ""
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return ""
##############################################
video_file = sys.argv[1]
print("Processing video file: " + video_file)
#/tmp/kurento/alice_bob_1449919920.webm
tmp = video_file[13:-5]
pieces = tmp.split('_')
syslog.syslog("Pieces length: " + str(len(pieces)))
callee = pieces[0]
syslog.syslog("CallEE: " + callee)
caller = pieces[1]
syslog.syslog("CallER: " + caller)
timestamp = pieces[2]
syslog.syslog("TIMESTAMP: " + timestamp)
callmebackgsmno = ""
if (len(pieces) >= 4):
callmebackgsmno = pieces[3]
syslog.syslog("Callmeback GSM no: " + callmebackgsmno)
extracted_audio_file_base = callee + '_' + caller + '_' + timestamp
command_to_execute = 'ffmpeg -i ' + video_file + ' -vn -acodec copy ' + extracted_audio_file_base + '.ogg'
print("Preparing to run command: " + command_to_execute)
syslog.syslog("Preparing to run command: " + command_to_execute)
if not debug:
subprocess.call(command_to_execute, shell=True)
print("Extracted!")
syslog.syslog("Extracted!")
command_to_execute = 'ffmpeg -i ' + extracted_audio_file_base + '.ogg ' + extracted_audio_file_base + '.wav'
print("Preparing to run command: " + command_to_execute)
syslog.syslog("Preparing to run command: " + command_to_execute)
if not debug:
subprocess.call(command_to_execute, shell=True)
extracted_audio_file = extracted_audio_file_base + '.wav'
print("Extracted: " + extracted_audio_file)
files = [extracted_audio_file]
print("Transcribing..........")
syslog.syslog("Transcribing..........")
transcription = ""
if (not disable_transcription):
#transcription = transcript("man1_nb.wav")
transcription = transcript(extracted_audio_file)
syslog.syslog("Transcription: " + transcription)
video_file = callee + '_' + caller + '_' + timestamp
if (callmebackgsmno):
video_file += '_' + callmebackgsmno
video_file += '.webm'
video_url = BASE_URL + '/play.html?file_uri=' + video_file
transcription_sentence = ""
if (transcription):
transcription_sentence = "It says: <br>" + transcription + "<br>"
callmeback_sentence = ""
if (callmebackgsmno):
callmeback_url = BASE_URL + '/call.html?number=' + callmebackgsmno + '&name=' + caller
callmeback_sentence = "<a href='" + callmeback_url + "'>Please call me back at +" + callmebackgsmno + "</a><br>"
body = """ \
<html>
<head></head>
<body>
<p>Hi %s!<br>
Here is a <a href="%s">videomessage</a> from %s.<br>
%s <br>
%s <br>
Yours,<br>
The Cool Voicemail<br>
</p>
</body>
</html>
""" % (callee, video_url, caller, transcription_sentence, callmeback_sentence)
subject = 'Videomessage from ' + caller
print "sending email now... with body: " + body
syslog.syslog("sending email now... with body: " + body)
if not debug:
send_mail(send_from, send_to, subject, body, files)
print("DONE.")
| mit | 3,338,549,925,515,630,000 | 28.18239 | 116 | 0.649353 | false |
kuiwei/kuiwei | lms/djangoapps/instructor/tests/test_ecommerce.py | 12 | 15076 | """
Unit tests for Ecommerce feature flag in new instructor dashboard.
"""
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from course_modes.models import CourseMode
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegistrationCode
from mock import patch
from student.roles import CourseFinanceAdminRole
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class TestECommerceDashboardViews(ModuleStoreTestCase):
"""
Check for E-commerce view on the new instructor dashboard
"""
def setUp(self):
self.course = CourseFactory.create()
# Create instructor account
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
mode = CourseMode(
course_id=self.course.id.to_deprecated_string(), mode_slug='honor',
mode_display_name='honor', min_price=10, currency='usd'
)
mode.save()
# URL for instructor dash
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.e_commerce_link = '<a href="" data-section="e-commerce">E-Commerce</a>'
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
def test_pass_e_commerce_tab_in_instructor_dashboard(self):
"""
Test Pass E-commerce Tab is in the Instructor Dashboard
"""
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
def test_user_has_finance_admin_rights_in_e_commerce_tab(self):
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
# Total amount html should render in e-commerce page, total amount will be 0
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
self.assertTrue('<span>Total Amount: <span>$' + str(total_amount) + '</span></span>' in response.content)
self.assertTrue('Download All e-Commerce Purchase' in response.content)
# removing the course finance_admin role of login user
CourseFinanceAdminRole(self.course.id).remove_users(self.instructor)
# total amount should not be visible in e-commerce page if the user is not finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
self.assertFalse('Download All e-Commerce Purchase' in response.content)
self.assertFalse('<span>Total Amount: <span>$' + str(total_amount) + '</span></span>' in response.content)
def test_user_view_course_price(self):
"""
test to check if the user views the set price button and price in
the instructor dashboard
"""
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
# Total amount html should render in e-commerce page, total amount will be 0
course_honor_mode = CourseMode.mode_for_course(self.course.id, 'honor')
price = course_honor_mode.min_price
self.assertTrue('Course Price: <span>$' + str(price) + '</span>' in response.content)
self.assertFalse('+ Set Price</a></span>' in response.content)
# removing the course finance_admin role of login user
CourseFinanceAdminRole(self.course.id).remove_users(self.instructor)
# total amount should not be visible in e-commerce page if the user is not finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertFalse('+ Set Price</a></span>' in response.content)
def test_update_course_price_check(self):
price = 200
# course B
course2 = CourseFactory.create(org='EDX', display_name='test_course', number='100')
mode = CourseMode(
course_id=course2.id.to_deprecated_string(), mode_slug='honor',
mode_display_name='honor', min_price=30, currency='usd'
)
mode.save()
# course A update
CourseMode.objects.filter(course_id=self.course.id).update(min_price=price)
set_course_price_url = reverse('set_course_mode_price', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'course_price': price, 'currency': 'usd'}
response = self.client.post(set_course_price_url, data)
self.assertTrue('CourseMode price updated successfully' in response.content)
# Course A updated total amount should be visible in e-commerce page if the user is finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertTrue('Course Price: <span>$' + str(price) + '</span>' in response.content)
def test_user_admin_set_course_price(self):
"""
test to set the course price related functionality.
test al the scenarios for setting a new course price
"""
set_course_price_url = reverse('set_course_mode_price', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'course_price': '12%', 'currency': 'usd'}
# Value Error course price should be a numeric value
response = self.client.post(set_course_price_url, data)
self.assertTrue("Please Enter the numeric value for the course price" in response.content)
# validation check passes and course price is successfully added
data['course_price'] = 100
response = self.client.post(set_course_price_url, data)
self.assertTrue("CourseMode price updated successfully" in response.content)
course_honor_mode = CourseMode.objects.get(mode_slug='honor')
course_honor_mode.delete()
# Course Mode not exist with mode slug honor
response = self.client.post(set_course_price_url, data)
self.assertTrue("CourseMode with the mode slug({mode_slug}) DoesNotExist".format(mode_slug='honor') in response.content)
def test_add_coupon(self):
"""
Test Add Coupon Scenarios. Handle all the HttpResponses return by add_coupon view
"""
# URL for add_coupon
add_coupon_url = reverse('add_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'code': 'A2314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'ADSADASDSAD', 'created_by': self.instructor, 'discount': 5
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("coupon with the coupon code ({code}) added successfully".format(code=data['code']) in response.content)
data = {
'code': 'A2314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'asdsasda', 'created_by': self.instructor, 'discount': 99
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("coupon with the coupon code ({code}) already exist".format(code='A2314') in response.content)
response = self.client.post(self.url)
self.assertTrue('<td>ADSADASDSAD</td>' in response.content)
self.assertTrue('<td>A2314</td>' in response.content)
self.assertFalse('<td>111</td>' in response.content)
data = {
'code': 'A2345314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'asdsasda', 'created_by': self.instructor, 'discount': 199
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("Please Enter the Coupon Discount Value Less than or Equal to 100" in response.content)
data['discount'] = '25%'
response = self.client.post(add_coupon_url, data=data)
self.assertTrue('Please Enter the Integer Value for Coupon Discount' in response.content)
course_registration = CourseRegistrationCode(
code='Vs23Ws4j', course_id=self.course.id.to_deprecated_string(),
transaction_group_name='Test Group', created_by=self.instructor
)
course_registration.save()
data['code'] = 'Vs23Ws4j'
response = self.client.post(add_coupon_url, data)
self.assertTrue("The code ({code}) that you have tried to define is already in use as a registration code"
.format(code=data['code']) in response.content)
def test_delete_coupon(self):
"""
Test Delete Coupon Scenarios. Handle all the HttpResponses return by remove_coupon view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
response = self.client.post(self.url)
self.assertTrue('<td>AS452</td>' in response.content)
# URL for remove_coupon
delete_coupon_url = reverse('remove_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(delete_coupon_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) updated successfully'.format(coupon_id=coupon.id) in response.content)
coupon.is_active = False
coupon.save()
response = self.client.post(delete_coupon_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) is already inactive'.format(coupon_id=coupon.id) in response.content)
response = self.client.post(delete_coupon_url, {'id': 24454})
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=24454) in response.content)
response = self.client.post(delete_coupon_url, {'id': ''})
self.assertTrue('coupon id is None' in response.content)
def test_get_coupon_info(self):
"""
Test Edit Coupon Info Scenarios. Handle all the HttpResponses return by edit_coupon_info view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
# URL for edit_coupon_info
edit_url = reverse('get_coupon_info', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(edit_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) updated successfully'.format(coupon_id=coupon.id) in response.content)
response = self.client.post(edit_url, {'id': 444444})
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=444444) in response.content)
response = self.client.post(edit_url, {'id': ''})
self.assertTrue('coupon id not found"' in response.content)
coupon.is_active = False
coupon.save()
response = self.client.post(edit_url, {'id': coupon.id})
self.assertTrue("coupon with the coupon id ({coupon_id}) is already inactive".format(coupon_id=coupon.id) in response.content)
def test_update_coupon(self):
"""
Test Update Coupon Info Scenarios. Handle all the HttpResponses return by update_coupon view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
response = self.client.post(self.url)
self.assertTrue('<td>AS452</td>' in response.content)
data = {
'coupon_id': coupon.id, 'code': 'update_code', 'discount': '12',
'course_id': coupon.course_id.to_deprecated_string()
}
# URL for update_coupon
update_coupon_url = reverse('update_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon with the coupon id ({coupon_id}) updated Successfully'.format(coupon_id=coupon.id)in response.content)
response = self.client.post(self.url)
self.assertTrue('<td>update_code</td>' in response.content)
self.assertTrue('<td>12</td>' in response.content)
data['coupon_id'] = 1000 # Coupon Not Exist with this ID
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=1000) in response.content)
data['coupon_id'] = coupon.id
data['discount'] = 123
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('Please Enter the Coupon Discount Value Less than or Equal to 100' in response.content)
data['discount'] = '25%'
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('Please Enter the Integer Value for Coupon Discount' in response.content)
data['coupon_id'] = '' # Coupon id is not provided
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon id not found' in response.content)
coupon1 = Coupon(
code='11111', description='coupon', course_id=self.course.id.to_deprecated_string(),
percentage_discount=20, created_by=self.instructor
)
coupon1.save()
data = {'coupon_id': coupon.id, 'code': '11111', 'discount': '12'} # pylint: disable=E1101
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon with the coupon id ({coupon_id}) already exist'.format(coupon_id=coupon.id) in response.content) # pylint: disable=E1101
course_registration = CourseRegistrationCode(
code='Vs23Ws4j', course_id=self.course.id.to_deprecated_string(),
transaction_group_name='Test Group', created_by=self.instructor
)
course_registration.save()
data = {'coupon_id': coupon.id, 'code': 'Vs23Ws4j', # pylint: disable=E1101
'discount': '6', 'course_id': coupon.course_id.to_deprecated_string()} # pylint: disable=E1101
response = self.client.post(update_coupon_url, data=data)
self.assertTrue("The code ({code}) that you have tried to define is already in use as a registration code".
format(code=data['code']) in response.content)
| agpl-3.0 | -3,937,674,436,112,374,300 | 48.429508 | 153 | 0.658132 | false |
rail/treeherder | treeherder/autoclassify/management/commands/autoclassify.py | 2 | 2433 | import logging
from collections import defaultdict
from django.core.management.base import BaseCommand, CommandError
from treeherder.autoclassify import matchers
from treeherder.model.models import FailureLine, Matcher, FailureMatch
logger = logging.getLogger(__name__)
# The minimum goodness of match we need to mark a particular match as the best match
AUTOCLASSIFY_CUTOFF_RATIO = 0.8
# Initialisation needed to associate matcher functions with the matcher objects
matchers.register()
class Command(BaseCommand):
args = '<job_guid>, <repository>'
help = 'Mark failures on a job.'
def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError('2 arguments required, %s given' % len(args))
job_id, repository = args
match_errors(repository, job_id)
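# Example invocation sketch (added for clarity, not part of the original
# module); the job guid and repository name below are made up:
#
#     ./manage.py autoclassify 1234567890abcdef mozilla-inbound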
def match_errors(repository, job_guid):
unmatched_failures = FailureLine.objects.unmatched_for_job(repository, job_guid)
if not unmatched_failures:
return
all_matched = set()
for matcher in Matcher.objects.registered_matchers():
matches = matcher(unmatched_failures)
for match in matches:
match.failure_line.matches.add(
FailureMatch(score=match.score,
matcher=matcher.db_object,
classified_failure=match.classified_failure))
match.failure_line.save()
logger.info("Matched failure %i with intermittent %i" %
(match.failure_line.id, match.classified_failure.id))
all_matched.add(match.failure_line)
if all_lines_matched(unmatched_failures):
break
for failure_line in all_matched:
# TODO: store all matches
best_match = failure_line.best_match(AUTOCLASSIFY_CUTOFF_RATIO)
if best_match:
best_match.is_best = True
best_match.save()
def all_lines_matched(failure_lines):
failure_score_dict = defaultdict(list)
query = FailureMatch.objects.filter(
failure_line__in=failure_lines).only('failure_line_id', 'score')
for failure_match in query:
failure_score_dict[failure_match.failure_line_id].append(failure_match.score)
for failure_line in failure_lines:
scores = failure_score_dict[failure_line.id]
if not scores or not all(score >= 1 for score in scores):
return False
return True
| mpl-2.0 | 8,161,660,184,938,220,000 | 31.44 | 85 | 0.660501 | false |
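A minimal sketch of how the management command above could be invoked; the settings module, job GUID and repository name are placeholders rather than values taken from the treeherder project.

# Hypothetical driver for the autoclassify command defined above.
import os
import sys

from django.core.management import execute_from_command_line

if __name__ == "__main__":
    # placeholder settings module; use the project's real one
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "treeherder.settings")
    execute_from_command_line(
        [sys.argv[0], "autoclassify", "1234567890abcdef", "mozilla-central"])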
SummerLW/Perf-Insight-Report | telemetry/telemetry/internal/util/file_handle.py | 35 | 1976 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
_next_file_id = 0
class FileHandle(object):
def __init__(self, temp_file=None, absolute_path=None):
"""Constructs a FileHandle object.
This constructor should not be used by the user; rather it is preferred to
use the module-level GetAbsPath and FromTempFile functions.
Args:
temp_file: An instance of a temporary file object.
absolute_path: A path; should not be passed if tempfile is and vice-versa.
"""
# Exactly one of absolute_path or temp_file must be specified.
assert (absolute_path is None) != (temp_file is None)
self._temp_file = temp_file
self._absolute_path = absolute_path
global _next_file_id
self._id = _next_file_id
_next_file_id += 1
@property
def id(self):
return self._id
@property
def extension(self):
return os.path.splitext(self.GetAbsPath())[1]
def GetAbsPath(self):
"""Returns the path to the pointed-to file relative to the given start path.
Args:
start: A string representing a starting path.
Returns:
A string giving the relative path from path to this file.
"""
if self._temp_file:
self._temp_file.close()
return self._temp_file.name
else:
return self._absolute_path
def FromTempFile(temp_file):
"""Constructs a FileHandle pointing to a temporary file.
Returns:
A FileHandle referring to a named temporary file.
"""
return FileHandle(temp_file)
def FromFilePath(path):
"""Constructs a FileHandle from an absolute file path.
Args:
path: A string giving the absolute path to a file.
Returns:
A FileHandle referring to the file at the specified path.
"""
return FileHandle(None, os.path.abspath(path))
| bsd-3-clause | -6,093,322,646,984,613,000 | 26.068493 | 80 | 0.686235 | false |
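A short usage sketch for the FileHandle helpers above; the import name and the file path are assumptions made only for illustration.

# Illustrative only: exercises FromTempFile / FromFilePath from the module above.
import tempfile

import file_handle  # assumes the module above is importable under this name

with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as tf:
    temp_handle = file_handle.FromTempFile(tf)
path_handle = file_handle.FromFilePath('results/chart.png')  # placeholder path

print(temp_handle.extension)      # '.png'
print(path_handle.GetAbsPath())   # absolute path to results/chart.png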
amurzeau/streamlink-debian | src/streamlink/session.py | 2 | 19160 | import logging
import pkgutil
from collections import OrderedDict
from functools import lru_cache
from socket import AF_INET, AF_INET6
import requests
import requests.packages.urllib3.util.connection as urllib3_connection
from requests.packages.urllib3.util.connection import allowed_gai_family
from streamlink import __version__, plugins
from streamlink.compat import is_win32
from streamlink.exceptions import NoPluginError, PluginError
from streamlink.logger import StreamlinkLogger
from streamlink.options import Options
from streamlink.plugin import Plugin, api
from streamlink.utils import load_module, update_scheme
from streamlink.utils.l10n import Localization
# Ensure that the Logger class returned is Streamlink's for using the API (for backwards compatibility)
logging.setLoggerClass(StreamlinkLogger)
log = logging.getLogger(__name__)
class PythonDeprecatedWarning(UserWarning):
pass
class Streamlink:
"""A Streamlink session is used to keep track of plugins,
options and log settings."""
def __init__(self, options=None):
self.http = api.HTTPSession()
self.options = Options({
"interface": None,
"ipv4": False,
"ipv6": False,
"hds-live-edge": 10.0,
"hds-segment-attempts": 3,
"hds-segment-threads": 1,
"hds-segment-timeout": 10.0,
"hds-timeout": 60.0,
"hls-live-edge": 3,
"hls-segment-attempts": 3,
"hls-segment-ignore-names": [],
"hls-segment-threads": 1,
"hls-segment-timeout": 10.0,
"hls-segment-stream-data": False,
"hls-timeout": 60.0,
"hls-playlist-reload-attempts": 3,
"hls-playlist-reload-time": "default",
"hls-start-offset": 0,
"hls-duration": None,
"http-stream-timeout": 60.0,
"ringbuffer-size": 1024 * 1024 * 16, # 16 MB
"rtmp-timeout": 60.0,
"rtmp-rtmpdump": is_win32 and "rtmpdump.exe" or "rtmpdump",
"rtmp-proxy": None,
"stream-segment-attempts": 3,
"stream-segment-threads": 1,
"stream-segment-timeout": 10.0,
"stream-timeout": 60.0,
"subprocess-errorlog": False,
"subprocess-errorlog-path": None,
"ffmpeg-ffmpeg": None,
"ffmpeg-fout": None,
"ffmpeg-video-transcode": None,
"ffmpeg-audio-transcode": None,
"ffmpeg-copyts": False,
"ffmpeg-start-at-zero": False,
"mux-subtitles": False,
"locale": None,
"user-input-requester": None
})
if options:
self.options.update(options)
self.plugins = OrderedDict({})
self.load_builtin_plugins()
def set_option(self, key, value):
"""Sets general options used by plugins and streams originating
from this session object.
:param key: key of the option
:param value: value to set the option to
**Available options**:
======================== =========================================
interface (str) Set the network interface,
default: ``None``
ipv4 (bool) Resolve address names to IPv4 only.
This option overrides ipv6, default: ``False``
ipv6 (bool) Resolve address names to IPv6 only.
This option overrides ipv4, default: ``False``
hds-live-edge (float) Specify the time live HDS
streams will start from the edge of
stream, default: ``10.0``
hds-segment-attempts (int) How many attempts should be done
to download each HDS segment, default: ``3``
hds-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``
hds-segment-timeout (float) HDS segment connect and read
timeout, default: ``10.0``
hds-timeout (float) Timeout for reading data from
HDS streams, default: ``60.0``
hls-live-edge (int) How many segments from the end
to start live streams on, default: ``3``
hls-segment-attempts (int) How many attempts should be done
to download each HLS segment, default: ``3``
hls-segment-ignore-names (str[]) List of segment names without
file endings which should get filtered out,
default: ``[]``
hls-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``
hls-segment-stream-data (bool) Stream HLS segment downloads,
default: ``False``
hls-segment-timeout (float) HLS segment connect and read
timeout, default: ``10.0``
hls-timeout (float) Timeout for reading data from
HLS streams, default: ``60.0``
http-proxy (str) Specify a HTTP proxy to use for
all HTTP requests
https-proxy (str) Specify a HTTPS proxy to use for
all HTTPS requests
http-cookies (dict or str) A dict or a semi-colon (;)
delimited str of cookies to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-headers (dict or str) A dict or semi-colon (;)
delimited str of headers to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-query-params (dict or str) A dict or a ampersand (&)
delimited string of query parameters to
add to each HTTP request,
e.g. ``foo=bar&baz=qux``
http-trust-env (bool) Trust HTTP settings set in the
environment, such as environment
variables (HTTP_PROXY, etc) and
~/.netrc authentication
http-ssl-verify (bool) Verify SSL certificates,
default: ``True``
http-ssl-cert (str or tuple) SSL certificate to use,
can be either a .pem file (str) or a
.crt/.key pair (tuple)
http-timeout (float) General timeout used by all HTTP
requests except the ones covered by
other options, default: ``20.0``
http-stream-timeout (float) Timeout for reading data from
HTTP streams, default: ``60.0``
subprocess-errorlog (bool) Log errors from subprocesses to
a file located in the temp directory
subprocess-errorlog-path (str) Log errors from subprocesses to
a specific file
ringbuffer-size (int) The size of the internal ring
buffer used by most stream types,
default: ``16777216`` (16MB)
rtmp-proxy (str) Specify a proxy (SOCKS) that RTMP
streams will use
rtmp-rtmpdump (str) Specify the location of the
rtmpdump executable used by RTMP streams,
e.g. ``/usr/local/bin/rtmpdump``
rtmp-timeout (float) Timeout for reading data from
RTMP streams, default: ``60.0``
ffmpeg-ffmpeg (str) Specify the location of the
ffmpeg executable use by Muxing streams
e.g. ``/usr/local/bin/ffmpeg``
ffmpeg-verbose (bool) Log stderr from ffmpeg to the
console
ffmpeg-verbose-path (str) Specify the location of the
ffmpeg stderr log file
ffmpeg-fout (str) The output file format
when muxing with ffmpeg
e.g. ``matroska``
ffmpeg-video-transcode (str) The codec to use if transcoding
video when muxing with ffmpeg
e.g. ``h264``
ffmpeg-audio-transcode (str) The codec to use if transcoding
audio when muxing with ffmpeg
e.g. ``aac``
ffmpeg-copyts (bool) When used with ffmpeg, do not shift input timestamps.
ffmpeg-start-at-zero (bool) When used with ffmpeg and copyts,
shift input timestamps so they start at zero
default: ``False``
mux-subtitles (bool) Mux available subtitles into the
output stream.
stream-segment-attempts (int) How many attempts should be done
to download each segment, default: ``3``.
General option used by streams not
covered by other options.
stream-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``.
General option used by streams not
covered by other options.
stream-segment-timeout (float) Segment connect and read
timeout, default: ``10.0``.
General option used by streams not
covered by other options.
stream-timeout (float) Timeout for reading data from
stream, default: ``60.0``.
General option used by streams not
covered by other options.
locale (str) Locale setting, in the RFC 1766 format
eg. en_US or es_ES
default: ``system locale``.
user-input-requester (UserInputRequester) instance of UserInputRequester
to collect input from the user at runtime. Must be
set before the plugins are loaded.
default: ``UserInputRequester``.
======================== =========================================
"""
if key == "interface":
for scheme, adapter in self.http.adapters.items():
if scheme not in ("http://", "https://"):
continue
if not value:
adapter.poolmanager.connection_pool_kw.pop("source_address")
else:
adapter.poolmanager.connection_pool_kw.update(
# https://docs.python.org/3/library/socket.html#socket.create_connection
source_address=(value, 0)
)
self.options.set(key, None if not value else value)
elif key == "ipv4" or key == "ipv6":
self.options.set(key, value)
if value:
self.options.set("ipv6" if key == "ipv4" else "ipv4", False)
urllib3_connection.allowed_gai_family = \
(lambda: AF_INET) if key == "ipv4" else (lambda: AF_INET6)
else:
urllib3_connection.allowed_gai_family = allowed_gai_family
elif key == "http-proxy":
self.http.proxies["http"] = update_scheme("http://", value)
if "https" not in self.http.proxies:
self.http.proxies["https"] = update_scheme("http://", value)
elif key == "https-proxy":
self.http.proxies["https"] = update_scheme("https://", value)
elif key == "http-cookies":
if isinstance(value, dict):
self.http.cookies.update(value)
else:
self.http.parse_cookies(value)
elif key == "http-headers":
if isinstance(value, dict):
self.http.headers.update(value)
else:
self.http.parse_headers(value)
elif key == "http-query-params":
if isinstance(value, dict):
self.http.params.update(value)
else:
self.http.parse_query_params(value)
elif key == "http-trust-env":
self.http.trust_env = value
elif key == "http-ssl-verify":
self.http.verify = value
elif key == "http-disable-dh":
if value:
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':!DH'
try:
requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST = \
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS.encode("ascii")
except AttributeError:
# no ssl to disable the cipher on
pass
elif key == "http-ssl-cert":
self.http.cert = value
elif key == "http-timeout":
self.http.timeout = value
else:
self.options.set(key, value)
def get_option(self, key):
"""Returns current value of specified option.
:param key: key of the option
"""
if key == "http-proxy":
return self.http.proxies.get("http")
elif key == "https-proxy":
return self.http.proxies.get("https")
elif key == "http-cookies":
return self.http.cookies
elif key == "http-headers":
return self.http.headers
elif key == "http-query-params":
return self.http.params
elif key == "http-trust-env":
return self.http.trust_env
elif key == "http-ssl-verify":
return self.http.verify
elif key == "http-ssl-cert":
return self.http.cert
elif key == "http-timeout":
return self.http.timeout
else:
return self.options.get(key)
def set_plugin_option(self, plugin, key, value):
"""Sets plugin specific options used by plugins originating
from this session object.
:param plugin: name of the plugin
:param key: key of the option
:param value: value to set the option to
"""
if plugin in self.plugins:
plugin = self.plugins[plugin]
plugin.set_option(key, value)
def get_plugin_option(self, plugin, key):
"""Returns current value of plugin specific option.
:param plugin: name of the plugin
:param key: key of the option
"""
if plugin in self.plugins:
plugin = self.plugins[plugin]
return plugin.get_option(key)
@lru_cache(maxsize=128)
def resolve_url(self, url, follow_redirect=True):
"""Attempts to find a plugin that can use this URL.
The default protocol (http) will be prefixed to the URL if
not specified.
Raises :exc:`NoPluginError` on failure.
:param url: a URL to match against loaded plugins
:param follow_redirect: follow redirects
"""
url = update_scheme("http://", url)
available_plugins = []
for name, plugin in self.plugins.items():
if plugin.can_handle_url(url):
available_plugins.append(plugin)
available_plugins.sort(key=lambda x: x.priority(url), reverse=True)
if available_plugins:
return available_plugins[0](url)
if follow_redirect:
# Attempt to handle a redirect URL
try:
res = self.http.head(url, allow_redirects=True, acceptable_status=[501])
# Fall back to GET request if server doesn't handle HEAD.
if res.status_code == 501:
res = self.http.get(url, stream=True)
if res.url != url:
return self.resolve_url(res.url, follow_redirect=follow_redirect)
except PluginError:
pass
raise NoPluginError
def resolve_url_no_redirect(self, url):
"""Attempts to find a plugin that can use this URL.
The default protocol (http) will be prefixed to the URL if
not specified.
Raises :exc:`NoPluginError` on failure.
:param url: a URL to match against loaded plugins
"""
return self.resolve_url(url, follow_redirect=False)
def streams(self, url, **params):
"""Attempts to find a plugin and extract streams from the *url*.
*params* are passed to :func:`Plugin.streams`.
Raises :exc:`NoPluginError` if no plugin is found.
"""
plugin = self.resolve_url(url)
return plugin.streams(**params)
def get_plugins(self):
"""Returns the loaded plugins for the session."""
return self.plugins
def load_builtin_plugins(self):
self.load_plugins(plugins.__path__[0])
def load_plugins(self, path: str) -> bool:
"""Attempt to load plugins from the path specified.
:param path: full path to a directory where to look for plugins
:return: success
"""
success = False
user_input_requester = self.get_option("user-input-requester")
for loader, name, ispkg in pkgutil.iter_modules([path]):
# set the full plugin module name
module_name = f"streamlink.plugins.{name}"
try:
mod = load_module(module_name, path)
except ImportError:
log.exception(f"Failed to load plugin {name} from {path}\n")
continue
if not hasattr(mod, "__plugin__") or not issubclass(mod.__plugin__, Plugin):
continue
success = True
plugin = mod.__plugin__
plugin.bind(self, name, user_input_requester)
if plugin.module in self.plugins:
log.debug(f"Plugin {plugin.module} is being overridden by {mod.__file__}")
self.plugins[plugin.module] = plugin
return success
@property
def version(self):
return __version__
@property
def localization(self):
return Localization(self.get_option("locale"))
__all__ = ["Streamlink"]
| bsd-2-clause | 1,286,060,438,278,671,000 | 38.586777 | 104 | 0.516388 | false |
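A minimal usage sketch for the session class above; the stream URL is a placeholder, and a "best" entry is only present when a plugin matches the URL and returns streams.

# Illustrative only: open the best-quality stream for a URL via the session API.
from streamlink import Streamlink

session = Streamlink()
session.set_option("hls-live-edge", 2)
session.set_option("http-headers", {"User-Agent": "my-player/1.0"})

streams = session.streams("https://example.com/live/channel")  # placeholder URL
if "best" in streams:
    fd = streams["best"].open()
    chunk = fd.read(8192)
    fd.close()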
lvmgeo/GISPython | GISPython/PublisherHealper.py | 1 | 20261 | # -*- coding: utf-8 -*-
"""
Deployment publishing operations module
"""
import codecs
import os
import shutil
import hashlib
import datetime
import ZipHelper
import xmlParamsHealper
import JsonParamsHelper
class PublisherHealperConfig:
"""Class for setting up publisher Healper"""
moduleName = "" # name of the module to be processing
destinationDir = "" # folder to deploy to
sourceDir = "" # folder from with to deploy
doBackup = False # does publisher need to make a backup
bacupType = 'Folder' # "Folder" to backup whole destinationDir, "Files" to backup only owerwritten files
backupFolder = "" # folder in witch the backup will be stored
includeFolders = [] # folders to publish
# includeFolders = [ # SAMPLE
# {
# "folder": "testFolder", # Folder to include. Do not provide this for source root folder
# "recursive": True, # Process folder recursively? Default is False
# "includeExtensions": ["py"], # extensions to be included. Do not provide this for all files in Folder
    #         "excludeExtensions": ["pyc"], # extensions to be excluded. Do not provide this if not needed
    #         "clearExtensions": ["pyc"], # extensions to be deleted from destination. Do not provide this if not needed
    #         "includeFiles": ["somespecificfile.py"], # files to be specifically included. Do not provide this if not needed
    #         "excludeFiles": ["somespecificfile.py"], # files to be specifically excluded. Do not provide this if not needed
    #         "clearFiles": ["somespecificfile.py"], # files to be specifically deleted from destination. Do not provide this if not needed
# "renameFiles": {"somefilenamefromtorename.py": "somewithdifferentname.py"}
# }
# ]
configFilesJson = [] # config files of type Json to be processed
configFilesXML = [] # config files of type XML to be processed
# configFilesXML = [ # SAMPLE
# {
# "file": "Web.config", # relative path in destination
# "changes": [ # List of changes to be made
# {
# "xpath": '/Test/Level1/Level2/Level3', # xpath to tag to be changed (first found will be processed)
# "atribute": "someatribute", # Atribute to be updated. Do not provide this if tag text is to be updated
# "value": "value to be writen" # value to be writen
# }
# ]
# }
# ]
replacementMap = {}
# replacementMap = { # SAMPLE
# 'test.json': {
# '[find sting to replace]': 'replacement value'
# }
# }
class PublisherHealper(object):
"""Class for easing the Rar file operations"""
def __init__(self):
"""Class initialization procedure
Args:
self: The reserved object 'self'
"""
self.backup_zip_file = ''
def Deply(self, config):
"""Does the dployment
Args:
self: The reserved object 'self'
config ([PublisherHealperConfig]): Configuration of deplyment
"""
print u'... start publish for {}'.format(config.moduleName)
self.backup_zip_file = "{}_{}.zip".format(config.moduleName, _now_for_file())
destination_dir = config.destinationDir
if not os.path.exists(destination_dir):
raise AttributeError(u'destination folder {} not found'.format(destination_dir))
self.__create_backup(config)
for folder in config.includeFolders:
self.__do_deploy(folder, config)
self.__do_process_xml(config)
self.__do_process_json(config)
self.__do_string_repalce(config)
def __create_backup(self, config):
"""Does the backup creation
Args:
self: The reserved object 'self'
            config ([PublisherHealperConfig]): Configuration of deployment
"""
if hasattr(config, "doBackup") and config.doBackup:
if config.bacupType.upper() == 'FOLDER':
backup_dir = config.backupFolder
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
print u'... created backup folder {}'.format(backup_dir)
backup_file_name = os.path.join(backup_dir, self.backup_zip_file)
ZipHelper.ZipHelper().CompressDir(config.destinationDir, backup_file_name)
print u'... backup created!'
def __create_backup_one_file(self, file_path, config):
"""Does the backup creation for one file
Args:
self: The reserved object 'self'
            config ([PublisherHealperConfig]): Configuration of deployment
"""
if hasattr(config, "doBackup") and config.doBackup:
if config.bacupType.upper() == 'FILES':
backup_dir = config.backupFolder
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
print u'... created backup folder {}'.format(backup_dir)
backup_file_name = os.path.join(backup_dir, self.backup_zip_file)
ZipHelper.ZipHelper().CompressFileList(
filePathList=[file_path],
zipFileName=backup_file_name,
base_dir=config.destinationDir,
append=os.path.exists(backup_file_name))
print u'... file {} backup created!'.format(file_path)
def __do_deploy(self, folder, config):
"""Does the backup creation
Args:
self: The reserved object 'self'
folder ([string]): relative path to folder to be processed
config ([PublisherHealperConfig]): Configuration of deplyment
"""
self.__clear(folder, config)
files_to_copy = self.__files_to_copy(folder, config)
self.__do_copy_files_to_dest(folder, files_to_copy, config)
def __clear(self, folder, config):
"""Clears unnececery files
Args:
self: The reserved object 'self'
folder ([string]): relative path to folder to be processed
config ([PublisherHealperConfig]): Configuration of deplyment
"""
if folder.has_key("clearExtensions") or folder.has_key("clearFiles"):
clear_extensions = folder[u'clearExtensions'] if folder.has_key("clearExtensions") else []
clear_files = folder[u'clearFiles'] if folder.has_key("clearFiles") else []
recursive = folder[u'recursive'] if folder.has_key("recursive") else False
source_dir = os.path.join(config.sourceDir, folder["folder"]) if folder.has_key("folder") else config.sourceDir
destination_dir = os.path.join(config.destinationDir, folder["folder"]) if folder.has_key("folder") else config.destinationDir
if not recursive:
include_folders = []
else:
include_folders = _find_all_folders(destination_dir)
include_folders.append(destination_dir)
files_to_delete = []
for infolder in include_folders:
for ext in clear_extensions:
destination_folder = infolder.replace(source_dir, destination_dir)
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
print u'... output folder created {}'.format(destination_folder)
found_files = _find_file(os.path.join(destination_dir, infolder), ext)
if found_files:
files_to_delete = files_to_delete + found_files
for file_to_clear in clear_files:
file_name = os.path.join(destination_dir, infolder, file_to_clear)
if os.path.exists(file_name):
files_to_delete.append(file_name)
for file_to_delate in files_to_delete:
os.remove(file_to_delate)
print u'... file deleted {}'.format(file_to_delate)
def __files_to_copy(self, folder, config):
"""Finds files to be copyed
Args:
self: The reserved object 'self'
folder ([string]): relative path to folder to be processed
config ([PublisherHealperConfig]): Configuration of deplyment
"""
recursive = folder[u'recursive'] if folder.has_key("recursive") else False
source_dir = os.path.join(config.sourceDir, folder["folder"]) if folder.has_key("folder") else config.sourceDir
destination_dir = os.path.join(config.destinationDir, folder["folder"]) if folder.has_key("folder") else config.destinationDir
if not recursive:
include_folders = []
else:
include_folders = _find_all_folders(source_dir)
include_folders.append(source_dir)
files_to_copy = []
if folder.has_key("includeExtensions") or folder.has_key("includeFiles"):
files_to_copy = self.__find_files_to_include(folder, include_folders, source_dir, destination_dir)
else:
files_to_copy = self.__find_all_files_to_include(folder, include_folders, source_dir, destination_dir)
if folder.has_key("excludeExtensions") or folder.has_key("excludeFiles"):
files_to_copy = self.__exclude_files(folder, files_to_copy)
return files_to_copy
def __find_files_to_include(self, folder, include_folders, source_dir, destination_dir):
files_to_copy = []
include_extensions = folder[u'includeExtensions'] if folder.has_key("includeExtensions") else []
include_files = folder[u'includeFiles'] if folder.has_key("includeFiles") else []
for infolder in include_folders:
for ext in include_extensions:
found_files = _find_file(infolder, ext)
if found_files:
files_to_copy = files_to_copy + found_files
if not infolder == source_dir:
destination_folder = infolder.replace(source_dir, destination_dir)
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
print u'... output folder created {}'.format(destination_folder)
for file_name in include_files:
found_files = _find_file_by_name(infolder, file_name)
if found_files:
files_to_copy = files_to_copy + found_files
if not infolder == source_dir:
destination_folder = infolder.replace(source_dir, destination_dir)
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
print u'... output folder created {}'.format(destination_folder)
return files_to_copy
def __find_all_files_to_include(self, folder, include_folders, source_dir, destination_dir):
files_to_copy = []
for infolder in include_folders:
found_files = _find_all_files(os.path.join(source_dir, infolder))
if found_files:
files_to_copy = files_to_copy + found_files
if not folder == source_dir:
dir_name = infolder.replace(source_dir + '\\', '')
destination_folder = os.path.join(destination_dir, dir_name)
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
print u'... output folder created {}'.format(destination_folder)
return files_to_copy
def __exclude_files(self, folder, files_to_copy):
exclude_extensions = folder[u'excludeExtensions'] if folder.has_key("excludeExtensions") else []
exclude_files = folder[u'excludeFiles'] if folder.has_key("excludeFiles") else []
for ext in exclude_extensions:
files_to_copy = list(fn for fn in files_to_copy if not os.path.basename(fn).lower().endswith('.' + (ext.lower())))
for exclude_file in exclude_files:
files_to_copy = list(fn for fn in files_to_copy if not os.path.basename(fn).lower() == exclude_file.lower())
return files_to_copy
def __do_copy_files_to_dest(self, folder, files_to_copy, config,):
"""Finds files to be copyed
Args:
self: The reserved object 'self'
folder ([string]): relative path to folder to be processed
files_to_copy ([list]): path of files to be copyed
config ([PublisherHealperConfig]): Configuration of deplyment
"""
source_dir = os.path.join(config.sourceDir, folder["folder"]) if folder.has_key("folder") else config.sourceDir
destination_dir = os.path.join(config.destinationDir, folder["folder"]) if folder.has_key("folder") else config.destinationDir
for copy_file in files_to_copy:
dest_file = copy_file
dest_file = dest_file.replace(source_dir, destination_dir)
dest_file = self.__rename_file_if_needed(dest_file, folder)
replaced = False
thesame = False
if os.path.exists(dest_file):
copy_hash = _md5(copy_file)
dest_hash = _md5(dest_file)
                if copy_hash != dest_hash:
self.__create_backup_one_file(dest_file, config)
os.remove(dest_file)
replaced = True
else:
thesame = True
if not thesame:
if not os.path.isdir(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copy2(copy_file, dest_file)
if not replaced:
print u'... file copy {}'.format(dest_file)
else:
print u'... file replace {}'.format(dest_file)
def __rename_file_if_needed(self, dest_file, folder):
rename_files = folder[u'renameFiles'] if folder.has_key("renameFiles") else {}
dir_name, file_name = os.path.split(dest_file)
for rename_file in rename_files:
if file_name.upper() == rename_file.upper():
return os.path.join(dir_name, rename_files[rename_file])
return dest_file
def __do_process_xml(self, config):
"""Changes required values in config xml
Args:
self: The reserved object 'self'
            config ([PublisherHealperConfig]): Configuration of deployment
"""
for config_file in config.configFilesXML:
params_helper = xmlParamsHealper.XMLParams(None, None, os.path.join(config.destinationDir, config_file['file']))
params_helper.GetParams()
for change in config_file['changes']:
is_string = False
do_append = False
if change.has_key('string'):
if change['string']:
is_string = True
if change.has_key('append'):
if change['append']:
do_append = True
if do_append:
attribute = None
key = None
if change.has_key("atribute"):
attribute = change['atribute']
if change.has_key("appendKey"):
key = change['appendKey']
params_helper.AppendValueByPath(change['xpath'], key, change['value'], attribute, isString=is_string)
else:
if change.has_key("atribute"):
params_helper.UpdateAtributeByPath(change['xpath'], change['atribute'], change['value'])
else:
params_helper.UpdateValueByPath(change['xpath'], change['value'])
params_helper.WriteParams()
print u'... config file {} updated'.format(config_file['file'])
def __do_process_json(self, config):
"""Changes required values in config xml
Args:
self: The reserved object 'self'
config ([PublisherHealperConfig]): Configuration of deplyment
"""
for config_file in config.configFilesJson:
params_helper = JsonParamsHelper.JsonParams(None, None, os.path.join(config.destinationDir, config_file['file']))
params_helper.GetParams()
for change in config_file['changes']:
is_json = False
do_append = False
if change.has_key('json'):
if change['json']:
is_json = True
if change.has_key('append'):
if change['append']:
do_append = True
if do_append:
params_helper.AppendValueByPath(change['xpath'], change['appendKey'], change['value'], is_json)
else:
params_helper.UpdateValueByPath(change['xpath'], change['value'], is_json)
params_helper.WriteParams(False)
print u'... config file {} updated'.format(config_file['file'])
def __do_string_repalce(self, config):
"""Replace required values by sring replacement
Args:
self: The reserved object 'self'
config ([PublisherHealperConfig]): Configuration of deplyment
"""
for file_name in config.replacementMap:
replacement_map = config.replacementMap[file_name]
path = os.path.join(config.destinationDir, file_name)
_replace_in_file(path, replacement_map)
print u'... file {} replaced strings'.format(path)
def _replace_in_file(path, replace_map):
"""replaces values in files using replace_map
"""
with codecs.open(path, 'r') as f:
newlines = []
for line in f.readlines():
for key, value in replace_map.items():
line = line.replace(key, value)
newlines.append(line)
with open(path, 'w') as f:
for line in newlines:
f.write(line)
def _find_all_files(directory):
"""Finds files in the directory
Args:
dir: The directory in which to look for the file
"""
found_files = [directory + "\\" + fn
for fn in os.listdir(directory) if os.path.isfile(directory + "\\" + fn)]
found_files.sort()
return found_files
def _find_file(directory, ext):
"""Finds files in the directory
Args:
Dir: The directory in which to look for the file
Ext: The extension to search for
"""
found_files = [directory + "\\" + fn
for fn in os.listdir(directory) if fn.lower().endswith('.' + (ext.lower()))]
found_files.sort()
return found_files
def _find_file_by_name(directory, file_name):
"""Finds files in the directory
Args:
Dir: The directory in which to look for the file
fileName: File name to search for
"""
found_files = [directory + "\\" + fn
for fn in os.listdir(directory) if fn.lower() == file_name.lower()]
found_files.sort()
return found_files
def _find_all_folders(directory):
"""Finds files in the directory
Args:
Dir: The directory in which to look for the file
Ext: The extension to search for
"""
result = []
for root, dirs, files in os.walk(directory):
for name in dirs:
result.append(os.path.join(root, name))
return result
def _md5(filename):
"""calculates file md5 cheksumm
Args:
fname ([string]): File path
Returns:
[string]: hex digest
"""
hash_md5 = hashlib.md5()
with open(filename, "rb") as opened_file:
for chunk in iter(lambda: opened_file.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def _now_for_file():
"""returns date now formated for filename
Returns:
[string]: [date reprezentation as string]
"""
return datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d_%H%M%S")
| gpl-3.0 | -2,723,904,783,419,873,000 | 42.292735 | 138 | 0.577958 | false |
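A hypothetical deployment run with the classes above; every path, folder and option value below is a placeholder rather than a value taken from the GISPython repository.

# Illustrative only: configure and run the publisher helper defined above.
from GISPython import PublisherHealper

config = PublisherHealper.PublisherHealperConfig()
config.moduleName = "MyTool"                 # placeholder module name
config.sourceDir = r"C:\build\MyTool"        # placeholder paths
config.destinationDir = r"C:\apps\MyTool"
config.doBackup = True
config.bacupType = "Folder"
config.backupFolder = r"C:\backups\MyTool"
config.includeFolders = [{
    "folder": "scripts",
    "recursive": True,
    "includeExtensions": ["py"],
    "clearExtensions": ["pyc"],
}]

PublisherHealper.PublisherHealper().Deply(config)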
Exgibichi/statusquo | test/functional/getblocktemplate_longpoll.py | 1 | 3120 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from test_framework.test_framework import StatusquoTestFramework
from test_framework.util import *
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(StatusquoTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
        # check that thread will exit now that a new block was generated
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1) # generate a block on our own node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| mit | 5,559,137,650,237,787,000 | 41.739726 | 112 | 0.664103 | false |
BirkbeckCTP/janeway | src/core/homepage_elements/carousel/plugin_settings.py | 1 | 2057 | from django.db.utils import OperationalError
from django.contrib.contenttypes.models import ContentType
PLUGIN_NAME = 'Carousel'
DESCRIPTION = 'This is a homepage element that renders a carousel.'
AUTHOR = 'Martin Paul Eve'
def install():
import core.models as core_models
import journal.models as journal_models
import press.models as press_models
# check whether this homepage element has already been installed for all journals
journals = journal_models.Journal.objects.all()
for journal in journals:
content_type = ContentType.objects.get_for_model(journal)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='carousel_settings',
template_path='journal/homepage_elements/carousel.html',
content_type=content_type,
object_id=journal.pk,
has_config=True,
defaults={'available_to_press': True})
element.save()
presses = press_models.Press.objects.all()
for press in presses:
content_type = ContentType.objects.get_for_model(press)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='carousel_settings',
template_path='journal/homepage_elements/carousel.html',
content_type=content_type,
object_id=press.pk,
has_config=True,
defaults={'available_to_press': True})
element.save()
def hook_registry():
try:
install()
return {
'yield_homepage_element_context': {
'module': 'core.homepage_elements.carousel.hooks',
'function': 'yield_homepage_element_context',
'name': PLUGIN_NAME,
}
}
except OperationalError:
# if we get here the database hasn't yet been created
return {}
except BaseException: # if we get here, well, something has gone very wrong
return {}
| agpl-3.0 | -8,702,155,116,632,089,000 | 33.283333 | 86 | 0.633447 | false |
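A small sketch of how the hooks exposed above might be inspected; it assumes a configured Django environment for the Janeway project, since both functions touch the database.

# Illustrative only: list the homepage-element hooks exposed by the plugin above.
from core.homepage_elements.carousel import plugin_settings

hooks = plugin_settings.hook_registry()   # runs install() as a side effect
for context_name, hook in hooks.items():
    print(context_name, hook["module"], hook["function"])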
NeuralEnsemble/elephant | elephant/asset/asset.py | 2 | 102992 | # -*- coding: utf-8 -*-
"""
ASSET is a statistical method :cite:`asset-Torre16_e1004939` for the detection
of repeating sequences of synchronous spiking events in parallel spike trains.
ASSET analysis class object of finding patterns
-----------------------------------------------
.. autosummary::
:toctree: _toctree/asset/
ASSET
Patterns post-exploration
-------------------------
.. autosummary::
:toctree: _toctree/asset/
synchronous_events_intersection
synchronous_events_difference
synchronous_events_identical
synchronous_events_no_overlap
synchronous_events_contained_in
synchronous_events_contains_all
synchronous_events_overlap
Tutorial
--------
:doc:`View tutorial <../tutorials/asset>`
Run tutorial interactively:
.. image:: https://mybinder.org/badge.svg
:target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master
?filepath=doc/tutorials/asset.ipynb
Examples
--------
In this example we
* simulate two noisy synfire chains;
* shuffle the neurons to destroy visual appearance;
* run ASSET analysis to recover the original neurons arrangement.
1. Simulate two noisy synfire chains, shuffle the neurons to destroy the
pattern visually, and store shuffled activations in neo.SpikeTrains.
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> np.random.seed(10)
>>> spiketrain = np.linspace(0, 50, num=10)
>>> np.random.shuffle(spiketrain)
>>> spiketrains = np.c_[spiketrain, spiketrain + 100]
>>> spiketrains += np.random.random_sample(spiketrains.shape) * 5
>>> spiketrains = [neo.SpikeTrain(st, units='ms', t_stop=1 * pq.s)
... for st in spiketrains]
2. Create `ASSET` class object that holds spike trains.
`ASSET` requires at least one argument - a list of spike trains. If
`spiketrains_y` is not provided, the same spike trains are used to build an
intersection matrix with.
>>> from elephant import asset
>>> asset_obj = asset.ASSET(spiketrains, bin_size=3*pq.ms)
3. Build the intersection matrix `imat`:
>>> imat = asset_obj.intersection_matrix()
4. Estimate the probability matrix `pmat`, using the analytical method:
>>> pmat = asset_obj.probability_matrix_analytical(imat,
... kernel_width=50*pq.ms)
5. Compute the joint probability matrix `jmat`, using a suitable filter:
>>> jmat = asset_obj.joint_probability_matrix(pmat, filter_shape=(5, 1),
... n_largest=3)
6. Create the masked version of the intersection matrix, `mmat`, from `pmat`
and `jmat`:
>>> mmat = asset_obj.mask_matrices([pmat, jmat], thresholds=.9)
7. Cluster significant elements of imat into diagonal structures:
>>> cmat = asset_obj.cluster_matrix_entries(mmat, max_distance=11,
... min_neighbors=3, stretch=5)
8. Extract sequences of synchronous events:
>>> sses = asset_obj.extract_synchronous_events(cmat)
The ASSET found the following sequences of synchronous events:
>>> sses
{1: {(36, 2): {5},
(37, 4): {1},
(40, 6): {4},
(41, 7): {8},
(43, 9): {2},
(47, 14): {7},
(48, 15): {0},
(50, 17): {9}}}
To visualize them, refer to Viziphant documentation and an example plot
:func:`viziphant.asset.plot_synchronous_events`.
"""
from __future__ import division, print_function, unicode_literals
import math
import os
import subprocess
import sys
import tempfile
import warnings
from pathlib import Path
import neo
import numpy as np
import quantities as pq
import scipy.spatial
import scipy.stats
from sklearn.cluster import dbscan
from sklearn.metrics import pairwise_distances, pairwise_distances_chunked
from tqdm import trange, tqdm
import elephant.conversion as conv
from elephant import spike_train_surrogates
from elephant.utils import get_cuda_capability_major
try:
from mpi4py import MPI
mpi_accelerated = True
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
except ImportError:
mpi_accelerated = False
size = 1
rank = 0
__all__ = [
"ASSET",
"synchronous_events_intersection",
"synchronous_events_difference",
"synchronous_events_identical",
"synchronous_events_no_overlap",
"synchronous_events_contained_in",
"synchronous_events_contains_all",
"synchronous_events_overlap"
]
# =============================================================================
# Some Utility Functions to be dealt with in some way or another
# =============================================================================
def _signals_same_attribute(signals, attr_name):
"""
Check whether a list of signals (`neo.AnalogSignal` or `neo.SpikeTrain`)
have same attribute `attr_name`. If so, return that value. Otherwise,
raise ValueError.
Parameters
----------
signals : list
A list of signals (e.g. `neo.AnalogSignal` or `neo.SpikeTrain`) having
attribute `attr_name`.
Returns
-------
pq.Quantity
The value of the common attribute `attr_name` of the list of signals.
Raises
------
ValueError
If `signals` is an empty list.
If `signals` have different `attr_name` attribute values.
"""
if len(signals) == 0:
raise ValueError('Empty signals list')
attribute = getattr(signals[0], attr_name)
for sig in signals[1:]:
if getattr(sig, attr_name) != attribute:
raise ValueError(
"Signals have different '{}' values".format(attr_name))
return attribute
def _quantities_almost_equal(x, y):
"""
Returns True if two quantities are almost equal, i.e., if `x - y` is
"very close to 0" (not larger than machine precision for floats).
Parameters
----------
x : pq.Quantity
First Quantity to compare.
y : pq.Quantity
Second Quantity to compare. Must have same unit type as `x`, but not
necessarily the same shape. Any shapes of `x` and `y` for which `x - y`
can be calculated are permitted.
Returns
-------
np.ndarray
Array of `bool`, which is True at any position where `x - y` is almost
zero.
Notes
-----
Not the same as `numpy.testing.assert_allclose` (which does not work
with Quantities) and `numpy.testing.assert_almost_equal` (which works only
with decimals)
"""
eps = np.finfo(float).eps
relative_diff = (x - y).magnitude
return np.all([-eps <= relative_diff, relative_diff <= eps], axis=0)
def _transactions(spiketrains, bin_size, t_start, t_stop, ids=None):
"""
Transform parallel spike trains into a list of sublists, called
transactions, each corresponding to a time bin and containing the list
of spikes in `spiketrains` falling into that bin.
To compute each transaction, the spike trains are binned (with adjacent
exclusive binning) and clipped (i.e., spikes from the same train falling
in the same bin are counted as one event). The list of spike IDs within
each bin form the corresponding transaction.
Parameters
----------
spiketrains : list of neo.SpikeTrain or list of tuple
A list of `neo.SpikeTrain` objects, or list of pairs
(Train_ID, `neo.SpikeTrain`), where `Train_ID` can be any hashable
object.
bin_size : pq.Quantity
Width of each time bin. Time is binned to determine synchrony.
t_start : pq.Quantity
The starting time. Only spikes occurring at times `t >= t_start` are
considered. The first transaction contains spikes falling into the
time segment `[t_start, t_start+bin_size]`.
If None, takes the value of `spiketrain.t_start`, common for all
input `spiketrains` (raises ValueError if it's not the case).
Default: None
t_stop : pq.Quantity
The ending time. Only spikes occurring at times `t < t_stop` are
considered.
If None, takes the value of `spiketrain.t_stop`, common for all
input `spiketrains` (raises ValueError if it's not the case).
Default: None
ids : list of int, optional
List of spike train IDs.
If None, the IDs `0` to `N-1` are used, where `N` is the number of
input spike trains.
Default: None
Returns
-------
list of list
A list of transactions, where each transaction corresponds to a time
bin and represents the list of spike train IDs having a spike in that
time bin.
Raises
------
TypeError
If `spiketrains` is not a list of `neo.SpikeTrain` or a list of tuples
(id, `neo.SpikeTrain`).
"""
if all(isinstance(st, neo.SpikeTrain) for st in spiketrains):
trains = spiketrains
if ids is None:
ids = range(len(spiketrains))
else:
# (id, SpikeTrain) pairs
try:
ids, trains = zip(*spiketrains)
except TypeError:
raise TypeError('spiketrains must be either a list of ' +
'SpikeTrains or a list of (id, SpikeTrain) pairs')
# Bin the spike trains and take for each of them the ids of filled bins
binned = conv.BinnedSpikeTrain(
trains, bin_size=bin_size, t_start=t_start, t_stop=t_stop)
filled_bins = binned.spike_indices
# Compute and return the transaction list
return [[train_id for train_id, b in zip(ids, filled_bins)
if bin_id in b] for bin_id in range(binned.n_bins)]
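# --- Illustrative sketch (not part of the original module) -------------------
# Minimal example of the transaction encoding described in the docstring above:
# two spike trains binned at 10 ms; each sublist holds the ids of the trains
# that fire in the corresponding bin. The spike times are made up.
def _example_transactions():
    st0 = neo.SpikeTrain([2, 12] * pq.ms, t_stop=30 * pq.ms)
    st1 = neo.SpikeTrain([3, 25] * pq.ms, t_stop=30 * pq.ms)
    # expected result: [[0, 1], [0], [1]]
    return _transactions([st0, st1], bin_size=10 * pq.ms,
                         t_start=0 * pq.ms, t_stop=30 * pq.ms)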
def _analog_signal_step_interp(signal, times):
"""
Compute the step-wise interpolation of a signal at desired times.
Given a signal (e.g. a `neo.AnalogSignal`) `s` taking values `s[t0]` and
`s[t1]` at two consecutive time points `t0` and `t1` (`t0 < t1`), the value
of the step-wise interpolation at time `t: t0 <= t < t1` is given by
`s[t] = s[t0]`.
Parameters
----------
signal : neo.AnalogSignal
The analog signal, containing the discretization of the function to
interpolate.
times : pq.Quantity
A vector of time points at which the step interpolation is computed.
Returns
-------
pq.Quantity
Object with same shape of `times` and containing
the values of the interpolated signal at the time points in `times`.
"""
dt = signal.sampling_period
# Compute the ids of the signal times to the left of each time in times
time_ids = np.floor(
((times - signal.t_start) / dt).rescale(
pq.dimensionless).magnitude).astype('i')
return (signal.magnitude[time_ids] * signal.units).rescale(signal.units)
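# --- Illustrative sketch (not part of the original module) -------------------
# Step interpolation in action: sampling between two points returns the value
# of the sample to the left, as stated in the docstring above. The signal
# values are made up.
def _example_step_interp():
    signal = neo.AnalogSignal([1., 4., 9.], units='Hz',
                              sampling_rate=1 * pq.Hz, t_start=0 * pq.s)
    # expected values: [1., 4., 9.] Hz at t = 0.5, 1.5, 2.5 s
    return _analog_signal_step_interp(signal, [0.5, 1.5, 2.5] * pq.s)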
# =============================================================================
# HERE ASSET STARTS
# =============================================================================
def _stretched_metric_2d(x, y, stretch, ref_angle, working_memory=None):
r"""
Given a list of points on the real plane, identified by their abscissa `x`
and ordinate `y`, compute a stretched transformation of the Euclidean
distance among each of them.
The classical euclidean distance `d` between points `(x1, y1)` and
    `(x2, y2)`, i.e., :math:`\sqrt{(x_1-x_2)^2 + (y_1-y_2)^2}`, is multiplied
    by a factor
    .. math::
        1 + (\mathrm{stretch} - 1) \cdot \left| \sin(\mathrm{ref\_angle} - \theta) \right|,
where :math:`\theta` is the angle between the points and the 45 degree
direction (i.e., the line `y = x`).
The stretching factor thus steadily varies between 1 (if the line
connecting `(x1, y1)` and `(x2, y2)` has inclination `ref_angle`) and
`stretch` (if that line has inclination `90 + ref_angle`).
Parameters
----------
x : (n,) np.ndarray
Array of abscissas of all points among which to compute the distance.
y : (n,) np.ndarray
Array of ordinates of all points among which to compute the distance
(same shape as `x`).
stretch : float
Maximum stretching factor, applied if the line connecting the points
has inclination `90 + ref_angle`.
ref_angle : float
Reference angle in degrees (i.e., the inclination along which the
stretching factor is 1).
Returns
-------
D : (n,n) np.ndarray
Square matrix of distances between all pairs of points.
"""
alpha = np.deg2rad(ref_angle) # reference angle in radians
# Create the array of points (one per row) for which to compute the
# stretched distance
points = np.column_stack([x, y])
x_array = np.expand_dims(x, axis=0)
y_array = np.expand_dims(y, axis=0)
def calculate_stretch_mat(theta_mat, D_mat):
# Transform [-pi, pi] back to [-pi/2, pi/2]
theta_mat[theta_mat < -np.pi / 2] += np.pi
theta_mat[theta_mat > np.pi / 2] -= np.pi
# Compute the matrix of stretching factors for each pair of points.
# Equivalent to:
# stretch_mat = 1 + (stretch - 1.) * np.abs(np.sin(alpha - theta))
_stretch_mat = np.subtract(alpha, theta_mat, out=theta_mat)
_stretch_mat = np.sin(_stretch_mat, out=_stretch_mat)
_stretch_mat = np.abs(_stretch_mat, out=_stretch_mat)
_stretch_mat = np.multiply(stretch - 1, _stretch_mat, out=_stretch_mat)
_stretch_mat = np.add(1, _stretch_mat, out=_stretch_mat)
_stretch_mat = np.multiply(D_mat, _stretch_mat, out=_stretch_mat)
return _stretch_mat
if working_memory is None:
# Compute the matrix D[i, j] of euclidean distances among points
# i and j
D = pairwise_distances(points)
# Compute the angular coefficients of the line between each pair of
# points
# dX[i,j]: x difference between points i and j
# dY[i,j]: y difference between points i and j
dX = x_array.T - x_array
dY = y_array.T - y_array
# Compute the matrix Theta of angles between each pair of points
theta = np.arctan2(dY, dX, dtype=np.float32)
stretch_mat = calculate_stretch_mat(theta, D)
else:
start = 0
# x and y sizes are the same
stretch_mat = np.empty((len(x), len(y)), dtype=np.float32)
for D_chunk in pairwise_distances_chunked(
points, working_memory=working_memory):
chunk_size = D_chunk.shape[0]
dX = x_array[:, start: start + chunk_size].T - x_array
dY = y_array[:, start: start + chunk_size].T - y_array
theta_chunk = np.arctan2(
dY, dX, out=stretch_mat[start: start + chunk_size, :])
# stretch_mat (theta_chunk) is updated in-place here
calculate_stretch_mat(theta_chunk, D_chunk)
start += chunk_size
# Return the stretched distance matrix
return stretch_mat
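# --- Illustrative sketch (not part of the original module) -------------------
# Worked example of the stretching factor: the segment between (0, 1) and
# (1, 0) is perpendicular to the 45-degree reference direction, so with
# stretch=5 its Euclidean length sqrt(2) is scaled by the full factor 5.
def _example_stretched_metric_2d():
    x = np.array([0., 1.])
    y = np.array([1., 0.])
    dist = _stretched_metric_2d(x, y, stretch=5, ref_angle=45)
    # dist[0, 1] is approximately 5 * sqrt(2) ~= 7.07
    return dist[0, 1]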
def _interpolate_signals(signals, sampling_times, verbose=False):
"""
Interpolate signals at given sampling times.
"""
# Reshape all signals to one-dimensional array object (e.g. AnalogSignal)
for i, signal in enumerate(signals):
if signal.ndim == 2:
signals[i] = signal.flatten()
elif signal.ndim > 2:
            raise ValueError('elements in fir_rates must have at most 2 dimensions')
if verbose:
print('create time slices of the rates...')
# Interpolate in the time bins
interpolated_signal = np.vstack([_analog_signal_step_interp(
signal, sampling_times).rescale('Hz').magnitude
for signal in signals]) * pq.Hz
return interpolated_signal
class _GPUBackend:
"""
Parameters
----------
max_chunk_size: int or None, optional
Defines the maximum chunk size used in the `_split_axis` function. The
users typically don't need to set this parameter manually - it's used
to simulate scenarios when the input matrix is so large that it cannot
fit into GPU memory. Setting this parameter manually can resolve GPU
memory errors in case automatic parameters adjustment fails.
Notes
-----
1. PyOpenCL backend takes some time to compile the kernel for the first
time - the caching will affect your benchmarks unless you run each
program twice.
2. Pinned Host Memory.
Host (CPU) data allocations are pageable by default. The GPU cannot
access data directly from pageable host memory, so when a data transfer
from pageable host memory to device memory is invoked, the CUDA driver
must first allocate a temporary page-locked, or "pinned", host array,
copy the host data to the pinned array, and then transfer the data from
the pinned array to device memory, as illustrated at
https://developer.nvidia.com/blog/how-optimize-data-transfers-cuda-cc/
Same for OpenCL. Therefore, Python memory analyzers show increments in
the used RAM each time an OpenCL/CUDA buffer is created. As with any
Python objects, PyOpenCL and PyCUDA clean up and free allocated memory
automatically when garbage collection is executed.
"""
def __init__(self, max_chunk_size=None):
self.max_chunk_size = max_chunk_size
def _choose_backend(self):
# If CUDA is detected, always use CUDA.
# If OpenCL is detected, don't use it by default to avoid the system
# becoming unresponsive until the program terminates.
use_cuda = int(os.getenv("ELEPHANT_USE_CUDA", '1'))
use_opencl = int(os.getenv("ELEPHANT_USE_OPENCL", '1'))
cuda_detected = get_cuda_capability_major() != 0
if use_cuda and cuda_detected:
return self.pycuda
if use_opencl:
return self.pyopencl
return self.cpu
def _split_axis(self, chunk_size, axis_size, min_chunk_size=None):
chunk_size = min(chunk_size, axis_size)
if self.max_chunk_size is not None:
chunk_size = min(chunk_size, self.max_chunk_size)
if min_chunk_size is not None and chunk_size < min_chunk_size:
raise ValueError(f"[GPU not enough memory] Impossible to split "
f"the array into chunks of size at least "
f"{min_chunk_size} to fit into GPU memory")
n_chunks = math.ceil(axis_size / chunk_size)
chunk_size = math.ceil(axis_size / n_chunks) # align in size
if min_chunk_size is not None:
chunk_size = max(chunk_size, min_chunk_size)
split_idx = list(range(0, axis_size, chunk_size))
last_id = split_idx[-1]
last_size = axis_size - last_id # last is the smallest
split_idx = list(zip(split_idx[:-1], split_idx[1:]))
if min_chunk_size is not None and last_size < min_chunk_size:
# Overlap the last chunk with the previous.
# The overlapped part (intersection) will be computed twice.
last_id = axis_size - min_chunk_size
split_idx.append((last_id, axis_size))
return chunk_size, split_idx
class _JSFUniformOrderStat3D(_GPUBackend):
def __init__(self, n, d, precision='float', verbose=False,
cuda_threads=64, cuda_cwr_loops=32, tolerance=1e-5,
max_chunk_size=None):
super().__init__(max_chunk_size=max_chunk_size)
if d > n:
raise ValueError(f"d ({d}) must be less or equal n ({n})")
self.n = n
self.d = d
self.precision = precision
self.verbose = verbose and rank == 0
self.cuda_threads = cuda_threads
self.cuda_cwr_loops = cuda_cwr_loops
self.map_iterations = self._create_iteration_table()
bits = 32 if precision == "float" else 64
self.dtype = np.dtype(f"float{bits}")
self.tolerance = tolerance
@property
def num_iterations(self):
# map_iterations table is populated with element indices, not counts;
# therefore, we add 1
return self.map_iterations[:, -1].sum() + 1
def _create_iteration_table(self):
# do not use numpy arrays - they are limited to uint64
map_iterations = [list(range(self.n))]
for row_id in range(1, self.d):
prev_row = map_iterations[row_id - 1]
curr_row = [0] * (row_id + 1)
for col_id in range(row_id + 1, self.n):
cumsum = prev_row[col_id] + curr_row[-1]
curr_row.append(cumsum)
map_iterations.append(curr_row)
# here we can wrap the resulting array in numpy:
# if at least one item is greater than 2<<63 - 1,
# the data type will be set to 'object'
map_iterations = np.vstack(map_iterations)
return map_iterations
def _combinations_with_replacement(self):
# Generate sequences of {a_i} such that
# a_0 >= a_1 >= ... >= a_(d-1) and
# d-i <= a_i <= n, for each i in [0, d-1].
#
# Almost equivalent to
# list(itertools.combinations_with_replacement(range(n, 0, -1), r=d))
# [::-1]
#
# Example:
# _combinations_with_replacement(n=13, d=3) -->
# (3, 2, 1), (3, 2, 2), (3, 3, 1), ... , (13, 13, 12), (13, 13, 13).
#
# The implementation follows the insertion sort algorithm:
# insert a new element a_i from right to left to keep the reverse
# sorted order. Now substitute increment operation for insert.
if self.d > self.n:
return
if self.d == 1:
for matrix_entry in range(1, self.n + 1):
yield (matrix_entry,)
return
sequence_sorted = list(range(self.d, 0, -1))
input_order = tuple(sequence_sorted) # fixed
while sequence_sorted[0] != self.n + 1:
for last_element in range(1, sequence_sorted[-2] + 1):
sequence_sorted[-1] = last_element
yield tuple(sequence_sorted)
increment_id = self.d - 2
while increment_id > 0 and sequence_sorted[increment_id - 1] == \
sequence_sorted[increment_id]:
increment_id -= 1
sequence_sorted[increment_id + 1:] = input_order[increment_id + 1:]
sequence_sorted[increment_id] += 1
def cpu(self, log_du):
log_1 = np.log(1.)
# Compute the log of the integral's coefficient
logK = np.sum(np.log(np.arange(1, self.n + 1)))
# Add to the 3D matrix u a bottom layer equal to 0 and a
# top layer equal to 1. Then compute the difference du along
# the first dimension.
# prepare arrays for usage inside the loop
di_scratch = np.empty_like(log_du, dtype=np.int32)
log_du_scratch = np.empty_like(log_du)
# precompute log(factorial)s
# pad with a zero to get 0! = 1
log_factorial = np.hstack((0, np.cumsum(np.log(range(1, self.n + 1)))))
# compute the probabilities for each unique row of du
# only loop over the indices and do all du entries at once
# using matrix algebra
# initialise probabilities to 0
P_total = np.zeros(
log_du.shape[0],
dtype=np.float32 if self.precision == 'float' else np.float64
)
for iter_id, matrix_entries in enumerate(
tqdm(self._combinations_with_replacement(),
total=self.num_iterations,
desc="Joint survival function",
disable=not self.verbose)):
# if we are running with MPI
if mpi_accelerated and iter_id % size != rank:
continue
# we only need the differences of the indices:
di = -np.diff((self.n,) + matrix_entries + (0,))
# reshape the matrix to be compatible with du
di_scratch[:, range(len(di))] = di
# use precomputed factorials
sum_log_di_factorial = log_factorial[di].sum()
# Compute for each i,j the contribution to the probability
# given by this step, and add it to the total probability
# Use precomputed log
np.copyto(log_du_scratch, log_du)
# for each a=0,1,...,A-1 and b=0,1,...,B-1, replace du with 1
# whenever di_scratch = 0, so that du ** di_scratch = 1 (this
# avoids nans when both du and di_scratch are 0, and is
# mathematically correct)
log_du_scratch[di_scratch == 0] = log_1
di_log_du = di_scratch * log_du_scratch
sum_di_log_du = di_log_du.sum(axis=1)
logP = sum_di_log_du - sum_log_di_factorial
P_total += np.exp(logP + logK)
if mpi_accelerated:
totals = np.zeros_like(P_total)
# exchange all the results
mpi_float_type = MPI.FLOAT \
if self.precision == 'float' else MPI.DOUBLE
comm.Allreduce(
[P_total, mpi_float_type],
[totals, mpi_float_type],
op=MPI.SUM)
# We need to return the collected totals instead of the local
# P_total
P_total = totals
return P_total
def _compile_template(self, template_name, **kwargs):
from jinja2 import Template
cu_template_path = Path(__file__).parent / template_name
cu_template = Template(cu_template_path.read_text())
asset_cu = cu_template.render(
precision=self.precision,
CWR_LOOPS=self.cuda_cwr_loops,
N=self.n, D=self.d, **kwargs)
return asset_cu
def pyopencl(self, log_du, device_id=0):
import pyopencl as cl
import pyopencl.array as cl_array
self._check_input(log_du)
it_todo = self.num_iterations
u_length = log_du.shape[0]
context = cl.create_some_context(interactive=False)
if self.verbose:
print("Available OpenCL devices:\n", context.devices)
device = context.devices[device_id]
# A queue bounded to the device
queue = cl.CommandQueue(context)
max_l_block = device.local_mem_size // (
self.dtype.itemsize * (self.d + 2))
n_threads = min(self.cuda_threads, max_l_block,
device.max_work_group_size)
if n_threads > 32:
# It's more efficient to make the number of threads
# a multiple of the warp size (32).
n_threads -= n_threads % 32
iteration_table_str = ", ".join(f"{val}LU" for val in
self.map_iterations.flatten())
iteration_table_str = "{%s}" % iteration_table_str
log_factorial = np.r_[0, np.cumsum(np.log(range(1, self.n + 1)))]
logK = log_factorial[-1]
log_factorial_str = ", ".join(f"{val:.10f}" for val in log_factorial)
log_factorial_str = "{%s}" % log_factorial_str
atomic_int = 'int' if self.precision == 'float' else 'long'
# GPU_MAX_HEAP_SIZE OpenCL flag is set to 2 Gb (1 << 31) by default
mem_avail = min(device.max_mem_alloc_size, device.global_mem_size,
1 << 31)
# 4 * (D + 1) * size + 8 * size == mem_avail
chunk_size = mem_avail // (4 * log_du.shape[1] + self.dtype.itemsize)
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=u_length)
P_total = np.empty(u_length, dtype=self.dtype)
P_total_gpu = cl_array.Array(queue, shape=chunk_size, dtype=self.dtype)
for i_start, i_end in split_idx:
log_du_gpu = cl_array.to_device(queue, log_du[i_start: i_end],
async_=True)
P_total_gpu.fill(0, queue=queue)
chunk_size = i_end - i_start
l_block = min(n_threads, chunk_size)
l_num_blocks = math.ceil(chunk_size / l_block)
grid_size = math.ceil(it_todo / (n_threads * self.cuda_cwr_loops))
if grid_size > l_num_blocks:
# make grid_size divisible by l_num_blocks
grid_size -= grid_size % l_num_blocks
else:
# grid_size must be at least l_num_blocks
grid_size = l_num_blocks
if self.verbose:
print(f"[Joint prob. matrix] it_todo={it_todo}, "
f"grid_size={grid_size}, L_BLOCK={l_block}, "
f"N_THREADS={n_threads}")
# OpenCL defines unsigned long as uint64, therefore we're adding
# the LU suffix, not LLU, which would indicate unsupported uint128
# data type format.
asset_cl = self._compile_template(
template_name="joint_pmat.cl",
L=f"{chunk_size}LU",
L_BLOCK=l_block,
L_NUM_BLOCKS=l_num_blocks,
ITERATIONS_TODO=f"{it_todo}LU",
logK=f"{logK:.10f}f",
iteration_table=iteration_table_str,
log_factorial=log_factorial_str,
ATOMIC_UINT=f"unsigned {atomic_int}",
ASSET_ENABLE_DOUBLE_SUPPORT=int(self.precision == "double")
)
program = cl.Program(context, asset_cl).build()
# synchronize
cl.enqueue_barrier(queue)
kernel = program.jsf_uniform_orderstat_3d_kernel
kernel(queue, (grid_size,), (n_threads,),
P_total_gpu.data, log_du_gpu.data, g_times_l=True)
P_total_gpu[:chunk_size].get(ary=P_total[i_start: i_end])
return P_total
def pycuda(self, log_du):
try:
            # PyCUDA is not listed in requirements-extra because users
            # restricted to CPU-only machines would then be unable to
            # install Elephant.
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
from pycuda.compiler import SourceModule
except ImportError as err:
raise ImportError(
"Install pycuda with 'pip install pycuda'") from err
self._check_input(log_du)
it_todo = self.num_iterations
u_length = log_du.shape[0]
device = pycuda.autoinit.device
max_l_block = device.MAX_SHARED_MEMORY_PER_BLOCK // (
self.dtype.itemsize * (self.d + 2))
n_threads = min(self.cuda_threads, max_l_block,
device.MAX_THREADS_PER_BLOCK)
if n_threads > device.WARP_SIZE:
# It's more efficient to make the number of threads
# a multiple of the warp size (32).
n_threads -= n_threads % device.WARP_SIZE
log_factorial = np.r_[0, np.cumsum(np.log(range(1, self.n + 1)))]
log_factorial = log_factorial.astype(self.dtype)
logK = log_factorial[-1]
free, total = drv.mem_get_info()
# 4 * (D + 1) * size + 8 * size == mem_avail
chunk_size = free // (4 * log_du.shape[1] + self.dtype.itemsize)
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=u_length)
P_total = np.empty(u_length, dtype=self.dtype)
P_total_gpu = gpuarray.GPUArray(chunk_size, dtype=self.dtype)
log_du_gpu = drv.mem_alloc(4 * chunk_size * log_du.shape[1])
for i_start, i_end in split_idx:
drv.memcpy_htod_async(dest=log_du_gpu, src=log_du[i_start: i_end])
P_total_gpu.fill(0)
chunk_size = i_end - i_start
l_block = min(n_threads, chunk_size)
l_num_blocks = math.ceil(chunk_size / l_block)
grid_size = math.ceil(it_todo / (n_threads * self.cuda_cwr_loops))
grid_size = min(grid_size, device.MAX_GRID_DIM_X)
if grid_size > l_num_blocks:
# make grid_size divisible by l_num_blocks
grid_size -= grid_size % l_num_blocks
else:
# grid_size must be at least l_num_blocks
grid_size = l_num_blocks
if self.verbose:
print(f"[Joint prob. matrix] it_todo={it_todo}, "
f"grid_size={grid_size}, L_BLOCK={l_block}, "
f"N_THREADS={n_threads}")
asset_cu = self._compile_template(
template_name="joint_pmat.cu",
L=f"{chunk_size}LLU",
L_BLOCK=l_block,
L_NUM_BLOCKS=l_num_blocks,
ITERATIONS_TODO=f"{it_todo}LLU",
logK=f"{logK:.10f}f",
)
module = SourceModule(asset_cu)
iteration_table_gpu, _ = module.get_global("iteration_table")
iteration_table = self.map_iterations.astype(np.uint64)
drv.memcpy_htod(iteration_table_gpu, iteration_table)
log_factorial_gpu, _ = module.get_global("log_factorial")
drv.memcpy_htod(log_factorial_gpu, log_factorial)
drv.Context.synchronize()
kernel = module.get_function("jsf_uniform_orderstat_3d_kernel")
kernel(P_total_gpu.gpudata, log_du_gpu, grid=(grid_size, 1),
block=(n_threads, 1, 1))
P_total_gpu[:chunk_size].get(ary=P_total[i_start: i_end])
return P_total
def _cuda(self, log_du):
# Compile a self-contained joint_pmat_old.cu file and run it
# in a terminal. Having this function is useful to debug ASSET CUDA
# application because it's self-contained and the logic is documented.
# Don't use this backend when the 'log_du' arrays are huge because
# of the disk I/O operations.
# A note to developers: remove this backend in half a year once the
# pycuda backend proves to be stable.
self._check_input(log_du)
asset_cu = self._compile_template(
template_name="joint_pmat_old.cu",
L=f"{log_du.shape[0]}LLU",
N_THREADS=self.cuda_threads,
ITERATIONS_TODO=f"{self.num_iterations}LLU",
ASSET_DEBUG=int(self.verbose)
)
with tempfile.TemporaryDirectory() as asset_tmp_folder:
asset_cu_path = os.path.join(asset_tmp_folder, 'asset.cu')
asset_bin_path = os.path.join(asset_tmp_folder, 'asset.o')
with open(asset_cu_path, 'w') as f:
f.write(asset_cu)
# -O3 optimization flag is for the host code only;
# by default, GPU device code is optimized with -O3.
# -w to ignore warnings.
compile_cmd = ['nvcc', '-w', '-O3', '-o', asset_bin_path,
asset_cu_path]
if self.precision == 'double' and get_cuda_capability_major() >= 6:
# atomicAdd(double) requires compute capability 6.x
compile_cmd.extend(['-arch', 'sm_60'])
compile_status = subprocess.run(
compile_cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.verbose:
print(compile_status.stdout.decode())
print(compile_status.stderr.decode(), file=sys.stderr)
compile_status.check_returncode()
log_du_path = os.path.join(asset_tmp_folder, "log_du.dat")
P_total_path = os.path.join(asset_tmp_folder, "P_total.dat")
with open(log_du_path, 'wb') as f:
log_du.tofile(f)
run_status = subprocess.run(
[asset_bin_path, log_du_path, P_total_path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.verbose:
print(run_status.stdout.decode())
print(run_status.stderr.decode(), file=sys.stderr)
run_status.check_returncode()
with open(P_total_path, 'rb') as f:
P_total = np.fromfile(f, dtype=self.dtype)
return P_total
def _check_input(self, log_du):
it_todo = self.num_iterations
if it_todo > np.iinfo(np.uint64).max:
raise ValueError(f"it_todo ({it_todo}) is larger than MAX_UINT64."
" Only Python backend is supported.")
# Don't convert log_du to float32 transparently for the user to avoid
# situations when the user accidentally passes an array with float64.
# Doing so wastes memory for nothing.
if log_du.dtype != np.float32:
raise ValueError("'log_du' must be a float32 array")
if log_du.shape[1] != self.d + 1:
raise ValueError(f"log_du.shape[1] ({log_du.shape[1]}) must be "
f"equal to D+1 ({self.d + 1})")
def compute(self, u):
if u.shape[1] != self.d:
raise ValueError("Invalid input data shape axis 1: expected {}, "
"got {}".format(self.d, u.shape[1]))
# A faster and memory efficient implementation of
# du = np.diff(u, prepend=0, append=1, axis=1).astype(np.float32)
du = np.empty((u.shape[0], u.shape[1] + 1), dtype=np.float32)
du[:, 0] = u[:, 0]
np.subtract(u[:, 1:], u[:, :-1], out=du[:, 1:-1])
np.subtract(1, u[:, -1], out=du[:, -1])
# precompute logarithms
# ignore warnings about infinities, see inside the loop:
# we replace 0 * ln(0) by 1 to get exp(0 * ln(0)) = 0 ** 0 = 1
# the remaining infinities correctly evaluate to
# exp(ln(0)) = exp(-inf) = 0
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
log_du = np.log(du, out=du)
jsf_backend = self._choose_backend()
P_total = jsf_backend(log_du)
# Captures non-finite values like NaN, inf
inside = (P_total > -self.tolerance) & (P_total < 1 + self.tolerance)
outside_vals = P_total[~inside]
if len(outside_vals) > 0:
# A watchdog for unexpected results.
warnings.warn(f"{len(outside_vals)}/{P_total.shape[0]} values of "
"the computed joint prob. matrix lie outside of the "
f"valid [0, 1] interval:\n{outside_vals}\nIf you're "
"using PyOpenCL backend, make sure you've disabled "
"GPU Hangcheck as described here https://"
"software.intel.com/content/www/us/en/develop/"
"documentation/get-started-with-intel-oneapi-"
"base-linux/top/before-you-begin.html\n"
"Clipping the output array to 0 and 1.")
P_total = np.clip(P_total, a_min=0., a_max=1., out=P_total)
return P_total
class _PMatNeighbors(_GPUBackend):
"""
Parameters
----------
filter_shape : tuple of int
A pair of integers representing the kernel shape `(l, w)`.
n_largest : int
The number of largest neighbors to collect for each entry in `mat`.
"""
def __init__(self, filter_shape, n_largest, max_chunk_size=None):
super().__init__(max_chunk_size=max_chunk_size)
self.n_largest = n_largest
self.max_chunk_size = max_chunk_size
filter_size, filter_width = filter_shape
if filter_width >= filter_size:
raise ValueError('filter_shape width must be lower than length')
if not ((filter_width % 2) and (filter_size % 2)):
            warnings.warn(
                'The kernel is not centered on the datapoint in whose '
                'calculation it is used. Consider using odd values '
                'for both entries of filter_shape.')
# Construct the kernel
filt = np.ones((filter_size, filter_size), dtype=bool)
filt = np.triu(filt, -filter_width)
filt = np.tril(filt, filter_width)
if n_largest > len(filt.nonzero()[0]):
raise ValueError(f"Too small filter shape {filter_shape} to "
f"select {n_largest} largest elements.")
self.filter_kernel = filt
def _check_input(self, mat):
symmetric = np.all(np.diagonal(mat) == 0.5)
# Check consistent arguments
filter_size = self.filter_kernel.shape[0]
if (symmetric and mat.shape[0] < 2 * filter_size - 1) \
or (not symmetric and min(mat.shape) < filter_size):
raise ValueError(f"'filter_shape' {self.filter_kernel.shape} is "
f"too large for the input matrix of shape "
f"{mat.shape}")
if mat.dtype != np.float32:
raise ValueError("The input matrix dtype must be float32.")
def pyopencl(self, mat):
import pyopencl as cl
import pyopencl.array as cl_array
from jinja2 import Template
context = cl.create_some_context(interactive=False)
device = context.devices[0]
queue = cl.CommandQueue(context)
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
filt_size = self.filter_kernel.shape[0] # filt is a square matrix
filt_rows, filt_cols = self.filter_kernel.nonzero()
filt_rows = "{%s}" % ", ".join(f"{row}U" for row in filt_rows)
filt_cols = "{%s}" % ", ".join(f"{col}U" for col in filt_cols)
lmat_padded = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
if symmetric:
mat = mat[filt_size:]
lmat = lmat_padded[filt_size + filt_size // 2: -filt_size // 2 + 1]
else:
lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1]
# GPU_MAX_HEAP_SIZE OpenCL flag is set to 2 Gb (1 << 31) by default
mem_avail = min(device.max_mem_alloc_size, device.global_mem_size,
1 << 31)
# 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols
chunk_size = (mem_avail // 4 - filt_size * lmat.shape[1]) // (
lmat.shape[1] * (self.n_largest + 1))
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=lmat.shape[0],
min_chunk_size=filt_size)
pmat_cl_path = Path(__file__).parent / "pmat_neighbors.cl"
pmat_cl_template = Template(pmat_cl_path.read_text())
lmat_gpu = cl_array.Array(
queue, shape=(chunk_size, lmat.shape[1], self.n_largest),
dtype=np.float32
)
for i_start, i_end in split_idx:
mat_gpu = cl_array.to_device(queue,
mat[i_start: i_end + filt_size],
async_=True)
lmat_gpu.fill(0, queue=queue)
chunk_size = i_end - i_start
it_todo = chunk_size * (lmat.shape[1] - filt_size + 1)
pmat_neighbors_cl = pmat_cl_template.render(
FILT_SIZE=filt_size,
N_LARGEST=self.n_largest,
PMAT_COLS=f"{lmat.shape[1]}LU",
Y_OFFSET=f"{i_start}LU",
NONZERO_SIZE=self.filter_kernel.sum(),
SYMMETRIC=int(symmetric),
filt_rows=filt_rows,
filt_cols=filt_cols
)
program = cl.Program(context, pmat_neighbors_cl).build()
# synchronize
cl.enqueue_barrier(queue)
kernel = program.pmat_neighbors
# When the grid size is set to the total number of work items to
# execute and the local size is set to None, PyOpenCL chooses the
# number of threads automatically such that the total number of
# work items exactly matches the desired number of iterations.
kernel(queue, (it_todo,), None, lmat_gpu.data, mat_gpu.data)
lmat_gpu[:chunk_size].get(ary=lmat[i_start: i_end])
return lmat_padded
def pycuda(self, mat):
from jinja2 import Template
try:
            # PyCUDA is not listed in requirements-extra because users
            # restricted to CPU-only machines would then be unable to
            # install Elephant.
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
from pycuda.compiler import SourceModule
except ImportError as err:
raise ImportError(
"Install pycuda with 'pip install pycuda'") from err
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
device = pycuda.autoinit.device
n_threads = device.MAX_THREADS_PER_BLOCK
filt_size = self.filter_kernel.shape[0]
filt_rows, filt_cols = self.filter_kernel.nonzero()
lmat_padded = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
if symmetric:
mat = mat[filt_size:]
lmat = lmat_padded[filt_size + filt_size // 2: -filt_size // 2 + 1]
else:
lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1]
free, total = drv.mem_get_info()
# 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols
chunk_size = (free // 4 - filt_size * lmat.shape[1]) // (
lmat.shape[1] * (self.n_largest + 1))
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=lmat.shape[0],
min_chunk_size=filt_size)
pmat_cu_path = Path(__file__).parent / "pmat_neighbors.cu"
pmat_cu_template = Template(pmat_cu_path.read_text())
lmat_gpu = gpuarray.GPUArray(
(chunk_size, lmat.shape[1], self.n_largest), dtype=np.float32)
mat_gpu = drv.mem_alloc(4 * (chunk_size + filt_size) * mat.shape[1])
for i_start, i_end in split_idx:
drv.memcpy_htod_async(dest=mat_gpu,
src=mat[i_start: i_end + filt_size])
lmat_gpu.fill(0)
chunk_size = i_end - i_start
it_todo = chunk_size * (lmat.shape[1] - filt_size + 1)
pmat_neighbors_cu = pmat_cu_template.render(
FILT_SIZE=filt_size,
N_LARGEST=self.n_largest,
PMAT_COLS=f"{lmat.shape[1]}LLU",
Y_OFFSET=f"{i_start}LLU",
NONZERO_SIZE=self.filter_kernel.sum(),
SYMMETRIC=int(symmetric),
IT_TODO=it_todo,
)
module = SourceModule(pmat_neighbors_cu)
filt_rows_gpu, _ = module.get_global("filt_rows")
drv.memcpy_htod(filt_rows_gpu, filt_rows.astype(np.uint32))
filt_cols_gpu, _ = module.get_global("filt_cols")
drv.memcpy_htod(filt_cols_gpu, filt_cols.astype(np.uint32))
drv.Context.synchronize()
grid_size = math.ceil(it_todo / n_threads)
if grid_size > device.MAX_GRID_DIM_X:
raise ValueError("Cannot launch a CUDA kernel with "
f"{grid_size} num. of blocks. Adjust the "
"'max_chunk_size' parameter.")
kernel = module.get_function("pmat_neighbors")
kernel(lmat_gpu.gpudata, mat_gpu, grid=(grid_size, 1),
block=(n_threads, 1, 1))
lmat_gpu[:chunk_size].get(ary=lmat[i_start: i_end])
return lmat_padded
def compute(self, mat):
"""
Build the 3D matrix `L` of largest neighbors of elements in a 2D matrix
`mat`.
For each entry `mat[i, j]`, collects the `n_largest` elements with
largest values around `mat[i, j]`, say `z_i, i=1,2,...,n_largest`,
and assigns them to `L[i, j, :]`.
The zone around `mat[i, j]` where largest neighbors are collected from
is a rectangular area (kernel) of shape `(l, w) = filter_shape`
centered around `mat[i, j]` and aligned along the diagonal.
If `mat` is symmetric, only the triangle below the diagonal is
considered.
Parameters
----------
mat : np.ndarray
A square matrix of real-valued elements.
Returns
-------
lmat : np.ndarray
            A matrix of shape `(L, W, n_largest)`, where `(L, W)` is the
            shape of `mat`, containing along the last dimension
            `lmat[i, j, :]` the largest neighbors of `mat[i, j]`.
"""
backend = self._choose_backend()
lmat = backend(mat)
return lmat
def cpu(self, mat):
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
filter_size = self.filter_kernel.shape[0]
# Initialize the matrix of d-largest values as a matrix of zeroes
lmat = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
N_bin_y = mat.shape[0]
N_bin_x = mat.shape[1]
# if the matrix is symmetric do not use kernel positions intersected
# by the diagonal
if symmetric:
bin_range_y = range(filter_size, N_bin_y - filter_size + 1)
else:
bin_range_y = range(N_bin_y - filter_size + 1)
bin_range_x = range(N_bin_x - filter_size + 1)
# compute matrix of largest values
for y in bin_range_y:
if symmetric:
# x range depends on y position
bin_range_x = range(y - filter_size + 1)
for x in bin_range_x:
patch = mat[y: y + filter_size, x: x + filter_size]
mskd = patch[self.filter_kernel]
largest_vals = np.sort(mskd)[-self.n_largest:]
lmat[y + (filter_size // 2), x + (filter_size // 2), :] = \
largest_vals
return lmat
def synchronous_events_intersection(sse1, sse2, intersection='linkwise'):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of positions `(iK, jK)` of matrix entries and
associated synchronous events `SK`, finds the intersection among them.
The intersection can be performed 'pixelwise' or 'linkwise'.
* if 'pixelwise', it yields a new SSE which retains only events in
`sse1` whose pixel position matches a pixel position in `sse2`. This
operation is not symmetric:
`intersection(sse1, sse2) != intersection(sse2, sse1)`.
* if 'linkwise', an additional step is performed where each retained
synchronous event `SK` in `sse1` is intersected with the
corresponding event in `sse2`. This yields a symmetric operation:
`intersection(sse1, sse2) = intersection(sse2, sse1)`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Each is a dictionary of pixel positions `(i, j)` as keys and sets `S`
of synchronous events as values (see above).
intersection : {'pixelwise', 'linkwise'}, optional
The type of intersection to perform among the two SSEs (see above).
Default: 'linkwise'
Returns
-------
sse_new : dict
A new SSE (same structure as `sse1` and `sse2`) which retains only the
events of `sse1` associated to keys present both in `sse1` and `sse2`.
If `intersection = 'linkwise'`, such events are additionally
intersected with the associated events in `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
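    Examples
    --------
    A minimal sketch with made-up pixel positions and neuron IDs (the data
    below are purely illustrative and not taken from the original
    documentation):
    >>> sse1 = {(1, 2): {1, 2, 3}, (3, 4): {5, 6}, (6, 7): {0, 1}}
    >>> sse2 = {(1, 2): {1, 2, 5}, (6, 7): {0, 1}}
    >>> synchronous_events_intersection(sse1, sse2, intersection='pixelwise')
    {(1, 2): {1, 2, 3}, (6, 7): {0, 1}}
    >>> synchronous_events_intersection(sse1, sse2, intersection='linkwise')
    {(1, 2): {1, 2}, (6, 7): {0, 1}}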
"""
sse_new = sse1.copy()
for pixel1 in sse1.keys():
if pixel1 not in sse2.keys():
del sse_new[pixel1]
if intersection == 'linkwise':
        # list() guards against deleting from sse_new while iterating it
        for pixel1, link1 in list(sse_new.items()):
sse_new[pixel1] = link1.intersection(sse2[pixel1])
if len(sse_new[pixel1]) == 0:
del sse_new[pixel1]
elif intersection == 'pixelwise':
pass
else:
raise ValueError(
"intersection (=%s) can only be" % intersection +
" 'pixelwise' or 'linkwise'")
return sse_new
def synchronous_events_difference(sse1, sse2, difference='linkwise'):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), computes the difference between `sse1` and `sse2`.
The difference can be performed 'pixelwise' or 'linkwise':
* if 'pixelwise', it yields a new SSE which contains all (and only) the
events in `sse1` whose pixel position doesn't match any pixel in
`sse2`.
* if 'linkwise', for each pixel `(i, j)` in `sse1` and corresponding
synchronous event `S1`, if `(i, j)` is a pixel in `sse2`
corresponding to the event `S2`, it retains the set difference
`S1 - S2`. If `(i, j)` is not a pixel in `sse2`, it retains the full
set `S1`.
Note that in either case the difference is a non-symmetric operation:
`intersection(sse1, sse2) != intersection(sse2, sse1)`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values (see above).
difference : {'pixelwise', 'linkwise'}, optional
The type of difference to perform between `sse1` and `sse2` (see
above).
Default: 'linkwise'
Returns
-------
sse_new : dict
A new SSE (same structure as `sse1` and `sse2`) which retains the
difference between `sse1` and `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
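    Examples
    --------
    A minimal sketch with made-up pixel positions and neuron IDs (purely
    illustrative data):
    >>> sse1 = {(1, 2): {1, 2, 3}, (3, 4): {5, 6}}
    >>> sse2 = {(1, 2): {1, 2}, (5, 6): {0}}
    >>> synchronous_events_difference(sse1, sse2, difference='pixelwise')
    {(3, 4): {5, 6}}
    >>> synchronous_events_difference(sse1, sse2, difference='linkwise')
    {(1, 2): {3}, (3, 4): {5, 6}}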
"""
sse_new = sse1.copy()
for pixel1 in sse1.keys():
if pixel1 in sse2.keys():
if difference == 'pixelwise':
del sse_new[pixel1]
elif difference == 'linkwise':
sse_new[pixel1] = sse_new[pixel1].difference(sse2[pixel1])
if len(sse_new[pixel1]) == 0:
del sse_new[pixel1]
else:
raise ValueError(
"difference (=%s) can only be" % difference +
" 'pixelwise' or 'linkwise'")
return sse_new
def _remove_empty_events(sse):
"""
Given a sequence of synchronous events (SSE) `sse` consisting of a pool of
pixel positions and associated synchronous events (see below), returns a
copy of `sse` where all empty events have been removed.
`sse` must be provided as a dictionary of type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse : dict
A dictionary of pixel positions `(i, j)` as keys, and sets `S` of
synchronous events as values (see above).
Returns
-------
sse_new : dict
A copy of `sse` where all empty events have been removed.
"""
sse_new = sse.copy()
for pixel, link in sse.items():
if link == set([]):
del sse_new[pixel]
return sse_new
def synchronous_events_identical(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
    (see below), determines whether `sse1` and `sse2` are identical.
    `sse1` and `sse2` are identical if they contain exactly the same pixels
    and, for each common pixel, exactly the same associated synchronous
    events; empty events are ignored in the comparison.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is identical to `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
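    Examples
    --------
    A minimal sketch with purely illustrative data; empty events are
    ignored in the comparison:
    >>> sse1 = {(1, 2): {1, 2}, (3, 4): set()}
    >>> sse2 = {(1, 2): {1, 2}}
    >>> synchronous_events_identical(sse1, sse2)
    True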
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
# Return whether sse11 == sse22
return sse11 == sse22
def synchronous_events_no_overlap(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` and `sse2` are disjoint.
Two SSEs are disjoint if they don't share pixels, or if the events
associated to common pixels are disjoint.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is disjoint from `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
# If both SSEs are empty, return False (we consider them equal)
if sse11 == {} and sse22 == {}:
return False
common_pixels = set(sse11.keys()).intersection(set(sse22.keys()))
if len(common_pixels) == 0:
return True
if all(sse11[p].isdisjoint(sse22[p]) for p in common_pixels):
return True
return False
def synchronous_events_contained_in(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` is strictly contained in `sse2`.
`sse1` is strictly contained in `sse2` if all its pixels are pixels of
`sse2`, if its associated events are subsets of the corresponding events
in `sse2`, and if `sse2` contains non-empty events, or neuron IDs in some
event, which do not belong to `sse1` (i.e., `sse1` and `sse2` are not
identical).
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is a subset of `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
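    Examples
    --------
    A minimal sketch with purely illustrative data:
    >>> sse1 = {(1, 2): {1, 2}}
    >>> sse2 = {(1, 2): {1, 2, 3}, (5, 6): {0, 4}}
    >>> synchronous_events_contained_in(sse1, sse2)
    True
    >>> synchronous_events_contained_in(sse2, sse1)
    False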
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
    # Return False if sse11 and sse22 are identical
if synchronous_events_identical(sse11, sse22):
return False
# Return False if any pixel in sse1 is not contained in sse2, or if any
# link of sse1 is not a subset of the corresponding link in sse2.
# Otherwise (if sse1 is a subset of sse2) continue
for pixel1, link1 in sse11.items():
if pixel1 not in sse22.keys():
return False
if not link1.issubset(sse22[pixel1]):
return False
# Check that sse1 is a STRICT subset of sse2, i.e. that sse2 contains at
# least one pixel or neuron id not present in sse1.
return not synchronous_events_identical(sse11, sse22)
def synchronous_events_contains_all(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` strictly contains `sse2`.
`sse1` strictly contains `sse2` if it contains all pixels of `sse2`, if all
associated events in `sse1` contain those in `sse2`, and if `sse1`
additionally contains other pixels / events not contained in `sse2`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` strictly contains `sse2`.
Notes
-----
`synchronous_events_contains_all(sse1, sse2)` is identical to
    `synchronous_events_contained_in(sse2, sse1)`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
return synchronous_events_contained_in(sse2, sse1)
def synchronous_events_overlap(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether the two SSEs overlap.
The SSEs overlap if they are not equal and none of them is a superset of
the other one but they are also not disjoint.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` and `sse2` overlap.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
contained_in = synchronous_events_contained_in(sse1, sse2)
contains_all = synchronous_events_contains_all(sse1, sse2)
identical = synchronous_events_identical(sse1, sse2)
is_disjoint = synchronous_events_no_overlap(sse1, sse2)
return not (contained_in or contains_all or identical or is_disjoint)
def _signals_t_start_stop(signals, t_start=None, t_stop=None):
if t_start is None:
t_start = _signals_same_attribute(signals, 't_start')
if t_stop is None:
t_stop = _signals_same_attribute(signals, 't_stop')
return t_start, t_stop
def _intersection_matrix(spiketrains, spiketrains_y, bin_size, t_start_x,
t_start_y, t_stop_x, t_stop_y, normalization=None):
if spiketrains_y is None:
spiketrains_y = spiketrains
# Compute the binned spike train matrices, along both time axes
spiketrains_binned = conv.BinnedSpikeTrain(
spiketrains, bin_size=bin_size,
t_start=t_start_x, t_stop=t_stop_x)
spiketrains_binned_y = conv.BinnedSpikeTrain(
spiketrains_y, bin_size=bin_size,
t_start=t_start_y, t_stop=t_stop_y)
# Compute imat by matrix multiplication
bsts_x = spiketrains_binned.sparse_matrix
bsts_y = spiketrains_binned_y.sparse_matrix
# Compute the number of spikes in each bin, for both time axes
# 'A1' property returns self as a flattened ndarray.
spikes_per_bin_x = bsts_x.sum(axis=0).A1
spikes_per_bin_y = bsts_y.sum(axis=0).A1
# Compute the intersection matrix imat
imat = bsts_x.T.dot(bsts_y).toarray().astype(np.float32)
for ii in range(bsts_x.shape[1]):
# Normalize the row
col_sum = bsts_x[:, ii].sum()
if normalization is None or col_sum == 0:
norm_coef = 1.
elif normalization == 'intersection':
norm_coef = np.minimum(
spikes_per_bin_x[ii], spikes_per_bin_y)
elif normalization == 'mean':
# geometric mean
norm_coef = np.sqrt(
spikes_per_bin_x[ii] * spikes_per_bin_y)
elif normalization == 'union':
norm_coef = np.array([(bsts_x[:, ii]
+ bsts_y[:, jj]).count_nonzero()
for jj in range(bsts_y.shape[1])])
else:
raise ValueError(
"Invalid parameter 'norm': {}".format(normalization))
# If normalization required, for each j such that bsts_y[j] is
# identically 0 the code above sets imat[:, j] to identically nan.
# Substitute 0s instead.
imat[ii, :] = np.divide(imat[ii, :], norm_coef,
out=np.zeros(imat.shape[1],
dtype=np.float32),
where=norm_coef != 0)
    # Return the intersection matrix
return imat
class ASSET(object):
"""
Analysis of Sequences of Synchronous EvenTs class.
Parameters
----------
spiketrains_i, spiketrains_j : list of neo.SpikeTrain
Input spike trains for the first and second time dimensions,
respectively, to compute the p-values from.
        If `spiketrains_j` is None, it's set to `spiketrains_i`.
bin_size : pq.Quantity, optional
The width of the time bins used to compute the probability matrix.
t_start_i, t_start_j : pq.Quantity, optional
The start time of the binning for the first and second axes,
respectively.
If None, the attribute `t_start` of the spike trains is used
(if the same for all spike trains).
Default: None
t_stop_i, t_stop_j : pq.Quantity, optional
The stop time of the binning for the first and second axes,
respectively.
If None, the attribute `t_stop` of the spike trains is used
(if the same for all spike trains).
Default: None
verbose : bool, optional
If True, print messages and show progress bar.
Default: True
Raises
------
ValueError
        If the `t_start` and `t_stop` times of the two axes are neither
        perfectly aligned nor fully disjoint.
"""
def __init__(self, spiketrains_i, spiketrains_j=None, bin_size=3 * pq.ms,
t_start_i=None, t_start_j=None, t_stop_i=None, t_stop_j=None,
verbose=True):
self.spiketrains_i = spiketrains_i
if spiketrains_j is None:
spiketrains_j = spiketrains_i
self.spiketrains_j = spiketrains_j
self.bin_size = bin_size
self.t_start_i, self.t_stop_i = _signals_t_start_stop(
spiketrains_i,
t_start=t_start_i,
t_stop=t_stop_i)
self.t_start_j, self.t_stop_j = _signals_t_start_stop(
spiketrains_j,
t_start=t_start_j,
t_stop=t_stop_j)
self.verbose = verbose and rank == 0
msg = 'The time intervals for x and y need to be either identical ' \
'or fully disjoint, but they are:\n' \
'x: ({}, {}) and y: ({}, {}).'.format(self.t_start_i,
self.t_stop_i,
self.t_start_j,
self.t_stop_j)
# the starts have to be perfectly aligned for the binning to work
# the stops can differ without impacting the binning
if self.t_start_i == self.t_start_j:
if not _quantities_almost_equal(self.t_stop_i, self.t_stop_j):
raise ValueError(msg)
elif (self.t_start_i < self.t_start_j < self.t_stop_i) \
or (self.t_start_i < self.t_stop_j < self.t_stop_i):
raise ValueError(msg)
# Compute the binned spike train matrices, along both time axes
self.spiketrains_binned_i = conv.BinnedSpikeTrain(
self.spiketrains_i, bin_size=self.bin_size,
t_start=self.t_start_i, t_stop=self.t_stop_i)
self.spiketrains_binned_j = conv.BinnedSpikeTrain(
self.spiketrains_j, bin_size=self.bin_size,
t_start=self.t_start_j, t_stop=self.t_stop_j)
@property
def x_edges(self):
"""
A Quantity array of `n+1` edges of the bins used for the horizontal
axis of the intersection matrix, where `n` is the number of bins that
time was discretized in.
"""
return self.spiketrains_binned_i.bin_edges.rescale(self.bin_size.units)
@property
def y_edges(self):
"""
A Quantity array of `n+1` edges of the bins used for the vertical axis
of the intersection matrix, where `n` is the number of bins that
time was discretized in.
"""
return self.spiketrains_binned_j.bin_edges.rescale(self.bin_size.units)
def is_symmetric(self):
"""
Returns
-------
bool
Whether the intersection matrix is symmetric or not.
See Also
--------
ASSET.intersection_matrix
"""
return _quantities_almost_equal(self.x_edges[0], self.y_edges[0])
def intersection_matrix(self, normalization=None):
"""
Generates the intersection matrix from a list of spike trains.
Given a list of `neo.SpikeTrain`, consider two binned versions of them
differing for the starting and ending times of the binning:
`t_start_x`, `t_stop_x`, `t_start_y` and `t_stop_y` respectively (the
time intervals can be either identical or completely disjoint). Then
calculate the intersection matrix `M` of the two binned data, where
`M[i,j]` is the overlap of bin `i` in the first binned data and bin `j`
in the second binned data (i.e., the number of spike trains spiking at
both bin `i` and bin `j`).
The matrix entries can be normalized to values between `0` and `1` via
different normalizations (see "Parameters" section).
Parameters
----------
normalization : {'intersection', 'mean', 'union'} or None, optional
The normalization type to be applied to each entry `M[i,j]` of the
intersection matrix `M`. Given the sets `s_i` and `s_j` of neuron
IDs in the bins `i` and `j` respectively, the normalization
coefficient can be:
            * None: no normalisation (raw overlap values)
* 'intersection': `len(intersection(s_i, s_j))`
* 'mean': `sqrt(len(s_1) * len(s_2))`
* 'union': `len(union(s_i, s_j))`
Default: None
Returns
-------
imat : (n,n) np.ndarray
The floating point intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
"""
imat = _intersection_matrix(self.spiketrains_i, self.spiketrains_j,
self.bin_size,
self.t_start_i, self.t_start_j,
self.t_stop_i, self.t_stop_j,
normalization=normalization)
return imat
def probability_matrix_montecarlo(self, n_surrogates, imat=None,
surrogate_method='dither_spikes',
surrogate_dt=None):
"""
Given a list of parallel spike trains, estimate the cumulative
probability of each entry in their intersection matrix by a Monte Carlo
approach using surrogate data.
        Unlike the analytical version (see
        :func:`ASSET.probability_matrix_analytical`), the Monte Carlo
        approach does not rely on the assumption of Poissonian spiking in
        the null hypothesis.
        The method produces surrogate spike trains (using one of several
        available methods, see the "Parameters" section) and calculates
        their intersection matrix `M`. For each entry `(i, j)`, the
        intersection CDF
`P[i, j]` is then given by:
.. centered:: P[i, j] = #(spike_train_surrogates such that
M[i, j] < I[i, j]) / #(spike_train_surrogates)
If `P[i, j]` is large (close to 1), `I[i, j]` is statistically
significant: the probability to observe an overlap equal to or larger
than `I[i, j]` under the null hypothesis is `1 - P[i, j]`, very small.
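        For instance (purely illustrative numbers), if 990 out of 1000
        surrogates yield an intersection value at `(i, j)` strictly lower
        than the observed `I[i, j]`, then `P[i, j] = 0.99` and the
        corresponding p-value is `1 - P[i, j] = 0.01`.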
Parameters
----------
n_surrogates : int
The number of spike train surrogates to generate for the bootstrap
procedure.
imat : (n,n) np.ndarray or None, optional
The floating point intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
If None, the output of :func:`ASSET.intersection_matrix` is used.
Default: None
surrogate_method : {'dither_spike_train', 'dither_spikes',
'jitter_spikes',
'randomise_spikes', 'shuffle_isis',
'joint_isi_dithering'}, optional
The method to generate surrogate spike trains. Refer to the
:func:`spike_train_surrogates.surrogates` documentation for more
information about each surrogate method. Note that some of these
methods need `surrogate_dt` parameter, others ignore it.
            Default: 'dither_spikes'
surrogate_dt : pq.Quantity, optional
For surrogate methods shifting spike times randomly around their
original time ('dither_spike_train', 'dither_spikes') or replacing
them randomly within a certain window ('jitter_spikes'),
`surrogate_dt` represents the size of that shift (window). For
other methods, `surrogate_dt` is ignored.
If None, it's set to `self.bin_size * 5`.
Default: None
Returns
-------
pmat : np.ndarray
The cumulative probability matrix. `pmat[i, j]` represents the
estimated probability of having an overlap between bins `i` and `j`
STRICTLY LOWER than the observed overlap, under the null hypothesis
of independence of the input spike trains.
Notes
-----
We recommend playing with `surrogate_dt` parameter to see how it
influences the result matrix. For this, refer to the ASSET tutorial.
See Also
--------
ASSET.probability_matrix_analytical : analytical derivation of the
matrix
"""
if imat is None:
# Compute the intersection matrix of the original data
imat = self.intersection_matrix()
if surrogate_dt is None:
surrogate_dt = self.bin_size * 5
symmetric = self.is_symmetric()
        # Generate surrogate spike trains as a list `surrogates`
# Compute the p-value matrix pmat; pmat[i, j] counts the fraction of
# surrogate data whose intersection value at (i, j) is lower than or
# equal to that of the original data
pmat = np.zeros(imat.shape, dtype=np.int32)
for surr_id in trange(n_surrogates, desc="pmat_bootstrap",
disable=not self.verbose):
if mpi_accelerated and surr_id % size != rank:
continue
surrogates = [spike_train_surrogates.surrogates(
st, n_surrogates=1,
method=surrogate_method,
dt=surrogate_dt,
decimals=None,
edges=True)[0]
for st in self.spiketrains_i]
if symmetric:
surrogates_y = surrogates
else:
surrogates_y = [spike_train_surrogates.surrogates(
st, n_surrogates=1, method=surrogate_method,
dt=surrogate_dt, decimals=None, edges=True)[0]
for st in self.spiketrains_j]
imat_surr = _intersection_matrix(surrogates, surrogates_y,
self.bin_size,
self.t_start_i, self.t_start_j,
self.t_stop_i, self.t_stop_j)
pmat += (imat_surr <= (imat - 1))
del imat_surr
if mpi_accelerated:
pmat = comm.allreduce(pmat, op=MPI.SUM)
pmat = pmat * 1. / n_surrogates
if symmetric:
np.fill_diagonal(pmat, 0.5)
return pmat
def probability_matrix_analytical(self, imat=None,
firing_rates_x='estimate',
firing_rates_y='estimate',
kernel_width=100 * pq.ms):
r"""
Given a list of spike trains, approximates the cumulative probability
of each entry in their intersection matrix.
The approximation is analytical and works under the assumptions that
the input spike trains are independent and Poisson. It works as
follows:
* Bin each spike train at the specified `bin_size`: this yields a
binary array of 1s (spike in bin) and 0s (no spike in bin;
clipping used);
* If required, estimate the rate profile of each spike train by
convolving the binned array with a boxcar kernel of user-defined
length;
        * For each neuron `k` and each pair of bins `i` and `j`, compute
          the probability :math:`p_{ijk}` that neuron `k` fired in both
          bins `i` and `j`.
        * Approximate the probability distribution of the intersection
          value at `(i, j)` by a Poisson distribution with mean parameter
          :math:`\lambda_{ij} = \sum_k p_{ijk}`,
          justified by Le Cam's approximation of a sum of independent
          Bernoulli random variables with a Poisson distribution (made
          explicit in the formula below).
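        Spelled out from the implementation below (the symbols
        :math:`\lambda_k(i)` for the firing rate of neuron `k` in bin `i`
        and :math:`\Delta` for the bin size are introduced here only for
        illustration):
        .. math::
            p_{ijk} = \left(1 - e^{-\lambda_k(i)\Delta}\right)
                      \left(1 - e^{-\lambda_k(j)\Delta}\right),
            \qquad
            \mathtt{pmat}[i, j] = F_{\mathrm{Poisson}}\left(
                \mathtt{imat}[i, j] - 1;\, \sum_k p_{ijk}\right),
        where :math:`F_{\mathrm{Poisson}}(\,\cdot\,;\mu)` denotes the
        Poisson cumulative distribution function with mean :math:`\mu`.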
Parameters
----------
imat : (n,n) np.ndarray or None, optional
The intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
If None, the output of :func:`ASSET.intersection_matrix` is used.
Default: None
firing_rates_x, firing_rates_y : list of neo.AnalogSignal or 'estimate'
If a list, `firing_rates[i]` is the firing rate of the spike train
`spiketrains[i]`.
If 'estimate', firing rates are estimated by simple boxcar kernel
convolution, with the specified `kernel_width`.
Default: 'estimate'
kernel_width : pq.Quantity, optional
The total width of the kernel used to estimate the rate profiles
when `firing_rates` is 'estimate'.
Default: 100 * pq.ms
Returns
-------
pmat : np.ndarray
The cumulative probability matrix. `pmat[i, j]` represents the
estimated probability of having an overlap between bins `i` and `j`
STRICTLY LOWER than the observed overlap, under the null hypothesis
of independence of the input spike trains.
"""
if imat is None:
# Compute the intersection matrix of the original data
imat = self.intersection_matrix()
symmetric = self.is_symmetric()
bsts_x_matrix = self.spiketrains_binned_i.to_bool_array()
if symmetric:
bsts_y_matrix = bsts_x_matrix
else:
bsts_y_matrix = self.spiketrains_binned_j.to_bool_array()
# Check that the nr. neurons is identical between the two axes
if bsts_x_matrix.shape[0] != bsts_y_matrix.shape[0]:
raise ValueError(
'Different number of neurons along the x and y axis!')
# Define the firing rate profiles
if firing_rates_x == 'estimate':
# If rates are to be estimated, create the rate profiles as
# Quantity objects obtained by boxcar-kernel convolution
fir_rate_x = self._rate_of_binned_spiketrain(bsts_x_matrix,
kernel_width)
elif isinstance(firing_rates_x, list):
# If rates provided as lists of AnalogSignals, create time slices
# for both axes, interpolate in the time bins of interest and
# convert to Quantity
fir_rate_x = _interpolate_signals(
firing_rates_x, self.spiketrains_binned_i.bin_edges[:-1],
self.verbose)
else:
raise ValueError(
'fir_rates_x must be a list or the string "estimate"')
if symmetric:
fir_rate_y = fir_rate_x
elif firing_rates_y == 'estimate':
fir_rate_y = self._rate_of_binned_spiketrain(bsts_y_matrix,
kernel_width)
elif isinstance(firing_rates_y, list):
# If rates provided as lists of AnalogSignals, create time slices
# for both axes, interpolate in the time bins of interest and
# convert to Quantity
fir_rate_y = _interpolate_signals(
firing_rates_y, self.spiketrains_binned_j.bin_edges[:-1],
self.verbose)
else:
raise ValueError(
'fir_rates_y must be a list or the string "estimate"')
        # For each neuron, compute the probability that it spikes in each bin
if self.verbose:
print('compute the prob. that each neuron fires in each pair of '
'bins...')
rate_bins_x = (fir_rate_x * self.bin_size).simplified.magnitude
spike_probs_x = 1. - np.exp(-rate_bins_x)
if symmetric:
spike_probs_y = spike_probs_x
else:
rate_bins_y = (fir_rate_y * self.bin_size).simplified.magnitude
spike_probs_y = 1. - np.exp(-rate_bins_y)
# Compute the matrix Mu[i, j] of parameters for the Poisson
# distributions which describe, at each (i, j), the approximated
# overlap probability. This matrix is just the sum of the probability
# matrices p_ijk computed for each neuron k:
# p_ijk is the probability that neuron k spikes in both bins i and j.
# The sum of outer products is equivalent to a dot product.
if self.verbose:
print(
"compute the probability matrix by Le Cam's approximation...")
Mu = spike_probs_x.T.dot(spike_probs_y)
# A straightforward implementation is:
# pmat_shape = spike_probs_x.shape[1], spike_probs_y.shape[1]
# Mu = np.zeros(pmat_shape, dtype=np.float64)
# for probx, proby in zip(spike_probs_x, spike_probs_y):
# Mu += np.outer(probx, proby)
# Compute the probability matrix obtained from imat using the Poisson
# pdfs
pmat = scipy.stats.poisson.cdf(imat - 1, Mu)
if symmetric:
            # Assign 0.5 to the elements along the main diagonal
if self.verbose:
print("substitute 0.5 to elements along the main diagonal...")
np.fill_diagonal(pmat, 0.5)
return pmat
def joint_probability_matrix(self, pmat, filter_shape, n_largest,
min_p_value=1e-5, precision='float',
cuda_threads=64, cuda_cwr_loops=32,
tolerance=1e-5):
"""
Map a probability matrix `pmat` to a joint probability matrix `jmat`,
where `jmat[i, j]` is the joint p-value of the largest neighbors of
`pmat[i, j]`.
The values of `pmat` are assumed to be uniformly distributed in the
        range [0, 1]. A rectangular kernel of shape `filter_shape=(l, w)` is
        centered on each entry `pmat[i, j]` and aligned along the diagonal
        on which `pmat[i, j]` lies; the `n_largest` values falling within
        the kernel are extracted and their joint p-value `jmat[i, j]` is
        computed.
Parameters
----------
pmat : np.ndarray
A square matrix, the output of
:func:`ASSET.probability_matrix_montecarlo` or
:func:`ASSET.probability_matrix_analytical`, of cumulative
probability values between 0 and 1. The values are assumed
to be uniformly distributed in the said range.
filter_shape : tuple of int
A pair of integers representing the kernel shape `(l, w)`.
n_largest : int
The number of the largest neighbors to collect for each entry in
`jmat`.
min_p_value : float, optional
The minimum p-value in range `[0, 1)` for individual entries in
`pmat`. Each `pmat[i, j]` is set to
            `min(pmat[i, j], 1 - min_p_value)` to avoid that a single highly
            significant value in `pmat` (extreme case: `pmat[i, j] = 1`)
            yields joint significance of itself and its neighbors.
Default: 1e-5
precision : {'float', 'double'}, optional
Single or double floating-point precision for the resulting `jmat`
matrix.
* `'float'`: 32 bits; the tolerance error is ``≲1e-3``.
* `'double'`: 64 bits; the tolerance error is ``<1e-5``.
Double floating-point precision is typically x4 times slower than
the single floating-point equivalent.
Default: 'float'
cuda_threads : int, optional
[CUDA/OpenCL performance parameter that does not influence the
result.]
            The number of CUDA/OpenCL threads per block (along the X axis),
            between 1 and 1024; used only if the CUDA or OpenCL backend is
            enabled. For performance reasons, it should be a multiple of 32.
            Older GPUs (e.g. Tesla K80) perform faster with `cuda_threads`
            larger than 64, while newer cards (e.g. Tesla T4, compute
            capability 6.x and above) work best with 32 threads.
Default: 64
cuda_cwr_loops : int, optional
[CUDA/OpenCL performance parameter that does not influence the
result.]
A positive integer that defines the number of fast
'combinations_with_replacement' loops to run to reduce branch
divergence. This parameter influences the performance when the
number of iterations is huge (`>1e8`); in such cases, increase
the value.
Default: 32
tolerance : float, optional
Tolerance is used to catch unexpected behavior of billions of
floating point additions, when the number of iterations is huge
or the data arrays are large. A warning is thrown when the
resulting joint prob. matrix values are outside of the acceptable
range ``[-tolerance, 1.0 + tolerance]``.
Default: 1e-5
Returns
-------
jmat : np.ndarray
The joint probability matrix associated to `pmat`.
Notes
-----
        1. By default, if CUDA is detected, CUDA acceleration is used. The
           CUDA backend is **~X1000** faster than the Python implementation.
           To turn off CUDA features, set the environment flag
           ``ELEPHANT_USE_CUDA`` to ``0``.
        2. Otherwise, if PyOpenCL is installed and detected, the PyOpenCL
           backend is used. It is **~X100** faster than the Python
           implementation.
To turn off OpenCL features, set the environment flag
``ELEPHANT_USE_OPENCL`` to ``0``.
When using PyOpenCL backend, make sure you've disabled GPU Hangcheck
as described in the `Intel GPU developers documentation
<https://software.intel.com/content/www/us/en/develop/
documentation/get-started-with-intel-oneapi-base-linux/top/
before-you-begin.html>`_. Do it with caution - using your built-in
Intel graphics card to perform computations may make the system
unresponsive until the compute program terminates.
"""
l, w = filter_shape
        # Find for each P_ij in the probability matrix its largest neighbors
        # and cap them at the maximum value 1 - min_p_value
pmat = np.asarray(pmat, dtype=np.float32)
pmat_neighb_obj = _PMatNeighbors(filter_shape=filter_shape,
n_largest=n_largest)
pmat_neighb = pmat_neighb_obj.compute(pmat)
pmat_neighb = np.minimum(pmat_neighb, 1. - min_p_value,
out=pmat_neighb)
# in order to avoid doing the same calculation multiple times:
# find all unique sets of values in pmat_neighb
# and store the corresponding indices
# flatten the second and third dimension in order to use np.unique
pmat_neighb = pmat_neighb.reshape(pmat.size, n_largest)
pmat_neighb, pmat_neighb_indices = np.unique(pmat_neighb, axis=0,
return_inverse=True)
# Compute the joint p-value matrix jpvmat
n = l * (1 + 2 * w) - w * (
w + 1) # number of entries covered by kernel
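        # Derivation of the count above: the kernel built by _PMatNeighbors
        # is an l x l matrix restricted to the band |row - col| <= w, which
        # has l*l - (l - w)*(l - w - 1) = l*(1 + 2*w) - w*(w + 1) nonzero
        # entries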
jsf = _JSFUniformOrderStat3D(n=n, d=pmat_neighb.shape[1],
precision=precision,
verbose=self.verbose,
cuda_threads=cuda_threads,
cuda_cwr_loops=cuda_cwr_loops,
tolerance=tolerance)
jpvmat = jsf.compute(u=pmat_neighb)
# restore the original shape using the stored indices
jpvmat = jpvmat[pmat_neighb_indices].reshape(pmat.shape)
return 1. - jpvmat
@staticmethod
def mask_matrices(matrices, thresholds):
"""
Given a list of `matrices` and a list of `thresholds`, return a boolean
matrix `B` ("mask") such that `B[i,j]` is True if each input matrix in
the list strictly exceeds the corresponding threshold at that position.
        If multiple matrices are passed along with only one threshold, the
        same threshold is applied to all matrices.
Parameters
----------
matrices : list of np.ndarray
The matrices which are compared to the respective thresholds to
build the mask. All matrices must have the same shape.
Typically, it is a list `[pmat, jmat]`, i.e., the (cumulative)
probability and joint probability matrices.
thresholds : float or list of float
The significance thresholds for each matrix in `matrices`.
Returns
-------
mask : np.ndarray
Boolean mask matrix with the shape of the input matrices.
Raises
------
ValueError
If `matrices` or `thresholds` is an empty list.
If `matrices` and `thresholds` have different lengths.
See Also
--------
ASSET.probability_matrix_montecarlo : for `pmat` generation
ASSET.probability_matrix_analytical : for `pmat` generation
ASSET.joint_probability_matrix : for `jmat` generation
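        Examples
        --------
        A minimal sketch with a single made-up matrix (illustrative only):
        >>> import numpy as np
        >>> mat = np.array([[0.1, 0.9],
        ...                 [0.8, 0.2]])
        >>> ASSET.mask_matrices([mat], thresholds=0.5)
        array([[False,  True],
               [ True, False]])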
"""
if len(matrices) == 0:
raise ValueError("Empty list of matrices")
if isinstance(thresholds, float):
thresholds = np.full(shape=len(matrices), fill_value=thresholds)
if len(matrices) != len(thresholds):
raise ValueError(
'`matrices` and `thresholds` must have same length')
mask = np.ones_like(matrices[0], dtype=bool)
for (mat, thresh) in zip(matrices, thresholds):
mask &= mat > thresh
# Replace nans, coming from False * np.inf, with zeros
mask[np.isnan(mask)] = False
return mask
@staticmethod
def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors,
stretch, working_memory=None):
r"""
Given a matrix `mask_matrix`, replaces its positive elements with
integers representing different cluster IDs. Each cluster comprises
close-by elements.
In ASSET analysis, `mask_matrix` is a thresholded ("masked") version
of the intersection matrix `imat`, whose values are those of `imat`
only if considered statistically significant, and zero otherwise.
A cluster is built by pooling elements according to their distance,
via the DBSCAN algorithm (see `sklearn.cluster.DBSCAN` class). Elements
        form a neighbourhood if at least one of them has a distance not
        larger than `max_distance` from each of the others, and if the
        neighbourhood contains at least `min_neighbors` elements.
        Overlapping neighbourhoods form a cluster:
* Clusters are assigned integers from `1` to the total number `k`
of clusters;
* Unclustered ("isolated") positive elements of `mask_matrix` are
assigned value `-1`;
* Non-positive elements are assigned the value `0`.
The distance between the positions of two positive elements in
        `mask_matrix` is given by a Euclidean metric which is stretched the
        more the two positions deviate from the 45 degree direction (the
        main diagonal direction), with maximal stretching along the
        anti-diagonal. Specifically, the Euclidean distance between positions
`(i1, j1)` and `(i2, j2)` is stretched by a factor
.. math::
1 + (\mathtt{stretch} - 1.) *
\left|\sin((\pi / 4) - \theta)\right|,
where :math:`\theta` is the angle between the pixels and the 45 degree
direction. The stretching factor thus varies between 1 and `stretch`.
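        For example, with ``stretch=10`` two elements aligned along the
        anti-diagonal (:math:`\theta = 135^\circ`, hence
        :math:`\left|\sin((\pi / 4) - \theta)\right| = 1`) have their
        Euclidean distance multiplied by 10, whereas two elements aligned
        along the main diagonal keep their plain Euclidean distance.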
Parameters
----------
mask_matrix : np.ndarray
The boolean matrix, whose elements with positive values are to be
clustered. The output of :func:`ASSET.mask_matrices`.
max_distance : float
The maximum distance between two elements in `mask_matrix` to be
a part of the same neighbourhood in the DBSCAN algorithm.
min_neighbors : int
The minimum number of elements to form a neighbourhood.
stretch : float
The stretching factor of the euclidean metric for elements aligned
along the 135 degree direction (anti-diagonal). The actual
stretching increases from 1 to `stretch` as the direction of the
two elements moves from the 45 to the 135 degree direction.
`stretch` must be greater than 1.
working_memory : int or None, optional
The sought maximum memory in MiB for temporary distance matrix
chunks. When None (default), no chunking is performed. This
parameter is passed directly to the
``sklearn.metrics.pairwise_distances_chunked`` function and has
no influence on the resulting matrix; it only controls the
memory vs. speed trade-off.
Default: None
Returns
-------
cluster_mat : np.ndarray
A matrix with the same shape of `mask_matrix`, each of whose
elements is either:
* a positive integer (cluster ID) if the element is part of a
cluster;
* `0` if the corresponding element in `mask_matrix` is
non-positive;
* `-1` if the element does not belong to any cluster.
See Also
--------
sklearn.cluster.DBSCAN
"""
# Don't do anything if mat is identically zero
if np.all(mask_matrix == 0):
return mask_matrix
# Collect the coordinates of the significant (positive) pixels of mask_matrix
xpos_sgnf, ypos_sgnf = np.where(mask_matrix > 0)
# Compute the matrix D[i, j] of euclidean distances between pixels i
# and j
try:
D = _stretched_metric_2d(
xpos_sgnf, ypos_sgnf, stretch=stretch, ref_angle=45,
working_memory=working_memory
)
except MemoryError as err:
raise MemoryError("Set 'working_memory=100' or another value to "
"chunk the data") from err
# Cluster positions of significant pixels via dbscan
core_samples, config = dbscan(
D, eps=max_distance, min_samples=min_neighbors,
metric='precomputed')
# Construct the clustered matrix, where each element has value
# * i = 1 to k if it belongs to a cluster i,
# * 0 if it is not significant,
# * -1 if it is significant but does not belong to any cluster
cluster_mat = np.zeros_like(mask_matrix, dtype=np.int32)
cluster_mat[xpos_sgnf, ypos_sgnf] = \
config * (config == -1) + (config + 1) * (config >= 0)
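# Note: `dbscan` labels clusters 0..k-1 and marks noise points with -1; the
# expression above shifts cluster labels up by one while keeping -1, e.g.
# labels [-1, 0, 0, 2] become [-1, 1, 1, 3], matching the convention stated
# in the docstring (cluster IDs start at 1, isolated pixels get -1).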
return cluster_mat
def extract_synchronous_events(self, cmat, ids=None):
"""
Given a list of spike trains, a bin size, and a clustered
intersection matrix obtained from those spike trains via ASSET
analysis, extracts the sequences of synchronous events (SSEs)
corresponding to clustered elements in the cluster matrix.
Parameters
----------
cmat : (n,n) np.ndarray
The cluster matrix, the output of
:func:`ASSET.cluster_matrix_entries`.
ids : list, optional
A list of spike train IDs. If provided, `ids[i]` is the identity
of `spiketrains[i]`. If None, the IDs `0,1,...,n-1` are used.
Default: None
Returns
-------
sse_dict : dict
A dictionary `D` of SSEs, where each SSE is a sub-dictionary `Dk`,
`k=1,...,K`, where `K` is the max positive integer in `cmat` (i.e.,
the total number of clusters in `cmat`):
.. centered:: D = {1: D1, 2: D2, ..., K: DK}
Each sub-dictionary `Dk` represents the k-th diagonal structure
(i.e., the k-th cluster) in `cmat`, and is of the form
.. centered:: Dk = {(i1, j1): S1, (i2, j2): S2, ..., (iL, jL): SL}.
The keys `(i, j)` represent the positions (time bin IDs) of all
elements in `cmat` that compose the SSE (i.e., that take value `k`
and therefore belong to the same cluster), and the values `Sk` are
sets of neuron IDs representing a repeated synchronous event (i.e.,
spiking at time bins `i` and `j`).
"""
nr_worms = cmat.max() # number of different clusters ("worms") in cmat
if nr_worms <= 0:
return {}
# Compute the transactions associated to the two binnings
tracts_x = _transactions(
self.spiketrains_i, bin_size=self.bin_size, t_start=self.t_start_i,
t_stop=self.t_stop_i,
ids=ids)
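# Each transaction is the collection of neuron IDs (taken from `ids`) that
# fire in the corresponding time bin of the respective binning.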
if self.spiketrains_j is self.spiketrains_i:
diag_id = 0
tracts_y = tracts_x
else:
if self.is_symmetric():
diag_id = 0
tracts_y = tracts_x
else:
diag_id = None
tracts_y = _transactions(
self.spiketrains_j, bin_size=self.bin_size,
t_start=self.t_start_j, t_stop=self.t_stop_j, ids=ids)
# Reconstruct each worm, link by link
sse_dict = {}
for k in range(1, nr_worms + 1): # for each worm
# worm k is a dict of links (one entry per pixel belonging to cluster k)
worm_k = {}
pos_worm_k = np.array(
np.where(cmat == k)).T # position of all links
# if no link lies on the reference diagonal
if all([y - x != diag_id for (x, y) in pos_worm_k]):
for bin_x, bin_y in pos_worm_k: # for each link
# reconstruct the link
link_l = set(tracts_x[bin_x]).intersection(
tracts_y[bin_y])
# and assign it to its pixel
worm_k[(bin_x, bin_y)] = link_l
sse_dict[k] = worm_k
return sse_dict
def _rate_of_binned_spiketrain(self, binned_spiketrains, kernel_width):
"""
Calculate the rate of binned spiketrains using convolution with
a boxcar kernel.
"""
if self.verbose:
print('compute rates by boxcar-kernel convolution...')
# Create the boxcar kernel and convolve it with the binned spike trains
k = int((kernel_width / self.bin_size).simplified.item())
kernel = np.full(k, fill_value=1. / k)
rate = np.vstack([np.convolve(bst, kernel, mode='same')
for bst in binned_spiketrains])
# The convolution results in an array decreasing at the borders due
# to absence of spikes beyond the borders. Replace the first and last
# (k//2) elements with the (k//2)-th / (n-k//2)-th ones, respectively
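# (e.g. with k = 5, k2 = 2: the first two bins take the value of bin 2,
# the last two that of bin n - 3)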
k2 = k // 2
for i in range(rate.shape[0]):
rate[i, :k2] = rate[i, k2]
rate[i, -k2:] = rate[i, -k2 - 1]
# Multiply the firing rates by the proper unit
rate = rate * (1. / self.bin_size).rescale('Hz')
return rate
| bsd-3-clause | 1,215,215,558,205,362,000 | 38.949573 | 79 | 0.58162 | false |
3nids/QGIS | tests/src/python/test_qgsdelimitedtextprovider_wanted.py | 12 | 73024 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsdelimitedtextprovider_wanted.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Chris Crook
Email : ccrook at linz dot govt dot nz
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Chris Crook'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Chris Crook'
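# Each test_* function below returns the expected ("wanted") outcome of one
# delimited-text provider test: the layer URI, the expected field types and
# geometry type, the expected feature attributes keyed by feature id, and the
# log messages the provider should emit. Keys above 1000 appear to encode
# repeated requests on the same layer (request_index * 1000 + fid), e.g. key
# 1002 is feature #fid 2 as returned by request 1 (an interpretation inferred
# from the data, not stated in the original file).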
def test_002_load_csv_file():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Quoted newlines',
'data': 'Line 1\nLine 2\n\nLine 4',
'info': 'No data',
'field_5': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
9: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_003_field_naming():
wanted = {}
wanted['uri'] = 'file://testfields.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Generation of field names',
'data': 'Some data',
'field_4': 'Some info',
'data_2': 'NULL',
'28': 'NULL',
'24.5': 'NULL',
'field_3_1': 'NULL',
'data_1': 'NULL',
'field_10': 'NULL',
'field_11': 'NULL',
'field_12': 'last data',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_004_max_fields():
wanted = {}
wanted['uri'] = 'file://testfields.csv?geomType=none&maxFields=7&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Generation of field names',
'data': 'Some data',
'field_4': 'Some info',
'data_1': 'NULL',
'28': 'NULL',
'24.5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_005_load_whitespace():
wanted = {}
wanted['uri'] = 'file://test.space?geomType=none&type=whitespace'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Simple_whitespace_file',
'data': 'data1',
'info': 'info1',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Whitespace_at_start_of_line',
'data': 'data2',
'info': 'info2',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Tab_whitespace',
'data': 'data3',
'info': 'info3',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Multiple_whitespace_characters',
'data': 'data4',
'info': 'info4',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
6: {
'id': '5',
'description': 'Extra_fields',
'data': 'data5',
'info': 'info5',
'field_5': 'message5',
'field_6': 'rubbish5',
'#fid': 6,
'#geometry': 'None',
},
7: {
'id': '6',
'description': 'Missing_fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 7,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_006_quote_escape():
wanted = {}
wanted['uri'] = 'file://test.pipe?geomType=none"e="&delimiter=|&escape=\\'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Using pipe delimiter',
'data': 'data 1',
'info': 'info 1',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Using backslash escape on pipe',
'data': 'data 2 | piped',
'info': 'info2',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Backslash escaped newline',
'data': 'data3 \nline2 \nline3',
'info': 'info3',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
7: {
'id': '4',
'description': 'Empty field',
'data': 'NULL',
'info': 'info4',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 7,
'#geometry': 'None',
},
8: {
'id': '5',
'description': 'Quoted field',
'data': 'More | piped data',
'info': 'info5',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 8,
'#geometry': 'None',
},
9: {
'id': '6',
'description': 'Escaped quote',
'data': 'Field "citation" ',
'info': 'info6',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': '7',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
11: {
'id': '8',
'description': 'Extra fields',
'data': 'data8',
'info': 'info8',
'field_5': 'message8',
'field_6': 'more',
'#fid': 11,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_007_multiple_quote():
wanted = {}
wanted['uri'] = 'file://test.quote?geomType=none"e=\'"&type=csv&escape="\''
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Multiple quotes 1',
'data': 'Quoted,data1',
'info': 'info1',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Multiple quotes 2',
'data': 'Quoted,data2',
'info': 'info2',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Leading and following whitespace',
'data': 'Quoted, data3',
'info': 'info3',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Embedded quotes 1',
'data': 'Quoted \'\'"\'\' data4',
'info': 'info4',
'#fid': 5,
'#geometry': 'None',
},
6: {
'id': '5',
'description': 'Embedded quotes 2',
'data': 'Quoted \'""\' data5',
'info': 'info5',
'#fid': 6,
'#geometry': 'None',
},
10: {
'id': '9',
'description': 'Final record',
'data': 'date9',
'info': 'info9',
'#fid': 10,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file test.quote',
'3 records discarded due to invalid format',
'The following lines were not loaded into QGIS due to errors:',
'Invalid record format at line 7',
'Invalid record format at line 8',
'Invalid record format at line 9',
]
return wanted
def test_008_badly_formed_quotes():
wanted = {}
wanted['uri'] = 'file://test.badquote?geomType=none"e="&type=csv&escape="'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
4: {
'id': '3',
'description': 'Recovered after unclosed quore',
'data': 'Data ok',
'info': 'inf3',
'#fid': 4,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file test.badquote',
'2 records discarded due to invalid format',
'The following lines were not loaded into QGIS due to errors:',
'Invalid record format at line 2',
'Invalid record format at line 5',
]
return wanted
def test_009_skip_lines():
wanted = {}
wanted['uri'] = 'file://test2.csv?geomType=none&skipLines=2&type=csv&useHeader=no'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
3: {
'id': '3',
'description': 'Less data',
'field_1': '3',
'field_2': 'Less data',
'field_3': 'data3',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_010_read_coordinates():
wanted = {}
wanted['uri'] = 'file://testpt.csv?yField=geom_y&xField=geom_x&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'double', 'double']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic point',
'geom_x': '10.5',
'geom_y': '20.82',
'#fid': 2,
'#geometry': 'Point (10.5 20.82)',
},
3: {
'id': '2',
'description': 'Integer point',
'geom_x': '11.0',
'geom_y': '22.0',
'#fid': 3,
'#geometry': 'Point (11 22)',
},
5: {
'id': '4',
'description': 'Final point',
'geom_x': '13.0',
'geom_y': '23.0',
'#fid': 5,
'#geometry': 'Point (13 23)',
},
}
wanted['log'] = [
'Errors in file testpt.csv',
'1 records discarded due to invalid geometry definitions',
'The following lines were not loaded into QGIS due to errors:',
'Invalid X or Y fields at line 4',
]
return wanted
def test_011_read_wkt():
wanted = {}
wanted['uri'] = 'file://testwkt.csv?delimiter=|&type=csv&wktField=geom_wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Point wkt',
'#fid': 2,
'#geometry': 'Point (10 20)',
},
3: {
'id': '2',
'description': 'Multipoint wkt',
'#fid': 3,
'#geometry': 'MultiPoint ((10 20),(11 21))',
},
9: {
'id': '8',
'description': 'EWKT prefix',
'#fid': 9,
'#geometry': 'Point (10 10)',
},
10: {
'id': '9',
'description': 'Informix prefix',
'#fid': 10,
'#geometry': 'Point (10 10)',
},
11: {
'id': '10',
'description': 'Measure in point',
'#fid': 11,
'#geometry': 'PointM (10 20 30)',
},
}
wanted['log'] = [
'Errors in file testwkt.csv',
'1 records discarded due to invalid geometry definitions',
'10 records discarded due to incompatible geometry types',
'The following lines were not loaded into QGIS due to errors:',
'Invalid WKT at line 8',
]
return wanted
def test_012_read_wkt_point():
wanted = {}
wanted['uri'] = 'file://testwkt.csv?geomType=point&delimiter=|&type=csv&wktField=geom_wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Point wkt',
'#fid': 2,
'#geometry': 'Point (10 20)',
},
3: {
'id': '2',
'description': 'Multipoint wkt',
'#fid': 3,
'#geometry': 'MultiPoint ((10 20),(11 21))',
},
9: {
'id': '8',
'description': 'EWKT prefix',
'#fid': 9,
'#geometry': 'Point (10 10)',
},
10: {
'id': '9',
'description': 'Informix prefix',
'#fid': 10,
'#geometry': 'Point (10 10)',
},
11: {
'id': '10',
'description': 'Measure in point',
'#fid': 11,
'#geometry': 'PointM (10 20 30)',
},
}
wanted['log'] = [
'Errors in file testwkt.csv',
'1 records discarded due to invalid geometry definitions',
'10 records discarded due to incompatible geometry types',
'The following lines were not loaded into QGIS due to errors:',
'Invalid WKT at line 8',
]
return wanted
def test_013_read_wkt_line():
wanted = {}
wanted['uri'] = 'file://testwkt.csv?geomType=line&delimiter=|&type=csv&wktField=geom_wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
4: {
'id': '3',
'description': 'Linestring wkt',
'#fid': 4,
'#geometry': 'LineString (10 20, 11 21)',
},
5: {
'id': '4',
'description': 'Multiline string wkt',
'#fid': 5,
'#geometry': 'MultiLineString ((10 20, 11 21), (20 30, 21 31))',
},
12: {
'id': '11',
'description': 'Measure in line',
'#fid': 12,
'#geometry': 'LineStringM (10 20 30, 11 21 31)',
},
13: {
'id': '12',
'description': 'Z in line',
'#fid': 13,
'#geometry': 'LineStringZ (10 20 30, 11 21 31)',
},
14: {
'id': '13',
'description': 'Measure and Z in line',
'#fid': 14,
'#geometry': 'LineStringZM (10 20 30 40, 11 21 31 41)',
},
15: {
'id': '14',
'description': 'CircularString',
'#fid': 15,
'#geometry': 'CircularString (268 415, 227 505, 227 406)',
},
17: {
'id': '16',
'description': 'CompoundCurve',
'#fid': 17,
'#geometry': 'CompoundCurve ((5 3, 5 13), CircularString(5 13, 7 15, 9 13), (9 13, 9 3), CircularString(9 3, 7 1, 5 3))',
},
}
wanted['log'] = [
'Errors in file testwkt.csv',
'1 records discarded due to invalid geometry definitions',
'8 records discarded due to incompatible geometry types',
'The following lines were not loaded into QGIS due to errors:',
'Invalid WKT at line 8',
]
return wanted
def test_014_read_wkt_polygon():
wanted = {}
wanted['uri'] = 'file://testwkt.csv?geomType=polygon&delimiter=|&type=csv&wktField=geom_wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 2
wanted['data'] = {
6: {
'id': '5',
'description': 'Polygon wkt',
'#fid': 6,
'#geometry': 'Polygon ((10 10,10 20,20 20,20 10,10 10),(14 14,14 16,16 16,14 14))',
},
7: {
'id': '6',
'description': 'MultiPolygon wkt',
'#fid': 7,
'#geometry': 'MultiPolygon (((10 10,10 20,20 20,20 10,10 10),(14 14,14 16,16 16,14 14)),((30 30,30 35,35 35,30 30)))',
},
16: {
'id': '15',
'description': 'CurvePolygon',
'#fid': 16,
'#geometry': 'CurvePolygon (CircularString (1 3, 3 5, 4 7, 7 3, 1 3))',
},
}
wanted['log'] = [
'Errors in file testwkt.csv',
'1 records discarded due to invalid geometry definitions',
'12 records discarded due to incompatible geometry types',
'The following lines were not loaded into QGIS due to errors:',
'Invalid WKT at line 8',
]
return wanted
def test_015_read_dms_xy():
wanted = {}
wanted['uri'] = 'file://testdms.csv?yField=lat&xField=lon&type=csv&xyDms=yes'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
3: {
'id': '1',
'description': 'Basic DMS string',
'lon': '1 5 30.6',
'lat': '35 51 20',
'#fid': 3,
'#geometry': 'Point (1.09183333 35.85555556)',
},
4: {
'id': '2',
'description': 'Basic DMS string 2',
'lon': '1 05 30.6005',
'lat': '035 51 20',
'#fid': 4,
'#geometry': 'Point (1.09183347 35.85555556)',
},
5: {
'id': '3',
'description': 'Basic DMS string 3',
'lon': '1 05 30.6',
'lat': '35 59 9.99',
'#fid': 5,
'#geometry': 'Point (1.09183333 35.98610833)',
},
7: {
'id': '4',
'description': 'Prefix sign 1',
'lon': 'n1 05 30.6',
'lat': 'e035 51 20',
'#fid': 7,
'#geometry': 'Point (1.09183333 35.85555556)',
},
8: {
'id': '5',
'description': 'Prefix sign 2',
'lon': 'N1 05 30.6',
'lat': 'E035 51 20',
'#fid': 8,
'#geometry': 'Point (1.09183333 35.85555556)',
},
9: {
'id': '6',
'description': 'Prefix sign 3',
'lon': 'N 1 05 30.6',
'lat': 'E 035 51 20',
'#fid': 9,
'#geometry': 'Point (1.09183333 35.85555556)',
},
10: {
'id': '7',
'description': 'Prefix sign 4',
'lon': 'S1 05 30.6',
'lat': 'W035 51 20',
'#fid': 10,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
11: {
'id': '8',
'description': 'Prefix sign 5',
'lon': '+1 05 30.6',
'lat': '+035 51 20',
'#fid': 11,
'#geometry': 'Point (1.09183333 35.85555556)',
},
12: {
'id': '9',
'description': 'Prefix sign 6',
'lon': '-1 05 30.6',
'lat': '-035 51 20',
'#fid': 12,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
14: {
'id': '10',
'description': 'Postfix sign 1',
'lon': '1 05 30.6n',
'lat': '035 51 20e',
'#fid': 14,
'#geometry': 'Point (1.09183333 35.85555556)',
},
15: {
'id': '11',
'description': 'Postfix sign 2',
'lon': '1 05 30.6N',
'lat': '035 51 20E',
'#fid': 15,
'#geometry': 'Point (1.09183333 35.85555556)',
},
16: {
'id': '12',
'description': 'Postfix sign 3',
'lon': '1 05 30.6 N',
'lat': '035 51 20 E',
'#fid': 16,
'#geometry': 'Point (1.09183333 35.85555556)',
},
17: {
'id': '13',
'description': 'Postfix sign 4',
'lon': '1 05 30.6S',
'lat': '035 51 20W',
'#fid': 17,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
18: {
'id': '14',
'description': 'Postfix sign 5',
'lon': '1 05 30.6+',
'lat': '035 51 20+',
'#fid': 18,
'#geometry': 'Point (1.09183333 35.85555556)',
},
19: {
'id': '15',
'description': 'Postfix sign 6',
'lon': '1 05 30.6-',
'lat': '035 51 20-',
'#fid': 19,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
21: {
'id': '16',
'description': 'Leading and trailing blanks 1',
'lon': ' 1 05 30.6',
'lat': '035 51 20 ',
'#fid': 21,
'#geometry': 'Point (1.09183333 35.85555556)',
},
22: {
'id': '17',
'description': 'Leading and trailing blanks 2',
'lon': ' N 1 05 30.6',
'lat': '035 51 20 E ',
'#fid': 22,
'#geometry': 'Point (1.09183333 35.85555556)',
},
24: {
'id': '18',
'description': 'Alternative characters for D,M,S',
'lon': '1d05m30.6s S',
'lat': "35d51'20",
'#fid': 24,
'#geometry': 'Point (-1.09183333 35.85555556)',
},
25: {
'id': '19',
'description': 'Degrees/minutes format',
'lon': '1 05.23',
'lat': '4 55.03',
'#fid': 25,
'#geometry': 'Point (1.08716667 4.91716667)',
},
}
wanted['log'] = [
'Errors in file testdms.csv',
'5 records discarded due to invalid geometry definitions',
'The following lines were not loaded into QGIS due to errors:',
'Invalid X or Y fields at line 27',
'Invalid X or Y fields at line 28',
'Invalid X or Y fields at line 29',
'Invalid X or Y fields at line 30',
'Invalid X or Y fields at line 31',
]
return wanted
def test_016_decimal_point():
wanted = {}
wanted['uri'] = 'file://testdp.csv?yField=geom_y&xField=geom_x&type=csv&delimiter=;&decimalPoint=,'
wanted['fieldTypes'] = ['integer', 'text', 'double', 'double', 'double', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Comma as decimal point 1',
'geom_x': '10.0',
'geom_y': '20.0',
'other': '30.0',
'text field': 'Field with , in it',
'#fid': 2,
'#geometry': 'Point (10 20)',
},
3: {
'id': '2',
'description': 'Comma as decimal point 2',
'geom_x': '12.0',
'geom_y': '25.003',
'other': '-38.55',
'text field': 'Plain text field',
'#fid': 3,
'#geometry': 'Point (12 25.003)',
},
}
wanted['log'] = []
return wanted
def test_017_regular_expression_1():
wanted = {}
wanted['uri'] = 'file://testre.txt?geomType=none&trimFields=Y&delimiter=RE(?:GEXP)?&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic regular expression test',
'data': 'data1',
'info': 'info',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Basic regular expression test 2',
'data': 'data2',
'info': 'info2',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_018_regular_expression_2():
wanted = {}
wanted['uri'] = 'file://testre.txt?geomType=none&trimFields=Y&delimiter=(RE)((?:GEXP)?)&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'RE': 'RE',
'GEXP': 'GEXP',
'description': 'RE',
'RE_1': 'RE',
'GEXP_1': 'GEXP',
'data': 'data1',
'RE_2': 'RE',
'GEXP_2': 'GEXP',
'info': 'info',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'RE': 'RE',
'GEXP': 'GEXP',
'description': 'RE',
'RE_1': 'RE',
'GEXP_1': '',
'data': 'data2',
'RE_2': 'RE',
'GEXP_2': '',
'info': 'info2',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_019_regular_expression_3():
wanted = {}
wanted['uri'] = 'file://testre2.txt?geomType=none&trimFields=Y&delimiter=^(.{5})(.{30})(.{5,})&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Anchored regexp',
'information': 'Some data',
'#fid': 2,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Anchored regexp recovered',
'information': 'Some data',
'#fid': 4,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file testre2.txt',
'1 records discarded due to invalid format',
'The following lines were not loaded into QGIS due to errors:',
'Invalid record format at line 3',
]
return wanted
def test_020_regular_expression_4():
wanted = {}
wanted['uri'] = 'file://testre3.txt?geomType=none&delimiter=x?&type=regexp'
wanted['fieldTypes'] = ['text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'f',
'description': 'i',
's': 'f',
'm': 'i',
'a': '.',
'l': '.',
'l_1': 'i',
'field_6': 'l',
'field_7': 'e',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_021_regular_expression_5():
wanted = {}
wanted['uri'] = 'file://testre3.txt?geomType=none&delimiter=\\b&type=regexp'
wanted['fieldTypes'] = ['text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'fi',
'description': '..',
'small': 'fi',
'field_2': '..',
'field_3': 'ile',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_022_utf8_encoded_file():
wanted = {}
wanted['uri'] = 'file://testutf8.csv?geomType=none&delimiter=|&type=csv&encoding=utf-8'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Correctly read UTF8 encoding',
'name': 'Field has \u0101cc\xe8nt\xe9d text',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_023_latin1_encoded_file():
wanted = {}
wanted['uri'] = 'file://testlatin1.csv?geomType=none&delimiter=|&type=csv&encoding=latin1'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Correctly read latin1 encoding',
'name': 'This test is \xa9',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_024_filter_rect_xy():
wanted = {}
wanted['uri'] = 'file://testextpt.txt?yField=y&delimiter=|&type=csv&xField=x'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'integer']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
10: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
1002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
1010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_025_filter_rect_wkt():
wanted = {}
wanted['uri'] = 'file://testextw.txt?delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_026_filter_fid():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
3: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
1009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
3003: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_027_filter_attributes():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'None',
'description': 'Basic unquoted record',
'data': 'None',
'info': 'Some info',
'field_5': 'None',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': 'None',
'description': 'Quoted field',
'data': 'None',
'info': 'Unquoted',
'field_5': 'None',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': 'None',
'description': 'Escaped quotes',
'data': 'None',
'info': 'Unquoted',
'field_5': 'None',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': 'None',
'description': 'Quoted newlines',
'data': 'None',
'info': 'No data',
'field_5': 'None',
'#fid': 5,
'#geometry': 'None',
},
9: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': 'None',
'description': 'Missing fields',
'data': 'None',
'info': 'NULL',
'field_5': 'None',
'#fid': 10,
'#geometry': 'None',
},
1009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
2009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
3009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
4009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
5009: {
'id': 'None',
'description': 'None',
'data': 'None',
'info': 'None',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_028_substring_test():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv&subset=id%20%25%202%20%3D%201'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
9: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_029_file_watcher():
wanted = {}
wanted['uri'] = 'file://file?geomType=none&type=csv&watchFile=yes'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
3: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
1002: {
'id': '1',
'description': 'rabbit',
'name': 'rabbit',
'#fid': 2,
'#geometry': 'None',
},
1003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
4003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
5004: {
'id': '3',
'description': 'tiger',
'name': 'tiger',
'#fid': 4,
'#geometry': 'None',
},
6002: {
'id': '1',
'description': 'rabbit',
'name': 'rabbit',
'#fid': 2,
'#geometry': 'None',
},
6003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
6004: {
'id': '3',
'description': 'tiger',
'name': 'tiger',
'#fid': 4,
'#geometry': 'None',
},
9002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
10002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
10003: {
'id': '6',
'description': 'mole',
'name': 'mole',
'#fid': 3,
'#geometry': 'None',
},
10004: {
'id': '7',
'description': 'badger',
'name': 'badger',
'#fid': 4,
'#geometry': 'None',
},
16002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = [
'Request 2 did not return any data',
'Request 7 did not return any data',
'Request 11 did not return any data',
'Request 13 did not return any data',
'Request 14 did not return any data',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
]
return wanted
def test_030_filter_rect_xy_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextpt.txt?spatialIndex=Y&yField=y&delimiter=|&type=csv&xField=x'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'integer']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
10: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
1002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
1010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
3002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
3003: {
'id': '2',
'description': 'Outside 1',
'x': '5',
'y': '35',
'#fid': 3,
'#geometry': 'Point (5 35)',
},
3004: {
'id': '3',
'description': 'Outside 2',
'x': '5',
'y': '55',
'#fid': 4,
'#geometry': 'Point (5 55)',
},
3005: {
'id': '4',
'description': 'Outside 3',
'x': '15',
'y': '55',
'#fid': 5,
'#geometry': 'Point (15 55)',
},
3006: {
'id': '5',
'description': 'Outside 4',
'x': '35',
'y': '55',
'#fid': 6,
'#geometry': 'Point (35 55)',
},
3007: {
'id': '6',
'description': 'Outside 5',
'x': '35',
'y': '45',
'#fid': 7,
'#geometry': 'Point (35 45)',
},
3008: {
'id': '7',
'description': 'Outside 7',
'x': '35',
'y': '25',
'#fid': 8,
'#geometry': 'Point (35 25)',
},
3009: {
'id': '8',
'description': 'Outside 8',
'x': '15',
'y': '25',
'#fid': 9,
'#geometry': 'Point (15 25)',
},
3010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
4002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
4003: {
'id': '2',
'description': 'Outside 1',
'x': '5',
'y': '35',
'#fid': 3,
'#geometry': 'Point (5 35)',
},
4004: {
'id': '3',
'description': 'Outside 2',
'x': '5',
'y': '55',
'#fid': 4,
'#geometry': 'Point (5 55)',
},
4005: {
'id': '4',
'description': 'Outside 3',
'x': '15',
'y': '55',
'#fid': 5,
'#geometry': 'Point (15 55)',
},
4006: {
'id': '5',
'description': 'Outside 4',
'x': '35',
'y': '55',
'#fid': 6,
'#geometry': 'Point (35 55)',
},
4007: {
'id': '6',
'description': 'Outside 5',
'x': '35',
'y': '45',
'#fid': 7,
'#geometry': 'Point (35 45)',
},
4008: {
'id': '7',
'description': 'Outside 7',
'x': '35',
'y': '25',
'#fid': 8,
'#geometry': 'Point (35 25)',
},
4009: {
'id': '8',
'description': 'Outside 8',
'x': '15',
'y': '25',
'#fid': 9,
'#geometry': 'Point (15 25)',
},
4010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_031_filter_rect_wkt_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextw.txt?spatialIndex=Y&delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
3003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
3004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
3005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
3006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
4002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
4004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
4005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
4006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
4007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_032_filter_rect_wkt_create_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextw.txt?delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
1007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
3002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
3004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
3005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
3006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
4002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
4006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
6002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
6003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
6004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
6005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
6007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
7002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
7003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
7004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
7005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
7006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
}
wanted['log'] = [
'Request 5 did not return any data',
]
return wanted
def test_033_reset_subset_string():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Quoted newlines',
'data': 'Line 1\nLine 2\n\nLine 4',
'info': 'No data',
'field_5': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
9: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
2002: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
2004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
2009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
4010: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
6004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
8002: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
8004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
8009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
10003: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
10005: {
'id': '4',
'description': 'Quoted newlines',
'data': 'Line 1\nLine 2\n\nLine 4',
'info': 'No data',
'field_5': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
10010: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_034_csvt_file():
wanted = {}
wanted['uri'] = 'file://testcsvt.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'text', 'text', 'text', 'text', 'text', 'text', 'longlong', 'longlong']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'fint': '1',
'freal': '1.2',
'fstr': '1',
'fstr_1': 'text',
'fdatetime': '2015-03-02T12:30:00',
'fdate': '2014-12-30',
'ftime': '23:55',
'flong': '-456',
'flonglong': '-678',
'field_12': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'fint': '3',
'freal': '1.5',
'fstr': '99',
'fstr_1': '23.5',
'fdatetime': '80',
'fdate': '2015-03-28',
'ftime': '2014-12-30',
'flong': '01:55',
'flonglong': '9189304972279762602',
'field_12': '-3123724580211819352',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_035_csvt_file2():
wanted = {}
wanted['uri'] = 'file://testcsvt2.txt?geomType=none&delimiter=|&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'integer']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_036_csvt_file_invalid_types():
wanted = {}
wanted['uri'] = 'file://testcsvt3.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'times',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file testcsvt3.csv',
'File type string in testcsvt3.csvt is not correctly formatted',
]
return wanted
def test_037_csvt_file_invalid_file():
wanted = {}
wanted['uri'] = 'file://testcsvt4.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'times',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_038_type_inference():
wanted = {}
wanted['uri'] = 'file://testtypes.csv?yField=lat&xField=lon&type=csv'
wanted['fieldTypes'] = ['text', 'double', 'double', 'text', 'text', 'integer', 'longlong', 'double', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': 'line1',
'description': '1.0',
'lon': '1.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': '0',
'longlong': '0',
'real': 'NULL',
'text2': '1',
'#fid': 2,
'#geometry': 'Point (1 1)',
},
3: {
'id': 'line2',
'description': '1.0',
'lon': '1.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1',
'int': 'NULL',
'longlong': '9189304972279762602',
'real': '1.3',
'text2': '-4',
'#fid': 3,
'#geometry': 'Point (1 5)',
},
4: {
'id': 'line3',
'description': '5.0',
'lon': '5.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1xx',
'int': '2',
'longlong': '345',
'real': '2.0',
'text2': '1x',
'#fid': 4,
'#geometry': 'Point (5 5)',
},
5: {
'id': 'line4',
'description': '5.0',
'lon': '5.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'A string',
'int': '-3456',
'longlong': '-3123724580211819352',
'real': '-123.56',
'text2': 'NULL',
'#fid': 5,
'#geometry': 'Point (5 1)',
},
6: {
'id': 'line5',
'description': '3.0',
'lon': '3.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': 'NULL',
'longlong': 'NULL',
'real': '0.00023',
'text2': '23',
'#fid': 6,
'#geometry': 'Point (3 1)',
},
7: {
'id': 'line6',
'description': '1.0',
'lon': '1.0',
'lat': '3.0',
'empty': 'NULL',
'text': '1.5',
'int': '9',
'longlong': '42',
'real': '99.0',
'text2': '0',
'#fid': 7,
'#geometry': 'Point (1 3)',
},
}
wanted['log'] = []
return wanted
def test_039_issue_13749():
wanted = {}
wanted['uri'] = 'file://test13749.csv?yField=geom_y&xField=geom_x&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'double', 'double']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'No geom',
'geom_x': 'NULL',
'geom_y': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Point1',
'geom_x': '11.0',
'geom_y': '22.0',
'#fid': 3,
'#geometry': 'Point (11 22)',
},
4: {
'id': '3',
'description': 'Point2',
'geom_x': '15.0',
'geom_y': '23.0',
'#fid': 4,
'#geometry': 'Point (15 23)',
},
5: {
'id': '4',
'description': 'Point3',
'geom_x': '13.0',
'geom_y': '23.0',
'#fid': 5,
'#geometry': 'Point (13 23)',
},
}
wanted['log'] = [
'Errors in file test13749.csv',
'1 records have missing geometry definitions',
]
return wanted
def test_040_issue_14666():
wanted = {}
wanted['uri'] = 'file://test14666.csv?yField=y&xField=x&type=csv&delimiter=\\t'
wanted['fieldTypes'] = ['integer', 'double', 'double']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': '7.15417',
'x': '7.15417',
'y': '50.680622',
'#fid': 2,
'#geometry': 'Point (7.1541699999999997 50.68062199999999962)',
},
3: {
'id': '2',
'description': '7.119219',
'x': '7.119219',
'y': '50.739814',
'#fid': 3,
'#geometry': 'Point (7.11921900000000019 50.73981400000000264)',
},
4: {
'id': '3',
'description': 'NULL',
'x': 'NULL',
'y': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'NULL',
'x': 'NULL',
'y': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
6: {
'id': '5',
'description': '7.129229',
'x': '7.129229',
'y': '50.703692',
'#fid': 6,
'#geometry': 'Point (7.12922899999999959 50.70369199999999665)',
},
}
wanted['log'] = [
'Errors in file test14666.csv',
'2 records have missing geometry definitions',
]
return wanted
def test_041_no_detect_type():
wanted = {}
wanted['uri'] = 'file://testtypes.csv?yField=lat&xField=lon&type=csv&detectTypes=no'
wanted['fieldTypes'] = ['text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': 'line1',
'description': '1.0',
'lon': '1.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': '0',
'longlong': '0',
'real': 'NULL',
'text2': '1',
'#fid': 2,
'#geometry': 'Point (1 1)',
},
3: {
'id': 'line2',
'description': '1.0',
'lon': '1.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1',
'int': 'NULL',
'longlong': '9189304972279762602',
'real': '1.3',
'text2': '-4',
'#fid': 3,
'#geometry': 'Point (1 5)',
},
4: {
'id': 'line3',
'description': '5.0',
'lon': '5.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1xx',
'int': '2',
'longlong': '345',
'real': '2',
'text2': '1x',
'#fid': 4,
'#geometry': 'Point (5 5)',
},
5: {
'id': 'line4',
'description': '5.0',
'lon': '5.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'A string',
'int': '-3456',
'longlong': '-3123724580211819352',
'real': '-123.56',
'text2': 'NULL',
'#fid': 5,
'#geometry': 'Point (5 1)',
},
6: {
'id': 'line5',
'description': '3.0',
'lon': '3.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': 'NULL',
'longlong': 'NULL',
'real': '23e-5',
'text2': '23',
'#fid': 6,
'#geometry': 'Point (3 1)',
},
7: {
'id': 'line6',
'description': '1.0',
'lon': '1.0',
'lat': '3.0',
'empty': 'NULL',
'text': '1.5',
'int': '9',
'longlong': '42',
'real': '99',
'text2': '0',
'#fid': 7,
'#geometry': 'Point (1 3)',
},
}
wanted['log'] = [
]
return wanted
def test_042_no_detect_types_csvt():
wanted = {}
wanted['uri'] = 'file://testcsvt.csv?geomType=none&type=csv&detectTypes=no'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'fint': '1',
'freal': '1.2',
'fstr': '1',
'fstr_1': 'text',
'fdatetime': '2015-03-02T12:30:00',
'fdate': '2014-12-30',
'ftime': '23:55',
'flong': '-456',
'flonglong': '-678',
'field_12': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'fint': '3',
'freal': '1.5',
'fstr': '99',
'fstr_1': '23.5',
'fdatetime': '80',
'fdate': '2015-03-28',
'ftime': '2014-12-30',
'flong': '01:55',
'flonglong': '9189304972279762602',
'field_12': '-3123724580211819352',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = [
]
return wanted
| gpl-2.0 | 6,256,318,146,744,454,000 | 27.205485 | 139 | 0.376342 | false |
kennedyshead/home-assistant | homeassistant/components/websocket_api/http.py | 2 | 8634 | """View to accept incoming websocket connection."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
import datetime as dt
import logging
from typing import Any, Final
from aiohttp import WSMsgType, web
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from .auth import AuthPhase, auth_required_message
from .const import (
CANCELLATION_ERRORS,
DATA_CONNECTIONS,
MAX_PENDING_MSG,
PENDING_MSG_PEAK,
PENDING_MSG_PEAK_TIME,
SIGNAL_WEBSOCKET_CONNECTED,
SIGNAL_WEBSOCKET_DISCONNECTED,
URL,
)
from .error import Disconnect
from .messages import message_to_json
_WS_LOGGER: Final = logging.getLogger(f"{__name__}.connection")
class WebsocketAPIView(HomeAssistantView):
"""View to serve a websockets endpoint."""
name: str = "websocketapi"
url: str = URL
requires_auth: bool = False
async def get(self, request: web.Request) -> web.WebSocketResponse:
"""Handle an incoming websocket connection."""
return await WebSocketHandler(request.app["hass"], request).async_handle()
class WebSocketAdapter(logging.LoggerAdapter):
"""Add connection id to websocket messages."""
def process(self, msg: str, kwargs: Any) -> tuple[str, Any]:
"""Add connid to websocket log messages."""
return f'[{self.extra["connid"]}] {msg}', kwargs
class WebSocketHandler:
"""Handle an active websocket client connection."""
def __init__(self, hass: HomeAssistant, request: web.Request) -> None:
"""Initialize an active connection."""
self.hass = hass
self.request = request
self.wsock: web.WebSocketResponse | None = None
self._to_write: asyncio.Queue = asyncio.Queue(maxsize=MAX_PENDING_MSG)
self._handle_task: asyncio.Task | None = None
self._writer_task: asyncio.Task | None = None
self._logger = WebSocketAdapter(_WS_LOGGER, {"connid": id(self)})
self._peak_checker_unsub: Callable[[], None] | None = None
async def _writer(self) -> None:
"""Write outgoing messages."""
# Exceptions if Socket disconnected or cancelled by connection handler
assert self.wsock is not None
with suppress(RuntimeError, ConnectionResetError, *CANCELLATION_ERRORS):
while not self.wsock.closed:
message = await self._to_write.get()
if message is None:
break
self._logger.debug("Sending %s", message)
await self.wsock.send_str(message)
# Clean up the peak checker when we shut down the writer
if self._peak_checker_unsub is not None:
self._peak_checker_unsub()
self._peak_checker_unsub = None
@callback
def _send_message(self, message: str | dict[str, Any]) -> None:
"""Send a message to the client.
Closes connection if the client is not reading the messages.
Async friendly.
"""
if not isinstance(message, str):
message = message_to_json(message)
try:
self._to_write.put_nowait(message)
except asyncio.QueueFull:
self._logger.error(
"Client exceeded max pending messages [2]: %s", MAX_PENDING_MSG
)
self._cancel()
if self._to_write.qsize() < PENDING_MSG_PEAK:
if self._peak_checker_unsub:
self._peak_checker_unsub()
self._peak_checker_unsub = None
return
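# Queue depth is at or above PENDING_MSG_PEAK: schedule a deferred check;
# _check_write_peak drops the connection if the backlog has not drained
# below the peak within PENDING_MSG_PEAK_TIME seconds.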
if self._peak_checker_unsub is None:
self._peak_checker_unsub = async_call_later(
self.hass, PENDING_MSG_PEAK_TIME, self._check_write_peak
)
@callback
def _check_write_peak(self, _utc_time: dt.datetime) -> None:
"""Check that we are no longer above the write peak."""
self._peak_checker_unsub = None
if self._to_write.qsize() < PENDING_MSG_PEAK:
return
self._logger.error(
"Client unable to keep up with pending messages. Stayed over %s for %s seconds",
PENDING_MSG_PEAK,
PENDING_MSG_PEAK_TIME,
)
self._cancel()
@callback
def _cancel(self) -> None:
"""Cancel the connection."""
if self._handle_task is not None:
self._handle_task.cancel()
if self._writer_task is not None:
self._writer_task.cancel()
async def async_handle(self) -> web.WebSocketResponse:
"""Handle a websocket response."""
request = self.request
wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
await wsock.prepare(request)
self._logger.debug("Connected from %s", request.remote)
self._handle_task = asyncio.current_task()
@callback
def handle_hass_stop(event: Event) -> None:
"""Cancel this connection."""
self._cancel()
unsub_stop = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, handle_hass_stop
)
# As the webserver is now started before the start
# event we do not want to block for websocket responses
self._writer_task = asyncio.create_task(self._writer())
auth = AuthPhase(self._logger, self.hass, self._send_message, request)
connection = None
disconnect_warn = None
try:
self._send_message(auth_required_message())
# Auth Phase
try:
with async_timeout.timeout(10):
msg = await wsock.receive()
except asyncio.TimeoutError as err:
disconnect_warn = "Did not receive auth message within 10 seconds"
raise Disconnect from err
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
raise Disconnect
if msg.type != WSMsgType.TEXT:
disconnect_warn = "Received non-Text message."
raise Disconnect
try:
msg_data = msg.json()
except ValueError as err:
disconnect_warn = "Received invalid JSON."
raise Disconnect from err
self._logger.debug("Received %s", msg_data)
connection = await auth.async_handle(msg_data)
self.hass.data[DATA_CONNECTIONS] = (
self.hass.data.get(DATA_CONNECTIONS, 0) + 1
)
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_CONNECTED
)
# Command phase
while not wsock.closed:
msg = await wsock.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
break
if msg.type != WSMsgType.TEXT:
disconnect_warn = "Received non-Text message."
break
try:
msg_data = msg.json()
except ValueError:
disconnect_warn = "Received invalid JSON."
break
self._logger.debug("Received %s", msg_data)
connection.async_handle(msg_data)
except asyncio.CancelledError:
self._logger.info("Connection closed by client")
except Disconnect:
pass
except Exception: # pylint: disable=broad-except
self._logger.exception("Unexpected error inside websocket API")
finally:
unsub_stop()
if connection is not None:
connection.async_close()
try:
self._to_write.put_nowait(None)
# Make sure all error messages are written before closing
await self._writer_task
await wsock.close()
except asyncio.QueueFull: # can be raised by put_nowait
self._writer_task.cancel()
finally:
if disconnect_warn is None:
self._logger.debug("Disconnected")
else:
self._logger.warning("Disconnected: %s", disconnect_warn)
if connection is not None:
self.hass.data[DATA_CONNECTIONS] -= 1
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_DISCONNECTED
)
return wsock
| apache-2.0 | -8,793,060,592,120,405,000 | 32.858824 | 92 | 0.587098 | false |
prathik/thrift | lib/py/src/TSCons.py | 237 | 1267 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from os import path
from SCons.Builder import Builder
def scons_env(env, add=''):
opath = path.dirname(path.abspath('$TARGET'))
lstr = 'thrift --gen cpp -o ' + opath + ' ' + add + ' $SOURCE'
cppbuild = Builder(action=lstr)
env.Append(BUILDERS={'ThriftCpp': cppbuild})
def gen_cpp(env, dir, file):
scons_env(env)
suffixes = ['_types.h', '_types.cpp']
targets = map(lambda s: 'gen-cpp/' + file + s, suffixes)
return env.ThriftCpp(targets, dir + file + '.thrift')
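# Illustrative example (not part of the original file): inside an SConstruct
# these helpers might be used roughly like this; the paths and names are
# placeholders.
#
#     env = Environment()
#     gen_cpp(env, 'interfaces/', 'tutorial')   # expects interfaces/tutorial.thrift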
| apache-2.0 | -4,181,457,289,920,411,000 | 35.2 | 64 | 0.724546 | false |
ShakedY/ai-project | py2.5/lib/python2.5/curses/wrapper.py | 19 | 1650 | """curses.wrapper
Contains one function, wrapper(), which runs another function which
should be the rest of your curses-based application. If the
application raises an exception, wrapper() will restore the terminal
to a sane state so you can read the resulting traceback.
"""
import sys, curses
def wrapper(func, *args, **kwds):
"""Wrapper function that initializes curses and calls another function,
restoring normal keyboard/screen behavior on error.
The callable object 'func' is then passed the main window 'stdscr'
as its first argument, followed by any other arguments passed to
wrapper().
"""
res = None
try:
# Initialize curses
stdscr=curses.initscr()
# Turn off echoing of keys, and enter cbreak mode,
# where no buffering is performed on keyboard input
curses.noecho()
curses.cbreak()
# In keypad mode, escape sequences for special keys
# (like the cursor keys) will be interpreted and
# a special value like curses.KEY_LEFT will be returned
stdscr.keypad(1)
# Start color, too. Harmless if the terminal doesn't have
# color; user can test with has_color() later on. The try/catch
# works around a minor bit of over-conscientiousness in the curses
# module -- the error return from C start_color() is ignorable.
try:
curses.start_color()
except:
pass
return func(stdscr, *args, **kwds)
finally:
# Set everything back to normal
stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
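# Illustrative example (not part of the original module): a minimal client of
# wrapper(); the body of the callback is a placeholder.
def _example(stdscr):
    stdscr.addstr(0, 0, "Hello from curses")
    stdscr.refresh()
    stdscr.getch()
# To run the example: wrapper(_example)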
| gpl-3.0 | -798,320,571,777,117,700 | 32 | 75 | 0.653333 | false |
pbenner/adaptive-sampling | adaptive_sampling/policy.py | 1 | 7380 | #! /usr/bin/env python
# Copyright (C) 2012 Philipp Benner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import Queue
import copy
import interface
import random
import statistics
import sys
import threading
# call the interface
################################################################################
def utility(counts_v, data, bin_options):
"""Call the binning library."""
events = len(counts_v)
counts = statistics.countStatistic(counts_v)
alpha = data['alpha']
beta = data['beta']
gamma = data['gamma']
return interface.utility(events, counts, alpha, beta, gamma, bin_options)
def utilityAt(i, counts_v, data, bin_options):
"""Call the binning library."""
events = len(counts_v)
counts = statistics.countStatistic(counts_v)
alpha = data['alpha']
beta = data['beta']
gamma = data['gamma']
return interface.utilityAt(i, events, counts, alpha, beta, gamma, bin_options)
# tools
################################################################################
def computeKey(position, counts):
return tuple([position]+map(tuple, counts))
# determine the value of a sampling path
################################################################################
def value(path, counts, data, bin_options, hashutil):
if len(path) == 0:
return 0.0
# if necessary compute the local utility for this count statistic
key = computeKey(path[0], counts)
if not hashutil.get(key):
hashutil[key] = utilityAt(path[0], counts, data, bin_options)
# get the local utility from the hashmap
(expectation, utility) = hashutil.get(key)
for i in range(data['K']):
counts[i][path[0]] += 1
utility += expectation[i]*value(path[1:], counts, data, bin_options, hashutil)
counts[i][path[0]] -= 1
return utility
# optimize a sampling path similar to the policy iteration algorithm
################################################################################
def optimize_entry(i, path_value, path, counts, data, bin_options, hashutil):
changed = False
stimuli = range(len(counts[0]))
stimuli.remove(path[i])
path_prime = copy.deepcopy(path)
for x in stimuli:
path_prime[i] = x
path_value_prime = value(path_prime, counts, data, bin_options, hashutil)
if path_value_prime > path_value:
changed = True
path_value = path_value_prime
path[i] = x
return (path_value, path, changed)
def optimize(path, counts, data, bin_options, hashutil, full=False):
changed = True
path_value = value(path, counts, data, bin_options, hashutil)
decisions = range(len(path))
if not full:
decisions.remove(0)
while changed:
for i in decisions:
(path_value, path, changed) = optimize_entry(i, path_value, path, counts, data, bin_options, hashutil)
return (path_value, path)
def u_star(length, counts, data, bin_options):
    if length <= 1:
        return utility(counts, data, bin_options)[1]
    stimuli = range(len(counts[0]))
    path = [ random.choice(stimuli) for i in range(length) ]
    # use a separate name for the result list so that the module-level
    # utility() function stays reachable in the length <= 1 branch above
    utility_vector = [ 0.0 for i in stimuli ]
    hashutil = {}
    for x in stimuli:
        path[0] = x
        (path_value, path) = optimize(path, counts, data, bin_options, hashutil)
        utility_vector[x] = path_value
    return utility_vector
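# Illustrative sketch (not part of the original module): how u_star() might be
# called.  The shapes of data['alpha'], data['beta'] and data['gamma'] depend
# on the adaptive_sampling interface module, so they are left as placeholders.
def _example_u_star():
    counts = [[0, 0, 0, 0],   # counts of event 0 for each of the L stimuli
              [0, 0, 0, 0]]   # counts of event 1
    data = {'K': 2, 'L': 4, 'alpha': None, 'beta': None, 'gamma': None}
    bin_options = {'verbose': False, 'threads': 2}
    # utility of the best two-step sampling path starting at each stimulus
    return u_star(2, counts, data, bin_options)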
# threaded optimization of sampling paths
################################################################################
class OptimizationThread(threading.Thread):
def __init__(self, length, counts, data, bin_options, queue_in, queue_out):
threading.Thread.__init__(self)
self.length = length
self.counts = copy.deepcopy(counts)
self.data = copy.deepcopy(data)
self.bin_options = bin_options
self.queue_in = queue_in
self.queue_out = queue_out
self.hashutil = {}
def run(self):
stimuli = range(len(self.counts[0]))
path = [ random.choice(stimuli) for i in range(self.length) ]
while True:
# get stimulus from queue
x = self.queue_in.get()
if self.bin_options['verbose']:
sys.stderr.write('Processing stimulus ' + str(x) + '.\n')
# set first element of the path to this stimulus
path[0] = x
# optimize all other elements of the path
(path_value, path) = optimize(path, self.counts, self.data, self.bin_options, self.hashutil)
# push result
self.queue_out.put((x, path_value))
self.queue_in.task_done()
def threaded_u_star(length, counts, data, bin_options):
if length <= 1:
return utility(counts, data, bin_options)[1]
utility_queue_in = Queue.Queue()
utility_queue_out = Queue.Queue()
utility_threads = []
stimuli = range(len(counts[0]))
utility_vector = [ 0.0 for i in stimuli ]
# launch daemon threads
for i in range(bin_options['threads']):
t = OptimizationThread(length, counts, data, bin_options,
utility_queue_in, utility_queue_out)
t.setDaemon(True)
t.start()
utility_threads += [t]
# fill queue and start computation
stimuli = range(len(counts[0]))
for x in stimuli:
utility_queue_in.put(x)
# wait for threads
utility_queue_in.join()
# process results
while not utility_queue_out.empty():
x, path_value = utility_queue_out.get()
utility_vector[x] = path_value
utility_queue_out.task_done()
return utility_vector
# test functions
################################################################################
def test1(counts, data, bin_options):
hashutil = {}
for i in range(data['L']):
print [i],
print ": ",
print value([i], counts, data, bin_options, hashutil)
for i in range(data['L']):
for j in range(data['L']):
print [i, j],
print ": ",
print value([i, j], counts, data, bin_options, hashutil)
for i in range(data['L']):
for j in range(data['L']):
for k in range(data['L']):
print [i, j, k],
print ": ",
print value([i, j, k], counts, data, bin_options, hashutil)
for i in range(data['L']):
for j in range(data['L']):
for k in range(data['L']):
for l in range(data['L']):
print [i, j, k, l],
print ": ",
print value([i, j, k, l], counts, data, bin_options, hashutil)
| gpl-2.0 | -8,745,588,336,565,216,000 | 35.35468 | 114 | 0.564228 | false |
mglukhikh/intellij-community | python/lib/Lib/distutils/bcppcompiler.py | 85 | 15086 | """distutils.bcppcompiler
Contains BorlandCCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bcppcompiler.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError, UnknownFileError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
# XXX why do the normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError, msg:
raise CompileError, msg
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
print msg
raise CompileError, msg
# preprocess()
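# Illustrative example (not part of the original module): distutils normally
# selects this class through its compiler registry, roughly:
#
#     from distutils.ccompiler import new_compiler
#     cc = new_compiler(compiler='bcpp')
#     objects = cc.compile(['spam.c'])
#     cc.link_shared_lib(objects, 'spam')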
| apache-2.0 | 6,596,594,083,285,174,000 | 36.904523 | 80 | 0.529962 | false |
nttks/edx-platform | lms/djangoapps/course_wiki/tests/test_tab.py | 158 | 2454 | """
Tests for wiki views.
"""
from django.conf import settings
from django.test.client import RequestFactory
from courseware.tabs import get_course_tab_list
from student.tests.factories import AdminFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class WikiTabTestCase(ModuleStoreTestCase):
"""Test cases for Wiki Tab."""
def setUp(self):
super(WikiTabTestCase, self).setUp()
self.course = CourseFactory.create()
self.instructor = AdminFactory.create()
self.user = UserFactory()
def get_wiki_tab(self, user, course):
"""Returns true if the "Wiki" tab is shown."""
request = RequestFactory().request()
request.user = user
all_tabs = get_course_tab_list(request, course)
wiki_tabs = [tab for tab in all_tabs if tab.name == 'Wiki']
return wiki_tabs[0] if len(wiki_tabs) == 1 else None
def test_wiki_enabled_and_public(self):
"""
Test wiki tab when Enabled setting is True and the wiki is open to
the public.
"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = True
self.assertIsNotNone(self.get_wiki_tab(self.user, self.course))
def test_wiki_enabled_and_not_public(self):
"""
Test wiki when it is enabled but not open to the public
"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = False
self.assertIsNone(self.get_wiki_tab(self.user, self.course))
self.assertIsNotNone(self.get_wiki_tab(self.instructor, self.course))
def test_wiki_enabled_false(self):
"""Test wiki tab when Enabled setting is False"""
settings.WIKI_ENABLED = False
self.assertIsNone(self.get_wiki_tab(self.user, self.course))
self.assertIsNone(self.get_wiki_tab(self.instructor, self.course))
def test_wiki_visibility(self):
"""Test toggling of visibility of wiki tab"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = True
wiki_tab = self.get_wiki_tab(self.user, self.course)
self.assertIsNotNone(wiki_tab)
self.assertTrue(wiki_tab.is_hideable)
wiki_tab.is_hidden = True
self.assertTrue(wiki_tab['is_hidden'])
wiki_tab['is_hidden'] = False
self.assertFalse(wiki_tab.is_hidden)
| agpl-3.0 | -8,002,586,178,862,367,000 | 36.753846 | 77 | 0.664629 | false |
salguarnieri/intellij-community | plugins/hg4idea/testData/bin/mercurial/keepalive.py | 91 | 25918 | # This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# Modified by Benoit Boissinot:
# - fix for digest auth (inspired from urllib2.py @ Python v2.4)
# Modified by Dirkjan Ochtman:
# - import md5 function from a local util module
# Modified by Martin Geisler:
# - moved md5 function from local util module to this module
# Modified by Augie Fackler:
# - add safesend method and use it to prevent broken pipe errors
# on large POST requests
"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
>>> import urllib2
>>> from keepalive import HTTPHandler
>>> keepalive_handler = HTTPHandler()
>>> opener = urllib2.build_opener(keepalive_handler)
>>> urllib2.install_opener(opener)
>>>
>>> fo = urllib2.urlopen('http://www.python.org')
If a connection to a given host is requested, and all of the existing
connections are still in use, another connection will be opened. If
the handler tries to use an existing connection but it fails in some
way, it will be closed and removed from the pool.
To remove the handler, simply re-run build_opener with no arguments, and
install that opener.
You can explicitly close connections by using the close_connection()
method of the returned file-like object (described below) or you can
use the handler methods:
close_connection(host)
close_all()
open_connections()
NOTE: using the close_connection and close_all methods of the handler
should be done with care when using multiple threads.
* there is nothing that prevents another thread from creating new
connections immediately after connections are closed
* no checks are done to prevent in-use connections from being closed
>>> keepalive_handler.close_all()
EXTRA ATTRIBUTES AND METHODS
Upon a status of 200, the object returned has a few additional
attributes and methods, which should not be used if you want to
remain consistent with the normal urllib2-returned objects:
close_connection() - close the connection to the host
readlines() - you know, readlines()
status - the return status (i.e. 404)
reason - english translation of status (i.e. 'File not found')
If you want the best of both worlds, use this inside an
AttributeError-catching try:
>>> try: status = fo.status
>>> except AttributeError: status = None
Unfortunately, these are ONLY there if status == 200, so it's not
easy to distinguish between non-200 responses. The reason is that
urllib2 tries to do clever things with error codes 301, 302, 401,
and 407, and it wraps the object upon return.
For python versions earlier than 2.4, you can avoid this fancy error
handling by setting the module-level global HANDLE_ERRORS to zero.
You see, prior to 2.4, it's the HTTP Handler's job to determine what
to handle specially, and what to just pass up. HANDLE_ERRORS == 0
means "pass everything up". In python 2.4, however, this job no
longer belongs to the HTTP Handler and is now done by a NEW handler,
HTTPErrorProcessor. Here's the bottom line:
python version < 2.4
HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
errors
HANDLE_ERRORS == 0 pass everything up, error processing is
left to the calling code
python version >= 2.4
HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
HANDLE_ERRORS == 0 (default) pass everything up, let the
other handlers (specifically,
HTTPErrorProcessor) decide what to do
In practice, setting the variable either way makes little difference
in python 2.4, so for the most consistent behavior across versions,
you probably just want to use the defaults, which will give you
exceptions on errors.
"""
# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
import errno
import httplib
import socket
import thread
import urllib2
DEBUG = None
import sys
if sys.version_info < (2, 4):
HANDLE_ERRORS = 1
else: HANDLE_ERRORS = 0
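# Illustrative sketch (not part of the original module): the usage pattern from
# the module docstring wired together in one place; the URL is a placeholder.
def _example_keepalive_usage(url='http://www.python.org'):
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    fo = urllib2.urlopen(url)
    try:
        status = fo.status       # only present when the response was a 200
    except AttributeError:
        status = None
    data = fo.read()
    fo.close()
    keepalive_handler.close_all()
    return status, data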
class ConnectionManager(object):
"""
The connection manager must be able to:
      * keep track of all existing connections
"""
def __init__(self):
self._lock = thread.allocate_lock()
self._hostmap = {} # map hosts to a list of connections
self._connmap = {} # map connections to host
self._readymap = {} # map connection to ready state
def add(self, host, connection, ready):
self._lock.acquire()
try:
if host not in self._hostmap:
self._hostmap[host] = []
self._hostmap[host].append(connection)
self._connmap[connection] = host
self._readymap[connection] = ready
finally:
self._lock.release()
def remove(self, connection):
self._lock.acquire()
try:
try:
host = self._connmap[connection]
except KeyError:
pass
else:
del self._connmap[connection]
del self._readymap[connection]
self._hostmap[host].remove(connection)
if not self._hostmap[host]: del self._hostmap[host]
finally:
self._lock.release()
def set_ready(self, connection, ready):
try:
self._readymap[connection] = ready
except KeyError:
pass
def get_ready_conn(self, host):
conn = None
self._lock.acquire()
try:
if host in self._hostmap:
for c in self._hostmap[host]:
if self._readymap[c]:
self._readymap[c] = 0
conn = c
break
finally:
self._lock.release()
return conn
def get_all(self, host=None):
if host:
return list(self._hostmap.get(host, []))
else:
return dict(self._hostmap)
class KeepAliveHandler(object):
def __init__(self):
self._cm = ConnectionManager()
#### Connection Management
def open_connections(self):
"""return a list of connected hosts and the number of connections
to each. [('foo.com:80', 2), ('bar.org', 1)]"""
return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
def close_connection(self, host):
"""close connection(s) to <host>
host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
no error occurs if there is no connection to that host."""
for h in self._cm.get_all(host):
self._cm.remove(h)
h.close()
def close_all(self):
"""close all open connections"""
for host, conns in self._cm.get_all().iteritems():
for h in conns:
self._cm.remove(h)
h.close()
def _request_closed(self, request, host, connection):
"""tells us that this request is now closed and that the
connection is ready for another request"""
self._cm.set_ready(connection, 1)
def _remove_connection(self, host, connection, close=0):
if close:
connection.close()
self._cm.remove(connection)
#### Transaction Execution
def http_open(self, req):
return self.do_open(HTTPConnection, req)
def do_open(self, http_class, req):
host = req.get_host()
if not host:
raise urllib2.URLError('no host given')
try:
h = self._cm.get_ready_conn(host)
while h:
r = self._reuse_connection(h, req, host)
# if this response is non-None, then it worked and we're
# done. Break out, skipping the else block.
if r:
break
# connection is bad - possibly closed by server
# discard it and ask for the next free connection
h.close()
self._cm.remove(h)
h = self._cm.get_ready_conn(host)
else:
# no (working) free connections were found. Create a new one.
h = http_class(host)
if DEBUG:
DEBUG.info("creating new connection to %s (%d)",
host, id(h))
self._cm.add(host, h, 0)
self._start_transaction(h, req)
r = h.getresponse()
except (socket.error, httplib.HTTPException), err:
raise urllib2.URLError(err)
# if not a persistent connection, don't try to reuse it
if r.will_close:
self._cm.remove(h)
if DEBUG:
DEBUG.info("STATUS: %s, %s", r.status, r.reason)
r._handler = self
r._host = host
r._url = req.get_full_url()
r._connection = h
r.code = r.status
r.headers = r.msg
r.msg = r.reason
if r.status == 200 or not HANDLE_ERRORS:
return r
else:
return self.parent.error('http', req, r,
r.status, r.msg, r.headers)
def _reuse_connection(self, h, req, host):
"""start the transaction with a re-used connection
return a response object (r) upon success or None on failure.
This DOES not close or remove bad connections in cases where
it returns. However, if an unexpected exception occurs, it
will close and remove the connection before re-raising.
"""
try:
self._start_transaction(h, req)
r = h.getresponse()
# note: just because we got something back doesn't mean it
# worked. We'll check the version below, too.
except (socket.error, httplib.HTTPException):
r = None
except: # re-raises
# adding this block just in case we've missed
# something we will still raise the exception, but
# lets try and close the connection and remove it
# first. We previously got into a nasty loop
# where an exception was uncaught, and so the
# connection stayed open. On the next try, the
# same exception was raised, etc. The trade-off is
# that it's now possible this call will raise
# a DIFFERENT exception
if DEBUG:
DEBUG.error("unexpected exception - closing "
"connection to %s (%d)", host, id(h))
self._cm.remove(h)
h.close()
raise
if r is None or r.version == 9:
# httplib falls back to assuming HTTP 0.9 if it gets a
# bad header back. This is most likely to happen if
# the socket has been closed by the server since we
# last used the connection.
if DEBUG:
DEBUG.info("failed to re-use connection to %s (%d)",
host, id(h))
r = None
else:
if DEBUG:
DEBUG.info("re-using connection to %s (%d)", host, id(h))
return r
def _start_transaction(self, h, req):
# What follows mostly reimplements HTTPConnection.request()
# except it adds self.parent.addheaders in the mix.
headers = req.headers.copy()
if sys.version_info >= (2, 4):
headers.update(req.unredirected_hdrs)
headers.update(self.parent.addheaders)
headers = dict((n.lower(), v) for n, v in headers.items())
skipheaders = {}
for n in ('host', 'accept-encoding'):
if n in headers:
skipheaders['skip_' + n.replace('-', '_')] = 1
try:
if req.has_data():
data = req.get_data()
h.putrequest('POST', req.get_selector(), **skipheaders)
if 'content-type' not in headers:
h.putheader('Content-type',
'application/x-www-form-urlencoded')
if 'content-length' not in headers:
h.putheader('Content-length', '%d' % len(data))
else:
h.putrequest('GET', req.get_selector(), **skipheaders)
except (socket.error), err:
raise urllib2.URLError(err)
for k, v in headers.items():
h.putheader(k, v)
h.endheaders()
if req.has_data():
h.send(data)
class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
pass
class HTTPResponse(httplib.HTTPResponse):
# we need to subclass HTTPResponse in order to
# 1) add readline() and readlines() methods
# 2) add close_connection() methods
# 3) add info() and geturl() methods
# in order to add readline(), read must be modified to deal with a
# buffer. example: readline must read a buffer and then spit back
# one line at a time. The only real alternative is to read one
# BYTE at a time (ick). Once something has been read, it can't be
# put back (ok, maybe it can, but that's even uglier than this),
# so if you THEN do a normal read, you must first take stuff from
# the buffer.
# the read method wraps the original to accommodate buffering,
# although read() never adds to the buffer.
# Both readline and readlines have been stolen with almost no
# modification from socket.py
def __init__(self, sock, debuglevel=0, strict=0, method=None):
httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
self.fileno = sock.fileno
self.code = None
self._rbuf = ''
self._rbufsize = 8096
self._handler = None # inserted by the handler later
self._host = None # (same)
self._url = None # (same)
self._connection = None # (same)
_raw_read = httplib.HTTPResponse.read
def close(self):
if self.fp:
self.fp.close()
self.fp = None
if self._handler:
self._handler._request_closed(self, self._host,
self._connection)
def close_connection(self):
self._handler._remove_connection(self._host, self._connection, close=1)
self.close()
def info(self):
return self.headers
def geturl(self):
return self._url
def read(self, amt=None):
# the _rbuf test is only in this first if for speed. It's not
# logically necessary
if self._rbuf and not amt is None:
L = len(self._rbuf)
if amt > L:
amt -= L
else:
s = self._rbuf[:amt]
self._rbuf = self._rbuf[amt:]
return s
s = self._rbuf + self._raw_read(amt)
self._rbuf = ''
return s
# stolen from Python SVN #68532 to fix issue1088
def _read_chunked(self, amt):
chunk_left = self.chunk_left
value = ''
# XXX This accumulates chunks by repeated string concatenation,
# which is not efficient as the number or size of chunks gets big.
while True:
if chunk_left is None:
line = self.fp.readline()
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronization is
# probably lost
self.close()
raise httplib.IncompleteRead(value)
if chunk_left == 0:
break
if amt is None:
value += self._safe_read(chunk_left)
elif amt < chunk_left:
value += self._safe_read(amt)
self.chunk_left = chunk_left - amt
return value
elif amt == chunk_left:
value += self._safe_read(amt)
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return value
else:
value += self._safe_read(chunk_left)
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline()
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return value
def readline(self, limit=-1):
i = self._rbuf.find('\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
new = self._raw_read(self._rbufsize)
if not new:
break
i = new.find('\n')
if i >= 0:
i = i + len(self._rbuf)
self._rbuf = self._rbuf + new
if i < 0:
i = len(self._rbuf)
else:
i = i + 1
if 0 <= limit < len(self._rbuf):
i = limit
data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return data
def readlines(self, sizehint = 0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
def safesend(self, str):
"""Send `str' to the server.
Shamelessly ripped off from httplib to patch a bad behavior.
"""
# _broken_pipe_resp is an attribute we set in this function
# if the socket is closed while we're sending data but
# the server sent us a response before hanging up.
# In that case, we want to pretend to send the rest of the
# outgoing data, and then let the user use getresponse()
# (which we wrap) to get this last response before
# opening a new socket.
if getattr(self, '_broken_pipe_resp', None) is not None:
return
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise httplib.NotConnected
# send the data to the server. if we get a broken pipe, then close
# the socket. we want to reconnect when somebody tries to send again.
#
# NOTE: we DO propagate the error, though, because we cannot simply
# ignore the error... the caller will know if they can retry.
if self.debuglevel > 0:
print "send:", repr(str)
try:
blocksize = 8192
read = getattr(str, 'read', None)
if read is not None:
if self.debuglevel > 0:
print "sending a read()able"
data = read(blocksize)
while data:
self.sock.sendall(data)
data = read(blocksize)
else:
self.sock.sendall(str)
except socket.error, v:
reraise = True
if v[0] == errno.EPIPE: # Broken pipe
if self._HTTPConnection__state == httplib._CS_REQ_SENT:
self._broken_pipe_resp = None
self._broken_pipe_resp = self.getresponse()
reraise = False
self.close()
if reraise:
raise
def wrapgetresponse(cls):
"""Wraps getresponse in cls with a broken-pipe sane version.
"""
def safegetresponse(self):
# In safesend() we might set the _broken_pipe_resp
# attribute, in which case the socket has already
# been closed and we just need to give them the response
# back. Otherwise, we use the normal response path.
r = getattr(self, '_broken_pipe_resp', None)
if r is not None:
return r
return cls.getresponse(self)
safegetresponse.__doc__ = cls.getresponse.__doc__
return safegetresponse
class HTTPConnection(httplib.HTTPConnection):
# use the modified response class
response_class = HTTPResponse
send = safesend
getresponse = wrapgetresponse(httplib.HTTPConnection)
#########################################################################
##### TEST FUNCTIONS
#########################################################################
def error_handler(url):
global HANDLE_ERRORS
orig = HANDLE_ERRORS
keepalive_handler = HTTPHandler()
opener = urllib2.build_opener(keepalive_handler)
urllib2.install_opener(opener)
pos = {0: 'off', 1: 'on'}
for i in (0, 1):
print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
HANDLE_ERRORS = i
try:
fo = urllib2.urlopen(url)
fo.read()
fo.close()
try:
status, reason = fo.status, fo.reason
except AttributeError:
status, reason = None, None
except IOError, e:
print " EXCEPTION: %s" % e
raise
else:
print " status = %s, reason = %s" % (status, reason)
HANDLE_ERRORS = orig
hosts = keepalive_handler.open_connections()
print "open connections:", hosts
keepalive_handler.close_all()
def md5(s):
try:
from hashlib import md5 as _md5
except ImportError:
from md5 import md5 as _md5
global md5
md5 = _md5
return _md5(s)
def continuity(url):
format = '%25s: %s'
# first fetch the file with the normal http handler
opener = urllib2.build_opener()
urllib2.install_opener(opener)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
    m = md5(foo)
print format % ('normal urllib', m.hexdigest())
# now install the keepalive handler and try again
opener = urllib2.build_opener(HTTPHandler())
urllib2.install_opener(opener)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
    m = md5(foo)
print format % ('keepalive read', m.hexdigest())
fo = urllib2.urlopen(url)
foo = ''
while True:
f = fo.readline()
if f:
foo = foo + f
else: break
fo.close()
    m = md5(foo)
print format % ('keepalive readline', m.hexdigest())
def comp(N, url):
print ' making %i connections to:\n %s' % (N, url)
sys.stdout.write(' first using the normal urllib handlers')
# first use normal opener
opener = urllib2.build_opener()
urllib2.install_opener(opener)
t1 = fetch(N, url)
print ' TIME: %.3f s' % t1
sys.stdout.write(' now using the keepalive handler ')
# now install the keepalive handler and try again
opener = urllib2.build_opener(HTTPHandler())
urllib2.install_opener(opener)
t2 = fetch(N, url)
print ' TIME: %.3f s' % t2
print ' improvement factor: %.2f' % (t1 / t2)
def fetch(N, url, delay=0):
import time
lens = []
starttime = time.time()
for i in range(N):
if delay and i > 0:
time.sleep(delay)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
lens.append(len(foo))
diff = time.time() - starttime
j = 0
for i in lens[1:]:
j = j + 1
if not i == lens[0]:
print "WARNING: inconsistent length on read %i: %i" % (j, i)
return diff
def test_timeout(url):
global DEBUG
dbbackup = DEBUG
class FakeLogger(object):
def debug(self, msg, *args):
print msg % args
info = warning = error = debug
DEBUG = FakeLogger()
print " fetching the file to establish a connection"
fo = urllib2.urlopen(url)
data1 = fo.read()
fo.close()
i = 20
print " waiting %i seconds for the server to close the connection" % i
while i > 0:
sys.stdout.write('\r %2i' % i)
sys.stdout.flush()
time.sleep(1)
i -= 1
sys.stderr.write('\r')
print " fetching the file a second time"
fo = urllib2.urlopen(url)
data2 = fo.read()
fo.close()
if data1 == data2:
print ' data are identical'
else:
print ' ERROR: DATA DIFFER'
DEBUG = dbbackup
def test(url, N=10):
print "checking error handler (do this on a non-200)"
try: error_handler(url)
except IOError:
print "exiting - exception will prevent further tests"
sys.exit()
print
print "performing continuity test (making sure stuff isn't corrupted)"
continuity(url)
print
print "performing speed comparison"
comp(N, url)
print
print "performing dropped-connection check"
test_timeout(url)
if __name__ == '__main__':
import time
import sys
try:
N = int(sys.argv[1])
url = sys.argv[2]
except (IndexError, ValueError):
print "%s <integer> <url>" % sys.argv[0]
else:
test(url, N)
| apache-2.0 | -7,124,741,989,817,662,000 | 33.057819 | 80 | 0.57134 | false |
sincerefly/getEastmoneyReport | guping_new/statistics/sta-3.py | 1 | 2384 | #!/bin/env python
#encoding:utf-8
from pymongo import MongoClient
import datetime
# Settings
mongopath = "localhost" # 数据库地址
startDate = "20150104" # 检索数据开始日期
endDate = "20150529" # 检索数据结束日期
#endDate = "20150227" # 检索数据结束日期(三个月预留)
nowDate = datetime.datetime.now().strftime("%Y%m%d") # 当前日期
# Functions
def isNotWorkDay():
    # return True on Saturday or Sunday so the caller can skip the run
    today = int(datetime.datetime.now().strftime("%w"))
    return today in (6, 0)
def clientMongo():
client = MongoClient(mongopath, 27017)
db = client.guping
return db if db else False
def getArticleInfo(db):
return db.dfcf_company.find({})
def startSta(art_list, db):
i = 0
author_dict = {}
for art in art_list:
company = art["company"].encode("utf-8")
author_list = art["author"]
#print author_list
for au in author_list:
au = au.encode("utf-8")
grow = art["grow"]
if author_dict.has_key(au):
author_dict[au]["count"] +=1
author_dict[au]["grow"].append(grow)
else:
author_dict[au] = {}
author_dict[au]["count"] = 1
author_dict[au]["grow"] = []
author_dict[au]["grow"].append(grow)
author_dict[au]["company"] = company
#print author_dict
for key in author_dict:
count = author_dict[key]["count"]
grow_list = author_dict[key]["grow"]
avgUp = round(sum(grow_list) / len(grow_list), 4)
company = author_dict[key]["company"]
print key + "\t" + str(count) + "\t" + str(avgUp) + "\t" + company
d = {
"author": key,
"count": count,
"avgUp": avgUp,
"company": company
}
#db.dfcf_author_f_test.insert(d)
        db.dfcf_author_f.update({'author': key}, {'$set': d}, upsert=True)
return 0
# main function
if __name__ == "__main__":
if isNotWorkDay():
exit(0)
db = clientMongo()
if db:
print "Client Mongo Success"
else:
print "Client Mongo failed"
exit(0)
article_list = getArticleInfo(db)
    # collect stock gain statistics within the date range
startSta(article_list, db)
| mit | -637,123,934,862,235,800 | 19.178571 | 77 | 0.537168 | false |
AutomatedTester/selenium | py/test/selenium/test_prompts.py | 65 | 2425 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium import selenium
import unittest
import time
class TestPrompts(unittest.TestCase):
def setUp(self):
self.selenium = selenium("localhost", \
4444, "*firefoxproxy", "http://www.w3schools.com")
self.selenium.start()
def test_alert(self):
sel = self.selenium
sel.open("/js/tryit.asp?filename=tryjs_alert")
sel.select_frame("view")
sel.click("css=input[value='Show alert box']")
self.assertEqual(sel.get_alert(), "Hello! I am an alert box!")
def test_confirm_accept(self):
sel = self.selenium
sel.open("/js/tryit.asp?filename=tryjs_confirm")
sel.select_frame("view")
sel.choose_ok_on_next_confirmation()
sel.click("css=input[value='Show a confirm box']")
self.assertEqual(sel.get_alert(), "You pressed OK!")
def test_confirm_cancel(self):
sel = self.selenium
sel.open("/js/tryit.asp?filename=tryjs_confirm")
sel.select_frame("view")
        sel.choose_cancel_on_next_confirmation()
        sel.click("css=input[value='Show a confirm box']")
        self.assertEqual(sel.get_alert(), "You pressed Cancel!")
def test_prompt(self):
sel = self.selenium
sel.open("/js/tryit.asp?filename=tryjs_prompt")
sel.select_frame("view")
sel.answer_on_next_prompt('Flying Monkey')
sel.click("css=input[value='Show prompt box']")
self.assertEqual(sel.get_html_source(), '<head></head><body>Hello Flying Monkey! How are you today?</body>')
def tearDown(self):
self.selenium.stop()
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -3,046,586,584,835,951,600 | 37.492063 | 116 | 0.669278 | false |
sbalde/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/factories.py | 16 | 21208 | """
Factories for use in tests of XBlocks.
"""
import functools
import inspect
import pprint
import pymongo.message
import threading
import traceback
from collections import defaultdict
from decorator import contextmanager
from uuid import uuid4
from factory import Factory, Sequence, lazy_attribute_sequence, lazy_attribute
from factory.containers import CyclicDefinitionError
from mock import Mock, patch
from nose.tools import assert_less_equal, assert_greater_equal
import dogstats_wrapper as dog_stats_api
from opaque_keys.edx.locations import Location
from opaque_keys.edx.keys import UsageKey
from xblock.core import XBlock
from xmodule.modulestore import prefer_xmodules, ModuleStoreEnum
from xmodule.tabs import CourseTab
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT
class Dummy(object):
pass
class XModuleFactoryLock(threading.local):
"""
This class exists to store whether XModuleFactory can be accessed in a safe
way (meaning, in a context where the data it creates will be cleaned up).
Users of XModuleFactory (or its subclasses) should only call XModuleFactoryLock.enable
after ensuring that a) the modulestore will be cleaned up, and b) that XModuleFactoryLock.disable
will be called.
"""
def __init__(self):
super(XModuleFactoryLock, self).__init__()
self._enabled = False
def enable(self):
"""
Enable XModuleFactories. This should only be turned in a context
where the modulestore will be reset at the end of the test (such
as inside ModuleStoreTestCase).
"""
self._enabled = True
def disable(self):
"""
Disable XModuleFactories. This should be called once the data
from the factory has been cleaned up.
"""
self._enabled = False
def is_enabled(self):
"""
Return whether XModuleFactories are enabled.
"""
return self._enabled
XMODULE_FACTORY_LOCK = XModuleFactoryLock()
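# Illustrative sketch (not part of the original module): the enable/disable
# contract described in XModuleFactoryLock's docstring.  ModuleStoreTestCase
# normally handles this; `build_fixtures` is a hypothetical callable.
def _example_factory_lock_usage(build_fixtures):
    XMODULE_FACTORY_LOCK.enable()
    try:
        build_fixtures()          # e.g. create courses/items with the factories
    finally:
        XMODULE_FACTORY_LOCK.disable()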
class XModuleFactory(Factory):
"""
Factory for XModules
"""
# We have to give a Factory a FACTORY_FOR.
# However, the class that we create is actually determined by the category
# specified in the factory
FACTORY_FOR = Dummy
@lazy_attribute
def modulestore(self):
msg = "XMODULE_FACTORY_LOCK not enabled. Please use ModuleStoreTestCase as your test baseclass."
assert XMODULE_FACTORY_LOCK.is_enabled(), msg
from xmodule.modulestore.django import modulestore
return modulestore()
last_course = threading.local()
class CourseFactory(XModuleFactory):
"""
Factory for XModule courses.
"""
org = Sequence('org.{}'.format)
number = Sequence('course_{}'.format)
display_name = Sequence('Run {}'.format)
# pylint: disable=unused-argument
@classmethod
def _create(cls, target_class, **kwargs):
# All class attributes (from this class and base classes) are
# passed in via **kwargs. However, some of those aren't actual field values,
# so pop those off for use separately
org = kwargs.pop('org', None)
# because the factory provides a default 'number' arg, prefer the non-defaulted 'course' arg if any
number = kwargs.pop('course', kwargs.pop('number', None))
store = kwargs.pop('modulestore')
name = kwargs.get('name', kwargs.get('run', Location.clean(kwargs.get('display_name'))))
run = kwargs.pop('run', name)
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
# Pass the metadata just as field=value pairs
kwargs.update(kwargs.pop('metadata', {}))
default_store_override = kwargs.pop('default_store', None)
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
if default_store_override is not None:
with store.default_store(default_store_override):
new_course = store.create_course(org, number, run, user_id, fields=kwargs)
else:
new_course = store.create_course(org, number, run, user_id, fields=kwargs)
last_course.loc = new_course.location
return new_course
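# Illustrative example (not part of the original module): a typical call in a
# test, with placeholder values, once XMODULE_FACTORY_LOCK has been enabled:
#
#     course = CourseFactory.create(
#         org='TestOrg', course='TST101', run='2015_Fall',
#         display_name='Test Run', default_store=ModuleStoreEnum.Type.split,
#     )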
class LibraryFactory(XModuleFactory):
"""
Factory for creating a content library
"""
org = Sequence('org{}'.format)
library = Sequence('lib{}'.format)
display_name = Sequence('Test Library {}'.format)
# pylint: disable=unused-argument
@classmethod
def _create(cls, target_class, **kwargs):
"""
Create a library with a unique name and key.
All class attributes (from this class and base classes) are automagically
passed in via **kwargs.
"""
        # some of the kwargs aren't actual field values, so pop those off for use separately:
org = kwargs.pop('org')
library = kwargs.pop('library')
store = kwargs.pop('modulestore')
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
# Pass the metadata just as field=value pairs
kwargs.update(kwargs.pop('metadata', {}))
default_store_override = kwargs.pop('default_store', ModuleStoreEnum.Type.split)
with store.default_store(default_store_override):
new_library = store.create_library(org, library, user_id, fields=kwargs)
return new_library
class ItemFactory(XModuleFactory):
"""
Factory for XModule items.
"""
category = 'chapter'
parent = None
@lazy_attribute_sequence
def display_name(self, n):
return "{} {}".format(self.category, n)
@lazy_attribute
def location(self):
if self.display_name is None:
dest_name = uuid4().hex
else:
dest_name = self.display_name.replace(" ", "_")
new_location = self.parent_location.course_key.make_usage_key(
self.category,
dest_name
)
return new_location
@lazy_attribute
def parent_location(self):
default_location = getattr(last_course, 'loc', None)
try:
parent = self.parent
# This error is raised if the caller hasn't provided either parent or parent_location
# In this case, we'll just return the default parent_location
except CyclicDefinitionError:
return default_location
if parent is None:
return default_location
return parent.location
@classmethod
def _create(cls, target_class, **kwargs):
"""
Uses ``**kwargs``:
:parent_location: (required): the location of the parent module
(e.g. the parent course or section)
:category: the category of the resulting item.
:data: (optional): the data for the item
(e.g. XML problem definition for a problem item)
:display_name: (optional): the display name of the item
:metadata: (optional): dictionary of metadata attributes
:boilerplate: (optional) the boilerplate for overriding field values
:publish_item: (optional) whether or not to publish the item (default is True)
:target_class: is ignored
"""
# All class attributes (from this class and base classes) are
# passed in via **kwargs. However, some of those aren't actual field values,
# so pop those off for use separately
# catch any old style users before they get into trouble
assert 'template' not in kwargs
parent_location = kwargs.pop('parent_location', None)
data = kwargs.pop('data', None)
category = kwargs.pop('category', None)
display_name = kwargs.pop('display_name', None)
metadata = kwargs.pop('metadata', {})
location = kwargs.pop('location')
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
publish_item = kwargs.pop('publish_item', True)
assert isinstance(location, UsageKey)
assert location != parent_location
store = kwargs.pop('modulestore')
# This code was based off that in cms/djangoapps/contentstore/views.py
parent = kwargs.pop('parent', None) or store.get_item(parent_location)
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
if 'boilerplate' in kwargs:
template_id = kwargs.pop('boilerplate')
clz = XBlock.load_class(category, select=prefer_xmodules)
template = clz.get_template(template_id)
assert template is not None
metadata.update(template.get('metadata', {}))
if not isinstance(data, basestring):
data.update(template.get('data'))
# replace the display name with an optional parameter passed in from the caller
if display_name is not None:
metadata['display_name'] = display_name
module = store.create_child(
user_id,
parent.location,
location.block_type,
block_id=location.block_id,
metadata=metadata,
definition_data=data,
runtime=parent.runtime,
fields=kwargs,
)
# VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if category == 'static_tab':
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:itemfactory_create_static_tab",
u"block:{}".format(location.block_type),
)
)
course = store.get_course(location.course_key)
course.tabs.append(
CourseTab.load('static_tab', name='Static Tab', url_slug=location.name)
)
store.update_item(course, user_id)
# parent and publish the item, so it can be accessed
if 'detached' not in module._class_tags:
parent.children.append(location)
store.update_item(parent, user_id)
if publish_item:
published_parent = store.publish(parent.location, user_id)
# module is last child of parent
return published_parent.get_children()[-1]
else:
return store.get_item(location)
elif publish_item:
return store.publish(location, user_id)
else:
return module
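# Illustrative sketch only (hypothetical helper, not part of the original factories module):
# hanging a chapter off an already created course; the category and display name are arbitrary.
def _example_create_chapter(course):
    return ItemFactory.create(
        parent_location=course.location,
        category='chapter',
        display_name='Week 1',
    )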
@contextmanager
def check_exact_number_of_calls(object_with_method, method_name, num_calls):
"""
Instruments the given method on the given object to verify the number of calls to the
method is exactly equal to 'num_calls'.
"""
with check_number_of_calls(object_with_method, method_name, num_calls, num_calls):
yield
def check_number_of_calls(object_with_method, method_name, maximum_calls, minimum_calls=1):
"""
Instruments the given method on the given object to verify the number of calls to the method is
less than or equal to the expected maximum_calls and greater than or equal to the expected minimum_calls.
"""
return check_sum_of_calls(object_with_method, [method_name], maximum_calls, minimum_calls)
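# Illustrative sketch only (hypothetical target class): the call-counting context managers
# above fail the block below unless Counter.bump is called exactly once.
def _example_exact_call_check():
    class Counter(object):
        def bump(self):
            return 1
    with check_exact_number_of_calls(Counter, 'bump', 1):
        Counter().bump()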
class StackTraceCounter(object):
"""
A class that counts unique stack traces underneath a particular stack frame.
"""
def __init__(self, stack_depth, include_arguments=True):
"""
Arguments:
stack_depth (int): The number of stack frames above this constructor to capture.
include_arguments (bool): Whether to store the arguments that are passed
when capturing a stack trace.
"""
self.include_arguments = include_arguments
self._top_of_stack = traceback.extract_stack(limit=stack_depth)[0]
if self.include_arguments:
self._stacks = defaultdict(lambda: defaultdict(int))
else:
self._stacks = defaultdict(int)
def capture_stack(self, args, kwargs):
"""
Record the stack frames starting at the caller of this method, and
ending at the top of the stack as defined by the ``stack_depth``.
Arguments:
args: The positional arguments to capture at this stack frame
kwargs: The keyword arguments to capture at this stack frame
"""
# pylint: disable=broad-except
stack = traceback.extract_stack()[:-2]
if self._top_of_stack in stack:
stack = stack[stack.index(self._top_of_stack):]
if self.include_arguments:
safe_args = []
for arg in args:
try:
safe_args.append(repr(arg))
except Exception as exc:
                    safe_args.append('<un-repr-able value: {}>'.format(exc))
safe_kwargs = {}
for key, kwarg in kwargs.items():
try:
safe_kwargs[key] = repr(kwarg)
except Exception as exc:
                    safe_kwargs[key] = '<un-repr-able value: {}>'.format(exc)
self._stacks[tuple(stack)][tuple(safe_args), tuple(safe_kwargs.items())] += 1
else:
self._stacks[tuple(stack)] += 1
@property
def total_calls(self):
"""
Return the total number of stacks recorded.
"""
return sum(self.stack_calls(stack) for stack in self._stacks)
def stack_calls(self, stack):
"""
Return the number of calls to the supplied ``stack``.
"""
if self.include_arguments:
return sum(self._stacks[stack].values())
else:
return self._stacks[stack]
def __iter__(self):
"""
Iterate over all unique captured stacks.
"""
return iter(sorted(self._stacks.keys(), key=lambda stack: (self.stack_calls(stack), stack), reverse=True))
def __getitem__(self, stack):
"""
Return the set of captured calls with the supplied stack.
"""
return self._stacks[stack]
@classmethod
def capture_call(cls, func, stack_depth, include_arguments=True):
"""
A decorator that wraps ``func``, and captures each call to ``func``,
recording the stack trace, and optionally the arguments that the function
is called with.
Arguments:
func: the function to wrap
stack_depth: how far up the stack to truncate the stored stack traces (
this is counted from the call to ``capture_call``, rather than calls
to the captured function).
"""
stacks = StackTraceCounter(stack_depth, include_arguments)
# pylint: disable=missing-docstring
@functools.wraps(func)
def capture(*args, **kwargs):
stacks.capture_stack(args, kwargs)
return func(*args, **kwargs)
capture.stack_counter = stacks
return capture
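# Illustrative sketch only (hypothetical wrapped function): capture_call records each call,
# its stack trace and its arguments on the wrapper's stack_counter attribute.
def _example_capture_call():
    def add(x, y):
        return x + y
    counted_add = StackTraceCounter.capture_call(add, stack_depth=2)
    counted_add(1, 2)
    counted_add(3, y=4)
    return counted_add.stack_counter.total_calls  # expected: 2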
@contextmanager
def check_sum_of_calls(object_, methods, maximum_calls, minimum_calls=1, include_arguments=True):
"""
Instruments the given methods on the given object to verify that the total sum of calls made to the
    methods falls between minimum_calls and maximum_calls.
"""
mocks = {
method: StackTraceCounter.capture_call(
getattr(object_, method),
stack_depth=7,
include_arguments=include_arguments
)
for method in methods
}
with patch.multiple(object_, **mocks):
yield
call_count = sum(capture_fn.stack_counter.total_calls for capture_fn in mocks.values())
# Assertion errors don't handle multi-line values, so pretty-print to std-out instead
if not minimum_calls <= call_count <= maximum_calls:
messages = ["Expected between {} and {} calls, {} were made.\n\n".format(
minimum_calls,
maximum_calls,
call_count,
)]
for method_name, capture_fn in mocks.items():
stack_counter = capture_fn.stack_counter
messages.append("{!r} was called {} times:\n".format(
method_name,
stack_counter.total_calls
))
for stack in stack_counter:
messages.append(" called {} times:\n\n".format(stack_counter.stack_calls(stack)))
messages.append(" " + " ".join(traceback.format_list(stack)))
messages.append("\n\n")
if include_arguments:
for (args, kwargs), count in stack_counter[stack].items():
messages.append(" called {} times with:\n".format(count))
messages.append(" args: {}\n".format(args))
messages.append(" kwargs: {}\n\n".format(dict(kwargs)))
print "".join(messages)
# verify the counter actually worked by ensuring we have counted greater than (or equal to) the minimum calls
assert_greater_equal(call_count, minimum_calls)
# now verify the number of actual calls is less than (or equal to) the expected maximum
assert_less_equal(call_count, maximum_calls)
def mongo_uses_error_check(store):
"""
Does mongo use the error check as a separate message?
"""
if hasattr(store, 'mongo_wire_version'):
return store.mongo_wire_version() <= 1
if hasattr(store, 'modulestores'):
return any([mongo_uses_error_check(substore) for substore in store.modulestores])
return False
@contextmanager
def check_mongo_calls_range(max_finds=float("inf"), min_finds=0, max_sends=None, min_sends=None):
"""
    Instruments the given store to count the number of calls to find (incl find_one) and the number
    of calls to send_message, which covers insert, update, and remove (if you provide the send bounds). At the
    end of the with statement, it compares the counts to the bounds provided in the arguments.
    :param max_finds: the maximum number of find calls expected
    :param min_finds: the minimum number of find calls expected
    :param max_sends: if not None, make sure the number of send calls is <= max_sends
    :param min_sends: if not None, make sure the number of send calls is >= min_sends
"""
with check_sum_of_calls(
pymongo.message,
['query', 'get_more'],
max_finds,
min_finds,
):
if max_sends is not None or min_sends is not None:
with check_sum_of_calls(
pymongo.message,
# mongo < 2.6 uses insert, update, delete and _do_batched_insert. >= 2.6 _do_batched_write
['insert', 'update', 'delete', '_do_batched_write_command', '_do_batched_insert', ],
max_sends if max_sends is not None else float("inf"),
min_sends if min_sends is not None else 0,
):
yield
else:
yield
@contextmanager
def check_mongo_calls(num_finds=0, num_sends=None):
"""
Instruments the given store to count the number of calls to find (incl find_one) and the number
    of calls to send_message, which covers insert, update, and remove (if you provide num_sends). At the
end of the with statement, it compares the counts to the num_finds and num_sends.
:param num_finds: the exact number of find calls expected
:param num_sends: If none, don't instrument the send calls. If non-none, count and compare to
the given int value.
"""
with check_mongo_calls_range(num_finds, num_finds, num_sends, num_sends):
yield
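# Illustrative sketch only (hypothetical assertion; the query budget here is made up):
# fail if fetching a course issues anything other than exactly three find calls, leaving
# send calls uninstrumented.
def _example_query_budget(store, course_key):
    with check_mongo_calls(3):
        return store.get_course(course_key)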
# This dict represents the attribute keys for a course's 'about' info.
# Note: The 'video' attribute is intentionally excluded as it must be
# handled separately; its value maps to an alternate key name.
# Reference : cms/djangoapps/models/settings/course_details.py
ABOUT_ATTRIBUTES = {
'effort': "Testing effort",
}
class CourseAboutFactory(XModuleFactory):
"""
Factory for XModule course about.
"""
@classmethod
def _create(cls, target_class, **kwargs): # pylint: disable=unused-argument
"""
Uses **kwargs:
            effort: effort information
            video: video link
"""
user_id = kwargs.pop('user_id', None)
course_id, course_runtime = kwargs.pop("course_id"), kwargs.pop("course_runtime")
store = kwargs.pop('modulestore')
for about_key in ABOUT_ATTRIBUTES:
about_item = store.create_xblock(course_runtime, course_id, 'about', about_key)
about_item.data = ABOUT_ATTRIBUTES[about_key]
store.update_item(about_item, user_id, allow_not_found=True)
about_item = store.create_xblock(course_runtime, course_id, 'about', 'video')
about_item.data = "www.youtube.com/embed/testing-video-link"
store.update_item(about_item, user_id, allow_not_found=True)
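# Illustrative sketch only (hypothetical call site): populating the 'about' blocks of an
# already created course; course.id and course.runtime are assumed to be the usual course
# descriptor attributes.
def _example_create_about(store, course):
    CourseAboutFactory.create(
        course_id=course.id,
        course_runtime=course.runtime,
        user_id=ModuleStoreEnum.UserID.test,
        modulestore=store,
    )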
| agpl-3.0 | -6,414,569,919,637,659,000 | 35.755633 | 115 | 0.619436 | false |
XiaodunServerGroup/medicalmooc | common/lib/capa/capa/tests/__init__.py | 8 | 1737 | """Tools for helping with testing capa."""
import gettext
import os
import os.path
import fs.osfs
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def tst_render_template(template, context):
"""
    A test version of render_template. Renders the repr of the context, completely ignoring
    the template name. To make the output valid XML, quotes the content and wraps it in a <div>.
"""
return '<div>{0}</div>'.format(saxutils.escape(repr(context)))
def calledback_url(dispatch='score_update'):
return dispatch
xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_capa_system():
"""
Construct a mock LoncapaSystem instance.
"""
the_system = Mock(
spec=LoncapaSystem,
ajax_url='/dummy-ajax-url',
anonymous_student_id='student',
cache=None,
can_execute_unsafe_code=lambda: False,
DEBUG=True,
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
i18n=gettext.NullTranslations(),
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
render_template=tst_render_template,
seed=0,
STATIC_URL='/dummy-static/',
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
)
return the_system
def new_loncapa_problem(xml, capa_system=None):
"""Construct a `LoncapaProblem` suitable for unit tests."""
return LoncapaProblem(xml, id='1', seed=723, capa_system=capa_system or test_capa_system())
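# Illustrative sketch only (hypothetical helper; the XML is arbitrary): building a minimal
# problem with the mocked capa system above.
def _example_problem():
    xml = "<problem><p>A capa problem with no responders.</p></problem>"
    return new_loncapa_problem(xml)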
| agpl-3.0 | -1,424,710,685,869,028,600 | 29.473684 | 135 | 0.678181 | false |
zsulocal/pycoin | pycoin/tx/script/check_signature.py | 1 | 11210 | # -*- coding: utf-8 -*-
"""
Parse, stream, create, sign and verify Bitcoin transactions as Tx structures.
The MIT License (MIT)
Copyright (c) 2015 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ... import ecdsa
from ...encoding import sec_to_public_pair, EncodingError
from ...intbytes import byte2int, indexbytes, iterbytes
from . import der
from . import ScriptError
from . import errno
from .flags import (
VERIFY_NULLDUMMY, VERIFY_NULLFAIL, VERIFY_STRICTENC, VERIFY_MINIMALDATA,
VERIFY_DERSIG, VERIFY_LOW_S, VERIFY_WITNESS_PUBKEYTYPE
)
from .microcode import VCH_TRUE, VCH_FALSE
from .tools import bin_script, delete_subscript, int_from_script_bytes
def _check_valid_signature_1(sig):
ls = len(sig)
if ls < 9 or ls > 73:
raise ScriptError("bad signature size", errno.SIG_DER)
if sig[0] != 0x30:
raise ScriptError("bad signature byte 0", errno.SIG_DER)
if sig[1] != ls - 3:
raise ScriptError("signature size wrong", errno.SIG_DER)
r_len = sig[3]
if 5 + r_len >= ls:
raise ScriptError("r length exceed signature size", errno.SIG_DER)
def _check_valid_signature_2(sig):
ls = len(sig)
r_len = sig[3]
s_len = sig[5 + r_len]
if r_len + s_len + 7 != ls:
raise ScriptError("r and s size exceed signature size", errno.SIG_DER)
if sig[2] != 2:
raise ScriptError("R value region does not start with 0x02", errno.SIG_DER)
if r_len == 0:
raise ScriptError("zero-length R value", errno.SIG_DER)
if sig[4] & 0x80:
raise ScriptError("sig R value not allowed to be negative", errno.SIG_DER)
if r_len > 1 and sig[4] == 0 and not (sig[5] & 0x80):
raise ScriptError(
"R value can't have leading 0 byte unless doing so would make it negative", errno.SIG_DER)
if sig[r_len + 4] != 2:
raise ScriptError("S value region does not start with 0x02", errno.SIG_DER)
if s_len == 0:
raise ScriptError("zero-length S value", errno.SIG_DER)
if sig[r_len + 6] & 0x80:
raise ScriptError("negative S values not allowed", errno.SIG_DER)
if s_len > 1 and sig[r_len + 6] == 0 and not (sig[r_len + 7] & 0x80):
raise ScriptError(
"S value can't have leading 0 byte unless doing so would make it negative", errno.SIG_DER)
def check_valid_signature(sig):
# ported from bitcoind src/script/interpreter.cpp IsValidSignatureEncoding
sig = [s for s in iterbytes(sig)]
_check_valid_signature_1(sig)
_check_valid_signature_2(sig)
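def _signature_blob_is_strict_der(sig_blob):
    # Illustrative sketch only (hypothetical helper, not part of pycoin's API): report
    # whether a signature blob passes the strict DER checks above instead of raising.
    try:
        check_valid_signature(sig_blob)
        return True
    except ScriptError:
        return False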
def check_low_der_signature(sig_pair):
# IsLowDERSignature
r, s = sig_pair
hi_s = ecdsa.generator_secp256k1.curve().p() - s
if hi_s < s:
raise ScriptError("signature has high S value", errno.SIG_HIGH_S)
def check_defined_hashtype_signature(sig):
# IsDefinedHashtypeSignature
from pycoin.tx.Tx import SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_ANYONECANPAY
if len(sig) == 0:
raise ScriptError("signature is length 0")
hash_type = indexbytes(sig, -1) & (~SIGHASH_ANYONECANPAY)
if hash_type < SIGHASH_ALL or hash_type > SIGHASH_SINGLE:
raise ScriptError("bad hash type after signature", errno.SIG_HASHTYPE)
def parse_signature_blob(sig_blob, flags=0):
if len(sig_blob) == 0:
raise ValueError("empty sig_blob")
if flags & (VERIFY_DERSIG | VERIFY_LOW_S | VERIFY_STRICTENC):
check_valid_signature(sig_blob)
if flags & VERIFY_STRICTENC:
check_defined_hashtype_signature(sig_blob)
sig_pair = der.sigdecode_der(sig_blob[:-1], use_broken_open_ssl_mechanism=True)
signature_type = ord(sig_blob[-1:])
if flags & VERIFY_LOW_S:
check_low_der_signature(sig_pair)
return sig_pair, signature_type
def check_public_key_encoding(blob):
lb = len(blob)
if lb >= 33:
fb = byte2int(blob)
if fb == 4:
if lb == 65:
return
elif fb in (2, 3):
if lb == 33:
return
raise ScriptError("invalid public key blob", errno.PUBKEYTYPE)
def op_checksig(stack, signature_for_hash_type_f, expected_hash_type, tmp_script, flags):
try:
pair_blob = stack.pop()
sig_blob = stack.pop()
verify_strict = not not (flags & VERIFY_STRICTENC)
# if verify_strict flag is set, we fail the script immediately on bad encoding
if verify_strict:
check_public_key_encoding(pair_blob)
if flags & VERIFY_WITNESS_PUBKEYTYPE:
if byte2int(pair_blob) not in (2, 3) or len(pair_blob) != 33:
raise ScriptError("uncompressed key in witness", errno.WITNESS_PUBKEYTYPE)
sig_pair, signature_type = parse_signature_blob(sig_blob, flags)
public_pair = sec_to_public_pair(pair_blob, strict=verify_strict)
except (der.UnexpectedDER, ValueError, EncodingError):
stack.append(VCH_FALSE)
return
if expected_hash_type not in (None, signature_type):
raise ScriptError("wrong hash type")
# Drop the signature, since there's no way for a signature to sign itself
# see: Bitcoin Core/script/interpreter.cpp::EvalScript()
if not getattr(signature_for_hash_type_f, "skip_delete", False):
tmp_script = delete_subscript(tmp_script, bin_script([sig_blob]))
signature_hash = signature_for_hash_type_f(signature_type, script=tmp_script)
if ecdsa.verify(ecdsa.generator_secp256k1, public_pair, signature_hash, sig_pair):
stack.append(VCH_TRUE)
else:
if flags & VERIFY_NULLFAIL:
if len(sig_blob) > 0:
raise ScriptError("bad signature not NULL", errno.NULLFAIL)
stack.append(VCH_FALSE)
def sig_blob_matches(sig_blobs, public_pair_blobs, tmp_script, signature_for_hash_type_f,
flags, exit_early=False):
"""
sig_blobs: signature blobs
public_pair_blobs: a list of public pair blobs
tmp_script: the script as of the last code separator
signature_for_hash_type_f: signature_for_hash_type_f
flags: verification flags to apply
exit_early: if True, we may exit early if one of the sig_blobs is incorrect or misplaced. Used
for checking a supposedly validated transaction. A -1 indicates no match.
Returns a list of indices into public_pairs. If exit_early is True, it may return early.
If sig_blob_indices isn't long enough or contains a -1, the signature is not valid.
"""
strict_encoding = not not (flags & VERIFY_STRICTENC)
# Drop the signatures, since there's no way for a signature to sign itself
if not getattr(signature_for_hash_type_f, "skip_delete", False):
for sig_blob in sig_blobs:
tmp_script = delete_subscript(tmp_script, bin_script([sig_blob]))
sig_cache = {}
sig_blob_indices = []
ppb_idx = -1
while sig_blobs and len(sig_blobs) <= len(public_pair_blobs):
if exit_early and -1 in sig_blob_indices:
break
sig_blob, sig_blobs = sig_blobs[0], sig_blobs[1:]
try:
sig_pair, signature_type = parse_signature_blob(sig_blob, flags)
except (der.UnexpectedDER, ValueError):
sig_blob_indices.append(-1)
continue
if signature_type not in sig_cache:
sig_cache[signature_type] = signature_for_hash_type_f(signature_type, script=tmp_script)
try:
ppp = ecdsa.possible_public_pairs_for_signature(
ecdsa.generator_secp256k1, sig_cache[signature_type], sig_pair)
except ecdsa.NoSuchPointError:
ppp = []
while len(sig_blobs) < len(public_pair_blobs):
public_pair_blob, public_pair_blobs = public_pair_blobs[0], public_pair_blobs[1:]
ppb_idx += 1
if strict_encoding:
check_public_key_encoding(public_pair_blob)
if flags & VERIFY_WITNESS_PUBKEYTYPE:
if byte2int(public_pair_blob) not in (2, 3) or len(public_pair_blob) != 33:
raise ScriptError("uncompressed key in witness", errno.WITNESS_PUBKEYTYPE)
try:
public_pair = sec_to_public_pair(public_pair_blob, strict=strict_encoding)
except EncodingError:
public_pair = None
if public_pair in ppp:
sig_blob_indices.append(ppb_idx)
break
else:
sig_blob_indices.append(-1)
return sig_blob_indices
def op_checkmultisig(stack, signature_for_hash_type_f, expected_hash_type, tmp_script, flags):
require_minimal = flags & VERIFY_MINIMALDATA
key_count = int_from_script_bytes(stack.pop(), require_minimal=require_minimal)
if key_count < 0 or key_count > 20:
raise ScriptError("key_count not in range 0 to 20", errno.PUBKEY_COUNT)
public_pair_blobs = [stack.pop() for _ in range(key_count)]
signature_count = int_from_script_bytes(stack.pop(), require_minimal=require_minimal)
if signature_count < 0 or signature_count > key_count:
raise ScriptError(
"invalid number of signatures: %d for %d keys" % (signature_count, key_count), errno.SIG_COUNT)
sig_blobs = [stack.pop() for _ in range(signature_count)]
# check that we have the required hack 00 byte
hack_byte = stack.pop()
if flags & VERIFY_NULLDUMMY and hack_byte != b'':
raise ScriptError("bad dummy byte in checkmultisig", errno.SIG_NULLDUMMY)
sig_blob_indices = sig_blob_matches(
sig_blobs, public_pair_blobs, tmp_script, signature_for_hash_type_f, flags, exit_early=True)
sig_ok = VCH_FALSE
if -1 not in sig_blob_indices and len(sig_blob_indices) == len(sig_blobs):
# bitcoin requires the signatures to be in the same order as the public keys
# so let's make sure the indices are strictly increasing
for i in range(len(sig_blob_indices) - 1):
if sig_blob_indices[i] >= sig_blob_indices[i+1]:
break
else:
sig_ok = VCH_TRUE
if not sig_ok and flags & VERIFY_NULLFAIL:
for sig_blob in sig_blobs:
if len(sig_blob) > 0:
raise ScriptError("bad signature not NULL", errno.NULLFAIL)
stack.append(sig_ok)
return key_count
| mit | 461,443,307,147,393,300 | 39.469314 | 107 | 0.656467 | false |
mattilyra/scikit-learn | sklearn/cluster/affinity_propagation_.py | 60 | 10688 | """Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
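# Illustrative usage sketch (not part of this module): call the function above on a
# similarity matrix built from four toy points; preference defaults to the median similarity.
def _example_affinity_propagation():
    X = np.array([[1.0, 2.0], [1.1, 1.9], [8.0, 8.0], [8.2, 7.9]])
    S = -euclidean_distances(X, squared=True)
    cluster_centers_indices, labels = affinity_propagation(S)
    return cluster_centers_indices, labels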
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
        of exemplars, i.e. of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X: array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
return pairwise_distances_argmin(X, self.cluster_centers_)
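# Illustrative usage sketch (not part of this module): fit the estimator on a toy dataset
# and assign a new point to its nearest exemplar.
def _example_estimator_usage():
    X = np.array([[1.0, 2.0], [1.1, 1.9], [8.0, 8.0], [8.2, 7.9]])
    af = AffinityPropagation().fit(X)
    return af.labels_, af.predict(np.array([[8.1, 8.0]]))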
| bsd-3-clause | -3,684,058,154,956,254,700 | 31.987654 | 79 | 0.597773 | false |
badp/ganeti | test/py/cmdlib/testsupport/__init__.py | 2 | 1758 | #
#
# Copyright (C) 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Support classes and functions for testing the cmdlib module.
"""
from cmdlib.testsupport.cmdlib_testcase import CmdlibTestCase, \
withLockedLU
from cmdlib.testsupport.config_mock import ConfigMock
from cmdlib.testsupport.iallocator_mock import patchIAllocator
from cmdlib.testsupport.utils_mock import patchUtils
from cmdlib.testsupport.lock_manager_mock import LockManagerMock
from cmdlib.testsupport.netutils_mock import patchNetutils, HostnameMock
from cmdlib.testsupport.processor_mock import ProcessorMock
from cmdlib.testsupport.rpc_runner_mock import CreateRpcRunnerMock, \
RpcResultsBuilder
from cmdlib.testsupport.ssh_mock import patchSsh
__all__ = ["CmdlibTestCase",
"withLockedLU",
"ConfigMock",
"CreateRpcRunnerMock",
"HostnameMock",
"patchIAllocator",
"patchUtils",
"patchNetutils",
"patchSsh",
"LockManagerMock",
"ProcessorMock",
"RpcResultsBuilder",
]
| gpl-2.0 | 7,558,838,990,091,404,000 | 34.16 | 72 | 0.731513 | false |
OlexandrI/pyside | paste/lint.py | 1 | 15002 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
# Licensed to PSF under a Contributor Agreement
"""
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO are set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That the headers don't contain newlines or colons, end in _ or -, or
  contain character codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() is called with a string
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list of a single string; a
string will work, but perform horribly).
- That .next() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
import re
import sys
from types import DictType, StringType, TupleType, ListType
import warnings
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
bad_header_value_re = re.compile(r'[\000-\037]')
class WSGIWarning(Warning):
"""
Raised in response to WSGI-spec-related warnings
"""
def middleware(application, global_conf=None):
"""
When applied between a WSGI server and a WSGI application, this
middleware will check for WSGI compliancy on a number of levels.
This middleware does not modify the request or response in any
way, but will throw an AssertionError if anything seems off
(except for a failure to close the application iterator, which
will be printed to stderr -- there's no way to throw an exception
at that point).
"""
def lint_app(*args, **kw):
assert len(args) == 2, "Two arguments required"
assert not kw, "No keyword arguments allowed"
environ, start_response = args
check_environ(environ)
# We use this to check if the application returns without
# calling start_response:
start_response_started = []
def start_response_wrapper(*args, **kw):
assert len(args) == 2 or len(args) == 3, (
"Invalid number of arguments: %s" % args)
assert not kw, "No keyword arguments allowed"
status = args[0]
headers = args[1]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
check_status(status)
check_headers(headers)
check_content_type(status, headers)
check_exc_info(exc_info)
start_response_started.append(None)
return WriteWrapper(start_response(*args))
environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
iterator = application(environ, start_response_wrapper)
assert iterator is not None and iterator != False, (
"The application must return an iterator, if only an empty list")
check_iterator(iterator)
return IteratorWrapper(iterator, start_response_started)
return lint_app
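# Illustrative sketch only (hypothetical application, not part of this module): wrapping a
# trivial WSGI app with the lint middleware so every request and response is validated.
def _example_wrapped_app():
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello, WSGI lint\n']
    return middleware(app)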
class InputWrapper(object):
def __init__(self, wsgi_input):
self.input = wsgi_input
def read(self, *args):
assert len(args) <= 1
v = self.input.read(*args)
assert type(v) is type("")
return v
def readline(self, *args):
v = self.input.readline(*args)
assert type(v) is type("")
return v
def readlines(self, *args):
assert len(args) <= 1
lines = self.input.readlines(*args)
assert type(lines) is type([])
for line in lines:
assert type(line) is type("")
return lines
def __iter__(self):
while 1:
line = self.readline()
if not line:
return
yield line
def close(self):
assert 0, "input.close() must not be called"
class ErrorWrapper(object):
def __init__(self, wsgi_errors):
self.errors = wsgi_errors
def write(self, s):
assert type(s) is type("")
self.errors.write(s)
def flush(self):
self.errors.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
assert 0, "errors.close() must not be called"
class WriteWrapper(object):
def __init__(self, wsgi_writer):
self.writer = wsgi_writer
def __call__(self, s):
assert type(s) is type("")
self.writer(s)
class PartialIteratorWrapper(object):
def __init__(self, wsgi_iterator):
self.iterator = wsgi_iterator
def __iter__(self):
# We want to make sure __iter__ is called
return IteratorWrapper(self.iterator)
class IteratorWrapper(object):
def __init__(self, wsgi_iterator, check_start_response):
self.original_iterator = wsgi_iterator
self.iterator = iter(wsgi_iterator)
self.closed = False
self.check_start_response = check_start_response
def __iter__(self):
return self
def __next__(self):
assert not self.closed, (
"Iterator read after closed")
v = next(self.iterator)
if self.check_start_response is not None:
assert self.check_start_response, (
"The application returns and we started iterating over its body, but start_response has not yet been called")
self.check_start_response = None
return v
def close(self):
self.closed = True
if hasattr(self.original_iterator, 'close'):
self.original_iterator.close()
def __del__(self):
if not self.closed:
sys.stderr.write(
"Iterator garbage collected without being closed")
assert self.closed, (
"Iterator garbage collected without being closed")
def check_environ(environ):
assert type(environ) is DictType, (
"Environment is not of the right type: %r (environment: %r)"
% (type(environ), environ))
for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once']:
assert key in environ, (
"Environment missing required key: %r" % key)
for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
assert key not in environ, (
"Environment should not have the key: %s "
"(use %s instead)" % (key, key[5:]))
if 'QUERY_STRING' not in environ:
warnings.warn(
'QUERY_STRING is not in the WSGI environment; the cgi '
'module will use sys.argv when this variable is missing, '
'so application errors are more likely',
WSGIWarning)
for key in list(environ.keys()):
if '.' in key:
# Extension, we don't care about its type
continue
assert type(environ[key]) is StringType, (
"Environmental variable %s is not a string: %r (value: %r)"
% (key, type(environ[key]), environ[key]))
assert type(environ['wsgi.version']) is TupleType, (
"wsgi.version should be a tuple (%r)" % environ['wsgi.version'])
assert environ['wsgi.url_scheme'] in ('http', 'https'), (
"wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
check_input(environ['wsgi.input'])
check_errors(environ['wsgi.errors'])
# @@: these need filling out:
if environ['REQUEST_METHOD'] not in (
'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
warnings.warn(
"Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
WSGIWarning)
assert (not environ.get('SCRIPT_NAME')
or environ['SCRIPT_NAME'].startswith('/')), (
"SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
assert (not environ.get('PATH_INFO')
or environ['PATH_INFO'].startswith('/')), (
"PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
if environ.get('CONTENT_LENGTH'):
assert int(environ['CONTENT_LENGTH']) >= 0, (
"Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
if not environ.get('SCRIPT_NAME'):
assert 'PATH_INFO' in environ, (
"One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
"should at least be '/' if SCRIPT_NAME is empty)")
assert environ.get('SCRIPT_NAME') != '/', (
"SCRIPT_NAME cannot be '/'; it should instead be '', and "
"PATH_INFO should be '/'")
def check_input(wsgi_input):
for attr in ['read', 'readline', 'readlines', '__iter__']:
assert hasattr(wsgi_input, attr), (
"wsgi.input (%r) doesn't have the attribute %s"
% (wsgi_input, attr))
def check_errors(wsgi_errors):
for attr in ['flush', 'write', 'writelines']:
assert hasattr(wsgi_errors, attr), (
"wsgi.errors (%r) doesn't have the attribute %s"
% (wsgi_errors, attr))
def check_status(status):
assert type(status) is StringType, (
"Status must be a string (not %r)" % status)
# Implicitly check that we can turn it into an integer:
status_code = status.split(None, 1)[0]
assert len(status_code) == 3, (
"Status codes must be three characters: %r" % status_code)
status_int = int(status_code)
assert status_int >= 100, "Status code is invalid: %r" % status_int
if len(status) < 4 or status[3] != ' ':
warnings.warn(
"The status string (%r) should be a three-digit integer "
"followed by a single space and a status explanation"
% status, WSGIWarning)
def check_headers(headers):
assert type(headers) is ListType, (
"Headers (%r) must be of type list: %r"
% (headers, type(headers)))
header_names = {}
for item in headers:
assert type(item) is TupleType, (
"Individual headers (%r) must be of type tuple: %r"
% (item, type(item)))
assert len(item) == 2
name, value = item
assert name.lower() != 'status', (
"The Status header cannot be used; it conflicts with CGI "
"script, and HTTP status is not given through headers "
"(value: %r)." % value)
header_names[name.lower()] = None
assert '\n' not in name and ':' not in name, (
"Header names may not contain ':' or '\\n': %r" % name)
assert header_re.search(name), "Bad header name: %r" % name
assert not name.endswith('-') and not name.endswith('_'), (
"Names may not end in '-' or '_': %r" % name)
assert not bad_header_value_re.search(value), (
"Bad header value: %r (bad char: %r)"
% (value, bad_header_value_re.search(value).group(0)))
def check_content_type(status, headers):
code = int(status.split(None, 1)[0])
# @@: need one more person to verify this interpretation of RFC 2616
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
NO_MESSAGE_BODY = (204, 304)
NO_MESSAGE_TYPE = (204, 304)
for name, value in headers:
if name.lower() == 'content-type':
if code not in NO_MESSAGE_TYPE:
return
assert 0, (("Content-Type header found in a %s response, "
"which must not return content.") % code)
if code not in NO_MESSAGE_BODY:
assert 0, "No Content-Type header found in headers (%s)" % headers
def check_exc_info(exc_info):
assert exc_info is None or type(exc_info) is type(()), (
"exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
# More exc_info checks?
def check_iterator(iterator):
# Technically a string is legal, which is why it's a really bad
# idea, because it may cause the response to be returned
# character-by-character
assert not isinstance(iterator, str), (
"You should not return a string as your application iterator, "
"instead return a single-item list containing that string.")
def make_middleware(application, global_conf):
# @@: global_conf should be taken out of the middleware function,
# and isolated here
return middleware(application)
make_middleware.__doc__ = __doc__
__all__ = ['middleware', 'make_middleware']
| lgpl-3.0 | 2,726,638,972,741,654,500 | 33.408257 | 125 | 0.618184 | false |
sadaf2605/django | tests/middleware/tests.py | 2 | 36708 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import random
import re
from io import BytesIO
from unittest import skipIf
from django.conf import settings
from django.core import mail
from django.core.exceptions import PermissionDenied
from django.http import (
FileResponse, HttpRequest, HttpResponse, HttpResponseNotFound,
HttpResponsePermanentRedirect, HttpResponseRedirect, StreamingHttpResponse,
)
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import (
BrokenLinkEmailsMiddleware, CommonMiddleware,
)
from django.middleware.gzip import GZipMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import quote
@override_settings(ROOT_URLCONF='middleware.urls')
class CommonMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/slash/')
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/noslash')
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/unknown')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_querystring(self):
"""
APPEND_SLASH should preserve querystrings when redirecting.
"""
request = self.rf.get('/slash?test=1')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.url, '/slash/?test=1')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST, PUT, or PATCH to a URL which
would normally be redirected to a slashed version.
"""
msg = "maintaining %s data. Change your form to point to testserver/slash/"
request = self.rf.get('/slash')
request.method = 'POST'
response = HttpResponseNotFound()
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PUT'
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PATCH'
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/slash')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted(self):
"""
URLs which require quoting should be redirected to their slash version.
"""
request = self.rf.get(quote('/needsquoting#'))
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www(self):
request = self.rf.get('/path/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash(self):
request = self.rf.get('/slash/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless(self):
request = self.rf.get('/slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/slash/')
# The following tests examine expected behavior given a custom URLconf that
# overrides the default one through the request object.
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash_custom_urlconf(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/customurlconf/noslash')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/customurlconf/unknown')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_custom_urlconf(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r, "CommonMiddleware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
request.method = 'POST'
response = HttpResponseNotFound()
with self.assertRaisesMessage(RuntimeError, 'end in a slash'):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled_custom_urlconf(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted_custom_urlconf(self):
"""
URLs which require quoting should be redirected to their slash version.
"""
request = self.rf.get(quote('/customurlconf/needsquoting#'))
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r, "CommonMiddleware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/customurlconf/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www_custom_urlconf(self):
request = self.rf.get('/customurlconf/path/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/customurlconf/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash_custom_urlconf(self):
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless_custom_urlconf(self):
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/customurlconf/slash/')
# ETag + If-Not-Modified support tests
@override_settings(USE_ETAGS=True)
def test_etag(self):
req = HttpRequest()
res = HttpResponse('content')
self.assertTrue(CommonMiddleware().process_response(req, res).has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
res['ETag'] = 'tomatoes'
self.assertEqual(CommonMiddleware().process_response(req, res).get('ETag'), 'tomatoes')
@override_settings(USE_ETAGS=True)
def test_no_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
self.assertFalse(CommonMiddleware().process_response(req, res).has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_if_none_match(self):
first_req = HttpRequest()
first_res = CommonMiddleware().process_response(first_req, HttpResponse('content'))
second_req = HttpRequest()
second_req.method = 'GET'
second_req.META['HTTP_IF_NONE_MATCH'] = first_res['ETag']
second_res = CommonMiddleware().process_response(second_req, HttpResponse('content'))
self.assertEqual(second_res.status_code, 304)
# Tests for the Content-Length header
def test_content_length_header_added(self):
response = HttpResponse('content')
self.assertNotIn('Content-Length', response)
response = CommonMiddleware().process_response(HttpRequest(), response)
self.assertEqual(int(response['Content-Length']), len(response.content))
def test_content_length_header_not_added_for_streaming_response(self):
response = StreamingHttpResponse('content')
self.assertNotIn('Content-Length', response)
response = CommonMiddleware().process_response(HttpRequest(), response)
self.assertNotIn('Content-Length', response)
def test_content_length_header_not_changed(self):
response = HttpResponse()
bad_content_length = len(response.content) + 10
response['Content-Length'] = bad_content_length
response = CommonMiddleware().process_response(HttpRequest(), response)
self.assertEqual(int(response['Content-Length']), bad_content_length)
# Other tests
@override_settings(DISALLOWED_USER_AGENTS=[re.compile(r'foo')])
def test_disallowed_user_agents(self):
request = self.rf.get('/slash')
request.META['HTTP_USER_AGENT'] = 'foo'
with self.assertRaisesMessage(PermissionDenied, 'Forbidden user agent'):
CommonMiddleware().process_request(request)
def test_non_ascii_query_string_does_not_crash(self):
"""Regression test for #15152"""
request = self.rf.get('/slash')
request.META['QUERY_STRING'] = force_str('drink=café')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
def test_response_redirect_class(self):
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/slash/')
self.assertIsInstance(r, HttpResponsePermanentRedirect)
def test_response_redirect_class_subclass(self):
class MyCommonMiddleware(CommonMiddleware):
response_redirect_class = HttpResponseRedirect
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = MyCommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.url, '/slash/')
self.assertIsInstance(r, HttpResponseRedirect)
@override_settings(
IGNORABLE_404_URLS=[re.compile(r'foo')],
MANAGERS=['[email protected]'],
)
class BrokenLinkEmailsMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
def setUp(self):
self.req = self.rf.get('/regular_url/that/does/not/exist')
self.resp = self.client.get(self.req.path)
def test_404_error_reporting(self):
self.req.META['HTTP_REFERER'] = '/another/url/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Broken', mail.outbox[0].subject)
def test_404_error_reporting_no_referer(self):
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_404_error_reporting_ignored_url(self):
self.req.path = self.req.path_info = 'foo_url/that/does/not/exist'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
@skipIf(six.PY3, "HTTP_REFERER is str type on Python 3")
def test_404_error_nonascii_referrer(self):
        # Such referer strings should not happen, but if one does, the
        # middleware should not crash.
self.req.META['HTTP_REFERER'] = b'http://testserver/c/\xd0\xbb\xd0\xb8/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@skipIf(six.PY3, "HTTP_USER_AGENT is str type on Python 3")
def test_404_error_nonascii_user_agent(self):
        # Such user agent strings should not happen, but if one does, the
        # middleware should not crash.
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = b'\xd0\xbb\xd0\xb8\xff\xff'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('User agent: \u043b\u0438\ufffd\ufffd\n', mail.outbox[0].body)
def test_custom_request_checker(self):
class SubclassedMiddleware(BrokenLinkEmailsMiddleware):
ignored_user_agent_patterns = (re.compile(r'Spider.*'), re.compile(r'Robot.*'))
def is_ignorable_request(self, request, uri, domain, referer):
'''Check user-agent in addition to normal checks.'''
if super(SubclassedMiddleware, self).is_ignorable_request(request, uri, domain, referer):
return True
user_agent = request.META['HTTP_USER_AGENT']
return any(pattern.search(user_agent) for pattern in self.ignored_user_agent_patterns)
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = 'Spider machine 3.4'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
self.req.META['HTTP_USER_AGENT'] = 'My user agent'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
def test_referer_equal_to_requested_url(self):
"""
        Some bots set the referer to the current URL to avoid being blocked by
        a referer check (#25302).
"""
self.req.META['HTTP_REFERER'] = self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
# URL with scheme and domain should also be ignored
self.req.META['HTTP_REFERER'] = 'http://testserver%s' % self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
# URL with a different scheme should be ignored as well because bots
# tend to use http:// in referers even when browsing HTTPS websites.
self.req.META['HTTP_X_PROTO'] = 'https'
self.req.META['SERVER_PORT'] = 443
with self.settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_PROTO', 'https')):
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_referer_equal_to_requested_url_on_another_domain(self):
self.req.META['HTTP_REFERER'] = 'http://anotherserver%s' % self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@override_settings(APPEND_SLASH=True)
def test_referer_equal_to_requested_url_without_trailing_slash_when_append_slash_is_set(self):
self.req.path = self.req.path_info = '/regular_url/that/does/not/exist/'
self.req.META['HTTP_REFERER'] = self.req.path_info[:-1]
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
@override_settings(APPEND_SLASH=False)
def test_referer_equal_to_requested_url_without_trailing_slash_when_append_slash_is_unset(self):
self.req.path = self.req.path_info = '/regular_url/that/does/not/exist/'
self.req.META['HTTP_REFERER'] = self.req.path_info[:-1]
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@override_settings(ROOT_URLCONF='middleware.cond_get_urls')
class ConditionalGetMiddlewareTest(SimpleTestCase):
def setUp(self):
self.req = RequestFactory().get('/')
self.resp = self.client.get(self.req.path_info)
# Tests for the Date header
def test_date_header_added(self):
self.assertNotIn('Date', self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertIn('Date', self.resp)
# Tests for the Content-Length header
def test_content_length_header_added(self):
content_length = len(self.resp.content)
# Already set by CommonMiddleware, remove it to check that
        # ConditionalGetMiddleware re-adds it.
del self.resp['Content-Length']
self.assertNotIn('Content-Length', self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertIn('Content-Length', self.resp)
self.assertEqual(int(self.resp['Content-Length']), content_length)
def test_content_length_header_not_added(self):
resp = StreamingHttpResponse('content')
self.assertNotIn('Content-Length', resp)
resp = ConditionalGetMiddleware().process_response(self.req, resp)
self.assertNotIn('Content-Length', resp)
def test_content_length_header_not_changed(self):
bad_content_length = len(self.resp.content) + 10
self.resp['Content-Length'] = bad_content_length
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(int(self.resp['Content-Length']), bad_content_length)
# Tests for the ETag header
def test_if_none_match_and_no_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_none_match_and_etag(self):
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_same_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_none_match_and_same_etag_with_quotes(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = '"spam"'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_none_match_and_different_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_redirect(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp['Location'] = '/'
self.resp.status_code = 301
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 301)
def test_if_none_match_and_client_error(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp.status_code = 400
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 400)
# Tests for the Last-Modified header
def test_if_modified_since_and_no_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_modified_since_and_last_modified(self):
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_same_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_past(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_future(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:41:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_redirect(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp['Location'] = '/'
self.resp.status_code = 301
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 301)
def test_if_modified_since_and_client_error(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp.status_code = 400
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 400)
class XFrameOptionsMiddlewareTest(SimpleTestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def test_same_origin(self):
"""
The X_FRAME_OPTIONS setting can be set to SAMEORIGIN to have the
middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='sameorigin'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_deny(self):
"""
The X_FRAME_OPTIONS setting can be set to DENY to have the middleware
use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
with override_settings(X_FRAME_OPTIONS='deny'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_defaults_sameorigin(self):
"""
If the X_FRAME_OPTIONS setting is not set then it defaults to
SAMEORIGIN.
"""
with override_settings(X_FRAME_OPTIONS=None):
del settings.X_FRAME_OPTIONS # restored by override_settings
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_dont_set_if_set(self):
"""
If the X-Frame-Options header is already set then the middleware does
not attempt to override it.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response['X-Frame-Options'] = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response['X-Frame-Options'] = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_response_exempt(self):
"""
If the response has an xframe_options_exempt attribute set to False
then it still sets the header, but if it's set to True then it doesn't.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response.xframe_options_exempt = False
r = XFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
response = HttpResponse()
response.xframe_options_exempt = True
r = XFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertIsNone(r.get('X-Frame-Options'))
def test_is_extendable(self):
"""
The XFrameOptionsMiddleware method that determines the X-Frame-Options
header value can be overridden based on something in the request or
response.
"""
class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
# This is just an example for testing purposes...
def get_xframe_options_value(self, request, response):
if getattr(request, 'sameorigin', False):
return 'SAMEORIGIN'
if getattr(response, 'sameorigin', False):
return 'SAMEORIGIN'
return 'DENY'
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
request = HttpRequest()
request.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(request, HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
class GZipMiddlewareTest(SimpleTestCase):
"""
Tests the GZipMiddleware.
"""
short_string = b"This string is too short to be worth compressing."
compressible_string = b'a' * 500
incompressible_string = b''.join(six.int2byte(random.randint(0, 255)) for _ in range(500))
sequence = [b'a' * 500, b'b' * 200, b'a' * 300]
sequence_unicode = ['a' * 500, 'é' * 200, 'a' * 300]
def setUp(self):
self.req = RequestFactory().get('/')
self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate'
self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
self.resp = HttpResponse()
self.resp.status_code = 200
self.resp.content = self.compressible_string
self.resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp = StreamingHttpResponse(self.sequence)
self.stream_resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp_unicode = StreamingHttpResponse(self.sequence_unicode)
self.stream_resp_unicode['Content-Type'] = 'text/html; charset=UTF-8'
@staticmethod
def decompress(gzipped_string):
with gzip.GzipFile(mode='rb', fileobj=BytesIO(gzipped_string)) as f:
return f.read()
def test_compress_response(self):
"""
Compression is performed on responses with compressible content.
"""
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertEqual(r.get('Content-Length'), str(len(r.content)))
def test_compress_streaming_response(self):
"""
Compression is performed on responses with streaming content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp)
self.assertEqual(self.decompress(b''.join(r)), b''.join(self.sequence))
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_streaming_response_unicode(self):
"""
Compression is performed on responses with streaming Unicode content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp_unicode)
self.assertEqual(
self.decompress(b''.join(r)),
b''.join(x.encode('utf-8') for x in self.sequence_unicode)
)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_file_response(self):
"""
Compression is performed on FileResponse.
"""
with open(__file__, 'rb') as file1:
file_resp = FileResponse(file1)
file_resp['Content-Type'] = 'text/html; charset=UTF-8'
r = GZipMiddleware().process_response(self.req, file_resp)
with open(__file__, 'rb') as file2:
self.assertEqual(self.decompress(b''.join(r)), file2.read())
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertIsNot(r.file_to_stream, file1)
def test_compress_non_200_response(self):
"""
Compression is performed on responses with a status other than 200
(#10762).
"""
self.resp.status_code = 404
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
def test_no_compress_short_response(self):
"""
Compression isn't performed on responses with short content.
"""
self.resp.content = self.short_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.short_string)
self.assertIsNone(r.get('Content-Encoding'))
def test_no_compress_compressed_response(self):
"""
Compression isn't performed on responses that are already compressed.
"""
self.resp['Content-Encoding'] = 'deflate'
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'deflate')
def test_no_compress_incompressible_response(self):
"""
Compression isn't performed on responses with incompressible content.
"""
self.resp.content = self.incompressible_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.incompressible_string)
self.assertIsNone(r.get('Content-Encoding'))
@override_settings(USE_ETAGS=True)
class ETagGZipMiddlewareTest(SimpleTestCase):
"""
Tests if the ETagMiddleware behaves correctly with GZipMiddleware.
"""
rf = RequestFactory()
compressible_string = b'a' * 500
def test_compress_response(self):
"""
ETag is changed after gzip compression is performed.
"""
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate')
response = GZipMiddleware().process_response(
request,
CommonMiddleware().process_response(request, HttpResponse(self.compressible_string))
)
gzip_etag = response.get('ETag')
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='')
response = GZipMiddleware().process_response(
request,
CommonMiddleware().process_response(request, HttpResponse(self.compressible_string))
)
nogzip_etag = response.get('ETag')
self.assertNotEqual(gzip_etag, nogzip_etag)
| bsd-3-clause | 3,331,574,900,177,422,000 | 43.982843 | 112 | 0.662208 | false |
4eek/edx-platform | lms/djangoapps/shoppingcart/migrations/0013_auto__add_field_invoice_is_valid.py | 114 | 13795 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Invoice.is_valid'
db.add_column('shoppingcart_invoice', 'is_valid',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Invoice.is_valid'
db.delete_column('shoppingcart_invoice', 'is_valid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'company_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tax_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 | 4,610,229,696,991,596,500 | 78.739884 | 182 | 0.555636 | false |
abegong/textbadger | textbadger/tb_app/models.py | 1 | 17389 | #from django.db.models import Model, TextField
#from djangotoolbox.fields import ListField, EmbeddedModelField, DictField
from django.contrib.auth.models import User
from django.db import connections
from bson.objectid import ObjectId
from pymongo.errors import InvalidId
import csv, re, json, datetime, random
from collections import defaultdict
import tb_app.kripp as kripp
def uses_mongo(function):
    """Decorator that passes the default MongoDB connection as the first argument."""
    def _inner(*args, **kwargs):
mongo = connections["default"]
return function(mongo, *args, **kwargs)
return _inner
class MongoEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ObjectId):
return str(obj)
if hasattr(obj, 'isoformat'):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
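# Usage note (illustrative sketch, not from the original module): MongoEncoder lets
# json.dumps serialize raw pymongo documents, e.g.
#   json.dumps({'_id': ObjectId(), 'created_at': datetime.datetime.now()}, cls=MongoEncoder)
# which renders the ObjectId as a string and the datetime in ISO format.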
##############################################################################
#This is one way new collections are created
def convert_document_csv_to_bson(csv_text):
C = csv.reader(csv.StringIO(csv_text))
#Parse the header row
H = C.next()
#Capture the url/content column index
url_index, content_index = None, None
if 'url' in H:
url_index = H.index('url')
if 'content' in H:
content_index = H.index('content')
if url_index==None and content_index==None:
raise Exception('You must specify either a "url" column or a "content" column in the .csv header.')
#Identify metadata_fields
meta_fields = {}
for h in H:
if re.match('META_', h):
name = re.sub('^META_', '', h)
index = H.index(h)
if name in meta_fields:
raise Exception('Duplicate META_ name : '+name)
meta_fields[name] = index
# print json.dumps(meta_fields, indent=2)
documents_json = []
#http://lethain.com/handling-very-large-csv-and-xml-files-in-python/
#print csv.field_size_limit()
csv.field_size_limit(1000000)
#For each row in the collection
for row in C:
j = {}
#Grab the content or url
#If both are present, url gets precedence
if url_index != None:
j['url'] = row[url_index]
elif content_index != None:
j['content'] = row[content_index]
#Grab metadata fields
m = {}
for f in meta_fields:
#Don't include missing values
#! Maybe include other missing values here
            if row[meta_fields[f]] != '':
                m[f] = row[meta_fields[f]]
#Don't include empty metadata objects
if m != {}:
j["metadata"] = m
documents_json.append(j)
# print json.dumps(documents_json, indent=2)
return documents_json
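# Illustrative input/output sketch (values are made up; only the "url"/"content"
# and "META_" header conventions come from the parser above):
#
#   url,META_author,META_year
#   http://example.com/a,Alice,2012
#
# yields:
#
#   [{'url': 'http://example.com/a', 'metadata': {'author': 'Alice', 'year': '2012'}}]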
def get_new_collection_json(name, description, documents):
""" Create a new collection, given the name, description, and documents """
J = {
'profile' : {
'name' : name,
'description' : description,
'created_at' : datetime.datetime.now(),
'size' : len(documents),
},
'documents' : documents,
}
return J
@uses_mongo
def create_collection_json(mongo, name, description, collections):
""" Create a new collection using documents from other collections
collections is an array with the form:
[{tb_app_collection.$id : docs to retrieve from this collection}]
"""
coll = mongo.get_collection("tb_app_collection")
documents = []
for id_ in collections:
collection = coll.find_one({"_id": ObjectId(id_)})
doc_count = collections[id_]
doc_list = collection["documents"]
random.shuffle( doc_list )
for doc in doc_list[:doc_count]:
doc["metadata"]["source_id"] = id_
doc["metadata"]["source_name"] = collection["profile"]["name"]
documents += doc_list[:doc_count]
random.shuffle(documents)
return get_new_collection_json(name, description, documents)
def get_default_codebook_questions():
return [
{
"question_type": "Static text",
"var_name": "default_question",
"params": {
"header_text": "<h2> New codebook </h2><p><strong>Use the controls at right to add questions.</strong></p>",
}
},
{
"question_type": "Multiple choice",
"var_name": "mchoice",
"params": {
"header_text": "Here is an example of a multiple choice question. Which answer do you like best?",
"answer_array": ["This one", "No, this one", "A third option"],
}
},
{
"question_type": "Short essay",
"var_name": "essay",
"params": {
"header_text": "Here's a short essay question.",
}
}
]
def create_new_variable_json(question_index, subquestion_index, variable_name, question_header, subquestion_label, variable_type):
return {
'question_index': question_index,
'subquestion_index': subquestion_index,
'variable_name': variable_name,
'question_header': question_header,
'subquestion_label': subquestion_label,
'variable_type': variable_type
}
#! As the code is written, this method is never invoked.
#! Using the variables field would help clean up the code in a bunch of places
#! * reliability checking / csv export / table generation on the batch page
def get_codebook_variables_from_questions(questions):
variables = []
for i,q in enumerate(questions):
if q["var_name"]:
var_name = "_"+q["var_name"]
else:
var_name = ''
short_text = q["params"]["header_text"]
#variable_type = q["params"]["variable_type"]
if q["question_type"] == 'Static text':
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "none") )
if q["question_type"] in ['Multiple choice', 'Two-way scale']:
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "ordinal") )
if q["question_type"] == 'Check all that apply':
for j,a in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, "", "nominal") )
if q["question_type"] in ['Text box', 'Short essay']:
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "text") )
elif q["question_type"] == 'Radio matrix':
for j,p in enumerate(q["params"]["question_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p, "interval") )
elif q["question_type"] == 'Checkbox matrix':
for j,p in enumerate(q["params"]["question_array"]):
for k,r in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+"_"+str(k+1)+var_name, short_text, p, "nominal") )
elif q["question_type"] == 'Two-way matrix':
for j,p in enumerate(q["params"]["left_statements"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p+"/"+q["params"]["right_statements"][j], "ordinal") )
elif q["question_type"] == 'Text matrix':
for j,p in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p, "text") )
return variables
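# Worked example (hypothetical question, mirroring the rules above):
#
#   get_codebook_variables_from_questions([
#       {'question_type': 'Multiple choice', 'var_name': 'mchoice',
#        'params': {'header_text': 'Pick one', 'answer_array': ['a', 'b']}}])
#
# returns a single variable dict with variable_name 'Q1_mchoice',
# variable_type 'ordinal' and question_index 1.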
def get_new_codebook_json(name, description):
questions = get_default_codebook_questions()
variables = get_codebook_variables_from_questions(questions)
#Construct object
return {
'profile' : {
'name' : name,
'description' : description,
'created_at' : datetime.datetime.now(),
'version' : 1,
'children' : [],
'batches' : [],
'parent' : None,
},
'questions' : questions,
'variables' : variables,
}
def get_revised_codebook_json(parent_codebook, question_json):
#print parent_codebook
J = {
'profile' : {
'description' : parent_codebook['profile']["description"],
'created_at' : datetime.datetime.now(),
'version' : parent_codebook['profile']["version"] + 1,
'children' : [],
'batches' : [],
'parent' : parent_codebook['_id'],#ObjectId(parent_id),
},
'questions' : question_json,
'variables' : get_codebook_variables_from_questions(question_json),
}
if parent_codebook['profile']["children"]:
J['profile']['name'] = parent_codebook['profile']["name"] + " (branch)"
else:
J['profile']['name'] = parent_codebook['profile']["name"]
return J
def gen_codebook_column_names(codebook):
"""codebook should be in json format, hot off a mongodb query"""
col_names = ['created_at']
for i,q in enumerate(codebook["questions"]):
if q["var_name"]:
var_name = "_"+q["var_name"]
else:
var_name = ''
if q["question_type"] in ['Static text', 'Multiple choice', 'Check all that apply', 'Two-way scale', 'Text box', 'Short essay']:
col_names.append("Q"+str(i+1)+var_name)
elif q["question_type"] in ['Radio matrix', 'Checkbox matrix']:
for j,p in enumerate(q["params"]["question_array"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
elif q["question_type"] == 'Two-way matrix':
for j,p in enumerate(q["params"]["left_statements"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
elif q["question_type"] == 'Text matrix':
for j,p in enumerate(q["params"]["answer_array"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
return col_names
def gen_col_index_from_col_names(col_names):
return dict([(v,k) for (k,v) in enumerate(col_names)])
def gen_csv_column_from_batch_labels(labels, col_index):
csv_col = [None for i in range(len(col_index))]
print labels
for q in labels:
if type(labels[q]) == unicode:
csv_col[col_index[q]] = str(labels[q].encode("utf-8"))
else:
csv_col[col_index[q]] = labels[q]
return csv_col
### Batches ###################################################################
def get_batch_documents_json(coders, pct_overlap, shuffle, collection):
k = len(collection["documents"])
overlap = int((k * pct_overlap) / 100)
import random
doc_ids = range(k)
if shuffle:
# ? This can stay here until we do our DB refactor.
random.shuffle(doc_ids)
shared = doc_ids[:overlap]
unique = doc_ids[overlap:]
#Construct documents object
documents = []
empty_labels = dict([(x, []) for x in coders])
for i in shared:
documents.append({
'index': i,
# 'content': collection["documents"][i]["content"],
'labels': empty_labels
})
for i in unique:
documents.append({
'index': i,
# 'content': collection["documents"][i]["content"],
'labels': { coders[i%len(coders)] : [] }
#Populate the list with a random smattering of fake labels
#'labels': {coders[i % len(coders)]: random.choice([None for x in range(2)] + range(20))}
})
if shuffle:
random.shuffle(documents)
return documents
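# Worked example of the overlap split above (hypothetical numbers): with a
# 100-document collection, pct_overlap=20 and 4 coders, overlap = 20, so the
# first 20 (shuffled) indices get an empty label slot for every coder, while the
# remaining 80 are dealt out via i % len(coders), one coder per document.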
def get_new_batch_json(count, coders, pct_overlap, shuffle, codebook, collection):
#Construct profile object
profile = {
'name': 'Batch ' + str(count + 1),
'description': collection["profile"]["name"][:20] + " * " + codebook["profile"]["name"][:20] + " (" + str(codebook["profile"]["version"]) + ")",
'index': count + 1,
'codebook_id': codebook['_id'],
'collection_id': collection['_id'],
'coders': coders,
'pct_overlap': pct_overlap,
'shuffle': shuffle,
'created_at': datetime.datetime.now(),
}
documents = get_batch_documents_json(coders, pct_overlap, shuffle, collection)
#Construct batch object
batch = {
'profile' : profile,
'documents': documents,
'reports': {
'progress': {},
'reliability': {},
},
}
return batch
def get_most_recent_answer_set(answer_set_list):
#Get the most recent answer set for this coder (important if the coder used did an "undo")
most_recent_answer_set = {}
most_recent_date = None
for answer_set in answer_set_list:
if not most_recent_date or answer_set["created_at"] > most_recent_date:
most_recent_answer_set = answer_set
most_recent_date = answer_set["created_at"]
return most_recent_answer_set
@uses_mongo
def update_batch_progress(mongo, id_):
#Connect to the DB
coll = mongo.get_collection("tb_app_batch")
#Retrieve the batch
batch = coll.find_one({"_id": ObjectId(id_)})
# print json.dumps(batch, indent=2, cls=MongoEncoder)
#Scaffold the progress object
coders = batch["profile"]["coders"]
progress = {
"coders": dict([(c, {"assigned":0, "complete":0}) for c in coders]),
"summary": {}
}
#Count total and complete document codes
assigned, complete = 0, 0
for doc in batch["documents"]:
for coder in doc["labels"]:
assigned += 1
progress["coders"][coder]["assigned"] += 1
if doc["labels"][coder] != []:
complete += 1
progress["coders"][coder]["complete"] += 1
#Calculate percentages
for coder in progress["coders"]:
c = progress["coders"][coder]
c["percent"] = round(float(100 * c["complete"]) / c["assigned"], 1)
progress["summary"] = {
"assigned": assigned,
"complete": complete,
"percent": round(float(100 * complete) / assigned, 1),
}
batch["reports"]["progress"] = progress
coll.update({"_id": ObjectId(id_)}, batch)
def convert_batch_to_2d_arrays(batch, var_names, missing_val=None):
#2-D arrays wrapped in a dictionary : [question][document][coder]
coder_index = dict([(c,i) for i,c in enumerate(batch["profile"]["coders"])])
#Create empty arrays
#! The "None" here should be zero for CATA variables.
#! But I don't have a good way to detect CATA variables.
#! This code needs a refactor, but now is not the time.
code_arrays = dict([ (n, [[None for c in coder_index] for d in batch["documents"]]) for n in var_names])
for i, doc in enumerate(batch["documents"]):
for coder in doc["labels"]:
answer_set = get_most_recent_answer_set(doc["labels"][coder])
#print answer_set
for question in answer_set:
if question in code_arrays.keys():
try:
#print '\t'.join([str(x) for x in [question, i, coder, answer_set[question]]])
code_arrays[question][i][coder_index[coder]] = float(answer_set[question])
except ValueError:
code_arrays[question][i][coder_index[coder]] = missing_val
return code_arrays
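# Shape sketch of the return value above: one entry per variable name, each a
# len(documents) x len(coders) nested list. Cells hold a coder's most recent
# numeric answer as a float, stay None when unanswered, and fall back to
# missing_val when the stored answer is not numeric. (Variable names below are
# hypothetical.)
#
#   {'Q1_mchoice': [[1.0, 2.0], [None, 1.0]], 'Q2_scale': [[3.0, 3.0], [2.0, None]]}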
@uses_mongo
def update_batch_reliability(mongo, batch_id):
batch = mongo.get_collection("tb_app_batch").find_one({"_id": ObjectId(batch_id)})
codebook = mongo.get_collection("tb_app_codebook").find_one({"_id": ObjectId(batch["profile"]["codebook_id"])})
variables = codebook["variables"]
var_names = [v["variable_name"] for v in variables]
data_arrays = convert_batch_to_2d_arrays(batch, var_names)
summary = {}
for i, v in enumerate(variables):
# print v
v_name = v["variable_name"]
# print q, '\t', kripp.alpha(data_arrays[q], kripp.interval)
#print v_name, '\t', v["variable_type"]
#Get variable metric
v_type = v["variable_type"]
if v_type == "nominal":
metric = kripp.nominal
elif v_type in ["interval", "ordinal"]:
metric = kripp.interval
elif v_type == "ratio":
metric = kripp.ratio
if metric:
alpha = kripp.alpha(data_arrays[v_name], metric)
try:
alpha_100 = 100*alpha
except TypeError:
alpha_100 = None
summary[v_name] = dict(v.items() + {
'alpha': alpha,
'alpha_100': alpha_100,
}.items())
#Build the reliability object
reliability = {
"updated_at" : datetime.datetime.now(),
#"docs": {},
#"coders": dict([(c, {}) for c in coders]),
"summary": summary,
}
#batch["reports"]["reliability"] = reliability
#print json.dumps(reliability, indent=2, cls=MongoEncoder)
mongo.get_collection("tb_app_batch").update(
{ "_id": ObjectId(batch_id) },
{ "$set": { 'reports.reliability' : reliability}}
)
| mit | 5,115,544,193,139,701,000 | 34.200405 | 171 | 0.56346 | false |
bitmazk/django-multilingual-survey | runtests.py | 1 | 1047 | #!/usr/bin/env python
"""
This script is used to run tests, create a coverage report and output the
statistics at the end of the tox run.
To run this script just execute ``tox``
"""
import re
from fabric.api import local, warn
from fabric.colors import green, red
if __name__ == '__main__':
local('flake8 --ignore=E126 --ignore=W391 --statistics'
' --exclude=submodules,migrations,south_migrations,build .')
local('coverage run --source="multilingual_survey" manage.py test -v 2'
' --traceback --failfast'
' --settings=multilingual_survey.tests.settings'
' --pattern="*_tests.py"')
local('coverage html -d coverage --omit="*__init__*,*/settings/*'
',*/migrations/*,*/south_migrations/*,*/tests/*,*admin*"')
total_line = local('grep -n pc_cov coverage/index.html', capture=True)
percentage = float(re.findall(r'(\d+)%', total_line)[-1])
if percentage < 100:
warn(red('Coverage is {0}%'.format(percentage)))
print(green('Coverage is {0}%'.format(percentage)))
| mit | 4,390,687,281,653,750,000 | 39.269231 | 75 | 0.640879 | false |
alfa-addon/addon | plugin.video.alfa/channels/blogdepelis.py | 1 | 6801 | # -*- coding: utf-8 -*-
# -*- Channel Blog de Pelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
from builtins import range
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
from bs4 import BeautifulSoup
host = 'https://www.blogdepelis.to/'
list_language = list()
list_quality = []
list_servers = ['directo']
def create_soup(url, referer=None, unescape=False):
logger.info()
if referer:
data = httptools.downloadpage(url, headers={'Referer': referer}).data
else:
data = httptools.downloadpage(url).data
if unescape:
data = scrapertools.unescape(data)
soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
return soup
def add_menu_items(item):
logger.info()
itemlist = list()
soup = create_soup(host)
matches = soup.find_all("li", class_="menu-item")
for elem in matches:
url = elem.a["href"]
title = elem.a.text.capitalize()
itemlist.append(Item(channel=item.channel, url=url, title=title, action="list_all"))
return itemlist
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="add_menu_items",
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + "?s=",
thumbnail=get_thumb('search', auto=True), page=1))
autoplay.show_option(item.channel, itemlist)
return itemlist
def list_all(item):
logger.info()
itemlist = list()
soup = create_soup(item.url)
matches = soup.find_all("article", class_="latestPost")
for elem in matches:
url = elem.a["href"]
thumb = elem.img["src"]
year = scrapertools.find_single_match(elem.a["title"], r"\((\d{4})\)")
title = re.sub(r" \(%s\)" % year, "", elem.a["title"]).capitalize()
action = "findvideos"
if "online" in title.lower() or "películas de" in title.lower():
title = re.sub(r" \(online\)", "", title.lower()).capitalize()
action = "get_from_list"
itemlist.append(Item(channel=item.channel, title=title, url=url, contentTitle=title, action=action,
thumbnail=thumb, infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, True)
try:
next_page = soup.find("a", class_="next page-numbers")["href"]
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=next_page, action='list_all'))
except:
pass
return itemlist
def get_from_list(item):
logger.info()
itemlist = list()
soup = create_soup(item.url)
matches = soup.find_all("div", class_="MsoNormal")
for elem in matches:
if not elem.find("a"):
continue
url = elem.a["href"]
year = scrapertools.find_single_match(elem.text, "\d{4}")
title = elem.a.text.capitalize()
itemlist.append(Item(channel=item.channel, title=title, url=url, contentTitle=title, action="findvideos",
infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, True)
return itemlist
def genres(item):
logger.info()
itemlist = list()
soup = create_soup(host+'peliculas')
action = 'list_all'
matches = soup.find("div", id="panel_genres_filter").find_all("a")
for elem in matches:
title = elem.text
url = "%sresults/?cat=%s&genre=%s&p=" % (host, item.cat, title)
itemlist.append(Item(channel=item.channel, title=title, url=url, action=action, section=item.section, page=0))
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
url = create_soup(item.url).find("iframe")["src"]
itemlist.append(Item(channel=item.channel, title='%s', url=url, action="play", server="directo",
language="LAT", infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    # Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
autoplay.start(itemlist, item)
if item.contentType == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def play(item):
logger.info()
    item.url = item.url.replace('&f=frame', '') # Required for ProxyWeb
data = httptools.downloadpage(item.url, headers={"referer": host}).data
url = scrapertools.find_single_match(data, '"file":"([^"]+)","label":".*?"')
item = item.clone(url=url + "|referer=%s" % item.url)
return [item]
def search(item, texto):
logger.info()
try:
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
    # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'infantiles':
item.url = host + "category/disney-channel"
item.page=1
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
| gpl-3.0 | 1,226,805,095,926,423,000 | 27.301724 | 118 | 0.590174 | false |
amb/blender-texture-tools | __init__.py | 1 | 55839 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2019-2021: Tommi Hyppänen
bl_info = {
"name": "Image Edit Operations",
"category": "Paint",
"description": "Various image processing filters and operations",
"author": "Tommi Hyppänen (ambi)",
"location": "Image Editor > Side Panel > Image",
"documentation": "https://blenderartists.org/t/seamless-texture-patching-and-filtering-addon",
"version": (0, 2, 0),
"blender": (2, 93, 0),
}
import bpy # noqa
import functools
import numpy as np
import random
from . import pycl as cl
rnd = random.random
from . import image_ops
import importlib
# from .oklab import linear_to_srgb, srgb_to_linear
importlib.reload(image_ops)
import json
from .cl_abstraction import CLDev
from . import toml_loader
importlib.reload(toml_loader)
cl_load = toml_loader.load
cl_builder = CLDev(0)
cl_nodes = cl_load(cl_builder)
def grayscale(ssp):
out = cl_builder.new_image(ssp.shape[1], ssp.shape[0])
cl_nodes["grayscale"].run([], [cl_builder.new_image_from_ndarray(ssp)], [out])
return out.to_numpy()
def rgb_to_luminance(c):
    # Rec. 601 luma coefficients
    r = c[..., 0]
g = c[..., 1]
b = c[..., 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
@functools.lru_cache(maxsize=128)
def gauss_curve(x):
# gaussian with 0.01831 at last
res = np.array([np.exp(-((i * (2 / x)) ** 2)) for i in range(-x, x + 1)], dtype=np.float32)
res /= np.sum(res)
return res
def gaussian_repeat_cl(img, out, s):
# TODO: store local pass & barrier(CLK_LOCAL_MEM_FENCE);
cl_nodes["gaussian_h"].run([s], [img], [out])
cl_nodes["gaussian_v"].run([s], [out], [img])
return (img, out)
def gaussian_repeat(pix, s):
"Separated gaussian for image. Over borders = wraparound"
assert pix.dtype == np.float32
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
gaussian_repeat_cl(img, out, s)
return img.to_numpy()
def bilateral_cl(pix, radius, preserve):
"Bilateral filter, OpenCL implementation"
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
cl_nodes["bilateral"].run([radius, preserve], [img], [out])
return out.to_numpy()
def image_gradient_cl(img, out):
src = """
#define READP(x,y) read_imagef(input, sampler, (int2)(x, y))
kernel void image_flow(
__read_only image2d_t input,
__write_only image2d_t output
)
{
int x = get_global_id(0);
int y = get_global_id(1);
const sampler_t sampler = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_NEAREST;
float4 pix = read_imagef(input, sampler, (int2)(x, y));
float x_comp = READP(x-1, y).x
+READP(x-1, y+1).x
+READP(x-1, y-1).x
- READP(x+1, y).x
- READP(x+1, y+1).x
- READP(x+1, y-1).x;
float y_comp = READP(x, y-1).x
+ READP(x+1, y-1).x
+ READP(x-1, y-1).x
- READP(x, y+1).x
- READP(x+1, y+1).x
- READP(x-1, y+1).x;
float2 grad = (float2)(x_comp, y_comp);
float l = length(grad);
//grad = l > 0.0f ? grad/l : (float2)(0.0f, 0.0f);
// from pythagoras
float height;
height = l < 1.0f ? sqrt(1.0f - l*l) : 0.0f;
float4 out = (float4)(x_comp, y_comp, height, l);
write_imagef(output, (int2)(x,y), out);
}
"""
blr = cl_builder.build("image_flow", src, (cl.cl_image, cl.cl_image))
    # write a grayscale copy of `img` into `out`; the gradient kernel then reads it
    cl_nodes["grayscale"].run([], [img], [out])
cl_builder.run(blr, [], (out,), img)
return (img, out)
def directional_blur_cl(pix, radius, preserve):
"Directional bilateral filter, OpenCL implementation"
original = np.copy(pix)
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
(grad, l0) = image_gradient_cl(img, out)
(grad, l0) = gaussian_repeat_cl(grad, l0, 2)
src = """
#define POW2(a) ((a) * (a))
#define F4_ABS(v) ((float4)(fabs(v.x), fabs(v.y), fabs(v.z), 1.0f))
kernel void guided_bilateral(
const float radius,
const float preserve,
__read_only image2d_t gradient,
__read_only image2d_t input,
__write_only image2d_t output
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
float2 gvec = (float2)(gidx, gidy);
const sampler_t sampler = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_NEAREST;
const sampler_t sampler_f = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_LINEAR;
int n_radius = ceil(radius);
float4 center_pix = read_imagef(input, sampler, (int2)(gidx, gidy));
float4 grad = read_imagef(gradient, sampler, (int2)(gidx, gidy));
float4 acc_A = 0.0f;
float4 acc_B = 0.0f;
float4 tempf = 0.0f;
float count = 0.0f;
float diff_map, gaussian_weight, weight;
float dx = grad.x;
float dy = grad.y;
// along tangent flow
float2 v_vec = (float2)(-dy, dx);
// against tangent flow
float2 u_vec = (float2)(dx, dy);
weight = 1.0f;
for (float v = -n_radius; v <= n_radius; v=v+1.0f) {
float2 loc = gvec + (v_vec * v) + (float2)(0.5f, 0.5f);
tempf = read_imagef(input, sampler_f, loc);
diff_map = exp (
- ( POW2(center_pix.x - tempf.x)
+ POW2(center_pix.y - tempf.y)
+ POW2(center_pix.z - tempf.z))
* preserve);
gaussian_weight = exp(-0.5f * (POW2(v)) / radius);
weight = diff_map * gaussian_weight;
// weight = gaussian_weight;
// weight = 1.0;
acc_A += tempf * weight;
count += weight;
}
float4 res = acc_A/fabs(count);
res.w = 1.0f;
write_imagef(output, (int2)(gidx,gidy), res);
//write_imagef(output, (int2)(gidx,gidy), F4_ABS(res));
}
"""
blr = cl_builder.build(
"guided_bilateral", src, (cl.cl_float, cl.cl_float, cl.cl_image, cl.cl_image, cl.cl_image)
)
l1 = cl_builder.new_image_from_ndarray(original)
cl_builder.run(blr, [radius, preserve], (grad, l1), l0)
for _ in range(8):
cl_builder.run(blr, [radius, preserve], (grad, l0), l1)
cl_builder.run(blr, [radius, preserve], (grad, l1), l0)
return l0.to_numpy()
def median_filter(pix, radius):
src = f"""
#define RADIUS {radius}
#define READP(x,y) read_imagef(input, sampler, (int2)(x, y))
kernel void wirth_median_{radius}(
const int width,
const int height,
__read_only image2d_t input,
__write_only image2d_t output)
{{
const int x = get_global_id(0);
const int y = get_global_id(1);
const sampler_t sampler = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_NEAREST;
float rcol[4] = {{0.0, 0.0, 0.0, 1.0}};
float a[RADIUS][RADIUS*RADIUS];
for (int m = 0; m < RADIUS; m++) {{
for (int n = 0; n < RADIUS; n++) {{
float4 ta = READP(x + n - (RADIUS / 2), y + m - (RADIUS / 2));
a[0][n+RADIUS*m] = ta.x;
a[1][n+RADIUS*m] = ta.y;
a[2][n+RADIUS*m] = ta.z;
}}
}}
// Wirth median
for (int z=0; z<RADIUS; z++) {{
int k = (RADIUS*RADIUS)/2;
int n = (RADIUS*RADIUS);
int i,j,l,m;
float val;
l=0;
m=n-1;
while (l < m) {{
val = a[z][k];
i=l;
j=m;
do {{
while (a[z][i] < val) i++;
while (val < a[z][j]) j--;
if (i<=j) {{
float tmp = a[z][i];
a[z][i] = a[z][j];
a[z][j] = tmp;
i++; j--;
}}
}} while (i <= j);
if (j < k) l=i;
if (k < i) m=j;
}}
rcol[z] = a[z][k];
}}
write_imagef(output, (int2)(x, y), (float4)(rcol[0], rcol[1], rcol[2], 1.0f));
}}"""
k = cl_builder.build(
"wirth_median_" + repr(radius), src, (cl.cl_int, cl.cl_int, cl.cl_image, cl.cl_image)
)
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
cl_builder.run(k, [], [img.data], [out.data], shape=(img.height, img.width))
return out.to_numpy()
def vectors_to_nmap(vectors):
nmap = np.empty((vectors.shape[0], vectors.shape[1], 4), dtype=np.float32)
vectors *= 0.5
nmap[:, :, 0] = vectors[:, :, 0] + 0.5
nmap[:, :, 1] = vectors[:, :, 1] + 0.5
nmap[:, :, 2] = vectors[:, :, 2] + 0.5
nmap[..., 3] = 1.0
return nmap
def nmap_to_vectors(nmap):
vectors = np.empty((nmap.shape[0], nmap.shape[1], 4), dtype=np.float32)
vectors[..., 0] = nmap[..., 0] - 0.5
vectors[..., 1] = nmap[..., 1] - 0.5
vectors[..., 2] = nmap[..., 2] - 0.5
vectors *= 2.0
vectors[..., 3] = 1.0
return vectors
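# Minimal round-trip sketch: tangent-space vectors in [-1, 1] are packed into
# normal-map colors in [0, 1] by vectors_to_nmap() and unpacked again by
# nmap_to_vectors(), so the two are inverses up to float error.
def _demo_nmap_roundtrip():
    vec = np.zeros((2, 2, 3), dtype=np.float32)
    vec[..., 2] = 1.0  # flat surface: all normals point straight up
    nmap = vectors_to_nmap(vec.copy())  # colors ~ (0.5, 0.5, 1.0, 1.0)
    back = nmap_to_vectors(nmap)[..., :3]
    return np.allclose(vec, back, atol=1e-6)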
def normalize(pix, save_alpha=False):
# TODO: HSL or Lab lightness normalization, maintain chroma
if save_alpha:
A = pix[..., 3]
t = pix - np.min(pix)
t = t / np.max(t)
if save_alpha:
t[..., 3] = A
return t
def sharpen(pix, width, intensity):
A = pix[..., 3]
gas = gaussian_repeat(pix, width)
pix += (pix - gas) * intensity
pix[..., 3] = A
return pix
def hi_pass(pix, s):
bg = pix.copy()
pix = (bg - gaussian_repeat(pix, s)) * 0.5 + 0.5
pix[:, :, 3] = bg[:, :, 3]
return pix
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True, return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
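# Minimal usage sketch for hist_match(): remap a channel so its value
# distribution follows a (possibly differently sized) reference channel.
def _demo_hist_match():
    rng = np.random.default_rng(0)
    source = rng.random((64, 64)).astype(np.float32)
    template = rng.random((32, 32)).astype(np.float32) ** 2.0  # darker reference
    matched = hist_match(source, template)
    return matched.shape == source.shape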
def gaussianize(source, NG=1000):
"Make histogram into gaussian, save transform"
oldshape = source.shape
output = source.copy()
transforms = []
t_values = np.arange(NG * 8 + 1) / (NG * 8)
t_counts = gauss_curve(NG * 4)
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_max = 0.0
for i in range(3):
# s_values, bin_idx, s_counts = np.lib.arraysetops.unique(
s_values, bin_idx, s_counts = np.unique(
source[..., i].ravel(), return_inverse=True, return_counts=True
)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
s_max = s_quantiles[-1]
if s_max > t_max:
t_max = s_max
transforms.append([s_values, s_quantiles, s_max])
tv = np.interp(s_quantiles, t_quantiles, t_values)[bin_idx]
output[..., i] = tv.reshape(oldshape[:2])
return output, transforms
def degaussianize(source, transforms):
"Make a Gaussianized histogram back to the original using the transform"
oldshape = source.shape
output = source.copy()
for i in range(3):
s_values, bin_idx, s_counts = np.unique(
output[..., i].ravel(), return_inverse=True, return_counts=True
)
t_values, t_quantiles, _ = transforms[i]
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
tv = np.interp(s_quantiles, t_quantiles, t_values)[bin_idx]
output[..., i] = tv.reshape(oldshape[:2])
return output
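# Illustrative round trip: gaussianize() remaps each RGB channel so its
# histogram is roughly Gaussian and returns the per-channel transforms;
# degaussianize() applies them in reverse. This is how HistogramSeamless_IOP
# below blends tiles without washing out the original value distribution.
def _demo_gaussianize_roundtrip(image):
    gimg, transforms = gaussianize(image)
    return degaussianize(gimg, transforms)  # approximately the original histogram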
def hi_pass_balance(pix, s, zoom, scalers, into_lch=True):
from .oklab import LCh_to_srgb, srgb_to_LCh
    if scalers is None or scalers == []:
        scalers = [1.0, 1.0, 1.0]
    assert len(scalers) == 3
    assert isinstance(scalers[0], float)
    # optionally convert to LCh so lightness, chroma and hue are balanced separately
if into_lch:
pix = srgb_to_LCh(pix)
# save original
bg = pix.copy()
# limit middle sampler max dimensions to the image max dimensions
yzm = pix.shape[0] // 2
xzm = pix.shape[1] // 2
yzoom = zoom if zoom < yzm else yzm
xzoom = zoom if zoom < xzm else xzm
# middle value = (low + high) / 2
pixmin = np.min(pix)
pixmax = np.max(pix)
med = (pixmin + pixmax) / 2
# high pass
# TODO: np.mean
gas = gaussian_repeat(pix - med, s) + med
pix = (pix - gas) * 0.5 + 0.5
# use the middle sampler to normalize histogram
for c in range(3):
pix[..., c] = hist_match(
pix[..., c], bg[yzm - yzoom : yzm + yzoom, xzm - xzoom : xzm + xzoom, c]
)
# apply scalers
for c in range(3):
assert scalers[c] >= 0.0 and scalers[c] <= 1.0
pix[..., c] = pix[..., c] * scalers[c] + bg[..., c] * (1.0 - scalers[c])
pix[..., 3] = bg[..., 3]
if into_lch:
pix = LCh_to_srgb(pix)
return pix
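# Usage sketch for hi_pass_balance(): with scalers [1.0, 0.0, 0.0] only the
# lightness channel is balanced while chroma and hue keep their original
# low-frequency content, mirroring the call in ImageToMaterial_IOP below.
def _demo_hi_pass_balance(image):
    return hi_pass_balance(image, s=50, zoom=200, scalers=[1.0, 0.0, 0.0])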
def hgram_equalize(pix, intensity, atest):
old = pix.copy()
# aw = np.argwhere(pix[..., 3] > atest)
aw = (pix[..., 3] > atest).nonzero()
aws = (aw[0], aw[1])
# aws = (aw[:, 0], aw[:, 1])
for c in range(3):
t = pix[..., c][aws]
pix[..., c][aws] = np.sort(t).searchsorted(t)
# pix[..., c][aws] = np.argsort(t)
pix[..., :3] /= np.max(pix[..., :3])
return old * (1.0 - intensity) + pix * intensity
def normals_simple(pix, source):
pix = grayscale(pix)
pix = normalize(pix)
steepness = 1.0
# TODO: better vector calc, not just side pixels
src = """
#define READP(x,y) read_imagef(input, sampler, (int2)(x, y))
kernel void height_to_normals(
const int width,
const int height,
const float steepness,
__read_only image2d_t input,
__write_only image2d_t output
)
{
int x = get_global_id(0);
int y = get_global_id(1);
const sampler_t sampler = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_NEAREST;
float4 pix = read_imagef(input, sampler, (int2)(x, y));
// sobel operator
float x_comp = READP(x-1, y).x
+READP(x-1, y+1).x
+READP(x-1, y-1).x
- READP(x+1, y).x
- READP(x+1, y+1).x
- READP(x+1, y-1).x;
float y_comp = READP(x, y-1).x
+ READP(x+1, y-1).x
+ READP(x-1, y-1).x
- READP(x, y+1).x
- READP(x+1, y+1).x
- READP(x-1, y+1).x;
float2 grad = (float2)(x_comp, y_comp);
float l = length(grad);
grad /= l;
// from pythagoras
float hg;
hg = l < 1.0f ? sqrt(1.0f - l*l) : 0.0f;
float4 out = (float4)(x_comp*0.5 + 0.5, y_comp*0.5 + 0.5, hg*0.5 + 0.5, 1.0f);
write_imagef(output, (int2)(x,y), out);
}
"""
blr = cl_builder.build(
"height_to_normals", src, (cl.cl_int, cl.cl_int, cl.cl_float, cl.cl_image, cl.cl_image)
)
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
assert steepness != 0.0
cl_builder.run(blr, [steepness], [img.data], [out.data], shape=img.shape)
return out.to_numpy()
def normals_to_curvature(pix):
intensity = 1.0
curve = np.zeros((pix.shape[0], pix.shape[1]), dtype=np.float32)
vectors = nmap_to_vectors(pix)
# y_vec = np.array([1, 0, 0], dtype=np.float32)
# x_vec = np.array([0, 1, 0], dtype=np.float32)
# yd = vectors.dot(x_vec)
# xd = vectors.dot(y_vec)
xd = vectors[:, :, 0]
yd = vectors[:, :, 1]
# curve[0,0] = yd[1,0]
curve[:-1, :] += yd[1:, :]
curve[-1, :] += yd[0, :]
# curve[0,0] = yd[-1,0]
curve[1:, :] -= yd[:-1, :]
curve[0, :] -= yd[-1, :]
# curve[0,0] = xd[1,0]
curve[:, :-1] += xd[:, 1:]
curve[:, -1] += xd[:, 0]
# curve[0,0] = xd[-1,0]
curve[:, 1:] -= xd[:, :-1]
curve[:, 0] -= xd[:, -1]
# normalize
dv = max(abs(np.min(curve)), abs(np.max(curve)))
curve /= dv
# 0 = 0.5 grey
curve = curve * intensity + 0.5
pix[..., 0] = curve
pix[..., 1] = curve
pix[..., 2] = curve
return pix
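# Equivalent formulation of the slicing above: the curvature is a wrap-around
# divergence of the tangent-space X/Y components, which can also be written
# compactly with np.roll (kept here only as an illustrative reference).
def _curvature_divergence(vectors):
    xd, yd = vectors[..., 0], vectors[..., 1]
    return (np.roll(yd, -1, axis=0) - np.roll(yd, 1, axis=0)
            + np.roll(xd, -1, axis=1) - np.roll(xd, 1, axis=1))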
def gauss_seidel_cl(w, h, h2, target, inp, outp):
# TODO: fix name
src = """
__kernel void curvature_to_height(
const int i_width,
const int i_height,
const float step,
__global const float *input,
__global const float *target,
__global float *output
)
{
int x = get_global_id(0);
int y = get_global_id(1);
int loc = x + y * i_width;
float t = 0.0f;
t += x > 0 ? input[loc-1] : input[loc+(i_width-1)];
t += y > 0 ? input[loc-i_width] : input[loc+(i_height-1)*i_width];
t += x < i_width-1 ? input[loc+1] : input[loc-(i_width-1)];
t += y < i_height-1 ? input[loc+i_width] : input[loc-(i_height-1)*i_width];
t *= 0.25;
t -= step * target[loc];
output[loc] = t;
}
"""
cth = cl_builder.build(
"curvature_to_height",
src,
(cl.cl_int, cl.cl_int, cl.cl_float, cl.cl_mem, cl.cl_mem, cl.cl_mem),
)
assert w % 8 == 0, "Image width must be divisible by 8"
assert h % 8 == 0, "Image height must be divisible by 8"
# cl_builder.run_buffer(cth, [w, h, h2, inp, target], outp, shape=(h, w))
# kernel, params, inputs, outputs
cl_builder.run(cth, [h2], [inp, target], [outp], shape=(h, w))
def curvature_to_height(image, h2, iterations=2000):
target = image[..., 0]
# TODO: from grayscale, not just 1 component
w, h = target.shape[1], target.shape[0]
f = cl_builder.to_buffer(target)
ping = cl_builder.to_buffer(np.ones_like(target) * 0.5)
pong = cl_builder.to_buffer(np.zeros_like(target))
for ic in range(iterations):
gauss_seidel_cl(w, h, h2, f, ping, pong)
gauss_seidel_cl(w, h, h2, f, pong, ping)
res_v, evt = cl.buffer_to_ndarray(cl_builder.queue, ping, like=image[..., 0])
evt.wait()
u = res_v
u = -u
u -= np.min(u)
u /= np.max(u)
return np.dstack([u, u, u, image[..., 3]])
def normals_to_height(image, iterations=2000, intensity=1.0, step=1.0):
vectors = nmap_to_vectors(image)
vectors *= intensity
target = np.roll(vectors[..., 0], 1, axis=1)
target -= np.roll(vectors[..., 0], -1, axis=1)
target += np.roll(vectors[..., 1], 1, axis=0)
target -= np.roll(vectors[..., 1], -1, axis=0)
target *= 0.125
w, h = target.shape[1], target.shape[0]
f = cl_builder.to_buffer(target)
ping = cl_builder.to_buffer(np.ones_like(target) * 0.5)
pong = cl_builder.to_buffer(np.zeros_like(target))
for ic in range(iterations):
gauss_seidel_cl(w, h, step, f, ping, pong)
gauss_seidel_cl(w, h, step, f, pong, ping)
res_v, evt = cl.buffer_to_ndarray(cl_builder.queue, ping, like=image[..., 0])
evt.wait()
u = res_v
u -= np.min(u)
u /= np.max(u)
return np.dstack([u, u, u, image[..., 3]])
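# Reference sketch of the relaxation step gauss_seidel_cl() runs on the GPU:
# every ping/pong pass replaces each pixel with the average of its four
# wrap-around neighbours minus step * target, which iteratively solves the
# Poisson problem behind curvature_to_height() and normals_to_height().
def _poisson_relax_numpy(height, target, step):
    neigh = (np.roll(height, 1, axis=0) + np.roll(height, -1, axis=0)
             + np.roll(height, 1, axis=1) + np.roll(height, -1, axis=1)) * 0.25
    return neigh - step * target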
def fill_alpha(image, style="black"):
if style == "black":
for c in range(3):
image[..., c] *= image[..., 3]
image[..., 3] = 1.0
return image
else:
cols = [0.5, 0.5, 1.0]
A = image[..., 3]
for c in range(3):
image[..., c] = cols[c] * (1 - A) + image[..., c] * A
image[..., 3] = 1.0
return image
def dog(pix, a, b, threshold):
"Difference of Gaussians with a threshold"
size = max(a, b)
gpix = grayscale(pix)
res = (gaussian_repeat(gpix, a) - gaussian_repeat(gpix, b))[..., :3]
tt = threshold / size
# Xdog Winnemöller et al
pix[..., :3] = np.where(tt >= res, 1.0, 1.0 + np.tanh(40.0 * (tt - res)))
return pix
def gimpify(image):
pixels = np.copy(image)
xs, ys = image.shape[1], image.shape[0]
image = np.roll(image, xs * 2 + xs * 4 * (ys // 2))
sxs = xs // 2
sys = ys // 2
# generate the mask
mask_pix = []
for y in range(0, sys):
zy0 = y / sys + 0.001
zy1 = 1 - y / sys + 0.001
for x in range(0, sxs):
xp = x / sxs
p = 1.0 - zy0 / (1.0 - xp + 0.001)
t = 1.0 - xp / zy1
mask_pix.append(t if t > p else p)
# imask[y, x] = max(, imask[y, x])
tmask = np.array(mask_pix, dtype=np.float32)
tmask = tmask.reshape((sys, sxs))
imask = np.zeros((pixels.shape[0], pixels.shape[1]), dtype=np.float32)
imask[:sys, :sxs] = tmask
imask[imask < 0] = 0
# copy the data into the three remaining corners
imask[0 : sys + 1, sxs:xs] = np.fliplr(imask[0 : sys + 1, 0:sxs])
imask[-sys:ys, 0:sxs] = np.flipud(imask[0:sys, 0:sxs])
imask[-sys:ys, sxs:xs] = np.flipud(imask[0:sys, sxs:xs])
imask[sys, :] = imask[sys - 1, :] # center line
# apply mask
amask = np.empty(pixels.shape, dtype=float)
amask[:, :, 0] = imask
amask[:, :, 1] = imask
amask[:, :, 2] = imask
amask[:, :, 3] = imask
return amask * image + (1.0 - amask) * pixels
def inpaint_tangents(pixels, threshold):
# invalid = pixels[:, :, 2] < 0.5 + (self.tolerance * 0.5)
invalid = pixels[:, :, 2] < threshold
# n2 = (
# ((pixels[:, :, 0] - 0.5) * 2) ** 2
# + ((pixels[:, :, 1] - 0.5) * 2) ** 2
# + ((pixels[:, :, 2] - 0.5) * 2) ** 2
# )
# invalid |= (n2 < 0.9) | (n2 > 1.1)
# grow selection
for _ in range(2):
invalid[0, :] = False
invalid[-1, :] = False
invalid[:, 0] = False
invalid[:, -1] = False
invalid = (
np.roll(invalid, 1, axis=0)
| np.roll(invalid, -1, axis=0)
| np.roll(invalid, 1, axis=1)
| np.roll(invalid, -1, axis=1)
)
pixels[invalid] = np.array([0.5, 0.5, 1.0, 1.0])
invalid[0, :] = False
invalid[-1, :] = False
invalid[:, 0] = False
invalid[:, -1] = False
# fill
front = np.copy(invalid)
locs = [(0, -1, 1), (0, 1, -1), (1, -1, 1), (1, 1, -1)]
for i in range(4):
print("fill step:", i)
for l in locs:
r = np.roll(front, l[1], axis=l[0])
a = (r != front) & front
pixels[a] = pixels[np.roll(a, l[2], axis=l[0])]
front[a] = False
cl = np.roll(invalid, -1, axis=0)
cr = np.roll(invalid, 1, axis=0)
uc = np.roll(invalid, -1, axis=1)
bc = np.roll(invalid, 1, axis=1)
# smooth
for i in range(4):
print("smooth step:", i)
pixels[invalid] = (pixels[invalid] + pixels[cl] + pixels[cr] + pixels[uc] + pixels[bc]) / 5
return pixels
def normalize_tangents(image):
vectors = nmap_to_vectors(image)[..., :3]
vectors = (vectors.T / np.linalg.norm(vectors, axis=2).T).T
retarr = vectors_to_nmap(vectors)
return retarr
def texture_to_normals(image, high, mid, low):
# imgg = gaussian_repeat(image, 4)
g = grayscale(image)
b = curvature_to_height(g, 0.5, iterations=100)
c = curvature_to_height(g, 0.5, iterations=1000)
d = normals_simple(g * high + b * mid + c * low, "Luminance")
d = normals_to_height(d, iterations=500, step=0.5)
d = normals_simple(d, "Luminance")
# d = srgb_to_linear(d)
return d
def knife_seamless(image, v_margin, h_margin, step, m_constraint, smooth, weights):
def diffblocks(a, b, constrain_middle, wg):
l = len(a)
if constrain_middle >= 0.0 and constrain_middle <= 15.0:
penalty = np.abs(((np.arange(l) - (l - 1) * 0.5) * 2.0 / (l - 1))) ** (
constrain_middle + 1.0
)
else:
penalty = 0.0
# assert np.all(penalty) >= 0.0
# assert np.all(penalty) <= 1.0
diff = np.abs(a - b)
# diff = a
# normalize
# diff += np.min(diff)
# diffm = np.max(diff)
# if diffm > 0.0:
# diff /= diffm
return (
diff[..., 0] * weights[0]
+ diff[..., 1] * weights[1]
+ diff[..., 2] * weights[2]
+ penalty
)
def findmin(ar, loc, step):
minloc = loc
lar = len(ar)
for x in range(-step, step + 1):
if loc + x >= 0 and loc + x < lar and ar[loc + x] < ar[minloc]:
minloc = loc + x
return minloc
def copy_to_v(image, img_orig, sr, rv, y):
# sr = stripe width / 2, y = stripe location, rv = cut location
w = image.shape[1]
hw = w // 2
L2 = 8
L = L2 * 2
image[y, hw - sr : hw - sr + rv, :] = img_orig[y, -2 * sr : -2 * sr + rv, :]
image[y, hw - sr + rv : hw + sr, :] = img_orig[y, rv : sr * 2, :]
la = hw - sr + rv
lb = rv
# blending between the two border images
for i in range(L):
l = i - L2
d = i / (L - 1)
lval = img_orig[y, -2 * sr + rv + l, :]
rval = img_orig[y, lb + l, :]
# blend more of the selection which has higher lightness
d = d + (rval[..., 0] * d - lval[..., 0] * (1.0 - d)) * 2.0
if d < 0.0:
d = 0.0
if d > 1.0:
d = 1.0
image[y, la + l, :] = lval * (1.0 - d) + rval * d
def copy_to_h(image, img_orig, sr, rv, x):
h = image.shape[0]
hh = h // 2
image[hh - sr : hh - sr + rv, x, :] = img_orig[h - 2 * sr : h - 2 * sr + rv, x, :]
r2 = sr * 2 - rv
image[hh + sr - r2 : hh + sr, x, :] = img_orig[sr * 2 - r2 : sr * 2, x, :]
h, w = image.shape[0], image.shape[1]
# if self.square:
# max_space = min(h, w)
# h_margin += w - max_space
# v_margin += h - max_space
# new_width = w
# new_height = h
# Make sure result is divisible by 8
v_margin += -((h + v_margin) % 16)
h_margin += -((w + h_margin) % 16)
v_margin //= 2
h_margin //= 2
# -- vertical cut
if smooth > 0:
smoothed = gaussian_repeat(image, smooth)
else:
smoothed = image.copy()
img_orig = image.copy()
hw = w // 2
# right on left
image[:, : hw + h_margin, :] = img_orig[:, hw - h_margin :, :]
# left on right
image[:, hw - h_margin :, :] = img_orig[:, : hw + h_margin, :]
abr = diffblocks(
smoothed[0, -(2 * h_margin) :, :], smoothed[0, : h_margin * 2, :], m_constraint, weights
)
rv = np.argmin(abr)
for y in range(h):
abr = diffblocks(
smoothed[y, -(2 * h_margin) :, :], smoothed[y, : h_margin * 2, :], m_constraint, weights
)
rv = findmin(abr, rv, step)
copy_to_v(image, img_orig, h_margin, rv, y)
# -- horizontal cut
if smooth > 0:
smoothed = gaussian_repeat(image, smooth)
else:
smoothed = image.copy()
img_orig = image.copy()
hw = h // 2
image[: hw + v_margin, ...] = img_orig[hw - v_margin :, ...]
image[hw - v_margin :, ...] = img_orig[: hw + v_margin, ...]
abr = diffblocks(
smoothed[-(2 * v_margin) :, 0, :], smoothed[: v_margin * 2, 0, :], m_constraint, weights
)
rv = np.argmin(abr)
for x in range(w):
abr = diffblocks(
smoothed[-(2 * v_margin) :, x, :], smoothed[: v_margin * 2, x, :], m_constraint, weights
)
rv = findmin(abr, rv, step)
copy_to_h(image, img_orig, v_margin, rv, x)
print(image.shape, img_orig.shape, v_margin, h_margin)
return image[v_margin:-v_margin, h_margin:-h_margin]
def crop_to_square(image):
h, w = image.shape[0], image.shape[1]
offx = w // 2
offy = h // 2
if h > w:
h = w
if w > h:
w = h
# make compatible with CL calcs
w = w - (w % 8)
h = h - (h % 8)
xt = w // 2
yt = w // 2
# crop to center
image = image[offy - yt : offy + yt, offx - xt : offx + xt]
return image
class Grayscale_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "grayscale"
self.info = "Grayscale from RGB"
self.category = "Basic"
self.payload = lambda self, image, context: grayscale(image)
class Random_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "random"
self.info = "Random RGB pixels"
self.category = "Basic"
def _pl(self, image, context):
t = np.random.random(image.shape)
t[..., 3] = 1.0
return t
self.payload = _pl
class Swizzle_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["order_a"] = bpy.props.StringProperty(name="Order A", default="RGBA")
self.props["order_b"] = bpy.props.StringProperty(name="Order B", default="RBGa")
self.props["direction"] = bpy.props.EnumProperty(
name="Direction", items=[("ATOB", "A to B", "", 1), ("BTOA", "B to A", "", 2)]
)
self.prefix = "swizzle"
self.info = "Channel swizzle"
self.category = "Basic"
def _pl(self, image, context):
test_a = self.order_a.upper()
test_b = self.order_b.upper()
if len(test_a) != 4 or len(test_b) != 4:
self.report({"INFO"}, "Swizzle channel count must be 4")
return image
if set(test_a) != set(test_b):
self.report({"INFO"}, "Swizzle channels must have same names")
return image
first = self.order_a
second = self.order_b
if self.direction == "BTOA":
first, second = second, first
temp = image.copy()
for i in range(4):
fl = first[i].upper()
t = second.upper().index(fl)
if second[t] != first[i]:
temp[..., t] = 1.0 - image[..., i]
else:
temp[..., t] = image[..., i]
return temp
self.payload = _pl
class Normalize_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "normalize"
self.info = "Normalize"
self.category = "Basic"
def _pl(self, image, context):
tmp = image[..., 3]
res = normalize(image)
res[..., 3] = tmp
return res
self.payload = _pl
class CropToP2_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "crop_to_power"
self.info = "Crops the middle of the image to power of twos"
self.category = "Dimensions"
def _pl(self, image, context):
h, w = image.shape[0], image.shape[1]
offx = 0
offy = 0
wpow = int(np.log2(w))
hpow = int(np.log2(h))
offx = (w - 2 ** wpow) // 2
offy = (h - 2 ** hpow) // 2
if w > 2 ** wpow:
w = 2 ** wpow
if h > 2 ** hpow:
h = 2 ** hpow
# crop to center
image = image[offy : offy + h, offx : offx + w]
return image
self.payload = _pl
class CropToSquare_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "crop_to_square"
self.info = "Crop the middle to square with two divisible height and width"
self.category = "Dimensions"
def _pl(self, image, context):
return crop_to_square(image)
self.payload = _pl
class Sharpen_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width"] = bpy.props.IntProperty(name="Width", min=2, default=5)
self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "sharpen"
self.info = "Simple sharpen"
self.category = "Filter"
self.payload = lambda self, image, context: sharpen(image, self.width, self.intensity)
class DoG_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width_a"] = bpy.props.IntProperty(name="Width A", min=2, default=5)
self.props["width_b"] = bpy.props.IntProperty(name="Width B", min=2, default=4)
self.props["threshold"] = bpy.props.FloatProperty(
name="Threshold", min=0.0, max=1.0, default=0.01
)
self.props["preserve"] = bpy.props.BoolProperty(name="Preserve", default=True)
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "dog"
self.info = "DoG"
self.category = "Advanced"
def _pl(self, image, context):
t = image.copy()
d = dog(image, self.width_a, self.width_b, self.threshold)
if self.preserve:
return t * d
else:
return d
self.payload = _pl
class TextureToNormals_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["high_freq"] = bpy.props.FloatProperty(
name="High frequency", min=0.0, max=1.0, default=0.1
)
self.props["mid_freq"] = bpy.props.FloatProperty(
name="Mid frequency", min=0.0, max=1.0, default=0.2
)
self.props["low_freq"] = bpy.props.FloatProperty(
name="Low frequency", min=0.0, max=1.0, default=0.7
)
self.prefix = "texture_to_normals"
self.info = "Texture to Normals"
self.category = "Advanced"
def _pl(self, image, context):
# # imgg = gaussian_repeat(image, 4)
# g = grayscale(image)
# b = curvature_to_height(g, 0.5, iterations=100)
# c = curvature_to_height(g, 0.5, iterations=1000)
# d = normals_simple(
# g * self.high_freq + b * self.mid_freq + c * self.low_freq, "Luminance"
# )
# d = normals_to_height(d, iterations=500, step=0.5)
# d = normals_simple(d, "Luminance")
return texture_to_normals(image, self.high_freq, self.mid_freq, self.low_freq)
self.payload = _pl
class FillAlpha_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["style"] = bpy.props.EnumProperty(
name="Style",
items=[("black", "Black color", "", 1), ("tangent", "Neutral tangent", "", 2)],
)
self.prefix = "fill_alpha"
self.info = "Fill alpha with color or normal"
self.category = "Basic"
self.payload = lambda self, image, context: fill_alpha(image, style=self.style)
class GaussianBlur_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width"] = bpy.props.IntProperty(name="Width", min=1, default=20)
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "gaussian_blur"
self.info = "Does a Gaussian blur"
self.category = "Filter"
self.payload = lambda self, image, context: gaussian_repeat(image, self.width)
class Median_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["width"] = bpy.props.IntProperty(name="Width", min=3, max=9, default=3)
self.props["width"] = bpy.props.EnumProperty(
name="Width",
items=[
("3", "3", "", 3),
("5", "5", "", 5),
("9", "9", "", 9),
("15", "15 (crash your computer)", "", 15),
],
default="5",
)
self.prefix = "median_filter"
self.info = "Median filter"
self.category = "Filter"
self.payload = lambda self, image, context: median_filter(image, int(self.width))
class Bilateral_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["radius"] = bpy.props.FloatProperty(
name="Radius", min=0.01, max=100.0, default=10.0
)
self.props["preserve"] = bpy.props.FloatProperty(
name="Preserve", min=0.01, max=100.0, default=20.0
)
self.prefix = "bilateral"
self.info = "Bilateral"
self.category = "Filter"
self.payload = lambda self, image, context: bilateral_cl(image, self.radius, self.preserve)
class DirectionalBilateral_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["radius"] = bpy.props.FloatProperty(
name="Radius", min=0.01, max=100.0, default=10.0
)
self.props["preserve"] = bpy.props.FloatProperty(
name="Preserve", min=0.01, max=100.0, default=20.0
)
self.prefix = "directional_blur"
self.info = "Directional bilateral"
self.category = "Advanced"
self.payload = lambda self, image, context: directional_blur_cl(
image, self.radius, self.preserve
)
class HiPass_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width"] = bpy.props.IntProperty(name="Width", min=1, default=20)
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "high_pass"
self.info = "High pass"
self.category = "Filter"
self.payload = lambda self, image, context: hi_pass(image, self.width)
class HiPassBalance_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width"] = bpy.props.IntProperty(name="Width", min=1, default=50)
self.props["zoom"] = bpy.props.IntProperty(name="Center slice", min=5, default=200)
self.props["hue"] = bpy.props.BoolProperty(name="Preserve hue", default=True)
self.props["sat"] = bpy.props.BoolProperty(name="Preserve chroma", default=False)
# self.props["A"] = bpy.props.FloatProperty(name="C1", default=1.0, min=0.0, max=1.0)
# self.props["B"] = bpy.props.FloatProperty(name="C2", default=1.0, min=0.0, max=1.0)
# self.props["C"] = bpy.props.FloatProperty(name="C3", default=1.0, min=0.0, max=1.0)
self.prefix = "hipass_balance"
self.info = "Remove low frequencies from the image"
self.category = "Balance"
self.payload = lambda self, image, context: hi_pass_balance(
# image, self.width, self.zoom, [self.A, self.B, self.C], into_lch=self.lch
image,
self.width,
self.zoom,
[1.0, 1.0 - 1.0 * self.sat, 1.0 - 1.0 * self.hue],
into_lch=True,
)
class ContrastBalance_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "contrast_balance"
self.info = "Balance contrast"
self.category = "Balance"
self.props["gA"] = bpy.props.IntProperty(name="Range", min=1, max=256, default=20)
self.props["gB"] = bpy.props.IntProperty(name="Error", min=1, max=256, default=40)
self.props["strength"] = bpy.props.FloatProperty(name="Strength", min=0.0, default=1.0)
def _pl(self, image, context):
tmp = image.copy()
# squared error
gcr = gaussian_repeat(tmp, self.gA)
error = (tmp - gcr) ** 2
mask = -gaussian_repeat(error, self.gB)
mask -= np.min(mask)
mask /= np.max(mask)
mask = (mask - 0.5) * self.strength + 1.0
res = gcr + mask * (tmp - gcr)
res[..., 3] = tmp[..., 3]
return res
self.payload = _pl
class HistogramEQ_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["intensity"] = bpy.props.FloatProperty(
name="Intensity", min=0.0, max=1.0, default=1.0
)
self.prefix = "histogram_eq"
self.info = "Histogram equalization"
self.category = "Advanced"
self.payload = lambda self, image, context: hgram_equalize(image, self.intensity, 0.5)
class Gaussianize_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["count"] = bpy.props.IntProperty(name="Count", min=10, max=100000, default=1000)
self.prefix = "gaussianize"
self.info = "Gaussianize histogram"
self.category = "Advanced"
self.payload = lambda self, image, context: gaussianize(image, NG=self.count)[0]
class GimpSeamless_IOP(image_ops.ImageOperatorGenerator):
"""Image seamless generator operator"""
# TODO: the smoothing is not complete, it goes only one way
def generate(self):
self.prefix = "gimp_seamless"
self.info = "Gimp style seamless image operation"
self.category = "Seamless"
self.payload = lambda self, image, context: gimpify(image)
class KnifeSeamless_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "knife_seamless"
self.info = "Optimal knife cut into seamless"
self.category = "Seamless"
self.props["step"] = bpy.props.IntProperty(name="Step", min=1, max=16, default=3)
self.props["margin"] = bpy.props.IntProperty(name="Margin", min=4, max=256, default=40)
self.props["smooth"] = bpy.props.IntProperty(
name="Cut smoothing", min=0, max=64, default=16
)
self.props["constrain"] = bpy.props.FloatProperty(
name="Middle constraint", min=0.0, max=15.0, default=2.0
)
# self.props["square"] = bpy.props.BoolProperty(name="To square", default=False)
# def diffblocks(a, b, constrain_middle):
# l = len(a)
# if constrain_middle >= 0.0 and constrain_middle <= 15.0:
# penalty = np.abs(((np.arange(l) - (l - 1) * 0.5) * 2.0 / (l - 1))) ** (
# constrain_middle + 1.0
# )
# else:
# penalty = 0.0
# # assert np.all(penalty) >= 0.0
# # assert np.all(penalty) <= 1.0
# # TODO: adding power might be better
# # return rgb_to_luminance(np.abs(a - b)) ** 2.0 + penalty
# return rgb_to_luminance(np.abs(a - b)) + penalty
# def findmin(ar, loc, step):
# minloc = loc
# lar = len(ar)
# for x in range(-step, step + 1):
# if loc + x >= 0 and loc + x < lar and ar[loc + x] < ar[minloc]:
# minloc = loc + x
# return minloc
# def copy_to_v(image, img_orig, sr, rv, y):
# w = image.shape[1]
# hw = w // 2
# image[y, hw - sr : hw - sr + rv, :] = img_orig[y, w - 2 * sr : w - 2 * sr + rv, :]
# r2 = sr * 2 - rv
# image[y, hw + sr - r2 : hw + sr, :] = img_orig[y, sr * 2 - r2 : sr * 2, :]
# def copy_to_h(image, img_orig, sr, rv, y):
# w = image.shape[0]
# hw = w // 2
# image[hw - sr : hw - sr + rv, y, :] = img_orig[w - 2 * sr : w - 2 * sr + rv, y, :]
# r2 = sr * 2 - rv
# image[hw + sr - r2 : hw + sr, y, :] = img_orig[sr * 2 - r2 : sr * 2, y, :]
def _pl(self, image, context):
h, w = image.shape[0], image.shape[1]
print(w, h)
v_margin = self.margin
h_margin = self.margin
step = self.step
m_constraint = 16.0 - self.constrain
# if self.square:
max_space = min(h, w)
h_margin += w - max_space
v_margin += h - max_space
print(h_margin, v_margin)
from .oklab import srgb_to_LCh, LCh_to_srgb
kr = knife_seamless(
srgb_to_LCh(image),
v_margin,
h_margin,
step,
m_constraint,
self.smooth,
[1.0, 0.0, 0.0],
)
return LCh_to_srgb(kr)
# h, w = image.shape[0], image.shape[1]
# # new_width = w
# # new_height = h
# # -- vertical cut
# if self.smooth > 0:
# smoothed = gaussian_repeat(image, self.smooth)
# else:
# smoothed = image.copy()
# img_orig = image.copy()
# hw = w // 2
# # right on left
# image[:, : hw + h_margin, :] = img_orig[:, hw - h_margin :, :]
# # left on right
# image[:, hw - h_margin :, :] = img_orig[:, : hw + h_margin, :]
# abr = diffblocks(
# smoothed[0, -(2 * h_margin) :, :], smoothed[0, : h_margin * 2, :], m_constraint
# )
# rv = np.argmin(abr)
# for y in range(h):
# abr = diffblocks(
# smoothed[y, -(2 * h_margin) :, :], smoothed[y, : h_margin * 2, :],
# m_constraint
# )
# rv = findmin(abr, rv, step)
# copy_to_v(image, img_orig, h_margin, rv, y)
# # -- horizontal cut
# if self.smooth > 0:
# smoothed = gaussian_repeat(image, self.smooth)
# else:
# smoothed = image.copy()
# img_orig = image.copy()
# hw = h // 2
# image[: hw + v_margin, ...] = img_orig[hw - v_margin :, ...]
# image[hw - v_margin :, ...] = img_orig[: hw + v_margin, ...]
# abr = diffblocks(
# smoothed[-(2 * v_margin) :, 0, :], smoothed[: v_margin * 2, 0, :], m_constraint
# )
# rv = np.argmin(abr)
# for x in range(w):
# abr = diffblocks(
# smoothed[-(2 * v_margin) :, x, :], smoothed[: v_margin * 2, x, :],
# m_constraint
# )
# rv = findmin(abr, rv, step)
# copy_to_h(image, img_orig, v_margin, rv, x)
# return image[v_margin:-v_margin, h_margin:-h_margin]
self.payload = _pl
class HistogramSeamless_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "histogram_seamless"
self.info = "Seamless histogram blending"
self.category = "Seamless"
def _pl(self, image, context):
gimg, transforms = gaussianize(image)
blended = gimpify(gimg)
return degaussianize(blended, transforms)
self.payload = _pl
class Normals_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "height_to_normals"
self.info = "(Very rough estimate) normal map from RGB"
self.category = "Normals"
self.payload = lambda self, image, context: normals_simple(
# image, self.width, self.intensity, "Luminance"
image,
"Luminance",
)
class NormalsToCurvature_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["width"] = bpy.props.IntProperty(name="Width", min=0, default=2)
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "normals_to_curvature"
self.info = "Curvature map from tangent normal map"
self.category = "Normals"
self.payload = lambda self, image, context: normals_to_curvature(image)
class CurveToHeight_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["step"] = bpy.props.FloatProperty(name="Step", min=0.00001, default=0.1)
self.props["iterations"] = bpy.props.IntProperty(name="Iterations", min=10, default=400)
self.prefix = "curvature_to_height"
self.info = "Height from curvature"
self.category = "Normals"
self.payload = lambda self, image, context: curvature_to_height(
image, self.step, iterations=self.iterations
)
class NormalsToHeight_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["grid"] = bpy.props.IntProperty(name="Grid subd", min=1, default=4)
self.props["iterations"] = bpy.props.IntProperty(name="Iterations", min=10, default=200)
self.prefix = "normals_to_height"
self.info = "Normals to height"
self.category = "Normals"
self.payload = lambda self, image, context: normals_to_height(
image, iterations=self.iterations
)
class InpaintTangents_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["flip"] = bpy.props.BoolProperty(name="Flip direction", default=False)
# self.props["iterations"] = bpy.props.IntProperty(name="Iterations", min=10, default=200)
self.props["threshold"] = bpy.props.FloatProperty(
name="Threshold", min=0.1, max=0.9, default=0.5
)
self.prefix = "inpaint_invalid"
self.info = "Inpaint invalid tangents"
self.category = "Normals"
self.payload = lambda self, image, context: inpaint_tangents(image, self.threshold)
class NormalizeTangents_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "normalize_tangents"
self.info = "Make all tangents length 1"
self.category = "Normals"
self.payload = lambda self, image, context: normalize_tangents(image)
class ImageToMaterial_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "image_to_material"
self.info = "Image to material"
self.category = "Materials"
self.props["mat_name"] = bpy.props.StringProperty(
name="Name", description="Material name", default="Test"
)
def _pl(self, image, context):
from . import json_material
# json_out = json_material.read_material_nodes_to_json(bpy.data.materials[0])
# print(json_out)
# print(bpy.utils.resource_path('LOCAL'))
# print(os.getcwd())
# print(bpy.utils.user_resource('SCRIPTS', "addons"))
# print(directory)
# with open('mat.json', 'w') as out_file:
# json.dump(json_out, out_file)
import os
with open(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "default_material.json"),
"r",
) as in_file:
json_in = json.load(in_file)
mat = bpy.data.materials.get(self.mat_name) or bpy.data.materials.new(self.mat_name)
json_material.overwrite_material_from_json(mat, json_in)
# import pprint
# pp = pprint.PrettyPrinter(indent=4)
# pp.pprint(d_nodes)
base_image = image_ops.get_area_image(context)
base_data = image_ops.image_to_ndarray(base_image)
# ----- Crop to square
print("Crop image to square")
base_data = crop_to_square(base_data)
h, w = base_data.shape[:2]
print(f"({w}, {h}) after crop")
min_dim = min(h, w)
# ----- Make seamless image
print("Make seamless diffuse")
# TODO: check this is optimal
# data_d = hi_pass_balance(base_data, min_dim, min_dim // 2)
data_d = hi_pass_balance(base_data, min_dim, min_dim, [1.0, 0.0, 0.0])
knife_result = knife_seamless(
data_d, h // 3 // 2, w // 3 // 2, 4, 12.0, 8, [1.0, 1.0, 1.0]
)
# Save new width and height after seamless knife cut
h, w = knife_result.shape[:2]
print(f"({w}, {h}) after seamless cut")
img_d = image_ops.image_create_overwrite(base_image.name + "_d", w, h, "sRGB")
image_ops.ndarray_to_image(img_d, knife_result)
mat.node_tree.nodes["Diffuse Texture"].image = img_d
# ----- Create normal map image
print("Make normal map")
img_n = image_ops.image_create_overwrite(base_image.name + "_n", w, h, "Non-Color")
# image_ops.ndarray_to_image(img_n, texture_to_normals(knife_result, 0.1, 0.2, 0.7))
image_ops.ndarray_to_image(img_n, texture_to_normals(knife_result, 0.05, 0.3, 0.6))
mat.node_tree.nodes["Normal Texture"].image = img_n
# ----- Create height map
print("Make height map for roughness")
img_h = image_ops.image_create_overwrite(base_image.name + "_h", w, h, "Non-Color")
image_ops.ndarray_to_image(
img_h, curvature_to_height(knife_result, 0.5, iterations=500)
)
mat.node_tree.nodes["Roughness Texture"].image = img_h
mat.node_tree.nodes["Invert"].inputs["Fac"].default_value = 1.0
mat.node_tree.nodes["Gamma"].inputs["Gamma"].default_value = 0.5
mat.node_tree.nodes["Normal Map"].inputs["Strength"].default_value = 4.0
return image
self.payload = _pl
# class StoreMaterialTemplate_IOP(image_ops.ImageOperatorGenerator):
# def generate(self):
# self.prefix = "store_material_template"
# self.info = "Store material template"
# self.category = "Materials"
# self.props["mat_name"] = bpy.props.StringProperty(
# name="Name", description="Material name", default="Test"
# )
# def _pl(self, image, context):
# from . import json_material
# with open("default_material.json", "w") as out_file:
# json.dump(
# json_material.read_material_nodes_to_json(bpy.data.materials[self.mat_name]),
# out_file,
# )
# return image
# self.payload = _pl
# additional_classes = [BTT_InstallLibraries, BTT_AddonPreferences]
additional_classes = []
register, unregister = image_ops.create(locals(), additional_classes)
| gpl-2.0 | 4,774,572,535,130,893,000 | 31.500582 | 100 | 0.534852 | false |
MacGyverNL/alot | setup.py | 1 | 1650 | #!/usr/bin/env python3
from setuptools import setup, find_packages
import alot
setup(
name='alot',
version=alot.__version__,
description=alot.__description__,
author=alot.__author__,
author_email=alot.__author_email__,
url=alot.__url__,
license=alot.__copyright__,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console :: Curses',
'Framework :: AsyncIO',
'Intended Audience :: End Users/Desktop',
(
'License :: OSI Approved'
':: GNU General Public License v3 or later (GPLv3+)'),
'Operating System :: POSIX',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Communications :: Email :: Email Clients (MUA)',
'Topic :: Database :: Front-Ends',
],
packages=find_packages(exclude=['tests*']),
package_data={
'alot': [
'defaults/alot.rc.spec',
'defaults/notmuch.rc.spec',
'defaults/abook_contacts.spec',
'defaults/default.theme',
'defaults/default.bindings',
'defaults/config.stub',
'defaults/theme.spec',
]
},
entry_points={
'console_scripts':
['alot = alot.__main__:main'],
},
install_requires=[
'notmuch>=0.27',
'urwid>=1.3.0',
'urwidtrees>=1.0',
'twisted>=10.2.0',
'python-magic',
'configobj>=4.7.0',
'gpg'
],
provides=['alot'],
test_suite="tests",
python_requires=">=3.6",
)
| gpl-3.0 | -1,418,785,842,951,232,000 | 27.448276 | 66 | 0.527273 | false |
plowman/python-mcparseface | models/syntaxnet/tensorflow/tensorflow/contrib/ffmpeg/encode_audio_op_test.py | 6 | 1742 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.encode_audio_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from tensorflow.contrib import ffmpeg
from tensorflow.python.platform import resource_loader
class EncodeAudioOpTest(tf.test.TestCase):
def testRoundTrip(self):
"""Fabricates some audio, creates a wav file, reverses it, and compares."""
with self.test_session():
path = os.path.join(
resource_loader.get_data_files_path(), 'testdata/mono_10khz.wav')
with open(path, 'rb') as f:
original_contents = f.read()
audio_op = ffmpeg.decode_audio(
original_contents, file_format='wav', samples_per_second=10000,
channel_count=1)
encode_op = ffmpeg.encode_audio(
audio_op, file_format='wav', samples_per_second=10000)
encoded_contents = encode_op.eval()
self.assertEqual(original_contents, encoded_contents)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 1,357,190,431,181,552,600 | 33.84 | 79 | 0.679104 | false |
jalanb/dotjab | src/python/y.py | 2 | 1435 | """y not?"""
import os
import argv
argv.add_options([
('delete', 'delete python compiled files as well', False),
('wipe', 'remove known garbage', False),
('stat', 'run svn stat', False),
('tags', 'refresh the tags file', True),
('verbose', 'run ptags verbosely', False),
])
from ls import ly
from repositories import svn
def remove_globs(globs):
for glob in globs:
#print glob, [f for f in argv.first_directory.files(glob)]
for p in argv.first_directory.listdir(glob):
if p.islink():
p.unlink()
elif p.isfile():
p.remove()
elif p.isdir():
p.rmdir()
else:
raise ValueError('Do not know how to remove %s' % p)
def wipe():
remove_globs([
'*~', '.*~', '*.orig', 'fred*', 'mary',
'*.tmp', '*.bak', 'one', 'two'])
_ = [f.rm() for f in argv.first_directory.files('*.fail') if not f.size]
def delete():
remove_globs(['*.pyc', '*.pyo'])
def tags():
import ptags
ptags.read_write_dir(argv.first_directory)
def main():
if argv.first_directory != os.getcwd():
print 'cd', argv.first_directory
argv.first_directory.cd()
for method in argv.methods:
method()
ly.show()
if argv.options.stat:
svn.show_stat(argv.first_directory)
if __name__ == '__main__':
ly.prepare_argv()
argv.main(main)
| mit | -3,757,691,127,534,682,000 | 21.421875 | 76 | 0.544948 | false |
ArnossArnossi/django | tests/template_tests/test_context.py | 166 | 5389 | # -*- coding: utf-8 -*-
from django.http import HttpRequest
from django.template import (
Context, Engine, RequestContext, Template, Variable, VariableDoesNotExist,
)
from django.template.context import RenderContext
from django.test import RequestFactory, SimpleTestCase
class ContextTests(SimpleTestCase):
def test_context(self):
c = Context({"a": 1, "b": "xyzzy"})
self.assertEqual(c["a"], 1)
self.assertEqual(c.push(), {})
c["a"] = 2
self.assertEqual(c["a"], 2)
self.assertEqual(c.get("a"), 2)
self.assertEqual(c.pop(), {"a": 2})
self.assertEqual(c["a"], 1)
self.assertEqual(c.get("foo", 42), 42)
def test_push_context_manager(self):
c = Context({"a": 1})
with c.push():
c['a'] = 2
self.assertEqual(c['a'], 2)
self.assertEqual(c['a'], 1)
with c.push(a=3):
self.assertEqual(c['a'], 3)
self.assertEqual(c['a'], 1)
def test_update_context_manager(self):
c = Context({"a": 1})
with c.update({}):
c['a'] = 2
self.assertEqual(c['a'], 2)
self.assertEqual(c['a'], 1)
with c.update({'a': 3}):
self.assertEqual(c['a'], 3)
self.assertEqual(c['a'], 1)
def test_setdefault(self):
c = Context()
x = c.setdefault('x', 42)
self.assertEqual(x, 42)
self.assertEqual(c['x'], 42)
x = c.setdefault('x', 100)
self.assertEqual(x, 42)
self.assertEqual(c['x'], 42)
def test_resolve_on_context_method(self):
"""
#17778 -- Variable shouldn't resolve RequestContext methods
"""
empty_context = Context()
with self.assertRaises(VariableDoesNotExist):
Variable('no_such_variable').resolve(empty_context)
with self.assertRaises(VariableDoesNotExist):
Variable('new').resolve(empty_context)
self.assertEqual(
Variable('new').resolve(Context({'new': 'foo'})),
'foo',
)
def test_render_context(self):
test_context = RenderContext({'fruit': 'papaya'})
# Test that push() limits access to the topmost dict
test_context.push()
test_context['vegetable'] = 'artichoke'
self.assertEqual(list(test_context), ['vegetable'])
self.assertNotIn('fruit', test_context)
with self.assertRaises(KeyError):
test_context['fruit']
self.assertIsNone(test_context.get('fruit'))
def test_flatten_context(self):
a = Context()
a.update({'a': 2})
a.update({'b': 4})
a.update({'c': 8})
self.assertEqual(a.flatten(), {
'False': False, 'None': None, 'True': True,
'a': 2, 'b': 4, 'c': 8
})
def test_context_comparable(self):
"""
#21765 -- equality comparison should work
"""
test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
self.assertEqual(Context(test_data), Context(test_data))
a = Context()
b = Context()
self.assertEqual(a, b)
# update only a
a.update({'a': 1})
self.assertNotEqual(a, b)
# update both to check regression
a.update({'c': 3})
b.update({'c': 3})
self.assertNotEqual(a, b)
# make contexts equals again
b.update({'a': 1})
self.assertEqual(a, b)
def test_copy_request_context_twice(self):
"""
#24273 -- Copy twice shouldn't raise an exception
"""
RequestContext(HttpRequest()).new().new()
class RequestContextTests(SimpleTestCase):
def test_include_only(self):
"""
#15721 -- ``{% include %}`` and ``RequestContext`` should work
together.
"""
engine = Engine(loaders=[
('django.template.loaders.locmem.Loader', {
'child': '{{ var|default:"none" }}',
}),
])
request = RequestFactory().get('/')
ctx = RequestContext(request, {'var': 'parent'})
self.assertEqual(engine.from_string('{% include "child" %}').render(ctx), 'parent')
self.assertEqual(engine.from_string('{% include "child" only %}').render(ctx), 'none')
def test_stack_size(self):
"""
        #7116 -- Optimize RequestContext construction
"""
request = RequestFactory().get('/')
ctx = RequestContext(request, {})
# The stack should now contain 3 items:
# [builtins, supplied context, context processor, empty dict]
self.assertEqual(len(ctx.dicts), 4)
def test_context_comparable(self):
# Create an engine without any context processors.
test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
# test comparing RequestContext to prevent problems if somebody
# adds __eq__ in the future
request = RequestFactory().get('/')
self.assertEqual(
RequestContext(request, dict_=test_data),
RequestContext(request, dict_=test_data),
)
def test_modify_context_and_render(self):
template = Template('{{ foo }}')
request = RequestFactory().get('/')
context = RequestContext(request, {})
context['foo'] = 'foo'
self.assertEqual(template.render(context), 'foo')
| bsd-3-clause | 4,328,593,847,120,427,000 | 29.446328 | 94 | 0.547597 | false |
waseem18/oh-mainline | mysite/project/tasks/__init__.py | 15 | 1736 | # This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.mail import send_mail
def send_email_to_all_because_project_icon_was_marked_as_wrong(project__pk, project__name, project_icon_url):
# links you to the project page
# links you to the secret, wrong project icon
    # TODO: figure out if we should be worried about project icons getting deleted.
    # I think that we don't; provide a justification here.
project_page_url = 'https://openhatch.org/projects/' + project__name
# FIXME: this url
hidden_project_icon_url = 'https://openhatch.org/static/images/icons/projects/'
subject = '[OH]- ' + project__name + ' icon was marked as incorrect'
body = ''
body += 'project name: ' + project__name + '\n'
body += 'project url: ' + project_page_url + '\n'
body += 'project icon url (currently not displayed): ' + \
project_icon_url + '\n'
body += 'thanks'
return send_mail(subject, body, '[email protected]', ['[email protected]'], fail_silently=False)
| agpl-3.0 | -756,724,364,706,574,200 | 47.222222 | 109 | 0.706221 | false |
kei-yamazaki/jenkins-job-builder | jenkins_jobs/modules/wrappers.py | 1 | 54570 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Wrappers can alter the way the build is run as well as the build output.
**Component**: wrappers
:Macro: wrapper
:Entry Point: jenkins_jobs.wrappers
"""
import logging
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.modules.builders import create_builders
from jenkins_jobs.modules.helpers import config_file_provider_builder
def ci_skip(parser, xml_parent, data):
"""yaml: ci-skip
Skip making a build for certain push.
Just add [ci skip] into your commit's message to let Jenkins know,
that you do not want to perform build for the next push.
Requires the Jenkins :jenkins-wiki:`Ci Skip Plugin <Ci+Skip+Plugin>`.
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/ci-skip001.yaml
"""
rpobj = XML.SubElement(xml_parent, 'ruby-proxy-object')
robj = XML.SubElement(rpobj, 'ruby-object', attrib={
'pluginid': 'ci-skip',
'ruby-class': 'Jenkins::Tasks::BuildWrapperProxy'
})
pluginid = XML.SubElement(robj, 'pluginid', {
'pluginid': 'ci-skip', 'ruby-class': 'String'
})
pluginid.text = 'ci-skip'
obj = XML.SubElement(robj, 'object', {
'ruby-class': 'CiSkipWrapper', 'pluginid': 'ci-skip'
})
XML.SubElement(obj, 'ci__skip', {
'pluginid': 'ci-skip', 'ruby-class': 'NilClass'
})
def config_file_provider(parser, xml_parent, data):
"""yaml: config-file-provider
Provide configuration files (i.e., settings.xml for maven etc.)
which will be copied to the job's workspace.
Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
<Config+File+Provider+Plugin>`.
:arg list files: List of managed config files made up of three
parameters
:files: * **file-id** (`str`) -- The identifier for the managed config
file
* **target** (`str`) -- Define where the file should be created
(optional)
* **variable** (`str`) -- Define an environment variable to be
used (optional)
Example:
.. literalinclude:: \
/../../tests/wrappers/fixtures/config-file-provider003.yaml
:language: yaml
"""
cfp = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.configfiles.'
'buildwrapper.ConfigFileBuildWrapper')
cfp.set('plugin', 'config-file-provider')
config_file_provider_builder(cfp, data)
def logfilesize(parser, xml_parent, data):
"""yaml: logfilesize
Abort the build if its logfile becomes too big.
Requires the Jenkins :jenkins-wiki:`Logfilesizechecker Plugin
<Logfilesizechecker+Plugin>`.
:arg bool set-own: Use job specific maximum log size instead of global
config value (default false).
    :arg bool fail: Mark builds aborted by this wrapper as "failed"
      (default false).
:arg int size: Abort the build if logfile size is bigger than this
value (in MiB, default 128). Only applies if set-own is true.
Minimum config example:
.. literalinclude:: /../../tests/wrappers/fixtures/logfilesize002.yaml
Full config example:
.. literalinclude:: /../../tests/wrappers/fixtures/logfilesize001.yaml
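    An illustrative sketch using the documented options (values are
    placeholders)::

        wrappers:
          - logfilesize:
              set-own: true
              size: 1024
              fail: true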
"""
lfswrapper = XML.SubElement(xml_parent,
'hudson.plugins.logfilesizechecker.'
'LogfilesizecheckerWrapper')
lfswrapper.set("plugin", "logfilesizechecker")
XML.SubElement(lfswrapper, 'setOwn').text = str(
data.get('set-own', 'false')).lower()
XML.SubElement(lfswrapper, 'maxLogSize').text = str(
data.get('size', '128')).lower()
XML.SubElement(lfswrapper, 'failBuild').text = str(
data.get('fail', 'false')).lower()
def timeout(parser, xml_parent, data):
"""yaml: timeout
Abort the build if it runs too long.
Requires the Jenkins :jenkins-wiki:`Build Timeout Plugin
<Build-timeout+Plugin>`.
:arg bool fail: Mark the build as failed (default false)
:arg bool write-description: Write a message in the description
(default false)
:arg int timeout: Abort the build after this number of minutes (default 3)
:arg str timeout-var: Export an environment variable to reference the
timeout value (optional)
:arg str type: Timeout type to use (default absolute)
:arg int elastic-percentage: Percentage of the three most recent builds
where to declare a timeout (default 0)
:arg int elastic-default-timeout: Timeout to use if there were no previous
builds (default 3)
:type values:
* **likely-stuck**
* **elastic**
* **absolute**
* **no-activity**
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/timeout001.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/timeout002.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/timeout003.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/timeout004.yaml
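    An illustrative absolute-timeout sketch (values are placeholders)::

        wrappers:
          - timeout:
              timeout: 90
              timeout-var: BUILD_TIMEOUT
              fail: true
              type: absolute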
"""
twrapper = XML.SubElement(xml_parent,
'hudson.plugins.build__timeout.'
'BuildTimeoutWrapper')
XML.SubElement(twrapper, 'timeoutMinutes').text = str(
data.get('timeout', 3))
timeout_env_var = data.get('timeout-var')
if timeout_env_var:
XML.SubElement(twrapper, 'timeoutEnvVar').text = str(timeout_env_var)
XML.SubElement(twrapper, 'failBuild').text = str(
data.get('fail', 'false')).lower()
XML.SubElement(twrapper, 'writingDescription').text = str(
data.get('write-description', 'false')).lower()
XML.SubElement(twrapper, 'timeoutPercentage').text = str(
data.get('elastic-percentage', 0))
XML.SubElement(twrapper, 'timeoutMinutesElasticDefault').text = str(
data.get('elastic-default-timeout', 3))
tout_type = str(data.get('type', 'absolute')).lower()
if tout_type == 'likely-stuck':
tout_type = 'likelyStuck'
elif tout_type == 'no-activity':
noactivity = XML.SubElement(twrapper,
'strategy',
{'class': 'hudson.plugins.build_timeout.impl.NoActivityTimeOutStrategy'})
XML.SubElement(noactivity,
'timeout').text = str(
data.get('no-activity-timeout', 180000))
XML.SubElement(twrapper, 'timeoutType').text = tout_type
def timestamps(parser, xml_parent, data):
"""yaml: timestamps
Add timestamps to the console log.
Requires the Jenkins :jenkins-wiki:`Timestamper Plugin <Timestamper>`.
Example::
wrappers:
- timestamps
"""
XML.SubElement(xml_parent,
'hudson.plugins.timestamper.TimestamperBuildWrapper')
def ansicolor(parser, xml_parent, data):
"""yaml: ansicolor
Translate ANSI color codes to HTML in the console log.
Requires the Jenkins :jenkins-wiki:`Ansi Color Plugin <AnsiColor+Plugin>`.
:arg string colormap: (optional) color mapping to use
Examples::
wrappers:
- ansicolor
# Explicitly setting the colormap
wrappers:
- ansicolor:
colormap: vga
"""
cwrapper = XML.SubElement(
xml_parent,
'hudson.plugins.ansicolor.AnsiColorBuildWrapper')
# Optional colormap
colormap = data.get('colormap')
if colormap:
XML.SubElement(cwrapper, 'colorMapName').text = colormap
def mask_passwords(parser, xml_parent, data):
"""yaml: mask-passwords
Hide passwords in the console log.
Requires the Jenkins :jenkins-wiki:`Mask Passwords Plugin
<Mask+Passwords+Plugin>`.
Example::
wrappers:
- mask-passwords
"""
XML.SubElement(xml_parent,
'com.michelin.cio.hudson.plugins.maskpasswords.'
'MaskPasswordsBuildWrapper')
def workspace_cleanup(parser, xml_parent, data):
"""yaml: workspace-cleanup (pre-build)
Requires the Jenkins :jenkins-wiki:`Workspace Cleanup Plugin
<Workspace+Cleanup+Plugin>`.
The post-build workspace-cleanup is available as a publisher.
:arg list include: list of files to be included
:arg list exclude: list of files to be excluded
:arg bool dirmatch: Apply pattern to directories too
Example::
wrappers:
- workspace-cleanup:
include:
- "*.zip"
"""
p = XML.SubElement(xml_parent,
'hudson.plugins.ws__cleanup.PreBuildCleanup')
p.set("plugin", "[email protected]")
if "include" in data or "exclude" in data:
patterns = XML.SubElement(p, 'patterns')
for inc in data.get("include", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = inc
XML.SubElement(ptrn, 'type').text = "INCLUDE"
for exc in data.get("exclude", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = exc
XML.SubElement(ptrn, 'type').text = "EXCLUDE"
deldirs = XML.SubElement(p, 'deleteDirs')
deldirs.text = str(data.get("dirmatch", False)).lower()
def m2_repository_cleanup(parser, xml_parent, data):
"""yaml: m2-repository-cleanup
Configure M2 Repository Cleanup
Requires the Jenkins :jenkins-wiki:`M2 Repository Cleanup
<M2+Repository+Cleanup+Plugin>`.
:arg list patterns: List of patterns for artifacts to cleanup before
building. (optional)
This plugin allows you to configure a maven2 job to clean some or all of
the artifacts from the repository before it runs.
Example:
.. literalinclude:: \
../../tests/wrappers/fixtures/m2-repository-cleanup001.yaml
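    An illustrative sketch (the pattern below is a placeholder)::

        wrappers:
          - m2-repository-cleanup:
              patterns:
                - "org/example/**"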
"""
m2repo = XML.SubElement(
xml_parent,
'hudson.plugins.m2__repo__reaper.M2RepoReaperWrapper')
m2repo.set("plugin", "m2-repo-reaper")
patterns = data.get("patterns", [])
XML.SubElement(m2repo, 'artifactPatterns').text = ",".join(patterns)
p = XML.SubElement(m2repo, 'patterns')
for pattern in patterns:
XML.SubElement(p, 'string').text = pattern
def rvm_env(parser, xml_parent, data):
"""yaml: rvm-env
Set the RVM implementation
Requires the Jenkins :jenkins-wiki:`Rvm Plugin <RVM+Plugin>`.
:arg str implementation: Type of implementation. Syntax is RUBY[@GEMSET],
such as '1.9.3' or 'jruby@foo'.
Example::
wrappers:
- rvm-env:
implementation: 1.9.3
"""
rpo = XML.SubElement(xml_parent,
'ruby-proxy-object')
ro_class = "Jenkins::Plugin::Proxies::BuildWrapper"
ro = XML.SubElement(rpo,
'ruby-object',
{'ruby-class': ro_class,
'pluginid': 'rvm'})
o = XML.SubElement(ro,
'object',
{'ruby-class': 'RvmWrapper',
'pluginid': 'rvm'})
XML.SubElement(o,
'impl',
{'pluginid': 'rvm',
'ruby-class': 'String'}).text = data['implementation']
XML.SubElement(ro,
'pluginid',
{'pluginid': 'rvm',
'ruby-class': 'String'}).text = "rvm"
def rbenv(parser, xml_parent, data):
"""yaml: rbenv
Set the rbenv implementation.
Requires the Jenkins :jenkins-wiki:`rbenv plugin <rbenv+plugin>`.
All parameters are optional.
:arg str ruby-version: Version of Ruby to use (default: 1.9.3-p484)
:arg bool ignore-local-version: If true, ignore local Ruby
version (defined in the ".ruby-version" file in workspace) even if it
has been defined (default: false)
:arg str preinstall-gem-list: List of gems to install
(default: 'bundler,rake')
:arg str rbenv-root: RBENV_ROOT (default: $HOME/.rbenv)
:arg str rbenv-repo: Which repo to clone rbenv from
(default: https://github.com/sstephenson/rbenv.git)
:arg str rbenv-branch: Which branch to clone rbenv from (default: master)
:arg str ruby-build-repo: Which repo to clone ruby-build from
(default: https://github.com/sstephenson/ruby-build.git)
:arg str ruby-build-branch: Which branch to clone ruby-build from
(default: master)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/rbenv003.yaml
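    An illustrative sketch overriding a few of the documented defaults
    (values are placeholders)::

        wrappers:
          - rbenv:
              ruby-version: 1.9.3-p484
              ignore-local-version: true
              preinstall-gem-list: 'bundler,rake'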
"""
mapping = [
# option, xml name, default value (text), attributes (hard coded)
("preinstall-gem-list", 'gem__list', 'bundler,rake'),
("rbenv-root", 'rbenv__root', '$HOME/.rbenv'),
("rbenv-repo", 'rbenv__repository',
'https://github.com/sstephenson/rbenv.git'),
("rbenv-branch", 'rbenv__revision', 'master'),
("ruby-build-repo", 'ruby__build__repository',
'https://github.com/sstephenson/ruby-build.git'),
("ruby-build-branch", 'ruby__build__revision', 'master'),
("ruby-version", 'version', '1.9.3-p484'),
]
rpo = XML.SubElement(xml_parent,
'ruby-proxy-object')
ro_class = "Jenkins::Tasks::BuildWrapperProxy"
ro = XML.SubElement(rpo,
'ruby-object',
{'ruby-class': ro_class,
'pluginid': 'rbenv'})
XML.SubElement(ro,
'pluginid',
{'pluginid': "rbenv",
'ruby-class': "String"}).text = "rbenv"
o = XML.SubElement(ro,
'object',
{'ruby-class': 'RbenvWrapper',
'pluginid': 'rbenv'})
for elem in mapping:
(optname, xmlname, val) = elem[:3]
xe = XML.SubElement(o,
xmlname,
{'ruby-class': "String",
'pluginid': "rbenv"})
if optname and optname in data:
val = data[optname]
if type(val) == bool:
xe.text = str(val).lower()
else:
xe.text = val
ignore_local_class = 'FalseClass'
if 'ignore-local-version' in data:
ignore_local_string = str(data['ignore-local-version']).lower()
if ignore_local_string == 'true':
ignore_local_class = 'TrueClass'
XML.SubElement(o,
'ignore__local__version',
{'ruby-class': ignore_local_class,
'pluginid': 'rbenv'})
def build_name(parser, xml_parent, data):
"""yaml: build-name
Set the name of the build
Requires the Jenkins :jenkins-wiki:`Build Name Setter Plugin
<Build+Name+Setter+Plugin>`.
:arg str name: Name for the build. Typically you would use a variable
from Jenkins in the name. The syntax would be ${FOO} for
the FOO variable.
Example::
wrappers:
- build-name:
name: Build-${FOO}
"""
bsetter = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.buildnamesetter.'
'BuildNameSetter')
XML.SubElement(bsetter, 'template').text = data['name']
def port_allocator(parser, xml_parent, data):
"""yaml: port-allocator
Assign unique TCP port numbers
Requires the Jenkins :jenkins-wiki:`Port Allocator Plugin
<Port+Allocator+Plugin>`.
:arg str name: Deprecated, use names instead
:arg list names: Variable list of names of the port or list of
specific port numbers
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/port-allocator002.yaml
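    An illustrative sketch using the ``names`` form (the names are
    placeholders)::

        wrappers:
          - port-allocator:
              names:
                - SERVER_PORT
                - DATABASE_PORT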
"""
pa = XML.SubElement(xml_parent,
'org.jvnet.hudson.plugins.port__allocator.'
'PortAllocator')
ports = XML.SubElement(pa, 'ports')
names = data.get('names')
if not names:
logger = logging.getLogger(__name__)
logger.warn('port_allocator name is deprecated, use a names list '
                    'instead')
names = [data['name']]
for name in names:
dpt = XML.SubElement(ports,
'org.jvnet.hudson.plugins.port__allocator.'
'DefaultPortType')
XML.SubElement(dpt, 'name').text = name
def locks(parser, xml_parent, data):
"""yaml: locks
Control parallel execution of jobs.
Requires the Jenkins :jenkins-wiki:`Locks and Latches Plugin
<Locks+and+Latches+plugin>`.
    :arg list: List of locks to use
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/locks002.yaml
:language: yaml
"""
locks = data
if locks:
lw = XML.SubElement(xml_parent,
'hudson.plugins.locksandlatches.LockWrapper')
locktop = XML.SubElement(lw, 'locks')
for lock in locks:
lockwrapper = XML.SubElement(locktop,
'hudson.plugins.locksandlatches.'
'LockWrapper_-LockWaitConfig')
XML.SubElement(lockwrapper, 'name').text = lock
def copy_to_slave(parser, xml_parent, data):
"""yaml: copy-to-slave
Copy files to slave before build
Requires the Jenkins :jenkins-wiki:`Copy To Slave Plugin
<Copy+To+Slave+Plugin>`.
:arg list includes: list of file patterns to copy
:arg list excludes: list of file patterns to exclude
:arg bool flatten: flatten directory structure
:arg str relative-to: base location of includes/excludes,
      must be userContent ($JENKINS_HOME/userContent),
home ($JENKINS_HOME) or workspace
:arg bool include-ant-excludes: exclude ant's default excludes
Example::
wrappers:
- copy-to-slave:
includes:
- file1
- file2*.txt
excludes:
- file2bad.txt
"""
p = 'com.michelin.cio.hudson.plugins.copytoslave.CopyToSlaveBuildWrapper'
cs = XML.SubElement(xml_parent, p)
XML.SubElement(cs, 'includes').text = ','.join(data.get('includes', ['']))
XML.SubElement(cs, 'excludes').text = ','.join(data.get('excludes', ['']))
XML.SubElement(cs, 'flatten').text = \
str(data.get('flatten', False)).lower()
XML.SubElement(cs, 'includeAntExcludes').text = \
str(data.get('include-ant-excludes', False)).lower()
rel = str(data.get('relative-to', 'userContent'))
opt = ('userContent', 'home', 'workspace')
if rel not in opt:
raise ValueError('relative-to must be one of %r' % opt)
XML.SubElement(cs, 'relativeTo').text = rel
# seems to always be false, can't find it in source code
XML.SubElement(cs, 'hudsonHomeRelative').text = 'false'
def inject(parser, xml_parent, data):
"""yaml: inject
Add or override environment variables to the whole build process
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`.
:arg str properties-file: path to the properties file (default '')
:arg str properties-content: key value pair of properties (default '')
:arg str script-file: path to the script file (default '')
:arg str script-content: contents of a script (default '')
:arg str groovy-script-content: contents of a groovy script (default '')
Example::
wrappers:
- inject:
properties-file: /usr/local/foo
properties-content: PATH=/foo/bar
script-file: /usr/local/foo.sh
script-content: echo $PATH
"""
eib = XML.SubElement(xml_parent, 'EnvInjectBuildWrapper')
info = XML.SubElement(eib, 'info')
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesFilePath', data.get('properties-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesContent', data.get('properties-content'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptFilePath', data.get('script-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptContent', data.get('script-content'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'groovyScriptContent', data.get('groovy-script-content'))
XML.SubElement(info, 'loadFilesFromMaster').text = 'false'
def inject_ownership_variables(parser, xml_parent, data):
"""yaml: inject-ownership-variables
Inject ownership variables to the build as environment variables.
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`
and Jenkins :jenkins-wiki:`Ownership plugin <Ownership+Plugin>`.
:arg bool job-variables: inject job ownership variables to the job
(default false)
:arg bool node-variables: inject node ownership variables to the job
(default false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/ownership001.yaml
"""
ownership = XML.SubElement(xml_parent, 'com.synopsys.arc.jenkins.plugins.'
'ownership.wrappers.OwnershipBuildWrapper')
XML.SubElement(ownership, 'injectNodeOwnership').text = \
str(data.get('node-variables', False)).lower()
XML.SubElement(ownership, 'injectJobOwnership').text = \
str(data.get('job-variables', False)).lower()
def inject_passwords(parser, xml_parent, data):
"""yaml: inject-passwords
Inject passwords to the build as environment variables.
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`.
:arg bool global: inject global passwords to the job
    :arg bool mask-password-params: mask password parameters
:arg list job-passwords: key value pair of job passwords
:Parameter: * **name** (`str`) Name of password
* **password** (`str`) Encrypted password
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/passwords001.yaml
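    An illustrative sketch (the password entry is a placeholder; per the
    parameter description above, the value must be an encrypted password,
    not plain text)::

        wrappers:
          - inject-passwords:
              global: true
              mask-password-params: true
              job-passwords:
                - name: ADMIN_PASSWORD
                  password: <encrypted-value>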
"""
eib = XML.SubElement(xml_parent, 'EnvInjectPasswordWrapper')
XML.SubElement(eib, 'injectGlobalPasswords').text = \
str(data.get('global', False)).lower()
XML.SubElement(eib, 'maskPasswordParameters').text = \
str(data.get('mask-password-params', False)).lower()
entries = XML.SubElement(eib, 'passwordEntries')
passwords = data.get('job-passwords', [])
if passwords:
for password in passwords:
entry = XML.SubElement(entries, 'EnvInjectPasswordEntry')
XML.SubElement(entry, 'name').text = password['name']
XML.SubElement(entry, 'value').text = password['password']
def env_file(parser, xml_parent, data):
"""yaml: env-file
Add or override environment variables to the whole build process
Requires the Jenkins :jenkins-wiki:`Environment File Plugin
<Envfile+Plugin>`.
:arg str properties-file: path to the properties file (default '')
Example::
wrappers:
- env-file:
properties-file: ${WORKSPACE}/foo
"""
eib = XML.SubElement(xml_parent,
'hudson.plugins.envfile.EnvFileBuildWrapper')
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
eib, 'filePath', data.get('properties-file'))
def env_script(parser, xml_parent, data):
"""yaml: env-script
Add or override environment variables to the whole build process.
Requires the Jenkins :jenkins-wiki:`Environment Script Plugin
<Environment+Script+Plugin>`.
    :arg str script-content: The script to run (default: '')
    :arg bool only-run-on-parent: Only applicable for Matrix Jobs. If true, run
      only on the matrix parent job (default: false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/env-script001.yaml
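    An illustrative sketch (the script content is a placeholder)::

        wrappers:
          - env-script:
              script-content: 'echo "FOO=bar"'
              only-run-on-parent: false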
"""
el = XML.SubElement(xml_parent, 'com.lookout.jenkins.EnvironmentScript')
XML.SubElement(el, 'script').text = data.get('script-content', '')
only_on_parent = str(data.get('only-run-on-parent', False)).lower()
XML.SubElement(el, 'onlyRunOnParent').text = only_on_parent
def jclouds(parser, xml_parent, data):
"""yaml: jclouds
Uses JClouds to provide slave launching on most of the currently
usable Cloud infrastructures.
Requires the Jenkins :jenkins-wiki:`JClouds Plugin <JClouds+Plugin>`.
:arg bool single-use: Whether or not to terminate the slave after use
(default: False).
:arg list instances: The name of the jclouds template to create an
instance from, and its parameters.
:arg str cloud-name: The name of the jclouds profile containing the
specified template.
:arg int count: How many instances to create (default: 1).
:arg bool stop-on-terminate: Whether or not to suspend instead of terminate
the instance (default: False).
Example::
wrappers:
- jclouds:
single-use: True
instances:
- jenkins-dev-slave:
cloud-name: mycloud1
count: 1
stop-on-terminate: True
- jenkins-test-slave:
cloud-name: mycloud2
count: 2
stop-on-terminate: False
"""
if 'instances' in data:
buildWrapper = XML.SubElement(xml_parent,
'jenkins.plugins.jclouds.compute.'
'JCloudsBuildWrapper')
instances = XML.SubElement(buildWrapper, 'instancesToRun')
for foo in data['instances']:
for template, params in foo.items():
instance = XML.SubElement(instances,
'jenkins.plugins.jclouds.compute.'
'InstancesToRun')
XML.SubElement(instance, 'templateName').text = template
XML.SubElement(instance, 'cloudName').text = \
params.get('cloud-name', '')
XML.SubElement(instance, 'count').text = \
str(params.get('count', 1))
XML.SubElement(instance, 'suspendOrTerminate').text = \
str(params.get('stop-on-terminate', False)).lower()
if data.get('single-use'):
XML.SubElement(xml_parent,
'jenkins.plugins.jclouds.compute.'
'JCloudsOneOffSlave')
def build_user_vars(parser, xml_parent, data):
"""yaml: build-user-vars
Set environment variables to the value of the user that started the build.
Requires the Jenkins :jenkins-wiki:`Build User Vars Plugin
<Build+User+Vars+Plugin>`.
Example::
wrappers:
- build-user-vars
"""
XML.SubElement(xml_parent, 'org.jenkinsci.plugins.builduser.BuildUser')
def release(parser, xml_parent, data):
"""yaml: release
Add release build configuration
Requires the Jenkins :jenkins-wiki:`Release Plugin <Release+Plugin>`.
:arg bool keep-forever: Keep build forever (default true)
:arg bool override-build-parameters: Enable build-parameter override
(default false)
:arg string version-template: Release version template (default '')
:arg list parameters: Release parameters (see the :ref:`Parameters` module)
:arg list pre-build: Pre-build steps (see the :ref:`Builders` module)
:arg list post-build: Post-build steps (see :ref:`Builders`)
:arg list post-success: Post successful-build steps (see :ref:`Builders`)
    :arg list post-fail: Post failed-build steps (see :ref:`Builders`)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/release001.yaml
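    An illustrative sketch combining a few of the documented options (the
    version template and the shell step below are placeholders)::

        wrappers:
          - release:
              keep-forever: false
              version-template: 'RELEASE-${BUILD_NUMBER}'
              post-success:
                - shell: 'echo deploying release'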
"""
relwrap = XML.SubElement(xml_parent,
'hudson.plugins.release.ReleaseWrapper')
# For 'keep-forever', the sense of the XML flag is the opposite of
# the YAML flag.
no_keep_forever = 'false'
if str(data.get('keep-forever', True)).lower() == 'false':
no_keep_forever = 'true'
XML.SubElement(relwrap, 'doNotKeepLog').text = no_keep_forever
XML.SubElement(relwrap, 'overrideBuildParameters').text = str(
data.get('override-build-parameters', False)).lower()
XML.SubElement(relwrap, 'releaseVersionTemplate').text = data.get(
'version-template', '')
parameters = data.get('parameters', [])
if parameters:
pdef = XML.SubElement(relwrap, 'parameterDefinitions')
for param in parameters:
parser.registry.dispatch('parameter', parser, pdef, param)
builder_steps = {
'pre-build': 'preBuildSteps',
'post-build': 'postBuildSteps',
'post-success': 'postSuccessfulBuildSteps',
'post-fail': 'postFailedBuildSteps',
}
for step in builder_steps.keys():
for builder in data.get(step, []):
parser.registry.dispatch('builder', parser,
XML.SubElement(relwrap,
builder_steps[step]),
builder)
def sauce_ondemand(parser, xml_parent, data):
"""yaml: sauce-ondemand
Allows you to integrate Sauce OnDemand with Jenkins. You can
automate the setup and tear down of Sauce Connect and integrate
the Sauce OnDemand results videos per test. Requires the Jenkins
:jenkins-wiki:`Sauce OnDemand Plugin <Sauce+OnDemand+Plugin>`.
:arg bool enable-sauce-connect: launches a SSH tunnel from their cloud
to your private network (default false)
:arg str sauce-host: The name of the selenium host to be used. For
tests run using Sauce Connect, this should be localhost.
      ondemand.saucelabs.com can also be used to connect directly to
      Sauce OnDemand. The value of the host will be stored in the
      SAUCE_ONDEMAND_HOST environment variable. (default '')
:arg str sauce-port: The name of the Selenium Port to be used. For
tests run using Sauce Connect, this should be 4445. If using
ondemand.saucelabs.com for the Selenium Host, then use 4444.
The value of the port will be stored in the SAUCE_ONDEMAND_PORT
environment variable. (default '')
:arg str override-username: If set then api-access-key must be set.
Overrides the username from the global config. (default '')
:arg str override-api-access-key: If set then username must be set.
Overrides the api-access-key set in the global config. (default '')
:arg str starting-url: The value set here will be stored in the
      SELENIUM_STARTING_URL environment variable. Only used when type
is selenium. (default '')
:arg str type: Type of test to run (default selenium)
:type values:
* **selenium**
* **webdriver**
:arg list platforms: The platforms to run the tests on. Platforms
supported are dynamically retrieved from sauce labs. The format of
the values has only the first letter capitalized, no spaces, underscore
between os and version, underscore in internet_explorer, everything
else is run together. If there are not multiple version of the browser
then just the first version number is used.
Examples: Mac_10.8iphone5.1 or Windows_2003firefox10
or Windows_2012internet_explorer10 (default '')
:arg bool launch-sauce-connect-on-slave: Whether to launch sauce connect
on the slave. (default false)
:arg str https-protocol: The https protocol to use (default '')
:arg str sauce-connect-options: Options to pass to sauce connect
(default '')
Example::
wrappers:
- sauce-ondemand:
enable-sauce-connect: true
sauce-host: foo
sauce-port: 8080
override-username: foo
override-api-access-key: 123lkj123kh123l;k12323
type: webdriver
platforms:
- Linuxandroid4
- Linuxfirefox10
- Linuxfirefox11
launch-sauce-connect-on-slave: true
"""
sauce = XML.SubElement(xml_parent, 'hudson.plugins.sauce__ondemand.'
'SauceOnDemandBuildWrapper')
XML.SubElement(sauce, 'enableSauceConnect').text = str(data.get(
'enable-sauce-connect', False)).lower()
host = data.get('sauce-host', '')
XML.SubElement(sauce, 'seleniumHost').text = host
port = data.get('sauce-port', '')
XML.SubElement(sauce, 'seleniumPort').text = port
# Optional override global authentication
username = data.get('override-username')
key = data.get('override-api-access-key')
if username and key:
cred = XML.SubElement(sauce, 'credentials')
XML.SubElement(cred, 'username').text = username
XML.SubElement(cred, 'apiKey').text = key
atype = data.get('type', 'selenium')
info = XML.SubElement(sauce, 'seleniumInformation')
if atype == 'selenium':
url = data.get('starting-url', '')
XML.SubElement(info, 'startingURL').text = url
browsers = XML.SubElement(info, 'seleniumBrowsers')
for platform in data['platforms']:
XML.SubElement(browsers, 'string').text = platform
XML.SubElement(info, 'isWebDriver').text = 'false'
XML.SubElement(sauce, 'seleniumBrowsers',
{'reference': '../seleniumInformation/'
'seleniumBrowsers'})
if atype == 'webdriver':
browsers = XML.SubElement(info, 'webDriverBrowsers')
for platform in data['platforms']:
XML.SubElement(browsers, 'string').text = platform
XML.SubElement(info, 'isWebDriver').text = 'true'
XML.SubElement(sauce, 'webDriverBrowsers',
{'reference': '../seleniumInformation/'
'webDriverBrowsers'})
XML.SubElement(sauce, 'launchSauceConnectOnSlave').text = str(data.get(
'launch-sauce-connect-on-slave', False)).lower()
protocol = data.get('https-protocol', '')
XML.SubElement(sauce, 'httpsProtocol').text = protocol
options = data.get('sauce-connect-options', '')
XML.SubElement(sauce, 'options').text = options
def pathignore(parser, xml_parent, data):
"""yaml: pathignore
This plugin allows SCM-triggered jobs to ignore
build requests if only certain paths have changed.
Requires the Jenkins :jenkins-wiki:`Pathignore Plugin <Pathignore+Plugin>`.
:arg str ignored: A set of patterns to define ignored changes
Example::
wrappers:
- pathignore:
ignored: "docs, tests"
"""
ruby = XML.SubElement(xml_parent, 'ruby-proxy-object')
robj = XML.SubElement(ruby, 'ruby-object', attrib={
'pluginid': 'pathignore',
'ruby-class': 'Jenkins::Plugin::Proxies::BuildWrapper'
})
pluginid = XML.SubElement(robj, 'pluginid', {
'pluginid': 'pathignore', 'ruby-class': 'String'
})
pluginid.text = 'pathignore'
obj = XML.SubElement(robj, 'object', {
'ruby-class': 'PathignoreWrapper', 'pluginid': 'pathignore'
})
ignored = XML.SubElement(obj, 'ignored__paths', {
'pluginid': 'pathignore', 'ruby-class': 'String'
})
ignored.text = data.get('ignored', '')
XML.SubElement(obj, 'invert__ignore', {
'ruby-class': 'FalseClass', 'pluginid': 'pathignore'
})
def pre_scm_buildstep(parser, xml_parent, data):
"""yaml: pre-scm-buildstep
Execute a Build Step before running the SCM
Requires the Jenkins :jenkins-wiki:`pre-scm-buildstep <pre-scm-buildstep>`.
:arg list buildsteps: List of build steps to execute
:Buildstep: Any acceptable builder, as seen in the example
Example::
wrappers:
- pre-scm-buildstep:
- shell: |
#!/bin/bash
echo "Doing somethiung cool"
- shell: |
#!/bin/zsh
echo "Doing somethin cool with zsh"
- ant: "target1 target2"
ant-name: "Standard Ant"
- inject:
properties-file: example.prop
properties-content: EXAMPLE=foo-bar
"""
bsp = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.preSCMbuildstep.'
'PreSCMBuildStepsWrapper')
bs = XML.SubElement(bsp, 'buildSteps')
for step in data:
for edited_node in create_builders(parser, step):
bs.append(edited_node)
def logstash(parser, xml_parent, data):
"""yaml: logstash build wrapper
Dump the Jenkins console output to Logstash
Requires the Jenkins :jenkins-wiki:`logstash plugin <Logstash+Plugin>`.
    :arg bool use-redis: Whether to use Redis (default: true)
:arg redis: Redis config params
:Parameter: * **host** (`str`) Redis hostname\
(default 'localhost')
    :Parameter: * **port** (`int`) Redis port number (default 6379)
:Parameter: * **database-number** (`int`)\
Redis database number (default 0)
:Parameter: * **database-password** (`str`)\
Redis database password (default '')
:Parameter: * **data-type** (`str`)\
Redis database type (default 'list')
:Parameter: * **key** (`str`) Redis key (default 'logstash')
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/logstash001.yaml
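    An illustrative sketch using the documented Redis defaults (the host
    value is a placeholder, and the wrapper is assumed to be registered
    as ``logstash``)::

        wrappers:
          - logstash:
              use-redis: true
              redis:
                host: redis.example.com
                port: 6379
                key: logstash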
"""
logstash = XML.SubElement(xml_parent,
'jenkins.plugins.logstash.'
'LogstashBuildWrapper')
logstash.set('plugin', '[email protected]')
redis_bool = XML.SubElement(logstash, 'useRedis')
redis_bool.text = str(data.get('use-redis', True)).lower()
if data.get('use-redis'):
redis_config = data.get('redis', {})
redis_sub_element = XML.SubElement(logstash, 'redis')
host_sub_element = XML.SubElement(redis_sub_element, 'host')
host_sub_element.text = str(
redis_config.get('host', 'localhost'))
port_sub_element = XML.SubElement(redis_sub_element, 'port')
port_sub_element.text = str(redis_config.get('port', '6379'))
database_numb_sub_element = XML.SubElement(redis_sub_element, 'numb')
database_numb_sub_element.text = \
str(redis_config.get('database-number', '0'))
database_pass_sub_element = XML.SubElement(redis_sub_element, 'pass')
database_pass_sub_element.text = \
str(redis_config.get('database-password', ''))
data_type_sub_element = XML.SubElement(redis_sub_element, 'dataType')
data_type_sub_element.text = \
str(redis_config.get('data-type', 'list'))
key_sub_element = XML.SubElement(redis_sub_element, 'key')
key_sub_element.text = str(redis_config.get('key', 'logstash'))
def mongo_db(parser, xml_parent, data):
"""yaml: mongo-db build wrapper
    Initializes a MongoDB database while running the build.
Requires the Jenkins :jenkins-wiki:`MongoDB plugin <MongoDB+Plugin>`.
:arg str name: The name of the MongoDB install to use
:arg str data-directory: Data directory for the server (optional)
:arg int port: Port for the server (optional)
:arg str startup-params: Startup parameters for the server (optional)
:arg int start-timeout: How long to wait for the server to start in
milliseconds. 0 means no timeout. (default '0')
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/mongo-db001.yaml
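    An illustrative sketch (the install name and data directory are
    placeholders, and the wrapper is assumed to be registered as
    ``mongo-db``)::

        wrappers:
          - mongo-db:
              name: My MongoDB
              data-directory: /tmp/mongodb-data
              port: 27017
              start-timeout: 60000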
"""
mongodb = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.mongodb.'
'MongoBuildWrapper')
mongodb.set('plugin', 'mongodb')
if not str(data.get('name', '')):
raise JenkinsJobsException('The mongo install name must be specified.')
XML.SubElement(mongodb, 'mongodbName').text = str(data.get('name', ''))
XML.SubElement(mongodb, 'port').text = str(data.get('port', ''))
XML.SubElement(mongodb, 'dbpath').text = str(data.get(
'data-directory', ''))
XML.SubElement(mongodb, 'parameters').text = str(data.get(
'startup-params', ''))
XML.SubElement(mongodb, 'startTimeout').text = str(data.get(
'start-timeout', '0'))
def delivery_pipeline(parser, xml_parent, data):
"""yaml: delivery-pipeline
If enabled the job will create a version based on the template.
The version will be set to the environment variable PIPELINE_VERSION and
will also be set in the downstream jobs.
Requires the Jenkins :jenkins-wiki:`Delivery Pipeline Plugin
<Delivery+Pipeline+Plugin>`.
:arg str version-template: Template for generated version e.g
1.0.${BUILD_NUMBER} (default: '')
:arg bool set-display-name: Set the generated version as the display name
for the build (default: false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/delivery-pipeline1.yaml
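    An illustrative sketch using the template format from the description
    above::

        wrappers:
          - delivery-pipeline:
              version-template: 1.0.${BUILD_NUMBER}
              set-display-name: true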
"""
pvc = XML.SubElement(xml_parent,
'se.diabol.jenkins.pipeline.'
'PipelineVersionContributor')
XML.SubElement(pvc, 'versionTemplate').text = data.get(
'version-template', '')
XML.SubElement(pvc, 'updateDisplayName').text = str(data.get(
'set-display-name', False)).lower()
def matrix_tie_parent(parser, xml_parent, data):
"""yaml: matrix-tie-parent
Tie parent to a node.
Requires the Jenkins :jenkins-wiki:`Matrix Tie Parent Plugin
<Matrix+Tie+Parent+Plugin>`.
Note that from Jenkins version 1.532 this plugin's functionality is
available under the "advanced" option of the matrix project configuration.
You can use the top level ``node`` parameter to control where the parent
job is tied in Jenkins 1.532 and higher.
:arg str node: Name of the node.
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/matrix-tie-parent.yaml
"""
mtp = XML.SubElement(xml_parent, 'matrixtieparent.BuildWrapperMtp')
XML.SubElement(mtp, 'labelName').text = data['node']
def exclusion(parser, xml_parent, data):
"""yaml: exclusion
Add a resource to use for critical sections to establish a mutex on. If
another job specifies the same resource, the second job will wait for the
blocked resource to become available.
Requires the Jenkins :jenkins-wiki:`Exclusion Plugin <Exclusion-Plugin>`.
:arg list resources: List of resources to add for exclusion
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/exclusion002.yaml
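    An illustrative sketch (resource names are placeholders)::

        wrappers:
          - exclusion:
              resources:
                - myresource1
                - myresource2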
"""
exl = XML.SubElement(xml_parent,
'org.jvnet.hudson.plugins.exclusion.IdAllocator')
exl.set('plugin', 'Exclusion')
ids = XML.SubElement(exl, 'ids')
resources = data.get('resources', [])
for resource in resources:
dit = \
XML.SubElement(ids,
'org.jvnet.hudson.plugins.exclusion.DefaultIdType')
XML.SubElement(dit, 'name').text = str(resource).upper()
def ssh_agent_credentials(parser, xml_parent, data):
"""yaml: ssh-agent-credentials
Sets up the user for the ssh agent plugin for jenkins.
Requires the Jenkins :jenkins-wiki:`SSH-Agent Plugin <SSH+Agent+Plugin>`.
:arg list users: A list of Jenkins users credential IDs (required)
:arg str user: The user id of the jenkins user credentials (deprecated)
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials002.yaml
    If both **users** and **user** parameters are specified, **users** will be
    preferred and **user** will be ignored.
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials003.yaml
    A **users** list with a single value is equivalent to **user**. In this
    case the old-style XML will be generated. Use this format if you use
    SSH-Agent plugin < 1.5.
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials004.yaml
equals to:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials001.yaml
"""
logger = logging.getLogger(__name__)
entry_xml = XML.SubElement(
xml_parent,
'com.cloudbees.jenkins.plugins.sshagent.SSHAgentBuildWrapper')
xml_key = 'user'
user_list = list()
if 'users' in data:
user_list += data['users']
if len(user_list) > 1:
entry_xml = XML.SubElement(entry_xml, 'credentialIds')
xml_key = 'string'
if 'user' in data:
logger.warn("Both 'users' and 'user' parameters specified for "
"ssh-agent-credentials. 'users' is used, 'user' is "
"ignored.")
elif 'user' in data:
logger.warn("The 'user' param has been deprecated, "
"use the 'users' param instead.")
user_list.append(data['user'])
else:
raise JenkinsJobsException("Missing 'user' or 'users' parameter "
"for ssh-agent-credentials")
for user in user_list:
XML.SubElement(entry_xml, xml_key).text = user
def credentials_binding(parser, xml_parent, data):
"""yaml: credentials-binding
Binds credentials to environment variables using the credentials binding
plugin for jenkins.
Requires the Jenkins :jenkins-wiki:`Credentials Binding Plugin
<Credentials+Binding+Plugin>` version 1.1 or greater.
    :arg list binding-type: List of bindings to create. Bindings may be\
of type `zip-file`, `file`, `username-password`,\
or `text`
:Parameters: * **credential-id** (`str`) UUID of the credential being\
referenced
* **variable** (`str`) Environment variable where the\
credential will be stored
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/credentials_binding.yaml
:language: yaml
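    An illustrative sketch (the credential id and variable name below are
    placeholders)::

        wrappers:
          - credentials-binding:
              - username-password:
                  credential-id: some-credential-uuid
                  variable: DEPLOY_CREDENTIALS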
"""
entry_xml = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.credentialsbinding.impl.SecretBuildWrapper')
bindings_xml = XML.SubElement(entry_xml, 'bindings')
binding_types = {
'zip-file': 'org.jenkinsci.plugins.credentialsbinding.impl.'
'ZipFileBinding',
'file': 'org.jenkinsci.plugins.credentialsbinding.impl.FileBinding',
'username-password': 'org.jenkinsci.plugins.credentialsbinding.impl.'
'UsernamePasswordBinding',
'text': 'org.jenkinsci.plugins.credentialsbinding.impl.StringBinding'
}
if not data:
raise JenkinsJobsException('At least one binding-type must be '
'specified for the credentials-binding '
'element')
for binding in data:
for binding_type, params in binding.items():
if binding_type not in binding_types.keys():
raise JenkinsJobsException('binding-type must be one of %r' %
binding_types.keys())
binding_xml = XML.SubElement(bindings_xml,
binding_types[binding_type])
variable_xml = XML.SubElement(binding_xml, 'variable')
variable_xml.text = params.get('variable')
credential_xml = XML.SubElement(binding_xml, 'credentialsId')
credential_xml.text = params.get('credential-id')
def custom_tools(parser, xml_parent, data):
"""yaml: custom-tools
Requires the Jenkins :jenkins-wiki:`Custom Tools Plugin
<Custom+Tools+Plugin>`.
:arg list tools: List of custom tools to add
(optional)
:arg bool skip-master-install: skips the install in top level matrix job
(default 'false')
:arg bool convert-homes-to-upper: Converts the home env vars to uppercase
(default 'false')
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/custom-tools001.yaml
"""
base = 'com.cloudbees.jenkins.plugins.customtools'
wrapper = XML.SubElement(xml_parent,
base + ".CustomToolInstallWrapper")
wrapper_tools = XML.SubElement(wrapper, 'selectedTools')
tools = data.get('tools', [])
tool_node = base + '.CustomToolInstallWrapper_-SelectedTool'
for tool in tools:
tool_wrapper = XML.SubElement(wrapper_tools, tool_node)
XML.SubElement(tool_wrapper, 'name').text = str(tool)
opts = XML.SubElement(wrapper,
'multiconfigOptions')
skip_install = str(data.get('skip-master-install', 'false'))
XML.SubElement(opts,
'skipMasterInstallation').text = skip_install
convert_home = str(data.get('convert-homes-to-upper', 'false'))
XML.SubElement(wrapper,
'convertHomesToUppercase').text = convert_home
def xvnc(parser, xml_parent, data):
"""yaml: xvnc
Enable xvnc during the build.
Requires the Jenkins :jenkins-wiki:`xvnc plugin <Xvnc+Plugin>`.
:arg bool screenshot: Take screenshot upon build completion
(default: false)
:arg bool xauthority: Create a dedicated Xauthority file per build
(default: true)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/xvnc001.yaml
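    An illustrative sketch showing the two documented options::

        wrappers:
          - xvnc:
              screenshot: true
              xauthority: true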
"""
xwrapper = XML.SubElement(xml_parent,
'hudson.plugins.xvnc.Xvnc')
XML.SubElement(xwrapper, 'takeScreenshot').text = str(
data.get('screenshot', False)).lower()
XML.SubElement(xwrapper, 'useXauthority').text = str(
data.get('xauthority', True)).lower()
def job_log_logger(parser, xml_parent, data):
"""yaml: job-log-logger
Enable writing the job log to the underlying logging system.
Requires the Jenkins :jenkins-wiki:`Job Log Logger plugin
<Job+Log+Logger+Plugin>`.
:arg bool suppress-empty: Suppress empty log messages
(default: true)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/job-log-logger001.yaml
"""
top = XML.SubElement(xml_parent,
'org.jenkins.ci.plugins.jobloglogger.'
'JobLogLoggerBuildWrapper')
XML.SubElement(top, 'suppressEmpty').text = str(
data.get('suppress-empty', True)).lower()
def xvfb(parser, xml_parent, data):
"""yaml: xvfb
Enable xvfb during the build.
Requires the Jenkins :jenkins-wiki:`Xvfb Plugin <Xvfb+Plugin>`.
    :arg str installation-name: The name of the Xvfb tool installation
(default: default)
:arg bool auto-display-name: Uses the -displayfd option of Xvfb by which it
      chooses its own display name
(default: false)
:arg str display-name: Ordinal of the display Xvfb will be running on, if
      left empty it is chosen based on the current build executor
number (optional)
:arg str assigned-labels: If you want to start Xvfb only on specific nodes
specify its name or label (optional)
:arg bool parallel-build: When running multiple Jenkins nodes on the same
machine this setting influences the display
number generation (default: false)
:arg int timeout: A timeout of given seconds to wait before returning
control to the job (default: 0)
:arg str screen: Resolution and color depth. (default: 1024x768x24)
:arg str display-name-offset: Offset for display names. (default: 1)
:arg str additional-options: Additional options to be added with the
options above to the Xvfb command line
(optional)
:arg bool debug: If Xvfb output should appear in console log of this job
(default: false)
:arg bool shutdown-with-build: Should the display be kept until the whole
job ends (default: false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/xvfb001.yaml
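    An illustrative sketch using a few of the documented options (the
    installation name must match an Xvfb tool configured in Jenkins;
    values here are placeholders)::

        wrappers:
          - xvfb:
              installation-name: default
              screen: 1280x1024x24
              timeout: 20
              debug: true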
"""
xwrapper = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.xvfb.XvfbBuildWrapper')
XML.SubElement(xwrapper, 'installationName').text = str(data.get(
'installation-name', 'default'))
XML.SubElement(xwrapper, 'autoDisplayName').text = str(data.get(
'auto-display-name', False)).lower()
if 'display-name' in data:
XML.SubElement(xwrapper, 'displayName').text = str(data.get(
'display-name', ''))
XML.SubElement(xwrapper, 'assignedLabels').text = str(data.get(
'assigned-labels', ''))
XML.SubElement(xwrapper, 'parallelBuild').text = str(data.get(
'parallel-build', False)).lower()
XML.SubElement(xwrapper, 'timeout').text = str(data.get('timeout', '0'))
XML.SubElement(xwrapper, 'screen').text = str(data.get(
'screen', '1024x768x24'))
XML.SubElement(xwrapper, 'displayNameOffset').text = str(data.get(
'display-name-offset', '1'))
XML.SubElement(xwrapper, 'additionalOptions').text = str(data.get(
'additional-options', ''))
XML.SubElement(xwrapper, 'debug').text = str(data.get(
'debug', False)).lower()
XML.SubElement(xwrapper, 'shutdownWithBuild').text = str(data.get(
'shutdown-with-build', False)).lower()
class Wrappers(jenkins_jobs.modules.base.Base):
sequence = 80
component_type = 'wrapper'
component_list_type = 'wrappers'
def gen_xml(self, parser, xml_parent, data):
wrappers = XML.SubElement(xml_parent, 'buildWrappers')
for wrap in data.get('wrappers', []):
self.registry.dispatch('wrapper', parser, wrappers, wrap)
| apache-2.0 | 8,760,421,605,870,689,000 | 37.241065 | 109 | 0.617464 | false |
xubenben/scikit-learn | sklearn/linear_model/ridge.py | 25 | 39394 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.
        The 'cholesky', 'sparse_cg' and 'lsqr' solvers support both dense and
        sparse data; 'svd' currently supports dense data only.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.
        The 'cholesky', 'sparse_cg' and 'lsqr' solvers support both dense and
        sparse data; 'svd' currently supports dense data only.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
        Solver to use in the computational routines. 'svd' uses a Singular
        Value Decomposition of X to obtain the solution, 'cholesky' uses the
        standard scipy.linalg.solve function, 'sparse_cg' uses the conjugate
        gradient solver as found in scipy.sparse.linalg.cg, 'lsqr' uses the
        dedicated (iterative) regularized least-squares routine
        scipy.sparse.linalg.lsqr, while 'auto' will choose the most
        appropriate solver depending on the matrix X.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
    looe = y - loov = c / diag(G)
    (using I - KG = alpha*G, hence y - KGy = alpha*c and
    diag(I - KG) = alpha*diag(G))
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
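        # the squared singular values of X are the eigenvalues of K = X X^T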
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
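        # with the thin SVD X = U diag(s) V^T (and v = s ** 2),
        # G = (X X^T + alpha*I)^-1 = U diag(w) U^T + (1/alpha) * I,
        # hence the extra (alpha ** -1) terms added to c and G_diag below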
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
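        # with_sw is non-zero only when sample_weight is array-like
        # (np.shape of None or of a scalar is an empty tuple)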
with_sw = len(np.shape(sample_weight))
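        # 'eigen' works on the n_samples x n_samples kernel matrix and
        # handles sparse X and sample weights; 'svd' needs dense X and is
        # cheaper when n_samples >= n_features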
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
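        # one column per candidate alpha: squared leave-one-out errors when
        # no scorer is given, otherwise leave-one-out predictions; C collects
        # the dual coefficients obtained for each alpha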
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
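        # recover the primal coefficients from the dual ones (w = X^T c)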
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause | 9,062,958,117,623,075,000 | 35.108158 | 80 | 0.587805 | false |
clobrano/personfinder | app/unidecode/x070.py | 252 | 4693 | data = (
'You ', # 0x00
'Yang ', # 0x01
'Lu ', # 0x02
'Si ', # 0x03
'Jie ', # 0x04
'Ying ', # 0x05
'Du ', # 0x06
'Wang ', # 0x07
'Hui ', # 0x08
'Xie ', # 0x09
'Pan ', # 0x0a
'Shen ', # 0x0b
'Biao ', # 0x0c
'Chan ', # 0x0d
'Mo ', # 0x0e
'Liu ', # 0x0f
'Jian ', # 0x10
'Pu ', # 0x11
'Se ', # 0x12
'Cheng ', # 0x13
'Gu ', # 0x14
'Bin ', # 0x15
'Huo ', # 0x16
'Xian ', # 0x17
'Lu ', # 0x18
'Qin ', # 0x19
'Han ', # 0x1a
'Ying ', # 0x1b
'Yong ', # 0x1c
'Li ', # 0x1d
'Jing ', # 0x1e
'Xiao ', # 0x1f
'Ying ', # 0x20
'Sui ', # 0x21
'Wei ', # 0x22
'Xie ', # 0x23
'Huai ', # 0x24
'Hao ', # 0x25
'Zhu ', # 0x26
'Long ', # 0x27
'Lai ', # 0x28
'Dui ', # 0x29
'Fan ', # 0x2a
'Hu ', # 0x2b
'Lai ', # 0x2c
'[?] ', # 0x2d
'[?] ', # 0x2e
'Ying ', # 0x2f
'Mi ', # 0x30
'Ji ', # 0x31
'Lian ', # 0x32
'Jian ', # 0x33
'Ying ', # 0x34
'Fen ', # 0x35
'Lin ', # 0x36
'Yi ', # 0x37
'Jian ', # 0x38
'Yue ', # 0x39
'Chan ', # 0x3a
'Dai ', # 0x3b
'Rang ', # 0x3c
'Jian ', # 0x3d
'Lan ', # 0x3e
'Fan ', # 0x3f
'Shuang ', # 0x40
'Yuan ', # 0x41
'Zhuo ', # 0x42
'Feng ', # 0x43
'She ', # 0x44
'Lei ', # 0x45
'Lan ', # 0x46
'Cong ', # 0x47
'Qu ', # 0x48
'Yong ', # 0x49
'Qian ', # 0x4a
'Fa ', # 0x4b
'Guan ', # 0x4c
'Que ', # 0x4d
'Yan ', # 0x4e
'Hao ', # 0x4f
'Hyeng ', # 0x50
'Sa ', # 0x51
'Zan ', # 0x52
'Luan ', # 0x53
'Yan ', # 0x54
'Li ', # 0x55
'Mi ', # 0x56
'Shan ', # 0x57
'Tan ', # 0x58
'Dang ', # 0x59
'Jiao ', # 0x5a
'Chan ', # 0x5b
'[?] ', # 0x5c
'Hao ', # 0x5d
'Ba ', # 0x5e
'Zhu ', # 0x5f
'Lan ', # 0x60
'Lan ', # 0x61
'Nang ', # 0x62
'Wan ', # 0x63
'Luan ', # 0x64
'Xun ', # 0x65
'Xian ', # 0x66
'Yan ', # 0x67
'Gan ', # 0x68
'Yan ', # 0x69
'Yu ', # 0x6a
'Huo ', # 0x6b
'Si ', # 0x6c
'Mie ', # 0x6d
'Guang ', # 0x6e
'Deng ', # 0x6f
'Hui ', # 0x70
'Xiao ', # 0x71
'Xiao ', # 0x72
'Hu ', # 0x73
'Hong ', # 0x74
'Ling ', # 0x75
'Zao ', # 0x76
'Zhuan ', # 0x77
'Jiu ', # 0x78
'Zha ', # 0x79
'Xie ', # 0x7a
'Chi ', # 0x7b
'Zhuo ', # 0x7c
'Zai ', # 0x7d
'Zai ', # 0x7e
'Can ', # 0x7f
'Yang ', # 0x80
'Qi ', # 0x81
'Zhong ', # 0x82
'Fen ', # 0x83
'Niu ', # 0x84
'Jiong ', # 0x85
'Wen ', # 0x86
'Po ', # 0x87
'Yi ', # 0x88
'Lu ', # 0x89
'Chui ', # 0x8a
'Pi ', # 0x8b
'Kai ', # 0x8c
'Pan ', # 0x8d
'Yan ', # 0x8e
'Kai ', # 0x8f
'Pang ', # 0x90
'Mu ', # 0x91
'Chao ', # 0x92
'Liao ', # 0x93
'Gui ', # 0x94
'Kang ', # 0x95
'Tun ', # 0x96
'Guang ', # 0x97
'Xin ', # 0x98
'Zhi ', # 0x99
'Guang ', # 0x9a
'Guang ', # 0x9b
'Wei ', # 0x9c
'Qiang ', # 0x9d
'[?] ', # 0x9e
'Da ', # 0x9f
'Xia ', # 0xa0
'Zheng ', # 0xa1
'Zhu ', # 0xa2
'Ke ', # 0xa3
'Zhao ', # 0xa4
'Fu ', # 0xa5
'Ba ', # 0xa6
'Duo ', # 0xa7
'Duo ', # 0xa8
'Ling ', # 0xa9
'Zhuo ', # 0xaa
'Xuan ', # 0xab
'Ju ', # 0xac
'Tan ', # 0xad
'Pao ', # 0xae
'Jiong ', # 0xaf
'Pao ', # 0xb0
'Tai ', # 0xb1
'Tai ', # 0xb2
'Bing ', # 0xb3
'Yang ', # 0xb4
'Tong ', # 0xb5
'Han ', # 0xb6
'Zhu ', # 0xb7
'Zha ', # 0xb8
'Dian ', # 0xb9
'Wei ', # 0xba
'Shi ', # 0xbb
'Lian ', # 0xbc
'Chi ', # 0xbd
'Huang ', # 0xbe
'[?] ', # 0xbf
'Hu ', # 0xc0
'Shuo ', # 0xc1
'Lan ', # 0xc2
'Jing ', # 0xc3
'Jiao ', # 0xc4
'Xu ', # 0xc5
'Xing ', # 0xc6
'Quan ', # 0xc7
'Lie ', # 0xc8
'Huan ', # 0xc9
'Yang ', # 0xca
'Xiao ', # 0xcb
'Xiu ', # 0xcc
'Xian ', # 0xcd
'Yin ', # 0xce
'Wu ', # 0xcf
'Zhou ', # 0xd0
'Yao ', # 0xd1
'Shi ', # 0xd2
'Wei ', # 0xd3
'Tong ', # 0xd4
'Xue ', # 0xd5
'Zai ', # 0xd6
'Kai ', # 0xd7
'Hong ', # 0xd8
'Luo ', # 0xd9
'Xia ', # 0xda
'Zhu ', # 0xdb
'Xuan ', # 0xdc
'Zheng ', # 0xdd
'Po ', # 0xde
'Yan ', # 0xdf
'Hui ', # 0xe0
'Guang ', # 0xe1
'Zhe ', # 0xe2
'Hui ', # 0xe3
'Kao ', # 0xe4
'[?] ', # 0xe5
'Fan ', # 0xe6
'Shao ', # 0xe7
'Ye ', # 0xe8
'Hui ', # 0xe9
'[?] ', # 0xea
'Tang ', # 0xeb
'Jin ', # 0xec
'Re ', # 0xed
'[?] ', # 0xee
'Xi ', # 0xef
'Fu ', # 0xf0
'Jiong ', # 0xf1
'Che ', # 0xf2
'Pu ', # 0xf3
'Jing ', # 0xf4
'Zhuo ', # 0xf5
'Ting ', # 0xf6
'Wan ', # 0xf7
'Hai ', # 0xf8
'Peng ', # 0xf9
'Lang ', # 0xfa
'Shan ', # 0xfb
'Hu ', # 0xfc
'Feng ', # 0xfd
'Chi ', # 0xfe
'Rong ', # 0xff
)
| apache-2.0 | -2,989,366,622,680,537,600 | 17.189922 | 20 | 0.393352 | false |
Elbagoury/odoo | addons/event/event.py | 56 | 19486 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import timedelta
import pytz
from openerp import models, fields, api, _
from openerp.exceptions import AccessError, Warning
class event_type(models.Model):
""" Event Type """
_name = 'event.type'
_description = 'Event Type'
name = fields.Char(string='Event Type', required=True)
default_reply_to = fields.Char(string='Default Reply-To',
help="The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. You can also put your email address of your mail gateway if you use one.")
default_email_event = fields.Many2one('email.template', string='Event Confirmation Email',
help="It will select this default confirmation event mail value when you choose this event")
default_email_registration = fields.Many2one('email.template', string='Registration Confirmation Email',
help="It will select this default confirmation registration mail value when you choose this event")
default_registration_min = fields.Integer(string='Default Minimum Registration', default=0,
help="It will select this default minimum value when you choose this event")
default_registration_max = fields.Integer(string='Default Maximum Registration', default=0,
help="It will select this default maximum value when you choose this event")
class event_event(models.Model):
"""Event"""
_name = 'event.event'
_description = 'Event'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_order = 'date_begin'
name = fields.Char(string='Event Name', translate=True, required=True,
readonly=False, states={'done': [('readonly', True)]})
user_id = fields.Many2one('res.users', string='Responsible User',
default=lambda self: self.env.user,
readonly=False, states={'done': [('readonly', True)]})
type = fields.Many2one('event.type', string='Type of Event',
readonly=False, states={'done': [('readonly', True)]})
seats_max = fields.Integer(string='Maximum Available Seats', oldname='register_max',
readonly=True, states={'draft': [('readonly', False)]},
help="You can for each event define a maximum registration level. If you have too much registrations you are not able to confirm your event. (put 0 to ignore this rule )")
seats_min = fields.Integer(string='Minimum Reserved Seats', oldname='register_min',
readonly=True, states={'draft': [('readonly', False)]},
help="You can for each event define a minimum registration level. If you do not enough registrations you are not able to confirm your event. (put 0 to ignore this rule )")
seats_reserved = fields.Integer(oldname='register_current', string='Reserved Seats',
store=True, readonly=True, compute='_compute_seats')
seats_available = fields.Integer(oldname='register_avail', string='Available Seats',
store=True, readonly=True, compute='_compute_seats')
seats_unconfirmed = fields.Integer(oldname='register_prospect', string='Unconfirmed Seat Reservations',
store=True, readonly=True, compute='_compute_seats')
seats_used = fields.Integer(oldname='register_attended', string='Number of Participations',
store=True, readonly=True, compute='_compute_seats')
@api.multi
@api.depends('seats_max', 'registration_ids.state', 'registration_ids.nb_register')
def _compute_seats(self):
""" Determine reserved, available, reserved but unconfirmed and used seats. """
# initialize fields to 0
for event in self:
event.seats_unconfirmed = event.seats_reserved = event.seats_used = 0
# aggregate registrations by event and by state
if self.ids:
state_field = {
'draft': 'seats_unconfirmed',
                'open': 'seats_reserved',
'done': 'seats_used',
}
query = """ SELECT event_id, state, sum(nb_register)
FROM event_registration
WHERE event_id IN %s AND state IN ('draft', 'open', 'done')
GROUP BY event_id, state
"""
self._cr.execute(query, (tuple(self.ids),))
for event_id, state, num in self._cr.fetchall():
event = self.browse(event_id)
event[state_field[state]] += num
# compute seats_available
for event in self:
event.seats_available = \
event.seats_max - (event.seats_reserved + event.seats_used) \
if event.seats_max > 0 else 0
registration_ids = fields.One2many('event.registration', 'event_id', string='Registrations',
readonly=False, states={'done': [('readonly', True)]})
count_registrations = fields.Integer(string='Registrations',
compute='_count_registrations')
date_begin = fields.Datetime(string='Start Date', required=True,
readonly=True, states={'draft': [('readonly', False)]})
date_end = fields.Datetime(string='End Date', required=True,
readonly=True, states={'draft': [('readonly', False)]})
@api.model
def _tz_get(self):
return [(x, x) for x in pytz.all_timezones]
date_tz = fields.Selection('_tz_get', string='Timezone',
default=lambda self: self._context.get('tz', 'UTC'))
@api.one
@api.depends('date_tz', 'date_begin')
def _compute_date_begin_tz(self):
if self.date_begin:
self_in_tz = self.with_context(tz=(self.date_tz or 'UTC'))
date_begin = fields.Datetime.from_string(self.date_begin)
self.date_begin_located = fields.Datetime.to_string(fields.Datetime.context_timestamp(self_in_tz, date_begin))
else:
self.date_begin_located = False
@api.one
@api.depends('date_tz', 'date_end')
def _compute_date_end_tz(self):
if self.date_end:
self_in_tz = self.with_context(tz=(self.date_tz or 'UTC'))
date_end = fields.Datetime.from_string(self.date_end)
self.date_end_located = fields.Datetime.to_string(fields.Datetime.context_timestamp(self_in_tz, date_end))
else:
self.date_end_located = False
@api.one
@api.depends('address_id')
def _compute_country(self):
self.country_id = self.address_id.country_id
date_begin_located = fields.Datetime(string='Start Date Located', compute='_compute_date_begin_tz')
date_end_located = fields.Datetime(string='End Date Located', compute='_compute_date_end_tz')
state = fields.Selection([
('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('confirm', 'Confirmed'),
('done', 'Done')
], string='Status', default='draft', readonly=True, required=True, copy=False,
help="If event is created, the status is 'Draft'. If event is confirmed for the particular dates the status is set to 'Confirmed'. If the event is over, the status is set to 'Done'. If event is cancelled the status is set to 'Cancelled'.")
email_registration_id = fields.Many2one(
'email.template', string='Registration Confirmation Email',
domain=[('model', '=', 'event.registration')],
help='This field contains the template of the mail that will be automatically sent each time a registration for this event is confirmed.')
email_confirmation_id = fields.Many2one(
'email.template', string='Event Confirmation Email',
domain=[('model', '=', 'event.registration')],
help="If you set an email template, each participant will receive this email announcing the confirmation of the event.")
reply_to = fields.Char(string='Reply-To Email',
readonly=False, states={'done': [('readonly', True)]},
help="The email address of the organizer is likely to be put here, with the effect to be in the 'Reply-To' of the mails sent automatically at event or registrations confirmation. You can also put the email address of your mail gateway if you use one.")
address_id = fields.Many2one('res.partner', string='Location',
default=lambda self: self.env.user.company_id.partner_id,
readonly=False, states={'done': [('readonly', True)]})
country_id = fields.Many2one('res.country', string='Country',
store=True, compute='_compute_country')
description = fields.Html(string='Description', oldname='note', translate=True,
readonly=False, states={'done': [('readonly', True)]})
company_id = fields.Many2one('res.company', string='Company', change_default=True,
default=lambda self: self.env['res.company']._company_default_get('event.event'),
required=False, readonly=False, states={'done': [('readonly', True)]})
organizer_id = fields.Many2one('res.partner', string='Organizer',
default=lambda self: self.env.user.company_id.partner_id)
is_subscribed = fields.Boolean(string='Subscribed',
compute='_compute_subscribe')
@api.one
@api.depends('registration_ids')
def _count_registrations(self):
self.count_registrations = len(self.registration_ids)
@api.one
@api.depends('registration_ids.user_id', 'registration_ids.state')
def _compute_subscribe(self):
""" Determine whether the current user is already subscribed to any event in `self` """
user = self.env.user
self.is_subscribed = any(
reg.user_id == user and reg.state in ('open', 'done')
for reg in self.registration_ids
)
@api.multi
@api.depends('name', 'date_begin', 'date_end')
def name_get(self):
result = []
for event in self:
date_begin = fields.Datetime.from_string(event.date_begin)
date_end = fields.Datetime.from_string(event.date_end)
dates = [fields.Date.to_string(fields.Datetime.context_timestamp(event, dt)) for dt in [date_begin, date_end] if dt]
dates = sorted(set(dates))
result.append((event.id, '%s (%s)' % (event.name, ' - '.join(dates))))
return result
@api.one
@api.constrains('seats_max', 'seats_available')
def _check_seats_limit(self):
if self.seats_max and self.seats_available < 0:
raise Warning(_('No more available seats.'))
@api.one
@api.constrains('date_begin', 'date_end')
def _check_closing_date(self):
if self.date_end < self.date_begin:
raise Warning(_('Closing Date cannot be set before Beginning Date.'))
@api.one
def button_draft(self):
self.state = 'draft'
@api.one
def button_cancel(self):
for event_reg in self.registration_ids:
if event_reg.state == 'done':
raise Warning(_("You have already set a registration for this event as 'Attended'. Please reset it to draft if you want to cancel this event."))
self.registration_ids.write({'state': 'cancel'})
self.state = 'cancel'
@api.one
def button_done(self):
self.state = 'done'
@api.one
def confirm_event(self):
if self.email_confirmation_id:
# send reminder that will confirm the event for all the people that were already confirmed
regs = self.registration_ids.filtered(lambda reg: reg.state not in ('draft', 'cancel'))
regs.mail_user_confirm()
self.state = 'confirm'
@api.one
def button_confirm(self):
""" Confirm Event and send confirmation email to all register peoples """
self.confirm_event()
@api.one
def subscribe_to_event(self):
""" Subscribe the current user to a given event """
user = self.env.user
num_of_seats = int(self._context.get('ticket', 1))
regs = self.registration_ids.filtered(lambda reg: reg.user_id == user)
# the subscription is done as SUPERUSER_ID because in case we share the
# kanban view, we want anyone to be able to subscribe
if not regs:
regs = regs.sudo().create({
'event_id': self.id,
'email': user.email,
                'name': user.name,
'user_id': user.id,
'nb_register': num_of_seats,
})
else:
regs.write({'nb_register': num_of_seats})
regs.sudo().confirm_registration()
@api.one
def unsubscribe_to_event(self):
""" Unsubscribe the current user from a given event """
# the unsubscription is done as SUPERUSER_ID because in case we share
# the kanban view, we want anyone to be able to unsubscribe
user = self.env.user
regs = self.sudo().registration_ids.filtered(lambda reg: reg.user_id == user)
regs.button_reg_cancel()
@api.onchange('type')
def _onchange_type(self):
if self.type:
self.reply_to = self.type.default_reply_to
self.email_registration_id = self.type.default_email_registration
self.email_confirmation_id = self.type.default_email_event
self.seats_min = self.type.default_registration_min
self.seats_max = self.type.default_registration_max
@api.onchange('date_begin')
def _onchange_date_begin(self):
if self.date_begin and not self.date_end:
date_begin = fields.Datetime.from_string(self.date_begin)
self.date_end = fields.Datetime.to_string(date_begin + timedelta(hours=1))
class event_registration(models.Model):
_name = 'event.registration'
_description = 'Event Registration'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_order = 'name, create_date desc'
origin = fields.Char(string='Source Document', readonly=True,
help="Reference of the sales order which created the registration")
nb_register = fields.Integer(string='Number of Participants', required=True, default=1,
readonly=True, states={'draft': [('readonly', False)]})
event_id = fields.Many2one('event.event', string='Event', required=True,
readonly=True, states={'draft': [('readonly', False)]})
partner_id = fields.Many2one('res.partner', string='Partner',
states={'done': [('readonly', True)]})
date_open = fields.Datetime(string='Registration Date', readonly=True)
date_closed = fields.Datetime(string='Attended Date', readonly=True)
reply_to = fields.Char(string='Reply-to Email', related='event_id.reply_to',
readonly=True)
log_ids = fields.One2many('mail.message', 'res_id', string='Logs',
domain=[('model', '=', _name)])
event_begin_date = fields.Datetime(string="Event Start Date", related='event_id.date_begin',
readonly=True)
event_end_date = fields.Datetime(string="Event End Date", related='event_id.date_end',
readonly=True)
user_id = fields.Many2one('res.users', string='User', states={'done': [('readonly', True)]})
company_id = fields.Many2one('res.company', string='Company', related='event_id.company_id',
store=True, readonly=True, states={'draft':[('readonly', False)]})
state = fields.Selection([
('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('open', 'Confirmed'),
('done', 'Attended'),
], string='Status', default='draft', readonly=True, copy=False)
email = fields.Char(string='Email')
phone = fields.Char(string='Phone')
name = fields.Char(string='Name', select=True)
@api.one
@api.constrains('event_id', 'state', 'nb_register')
def _check_seats_limit(self):
if self.event_id.seats_max and \
self.event_id.seats_available < (self.nb_register if self.state == 'draft' else 0):
raise Warning(_('No more available seats.'))
@api.one
def do_draft(self):
self.state = 'draft'
@api.one
def confirm_registration(self):
self.event_id.message_post(
body=_('New registration confirmed: %s.') % (self.name or ''),
subtype="event.mt_event_registration")
self.message_post(body=_('Event Registration confirmed.'))
self.state = 'open'
@api.one
def registration_open(self):
""" Open Registration """
self.confirm_registration()
self.mail_user()
@api.one
def button_reg_close(self):
""" Close Registration """
today = fields.Datetime.now()
if self.event_id.date_begin <= today:
self.write({'state': 'done', 'date_closed': today})
else:
raise Warning(_("You must wait for the starting day of the event to do this action."))
@api.one
def button_reg_cancel(self):
self.state = 'cancel'
@api.one
def mail_user(self):
"""Send email to user with email_template when registration is done """
if self.event_id.state == 'confirm' and self.event_id.email_confirmation_id:
self.mail_user_confirm()
else:
template = self.event_id.email_registration_id
if template:
mail_message = template.send_mail(self.id)
@api.one
def mail_user_confirm(self):
"""Send email to user when the event is confirmed """
template = self.event_id.email_confirmation_id
if template:
mail_message = template.send_mail(self.id)
@api.multi
def message_get_suggested_recipients(self):
recipients = super(event_registration, self).message_get_suggested_recipients()
try:
for registration in self:
if registration.partner_id:
self._message_add_suggested_recipient(recipients, registration, partner=registration.partner_id, reason=_('Registrant'))
elif registration.email:
self._message_add_suggested_recipient(recipients, registration, email=registration.email, reason=_('Registrant Email'))
except AccessError: # no read access rights -> ignore suggested recipients
pass
return recipients
@api.onchange('partner_id')
def _onchange_partner(self):
if self.partner_id:
contact_id = self.partner_id.address_get().get('default', False)
if contact_id:
contact = self.env['res.partner'].browse(contact_id)
self.name = contact.name
self.email = contact.email
self.phone = contact.phone
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,462,281,954,116,029,000 | 46.296117 | 260 | 0.629785 | false |
sternshus/Arelle | setup.py | 1 | 17326 | """
Created on Jan 30, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
"""
import sys
import os
import datetime
from distutils.command.build_py import build_py as _build_py
def get_version():
"""
Utility function to return the current version of the library, as defined
by the version string in the arelle's _pkg_meta.py file. The format follows
the standard Major.Minor.Fix notation.
:return: The version string in the standard Major.Minor.Fix notation.
:rtype: str
"""
import imp
source_dir = 'arelle'
with open('{}/_pkg_meta.py'.format(source_dir), 'rb') as fp:
mod = imp.load_source('_pkg_meta', source_dir, fp)
return mod.version
setup_requires = ['lxml']
# install_requires specifies a list of package dependencies that are
# installed when 'python setup.py install' is run. On Linux/Mac systems
# this also allows installation directly from the github repository
# (using 'pip install -e git+git://github.com/rheimbuchArelle.git#egg=Arelle')
# and the install_requires packages are auto-installed as well.
install_requires = ['lxml']
options = {}
scripts = []
cxFreezeExecutables = []
cmdclass = {}
# Files that should not be passed through 3to2 conversion
# in python 2.7 builds
build_py27_unmodified = [
'arelle/webserver/bottle.py',
'arelle/PythonUtil.py'
]
# Files that should be excluded from python 2.7 builds
build_py27_excluded = [
'arelle/CntlrQuickBooks.py',
'arelle/CntlrWinMain.py',
'arelle/CntlrWinTooltip.py',
'arelle/Dialog*.py',
'arelle/UiUtil.py',
'arelle/ViewWin*.py',
'arelle/WatchRss.py'
]
def match_patterns(path, pattern_list=[]):
from fnmatch import fnmatch
for pattern in pattern_list:
if fnmatch(path, pattern):
return True
return False
# When building under python 2.7, run refactorings from lib3to2
class build_py27(_build_py):
def __init__(self, *args, **kwargs):
_build_py.__init__(self, *args, **kwargs)
import logging
from lib2to3 import refactor
import lib3to2.main
rt_logger = logging.getLogger("RefactoringTool")
rt_logger.addHandler(logging.StreamHandler())
fixers = refactor.get_fixers_from_package('lib3to2.fixes')
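        # keep Python 3 style print() calls; a __future__ import is written
        # into each converted file instead (see copy_file below)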
fixers.remove('lib3to2.fixes.fix_print')
self.rtool = lib3to2.main.StdoutRefactoringTool(
fixers,
None,
[],
False,
False
)
def copy_file(self, source, target, preserve_mode=True):
if match_patterns(source, build_py27_unmodified):
_build_py.copy_file(self, source, target, preserve_mode)
elif match_patterns(source, build_py27_excluded):
print("excluding: %s" % source)
elif source.endswith('.py'):
try:
print("3to2 converting: %s => %s" % (source, target))
with open(source, 'rt') as input:
# ensure file contents have trailing newline
source_content = input.read() + "\n"
nval = self.rtool.refactor_string(source_content, source)
if nval is not None:
with open(target, 'wt') as output:
output.write('from __future__ import print_function\n')
output.write(str(nval))
else:
raise(Exception("Failed to parse: %s" % source))
except Exception as e:
print("3to2 error (%s => %s): %s" % (source,target,e))
if sys.version_info[0] < 3:
setup_requires.append('3to2')
# cmdclass allows you to override the distutils commands that are
# run through 'python setup.py somecmd'. Under python 2.7 replace
# the 'build_py' with a custom subclass (build_py27) that invokes
# 3to2 refactoring on each python file as its copied to the build
# directory.
cmdclass['build_py'] = build_py27
# (Under python3 no commands are replaced, so the default command classes are used.)
try:
# Under python2.7, run build before running build_sphinx
import sphinx.setup_command
class build_sphinx_py27(sphinx.setup_command.BuildDoc):
def run(self):
self.run_command('build_py')
# Ensure sphinx looks at the "built" arelle libs that
# have passed through the 3to2 refactorings
# in `build_py27`.
sys.path.insert(0, os.path.abspath("./build/lib"))
sphinx.setup_command.BuildDoc.run(self)
if sys.version_info[0] < 3:
setup_requires.append('3to2')
setup_requires.append('sphinx')
# do a similar override of the 'build_sphinx' command to ensure
# that the 3to2-enabled build command runs before calling back to
# the default build_sphinx superclass.
cmdclass['build_sphinx'] = build_sphinx_py27
# There is also a python 2.x conditional switch in 'apidocs/conf.py'
# that sets sphinx to look at the 3to2 converted build files instead
# of the original unconverted source.
except ImportError as e:
print("Documentation production by Sphinx is not available: %s" % e)
''' this section was for py2app which no longer works on Mavericks,
switch below to cx_Freeze
if sys.platform == 'darwin':
from setuptools import setup, find_packages
setup_requires.append('py2app')
# Cross-platform applications generally expect sys.argv to
# be used for opening files.
plist = dict(CFBundleIconFile='arelle.icns',
NSHumanReadableCopyright='(c) 2010-2013 Mark V Systems Limited')
# MacOS launches CntlrWinMain and uses "ARELLE_ARGS" to effect console (shell) mode
options = dict(py2app=dict(app=['arelle/CntlrWinMain.py'],
iconfile='arelle/images/arelle.icns',
plist=plist,
#
# rdflib & isodate egg files: rename .zip cpy lib & egg-info subdirectories to site-packages directory
#
includes=['lxml', 'lxml.etree',
'lxml._elementpath', 'pg8000',
'rdflib', 'rdflib.extras', 'rdflib.tools',
# more rdflib plugin modules may need to be added later
'rdflib.plugins', 'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers', 'rdflib.plugins.serializers.rdfxml', 'rdflib.plugins.serializers.turtle', 'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib']))
packages = find_packages('.')
dataFiles = [
#XXX: this breaks build on Lion/Py3.2 --mike
#'--iconfile',
('config',['arelle/config/' + f for f in os.listdir('arelle/config')]),
('doc',['arelle/doc/' + f for f in os.listdir('arelle/doc')]),
('examples',['arelle/examples/' + f for f in os.listdir('arelle/examples')]),
('images',['arelle/images/' + f for f in os.listdir('arelle/images')]),
('examples/plugin',['arelle/examples/plugin/' + f for f in os.listdir('arelle/examples/plugin')]),
('examples/plugin/locale/fr/LC_MESSAGES',['arelle/examples/plugin/locale/fr/LC_MESSAGES/' + f for f in os.listdir('arelle/examples/plugin/locale/fr/LC_MESSAGES')]),
('plugin',['arelle/plugin/' + f for f in os.listdir('arelle/plugin')]),
('scripts',['arelle/scripts/' + f for f in os.listdir('arelle/scripts-macOS')]),
]
for dir, subDirs, files in os.walk('arelle/locale'):
dir = dir.replace('\\','/')
dataFiles.append((dir[7:],
[dir + "/" + f for f in files]))
cx_FreezeExecutables = []
#End of py2app defunct section
'''
# works on ubuntu with hand-built cx_Freeze
if sys.platform in ('darwin', 'linux2', 'linux', 'sunos5'):
from setuptools import find_packages
try:
from cx_Freeze import setup, Executable
cx_FreezeExecutables = [
Executable(script="arelleGUI.pyw", targetName="arelle"),
Executable(script="arelleCmdLine.py")
]
except:
from setuptools import setup
cx_FreezeExecutables = []
packages = find_packages(
'.', # note that new setuptools finds plugin and lib unwanted stuff
exclude=['*.plugin.*', '*.lib.*']
)
dataFiles = []
includeFiles = [
('arelle/config','config'),
('arelle/doc','doc'),
('arelle/images','images'),
('arelle/locale','locale'),
('arelle/examples','examples'),
('arelle/examples/plugin','examples/plugin'),
(
'arelle/examples/plugin/locale/fr/LC_MESSAGES',
'examples/plugin/locale/fr/LC_MESSAGES'
),
('arelle/plugin','plugin')
]
if sys.platform == 'darwin':
includeFiles.append(('arelle/scripts-macOS','scripts'))
# copy tck and tk built as described: https://www.tcl.tk/doc/howto/compile.html#mac
includeFiles.append(('/Library/Frameworks/Tcl.framework/Versions/8.6/Resources/Scripts','tcl8.6'))
includeFiles.append(('/Library/Frameworks/Tk.framework/Versions/8.6/Resources/Scripts','tk8.6'))
else:
includeFiles.append(('arelle/scripts-unix','scripts'))
if os.path.exists("/etc/redhat-release"):
# extra libraries needed for red hat
includeFiles.append(('/usr/local/lib/libexslt.so', 'libexslt.so'))
includeFiles.append(('/usr/local/lib/libxml2.so', 'libxml2.so'))
# for some reason redhat needs libxml2.so.2 as well
includeFiles.append(('/usr/local/lib/libxml2.so.2', 'libxml2.so.2'))
includeFiles.append(('/usr/local/lib/libxslt.so', 'libxslt.so'))
includeFiles.append(('/usr/local/lib/libz.so', 'libz.so'))
if os.path.exists("version.txt"):
includeFiles.append(('version.txt', 'version.txt'))
includeLibs = [
'lxml', 'lxml.etree', 'lxml._elementpath', 'lxml.html',
'pg8000', 'pymysql', 'sqlite3', 'numpy',
# note cx_Oracle isn't here because it is version and machine specific,
# ubuntu not likely working
# more rdflib plugin modules may need to be added later
'rdflib',
'rdflib.extras',
'rdflib.tools',
'rdflib.plugins',
'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers',
'rdflib.plugins.serializers.rdfxml',
'rdflib.plugins.serializers.turtle',
'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib',
'openpyxl' # openpyxl's __init__.py must be hand edited, see https://bitbucket.org/openpyxl/openpyxl/pull-requests/80/__about__py/diff
]
# uncomment the next two files if cx_Freezing with EdgarRenderer
# note that openpyxl must be 2.1.4 at this time
if os.path.exists("arelle/plugin/EdgarRenderer"):
includeLibs += [
'cherrypy', 'cherrypy.wsgiserver.wsgiserver3',
'dateutil',
'dateutil.relativedelta',
'six',
'tornado',
'pyparsing',
'matplotlib'
]
import matplotlib
dataFiles += matplotlib.get_py2exe_datafiles()
if sys.platform != 'sunos5':
try:
import pyodbc # see if this is importable
includeLibs.append('pyodbc') # has C compiling errors on Sparc
except ImportError:
pass
options = dict(
build_exe={
"include_files": includeFiles,
#
# rdflib & isodate egg files: rename .zip cpy lib & egg-info
# subdirectories to site-packages directory
#
"includes": includeLibs,
"packages": packages,
}
)
if sys.platform == 'darwin':
options["bdist_mac"] = {
"iconfile": 'arelle/images/arelle.icns',
"bundle_name": 'Arelle',
}
elif sys.platform == 'win32':
from setuptools import find_packages
from cx_Freeze import setup, Executable
# py2exe is not ported to Python 3 yet
# setup_requires.append('py2exe')
# FIXME: this should use the entry_points mechanism
packages = find_packages('.')
print("packages={}".format(packages))
dataFiles = None
win32includeFiles = [
('arelle\\config','config'),
('arelle\\doc','doc'),
('arelle\\images','images'),
('arelle\\locale','locale'),
('arelle\\examples','examples'),
('arelle\\examples\\plugin','examples/plugin'),
(
'arelle\\examples\\plugin\\locale\\fr\\LC_MESSAGES',
'examples/plugin/locale/fr/LC_MESSAGES'
),
('arelle\\plugin','plugin'),
('arelle\\scripts-windows','scripts')
]
if 'arelle.webserver' in packages:
win32includeFiles.append('QuickBooks.qwc')
if os.path.exists("version.txt"):
win32includeFiles.append('version.txt')
includeLibs = [
'lxml', 'lxml.etree', 'lxml._elementpath', 'lxml.html',
'pg8000', 'pymysql', 'cx_Oracle', 'pyodbc', 'sqlite3', 'numpy',
# more rdflib plugin modules may need to be added later
'rdflib',
'rdflib.extras',
'rdflib.tools',
'rdflib.plugins',
'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers',
'rdflib.plugins.serializers.rdfxml',
'rdflib.plugins.serializers.turtle',
'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib',
'openpyxl' # openpyxl's __init__.py must be hand edited, see https://bitbucket.org/openpyxl/openpyxl/pull-requests/80/__about__py/diff
]
# uncomment the next line if cx_Freezing with EdgarRenderer
# note that openpyxl must be 2.1.4 at this time
# removed tornado
if os.path.exists("arelle/plugin/EdgarRenderer"):
includeLibs += [
'cherrypy', 'cherrypy.wsgiserver.wsgiserver3',
'dateutil', 'dateutil.relativedelta',
"six", "pyparsing", "matplotlib"
]
options = dict(
build_exe={
"include_files": win32includeFiles,
"include_msvcr": True, # include MSVCR100
# "icon": 'arelle\\images\\arelle16x16and32x32.ico',
"packages": packages,
#
# rdflib & isodate egg files: rename .zip cpy lib & egg-info
# subdirectories to site-packages directory
#
"includes": includeLibs
}
)
# windows uses arelleGUI.exe to launch in GUI mode, arelleCmdLine.exe in command line mode
cx_FreezeExecutables = [
Executable(
script="arelleGUI.pyw",
base="Win32GUI",
icon='arelle\\images\\arelle16x16and32x32.ico',
),
Executable(
script="arelleCmdLine.py",
)
]
else:
#print("Your platform {0} isn't supported".format(sys.platform))
#sys.exit(1)
from setuptools import os, setup, find_packages
packages = find_packages(
'.', # note that new setuptools finds plugin and lib unwanted stuff
exclude=['*.plugin.*', '*.lib.*']
)
dataFiles = [(
'config',
['arelle/config/' + f for f in os.listdir('arelle/config')]
)]
cx_FreezeExecutables = []
timestamp = datetime.datetime.utcnow()
setup(
name='Arelle',
version=get_version(),
description='An open source XBRL platform',
long_description=open('README.md').read(),
author='arelle.org',
author_email='[email protected]',
url='http://www.arelle.org',
download_url='http://www.arelle.org/download',
cmdclass=cmdclass,
include_package_data=True, # note: this uses MANIFEST.in
packages=packages,
data_files=dataFiles,
platforms=['OS Independent'],
license='Apache-2',
keywords=['xbrl'],
classifiers=[
'Development Status :: 1 - Active',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache-2 License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Operating System :: OS Independent',
'Topic :: XBRL Validation and Versioning',
],
scripts=scripts,
entry_points={
'console_scripts': [
'arelle=arelle.CntlrCmdLine:main',
'arelle-gui=arelle.CntlrWinMain:main',
]
},
setup_requires=setup_requires,
install_requires=install_requires,
options=options,
executables=cx_FreezeExecutables,
)
| apache-2.0 | 1,700,768,734,461,344,300 | 37.760626 | 185 | 0.592289 | false |
jordanemedlock/psychtruths | temboo/core/Library/SendGrid/NewsletterAPI/Newsletter/EditNewsletter.py | 5 | 5093 | # -*- coding: utf-8 -*-
###############################################################################
#
# EditNewsletter
# Edit an existing newsletter.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class EditNewsletter(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the EditNewsletter Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(EditNewsletter, self).__init__(temboo_session, '/Library/SendGrid/NewsletterAPI/Newsletter/EditNewsletter')
def new_input_set(self):
return EditNewsletterInputSet()
def _make_result_set(self, result, path):
return EditNewsletterResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return EditNewsletterChoreographyExecution(session, exec_id, path)
class EditNewsletterInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the EditNewsletter
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from SendGrid.)
"""
super(EditNewsletterInputSet, self)._set_input('APIKey', value)
def set_APIUser(self, value):
"""
Set the value of the APIUser input for this Choreo. ((required, string) The username registered with SendGrid.)
"""
super(EditNewsletterInputSet, self)._set_input('APIUser', value)
def set_HTML(self, value):
"""
Set the value of the HTML input for this Choreo. ((required, string) The html portion of the newsletter.)
"""
super(EditNewsletterInputSet, self)._set_input('HTML', value)
def set_Identity(self, value):
"""
        Set the value of the Identity input for this Choreo. ((required, string) The new identity for the newsletter that is being edited.)
"""
super(EditNewsletterInputSet, self)._set_input('Identity', value)
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((required, string) The name of the newsletter that is being edited.)
"""
super(EditNewsletterInputSet, self)._set_input('Name', value)
def set_NewName(self, value):
"""
Set the value of the NewName input for this Choreo. ((required, string) The new name of the newsletter that is being edited.)
"""
super(EditNewsletterInputSet, self)._set_input('NewName', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
"""
super(EditNewsletterInputSet, self)._set_input('ResponseFormat', value)
def set_Subject(self, value):
"""
Set the value of the Subject input for this Choreo. ((required, string) The new subject for the edited newsletter.)
"""
super(EditNewsletterInputSet, self)._set_input('Subject', value)
def set_Text(self, value):
"""
Set the value of the Text input for this Choreo. ((required, string) The text portion of the newsletter.)
"""
super(EditNewsletterInputSet, self)._set_input('Text', value)
class EditNewsletterResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the EditNewsletter Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
"""
return self._output.get('Response', None)
class EditNewsletterChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return EditNewsletterResultSet(response, path)
| apache-2.0 | -3,864,190,180,864,355,000 | 40.406504 | 179 | 0.666405 | false |
xiaotdl/ansible | lib/ansible/template/template.py | 267 | 1397 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import jinja2
__all__ = ['AnsibleJ2Template']
class AnsibleJ2Template(jinja2.environment.Template):
'''
A helper class, which prevents Jinja2 from running _jinja2_vars through dict().
Without this, {% include %} and similar will create new contexts unlike the special
one created in template_from_file. This ensures they are all alike, except for
potential locals.
'''
def new_context(self, vars=None, shared=False, locals=None):
return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks)
| gpl-3.0 | 8,045,062,527,080,849,000 | 36.756757 | 104 | 0.739442 | false |
maelnor/nova | nova/api/openstack/compute/plugins/v3/floating_ip_pools.py | 5 | 2145 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova import network
ALIAS = 'os-floating-ip-pools'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _translate_floating_ip_view(pool_name):
return {
'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
return {
'floating_ip_pools': [_translate_floating_ip_view(pool_name)
for pool_name in pools]
}
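# Illustrative sketch of the structure the two helpers above produce, e.g.
# for pools ['nova', 'other']:
#
#   {'floating_ip_pools': [{'name': 'nova'}, {'name': 'other'}]}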
class FloatingIPPoolsController(object):
"""The Floating IP Pool API controller for the OpenStack API."""
def __init__(self):
self.network_api = network.API()
super(FloatingIPPoolsController, self).__init__()
@extensions.expected_errors(())
def index(self, req):
"""Return a list of pools."""
context = req.environ['nova.context']
authorize(context)
pools = self.network_api.get_floating_ip_pools(context)
return _translate_floating_ip_pools_view(pools)
class FloatingIpPools(extensions.V3APIExtensionBase):
"""Floating IPs support."""
name = "FloatingIpPools"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
FloatingIPPoolsController())]
return resource
def get_controller_extensions(self):
"""It's an abstract function V3APIExtensionBase and the extension
will not be loaded without it.
"""
return []
| apache-2.0 | 1,603,399,602,010,664,400 | 30.544118 | 78 | 0.655478 | false |
Maple1401/awesome-python-webapp | www/markdown2.py | 27 | 92765 | #!/usr/bin/env python
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
from __future__ import generators
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extra syntax options (see -x|--extras option below and
see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
* code-friendly: Disable _ and __ for em and strong.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* fenced-code-blocks: Allows a code block to not have to be indented
by fencing it with '```' on a line before and after. Based on
<http://github.github.com/github-flavored-markdown/> with support for
syntax highlighting.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports
"pre" and "code" tags. Add an issue if you require this for other tags.
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
have markdown processing be done on its contents. Similar to
<http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
some limitations.
* metadata: Extract metadata from a leading '---'-fenced block.
See <https://github.com/trentm/python-markdown2/issues/77> for details.
* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
<http://en.wikipedia.org/wiki/Nofollow>.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* smarty-pants: Replaces ' and " with curly quotation marks or curly
apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
and ellipses.
* toc: The returned HTML string gets a new "toc_html" attribute which is
a Table of Contents for the document. (experimental)
* xml: Passes one-liner processing instructions and namespaced XML tags.
* wiki-tables: Google Code Wiki-style tables. See
<http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
"""
# Dev Notes:
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there implications with this. Compare 'pydoc sre'
#   not yet sure if there are implications with this. Compare 'pydoc sre'
__version_info__ = (2, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
import os
import sys
from pprint import pprint
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
#---- Python version compat
try:
from urllib.parse import quote # python3
except ImportError:
from urllib import quote # python2
if sys.version_info[:2] < (2,4):
from sets import Set as set
def reversed(sequence):
for i in sequence[::-1]:
yield i
# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
py3 = False
try:
bytes
except NameError:
bytes = str
base_string_type = basestring
elif sys.version_info[0] >= 3:
py3 = True
unicode = str
base_string_type = str
#---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
# str() first so the salt is a digit string on Python 3 too (bytes(int) there would give a zero-filled buffer).
SECRET_SALT = str(randint(0, 1000000)).encode("utf-8")
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_text(ch))
for ch in '\\`*_{}[]()>#+-.!'])
#---- exceptions
class MarkdownError(Exception):
pass
#---- public api
def markdown_path(path, encoding="utf-8",
html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
text = fp.read()
fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
extras=None, link_patterns=None, use_file_vars=False):
if html4tags:
self.empty_element_suffix = ">"
else:
self.empty_element_suffix = " />"
self.tab_width = tab_width
# For compatibility with earlier markdown2.py and with
# markdown.py's safe_mode being a boolean,
# safe_mode == True -> "replace"
if safe_mode is True:
self.safe_mode = "replace"
else:
self.safe_mode = safe_mode
# Massaging and building the "extras" info.
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
self.extras = dict([(e, None) for e in self.extras])
if extras:
if not isinstance(extras, dict):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
if "toc" in self.extras and not "header-ids" in self.extras:
self.extras["header-ids"] = None # "toc" implies "header-ids"
self._instance_extras = self.extras.copy()
self.link_patterns = link_patterns
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
self._escape_table = g_escape_table.copy()
if "smarty-pants" in self.extras:
self._escape_table['"'] = _hash_text('"')
self._escape_table["'"] = _hash_text("'")
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
if "header-ids" in self.extras:
self._count_from_header_id = {} # no `defaultdict` in Python 2.4
if "metadata" in self.extras:
self.metadata = {}
# Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
# should only be used in <a> tags with an "href" attribute.
_a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
#TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = re.sub("\r\n|\r", "\n", text)
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
# strip metadata from head and extract
if "metadata" in self.extras:
text = self._extract_metadata(text)
text = self.preprocess(text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self.postprocess(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
if "nofollow" in self.extras:
text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
text += "\n"
rv = UnicodeWithAttrs(text)
if "toc" in self.extras:
rv._toc = self._toc
if "metadata" in self.extras:
rv.metadata = self.metadata
return rv
def postprocess(self, text):
"""A hook for subclasses to do some postprocessing of the html, if
desired. This is called before unescaping of special chars and
unhashing of raw HTML spans.
"""
return text
def preprocess(self, text):
"""A hook for subclasses to do some preprocessing of the Markdown, if
desired. This is called after basic formatting of the text, but prior
to any extras, safe mode, etc. processing.
"""
return text
# Is metadata if the content starts with '---'-fenced `key: value`
# pairs. E.g. (indented for presentation):
# ---
# foo: bar
# another-var: blah blah
# ---
_metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
def _extract_metadata(self, text):
# fast test
if not text.startswith("---"):
return text
match = self._metadata_pat.match(text)
if not match:
return text
tail = text[len(match.group(0)):]
metadata_str = match.group(1).strip()
for line in metadata_str.split('\n'):
key, value = line.split(':', 1)
self.metadata[key.strip()] = value.strip()
return tail
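    # Illustrative sketch of the "metadata" extra handled above: a document
    # beginning with
    #     ---
    #     title: My Post
    #     author: Someone
    #     ---
    #     Body text ...
    # leaves {'title': 'My Post', 'author': 'Someone'} in `self.metadata`
    # and only the body is passed on for conversion.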
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
            if len(val) > 1 and val.startswith('"') and val.endswith('"'):
emacs_vars[var] = val[1:-1]
return emacs_vars
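    # Illustrative sketch: with `use_file_vars=True` on the converter, a
    # document containing an emacs-style one-liner such as
    #     <!-- -*- markdown-extras: footnotes, wiki-tables -*- -->
    # has those extras switched on for that conversion (see the
    # "markdown-extras" handling in convert() above).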
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
_html_markdown_attr_re = re.compile(
r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
elif 'markdown-in-html' in self.extras and 'markdown=' in html:
first_line = html.split('\n', 1)[0]
m = self._html_markdown_attr_re.search(first_line)
if m:
lines = html.split('\n')
middle = '\n'.join(lines[1:-1])
last_line = lines[-1]
first_line = first_line[:m.start()] + first_line[m.end():]
f_key = _hash_text(first_line)
self.html_blocks[f_key] = first_line
l_key = _hash_text(last_line)
self.html_blocks[l_key] = last_line
return ''.join(["\n\n", f_key,
"\n\n", middle, "\n\n",
l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
                # - Must be followed by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_data = [
('*', re.compile(r"^[ ]{0,3}\*(.*?)$", re.M)),
('-', re.compile(r"^[ ]{0,3}\-(.*?)$", re.M)),
('_', re.compile(r"^[ ]{0,3}\_(.*?)$", re.M)),
]
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
if "fenced-code-blocks" in self.extras:
text = self._do_fenced_code_blocks(text)
text = self._do_headers(text)
# Do Horizontal Rules:
# On the number of spaces in horizontal rules: The spec is fuzzy: "If
# you wish, you may use spaces between the hyphens or asterisks."
# Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
# hr chars to one or two. We'll reproduce that limit here.
hr = "\n<hr"+self.empty_element_suffix+"\n"
for ch, regex in self._hr_data:
if ch in text:
for m in reversed(list(regex.finditer(text))):
tail = m.group(1).rstrip()
if not tail.strip(ch + ' ') and tail.count(" ") == 0:
start, end = m.span()
text = text[:start] + hr + text[end:]
text = self._do_lists(text)
if "pyshell" in self.extras:
text = self._prepare_pyshell_blocks(text)
if "wiki-tables" in self.extras:
text = self._do_wiki_tables(text)
text = self._do_code_blocks(text)
text = self._do_block_quotes(text)
# We already ran _HashHTMLBlocks() before, in Markdown(), but that
# was to escape raw HTML in the original Markdown source. This time,
# we're escaping the markup we've just created, so that we don't wrap
# <p> tags around block-level tags.
text = self._hash_html_blocks(text)
text = self._form_paragraphs(text)
return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
def _wiki_table_sub(self, match):
ttext = match.group(0).strip()
#print 'wiki table: %r' % match.group(0)
rows = []
for line in ttext.splitlines(0):
line = line.strip()[2:-2].strip()
row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
rows.append(row)
#pprint(rows)
hlines = ['<table>', '<tbody>']
for row in rows:
hrow = ['<tr>']
for cell in row:
hrow.append('<td>')
hrow.append(self._run_span_gamut(cell))
hrow.append('</td>')
hrow.append('</tr>')
hlines.append(''.join(hrow))
hlines += ['</tbody>', '</table>']
return '\n'.join(hlines) + '\n'
def _do_wiki_tables(self, text):
# Optimization.
if "||" not in text:
return text
less_than_tab = self.tab_width - 1
wiki_table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line
(^\1\|\|.+?\|\|\n)* # any number of subsequent lines
''' % less_than_tab, re.M | re.X)
return wiki_table_re.sub(self._wiki_table_sub, text)
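    # Illustrative sketch of the "wiki-tables" extra handled above: input like
    #     || Name || Value ||
    #     || foo  || 1     ||
    # becomes a plain <table><tbody>...</tbody></table> with one <tr> per
    # line and one <td> per ||-delimited cell.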
def _run_span_gamut(self, text):
# These are all the transformations that occur *within* block-level
# tags like paragraphs, headers, and list items.
text = self._do_code_spans(text)
text = self._escape_special_chars(text)
# Process anchor and image tags.
text = self._do_links(text)
# Make links out of things like `<http://example.com/>`
# Must come after _do_links(), because you can use < and >
# delimiters in inline links like [this](<url>).
text = self._do_auto_links(text)
if "link-patterns" in self.extras:
text = self._do_link_patterns(text)
text = self._encode_amps_and_angles(text)
text = self._do_italics_and_bold(text)
if "smarty-pants" in self.extras:
text = self._do_smart_punctuation(text)
# Do hard breaks:
text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in list(self.html_spans.items()):
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
('&', '&'),
('<', '<'),
('>', '>'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_tail_of_inline_link_re = re.compile(r'''
# Match tail of: [text](/url/) or [text](/url/ "title")
\( # literal paren
[ \t]*
(?P<url> # \1
<.*?>
|
.*?
)
[ \t]*
( # \2
(['"]) # quote char = \3
(?P<title>.*?)
\3 # matching quote
)? # title is optional
\)
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
match = self._tail_of_inline_link_re.match(text, p)
if match:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
url, title = match.group("url"), match.group("title")
if url and url[0] == '<':
url = url[1:-1] # '<url>' -> 'url'
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
if title:
title_str = ' title="%s"' % (
_xml_escape_attr(title)
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
% (url.replace('"', '"'),
_xml_escape_attr(link_text),
title_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title = self.titles.get(link_id)
if title:
title = _xml_escape_attr(title) \
.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
% (url.replace('"', '"'),
link_text.replace('"', '"'),
title_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, base_string_type):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
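    # Illustrative sketch: with extras=["header-ids"], a header such as
    #     ## My Section
    # gets an id derived from a slug of its text (roughly "my-section"),
    # with a numeric suffix appended for repeated headers per the counting
    # logic above.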
_toc = None
def _toc_add_entry(self, level, id, name):
if self._toc is None:
self._toc = []
self._toc.append((level, id, self._unescape_special_chars(name)))
_setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M)
def _setext_h_sub(self, match):
n = {"=": 1, "-": 2}[match.group(2)[0]]
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(match.group(1),
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(match.group(1))
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
_atx_h_re = re.compile(r'''
^(\#{1,6}) # \1 = string of #'s
[ \t]+
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
''', re.X | re.M)
def _atx_h_sub(self, match):
n = len(match.group(1))
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(match.group(2),
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(match.group(2))
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
text = self._setext_h_re.sub(self._setext_h_sub, text)
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
text = self._atx_h_re.sub(self._atx_h_sub, text)
return text
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
# Form HTML ordered (numbered) and unordered (bulleted) lists.
# Iterate over each *non-overlapping* list match.
pos = 0
while True:
# Find the *first* hit for either list style (ul or ol). We
# match ul and ol separately to avoid adjacent lists of different
# types running into each other (see issue #16).
hits = []
for marker_pat in (self._marker_ul, self._marker_ol):
less_than_tab = self.tab_width - 1
whole_list = r'''
( # \1 = whole list
( # \2
[ ]{0,%d}
(%s) # \3 = first list item marker
[ \t]+
(?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
)
(?:.+?)
( # \4
\Z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ \t]*
%s[ \t]+
)
)
)
''' % (less_than_tab, marker_pat, marker_pat)
if self.list_level: # sub-list
list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
else:
list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
re.X | re.M | re.S)
match = list_re.search(text, pos)
if match:
hits.append((match.start(), match))
if not hits:
break
hits.sort()
match = hits[0][1]
start, end = match.span()
text = text[:start] + self._list_sub(match) + text[end:]
pos = end
return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
leading_space = match.group(2)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
import pygments
import pygments.formatters
class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
def _wrap_code(self, inner):
"""A function for use in a Pygments Formatter which
wraps in <code> tags.
"""
yield 0, "<code>"
for tup in inner:
yield tup
yield 0, "</code>"
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter_opts.setdefault("cssclass", "codehilite")
formatter = HtmlCodeFormatter(**formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match, is_fenced_code_block=False):
lexer_name = None
if is_fenced_code_block:
lexer_name = match.group(1)
if lexer_name:
formatter_opts = self.extras['fenced-code-blocks'] or {}
codeblock = match.group(2)
codeblock = codeblock[:-1] # drop one trailing newline
else:
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
# Note: "code-color" extra is DEPRECATED.
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
formatter_opts = self.extras['code-color'] or {}
if lexer_name:
lexer = self._get_pygments_lexer(lexer_name)
if lexer:
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
_fenced_code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
(.*?) # $2 = code block content
^```[ \t]*\n # closing fence
''', re.M | re.X | re.S)
def _fenced_code_block_sub(self, match):
        return self._code_block_sub(match, is_fenced_code_block=True)
def _do_fenced_code_blocks(self, text):
"""Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
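    # Illustrative sketch of the "fenced-code-blocks" extra handled above:
    #     ```python
    #     print("hi")
    #     ```
    # is treated as a code block without needing indentation; when a language
    # is named and Pygments is importable, it is syntax-highlighted via
    # _color_with_pygments() above.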
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
    # - to include one backtick or a run of backticks, the delimiters must
    #   be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
(.+?) # \2 = The code block
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
        #   can use as delimiters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
('&', '&'),
# Do the angle bracket song and dance:
('<', '<'),
('>', '>'),
]
for before, after in replacements:
text = text.replace(before, after)
hashed = _hash_text(text)
self._escape_table[text] = hashed
return hashed
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"’\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "’%s" % c)
text = text.replace("'%s" % c.capitalize(),
"’%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text
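    # Illustrative sketch of the substitutions above (with the "smarty-pants"
    # extra enabled):
    #     "Don't" -- she said...
    # comes out with “ and ” around the quoted word, ’ for the
    # apostrophe, – for "--" and … for "...".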
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap <p> tags.
grafs = []
for i, graf in enumerate(re.split(r"\n{2,}", text)):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs.append(self.html_blocks[graf])
else:
cuddled_list = None
if "cuddled-lists" in self.extras:
# Need to put back trailing '\n' for `_list_item_re`
# match at the end of the paragraph.
li = self._list_item_re.search(graf + '\n')
# Two of the same list marker in this paragraph: a likely
# candidate for a list cuddled to preceding paragraph
# text (issue 33). Note the `[-1]` is a quick way to
# consider numeric bullets (e.g. "1." and "2.") to be
# equal.
if (li and len(li.group(2)) <= 3 and li.group("next_marker")
and li.group("marker")[-1] == li.group("next_marker")[-1]):
start = li.start()
cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
graf = graf[:start]
# Wrap <p> tags.
graf = self._run_span_gamut(graf)
grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
if cuddled_list:
grafs.append(cuddled_list)
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
'↩</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&', text)
# Encode naked <'s
        text = self._naked_lt_re.sub('&lt;', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
        text = self._naked_gt_re.sub('&gt;', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
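    # A small illustrative example of the auto-link pass above
    # (hypothetical URL): the input
    #     <http://example.com/page>
    # is rewritten by _auto_link_sub to
    #     <a href="http://example.com/page">http://example.com/page</a>
    # while an address like <[email protected]> is routed through
    # _encode_email_address below.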
def _encode_email_address(self, addr):
# Input: an email address, e.g. "[email protected]"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
        #     <a href="&#x6D;&#97;&#105;&#108;&#x74;&#111;:&#102;&#111;&#111;&#64;e
        #         xample.com">&#102;&#111;&#111;
        #         &#64;example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
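    # A rough sketch of the result (the exact mix of hex/decimal entities is
    # chosen at random, so only the shape is shown here):
    #
    #   _encode_email_address("[email protected]")
    #   -> '<a href="&#109;&#x61;...">&#102;&#x6F;...</a>'
    #
    # Browsers render the entities normally, while naive harvesters scanning
    # for literal "user@host" strings miss the address.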
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
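    # A minimal sketch of the "link_patterns" input this method expects
    # (hypothetical issue-tracker URL); each entry pairs a compiled regex
    # with either an href template or a callable taking the match object:
    #
    #   link_patterns = [
    #       (re.compile(r'\bissue #?(\d+)\b', re.I),
    #        r'https://example.com/issues/\1'),
    #   ]
    #
    # With that, "see issue 42" comes out as
    #   see <a href="https://example.com/issues/42">issue 42</a>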
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in list(self._escape_table.items()):
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
#---- internal support functions
class UnicodeWithAttrs(unicode):
"""A subclass of unicode used for the return value of conversion to
possibly attach some attributes. E.g. the "toc_html" attribute when
the "toc" extra is used.
"""
metadata = None
_toc = None
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
            return '  ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append('%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
toc_html = property(toc_html)
## {{{ http://code.activestate.com/recipes/577257/ (r1)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value)
## end of http://code.activestate.com/recipes/577257/ }}}
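# A minimal, self-contained check of the recipe above (hypothetical inputs):
def _demo_slugify():
    # Punctuation is stripped, whitespace and dashes collapse to single
    # hyphens, and the result is lowercased ASCII.
    assert _slugify("Hello, World!") == "hello-world"
    assert _slugify("Ca va -- bien!") == "ca-va-bien"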
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
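# A minimal sketch of how _curry is used (hypothetical callables): the given
# positional and keyword arguments are frozen into the returned function.
def _demo_curry():
    add = _curry(lambda a, b, c: a + b + c, 1, 2)
    assert add(3) == 6
    greet = _curry("{0}, {1}!".format, "Hello")
    assert greet("world") == "Hello, world!"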
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
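# Two illustrative calls (hypothetical patterns), mirroring the docstring:
def _demo_regex_from_encoded_pattern():
    literal = _regex_from_encoded_pattern('a.b')    # escaped: matches "a.b" only
    regex = _regex_from_encoded_pattern('/a.b/i')   # real regex, case-insensitive
    assert literal.match('axb') is None
    assert regex.match('AxB') is not None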
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line))
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print("dedent: margin=%r" % margin)
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print("dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin))
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
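# A minimal check of the dedent helpers above (hypothetical strings): the
# common two-space margin is removed while deeper indents are preserved.
def _demo_dedent():
    assert _dedent("  foo\n    bar\n") == "foo\n  bar\n"
    # With skip_first_line=True the first line is left untouched and is
    # ignored when computing the margin.
    assert _dedent("doc\n    body\n", skip_first_line=True) == "doc\nbody\n"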
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
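# A minimal sketch of using the decorator above (hypothetical function):
# repeated calls with the same hashable arguments are answered from .cache.
def _demo_memoized():
    @_memoized
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    assert fib(10) == 55
    assert (10,) in fib.cache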
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
    By default this doesn't bother with escaping `'` to `&#39;`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
        .replace('&', '&amp;')
        .replace('"', '&quot;')
        .replace('<', '&lt;')
        .replace('>', '&gt;'))
    if not skip_single_quote:
        escaped = escaped.replace("'", "&#39;")
return escaped
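# A quick self-contained check of the escaping above (hypothetical input):
def _demo_xml_escape_attr():
    assert (_xml_escape_attr('say "a < b & c"')
            == 'say &quot;a &lt; b &amp; c&quot;')
    # Single quotes are only escaped when explicitly requested:
    assert _xml_escape_attr("it's", skip_single_quote=False) == "it&#39;s"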
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
if argv is None:
argv = sys.argv
if not logging.root.handlers:
logging.basicConfig()
usage = "usage: %prog [PATHS...]"
version = "%prog "+__version__
parser = optparse.OptionParser(prog="markdown2", usage=usage,
version=version, description=cmdln_desc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("--encoding",
help="specify encoding of text content")
parser.add_option("--html4tags", action="store_true", default=False,
help="use HTML 4 style for empty element tags")
parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
help="sanitize literal HTML: 'escape' escapes "
"HTML meta chars, 'replace' replaces with an "
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
"the core Markdown spec). See above.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
"<https://github.com/trentm/python-markdown2/wiki/Extras>")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
help="run internal self-tests (some doctests)")
parser.add_option("--compare", action="store_true",
help="run against Markdown.pl as well (for testing)")
parser.set_defaults(log_level=logging.INFO, compare=False,
encoding="utf-8", safe_mode=None, use_file_vars=False)
opts, paths = parser.parse_args()
log.setLevel(opts.log_level)
if opts.self_test:
return _test()
if opts.extras:
extras = {}
for s in opts.extras:
splitter = re.compile("[,;: ]+")
for e in splitter.split(s):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
extras[ename] = earg
else:
extras = None
if opts.link_patterns_file:
link_patterns = []
f = open(opts.link_patterns_file)
try:
for i, line in enumerate(f.readlines()):
if not line.strip(): continue
if line.lstrip().startswith("#"): continue
try:
pat, href = line.rstrip().rsplit(None, 1)
except ValueError:
raise MarkdownError("%s:%d: invalid link pattern line: %r"
% (opts.link_patterns_file, i+1, line))
link_patterns.append(
(_regex_from_encoded_pattern(pat), href))
finally:
f.close()
else:
link_patterns = None
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
if not paths:
paths = ['-']
for path in paths:
if path == '-':
text = sys.stdin.read()
else:
fp = codecs.open(path, 'r', opts.encoding)
text = fp.read()
fp.close()
if opts.compare:
from subprocess import Popen, PIPE
print("==== Markdown.pl ====")
p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
p.stdin.write(text.encode('utf-8'))
p.stdin.close()
perl_html = p.stdout.read().decode('utf-8')
if py3:
sys.stdout.write(perl_html)
else:
sys.stdout.write(perl_html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
print("==== markdown2.py ====")
html = markdown(text,
html4tags=opts.html4tags,
safe_mode=opts.safe_mode,
extras=extras, link_patterns=link_patterns,
use_file_vars=opts.use_file_vars)
if py3:
sys.stdout.write(html)
else:
sys.stdout.write(html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if extras and "toc" in extras:
log.debug("toc_html: " +
html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
sys.path.insert(0, test_dir)
from test_markdown2 import norm_html_from_html
norm_html = norm_html_from_html(html)
norm_perl_html = norm_html_from_html(perl_html)
else:
norm_html = html
norm_perl_html = perl_html
print("==== match? %r ====" % (norm_perl_html == norm_html))
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| gpl-2.0 | 8,272,548,722,294,440,000 | 39.036685 | 121 | 0.501525 | false |
BrianGladman/pthreads | build.vs/build_tests/_msvccompiler.py | 1 | 22500 | """
----------------------------------------------------------------------------
Copyright © 2001-2020 Python Software Foundation; All Rights Reserved
This file is distributed under the terms of this license:
https://docs.python.org/3/license.html
----------------------------------------------------------------------------
distutils._msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for Microsoft Visual Studio 2015.
The module is compatible with VS 2015 and later. You can find legacy support
for older versions in distutils.msvc9compiler and distutils.msvccompiler.
Written by Perry Stoll
hacked by Robin Becker and Thomas Heller to do a better job of
finding DevStudio (through the registry)
ported to VS 2005 and VS 2008 by Christian Heimes
ported to VS 2015 by Steve Dower
----------------------------------------------------------------------------
"""
import os
import shutil
import stat
import subprocess
import winreg
from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform
from itertools import count
def _find_vc2015():
try:
key = winreg.OpenKeyEx(
winreg.HKEY_LOCAL_MACHINE,
r"Software\Microsoft\VisualStudio\SxS\VC7",
access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY
)
except OSError:
log.debug("Visual C++ is not registered")
return None, None
best_version = 0
best_dir = None
with key:
for i in count():
try:
v, vc_dir, vt = winreg.EnumValue(key, i)
except OSError:
break
if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
try:
version = int(float(v))
except (ValueError, TypeError):
continue
if version >= 14 and version > best_version:
best_version, best_dir = version, vc_dir
return best_version, best_dir
def _find_vc2017():
"""Returns "15, path" based on the result of invoking vswhere.exe
If no install is found, returns "None, None"
The version is returned to avoid unnecessarily changing the function
result. It may be ignored when the path is not None.
If vswhere.exe is not available, by definition, VS 2017 is not
installed.
"""
root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
if not root:
return None, None
try:
path = subprocess.check_output([
os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
"-latest",
"-prerelease",
"-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"-property", "installationPath",
"-products", "*",
], encoding="mbcs", errors="strict").strip()
except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
return None, None
path = os.path.join(path, "VC", "Auxiliary", "Build")
if os.path.isdir(path):
return 15, path
return None, None
PLAT_SPEC_TO_RUNTIME = {
'x86' : 'x86',
'x86_amd64' : 'x64',
'x86_arm' : 'arm',
'x86_arm64' : 'arm64'
}
def _find_vcvarsall(plat_spec):
_, best_dir = _find_vc2017()
vcruntime = None
if plat_spec in PLAT_SPEC_TO_RUNTIME:
vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
else:
vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
if best_dir:
vcredist = os.path.join(best_dir, "..", "..", "redist", "MSVC", "**",
vcruntime_plat, "Microsoft.VC14*.CRT", "vcruntime140.dll")
try:
import glob
vcruntime = glob.glob(vcredist, recursive=True)[-1]
except (ImportError, OSError, LookupError):
vcruntime = None
if not best_dir:
best_version, best_dir = _find_vc2015()
if best_version:
vcruntime = os.path.join(best_dir, 'redist', vcruntime_plat,
"Microsoft.VC140.CRT", "vcruntime140.dll")
if not best_dir:
log.debug("No suitable Visual C++ version found")
return None, None
vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
if not os.path.isfile(vcvarsall):
log.debug("%s cannot be found", vcvarsall)
return None, None
if not vcruntime or not os.path.isfile(vcruntime):
log.debug("%s cannot be found", vcruntime)
vcruntime = None
return vcvarsall, vcruntime
def _get_vc_env(plat_spec):
if os.getenv("DISTUTILS_USE_SDK"):
return {
key.lower(): value
for key, value in os.environ.items()
}
vcvarsall, vcruntime = _find_vcvarsall(plat_spec)
if not vcvarsall:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
try:
out = subprocess.check_output(
'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
stderr=subprocess.STDOUT,
).decode('utf-16le', errors='replace')
except subprocess.CalledProcessError as exc:
log.error(exc.output)
raise DistutilsPlatformError("Error executing {}"
.format(exc.cmd))
env = {
key.lower(): value
for key, _, value in
(line.partition('=') for line in out.splitlines())
if key and value
}
if vcruntime:
env['py_vcruntime_redist'] = vcruntime
return env
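# A rough sketch of the result (values are machine-specific): the entries
# MSVCCompiler.initialize() below relies on are lower-cased environment
# variables such as 'path', 'include' and 'lib', plus the optional
# 'py_vcruntime_redist' key added above, e.g.
#
#   env = _get_vc_env('x86_amd64')
#   env['path']     # ';'-separated tool directories containing cl.exe, link.exe, ...
#   env['include']  # ';'-separated C/C++ include directories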
def _find_exe(exe, paths=None):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
if not paths:
paths = os.getenv('path').split(os.pathsep)
for p in paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
return exe
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Always cross-compile from x86 to work with the
# lighter-weight MSVC installs that do not include native 64-bit tools.
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'x86_amd64',
'win-arm32' : 'x86_arm',
'win-arm64' : 'x86_arm64'
}
# A set containing the DLLs that are guaranteed to be available for
# all micro versions of this Python version. Known extension
# dependencies that are not in this set will be copied to the output
# path.
_BUNDLED_DLLS = frozenset(['vcruntime140.dll'])
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
if plat_name not in PLAT_TO_VCVARS:
raise DistutilsPlatformError("--plat-name must be one of {}"
.format(tuple(PLAT_TO_VCVARS)))
# Get the vcvarsall.bat spec for the requested platform.
plat_spec = PLAT_TO_VCVARS[plat_name]
vc_env = _get_vc_env(plat_spec)
if not vc_env:
raise DistutilsPlatformError("Unable to find a compatible "
"Visual Studio installation.")
self._paths = vc_env.get('path', '')
paths = self._paths.split(os.pathsep)
self.cc = _find_exe("cl.exe", paths)
self.linker = _find_exe("link.exe", paths)
self.lib = _find_exe("lib.exe", paths)
self.rc = _find_exe("rc.exe", paths) # resource compiler
self.mc = _find_exe("mc.exe", paths) # message compiler
self.mt = _find_exe("mt.exe", paths) # message compiler
self._vcruntime_redist = vc_env.get('py_vcruntime_redist', '')
for dir in vc_env.get('include', '').split(os.pathsep):
if dir:
self.add_include_dir(dir.rstrip(os.sep))
for dir in vc_env.get('lib', '').split(os.pathsep):
if dir:
self.add_library_dir(dir.rstrip(os.sep))
self.preprocess_options = None
# If vcruntime_redist is available, link against it dynamically. Otherwise,
# use /MT[d] to build statically, then switch from libucrt[d].lib to ucrt[d].lib
# later to dynamically link to ucrtbase but not vcruntime.
self.compile_options = [
'/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG'
]
        self.compile_options.append('/MT')  # this build always compiles against the static CRT
self.compile_options_debug = [
'/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG'
]
ldflags = [
'/nologo', '/INCREMENTAL:NO', '/LTCG'
]
if not self._vcruntime_redist:
ldflags.extend(('/nodefaultlib:libucrt.lib', '/nodefaultlib:msvcrt.lib', '/nodefaultlib:libcmtd.lib', '/nodefaultlib:msvcrtd.lib', 'ucrt.lib'))
ldflags_debug = [
'/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'
]
self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1']
self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1']
self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
self.ldflags_static = [*ldflags]
self.ldflags_static_debug = [*ldflags_debug]
self._ldflags = {
(CCompiler.EXECUTABLE, None): self.ldflags_exe,
(CCompiler.EXECUTABLE, False): self.ldflags_exe,
(CCompiler.EXECUTABLE, True): self.ldflags_exe_debug,
(CCompiler.SHARED_OBJECT, None): self.ldflags_shared,
(CCompiler.SHARED_OBJECT, False): self.ldflags_shared,
(CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug,
(CCompiler.SHARED_LIBRARY, None): self.ldflags_static,
(CCompiler.SHARED_LIBRARY, False): self.ldflags_static,
(CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug,
}
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
ext_map = {
**{ext: self.obj_extension for ext in self.src_extensions},
**{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions},
}
output_dir = output_dir or ''
def make_out_path(p):
base, ext = os.path.splitext(p)
if strip_dir:
base = os.path.basename(base)
else:
_, base = os.path.splitdrive(base)
if base.startswith((os.path.sep, os.path.altsep)):
base = base[1:]
try:
# XXX: This may produce absurdly long paths. We should check
# the length of the result and trim base until we fit within
# 260 characters.
return os.path.join(output_dir, base + ext_map[ext])
except LookupError:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError("Don't know how to compile {}".format(p))
return list(map(make_out_path, source_filenames))
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None,
defines=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append('/c')
if defines is not None:
compile_opts.extend(defines)
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
add_cpp_opts = False
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
add_cpp_opts = True
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
base, _ = os.path.splitext(os.path.basename (src))
rc_file = os.path.join(rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc, "/fo" + obj, rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile {} to {}"
.format(src, obj))
args = [self.cc] + compile_opts + pp_opts
if add_cpp_opts:
args.append('/EHsc')
args.append(input_opt)
args.append("/Fo" + obj)
args.extend(extra_postargs)
try:
self.spawn(args)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args))
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
objects, output_dir = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
libraries, library_dirs, runtime_library_dirs = fixed_args
if runtime_library_dirs:
self.warn("I don't know what to do with 'runtime_library_dirs': "
+ str(runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ldflags = self._ldflags[target_desc, debug]
export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])]
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
output_dir = os.path.dirname(os.path.abspath(output_filename))
self.mkpath(output_dir)
try:
log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args))
self.spawn([self.linker] + ld_args)
self._copy_vcruntime(output_dir)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _copy_vcruntime(self, output_dir):
vcruntime = self._vcruntime_redist
if not vcruntime or not os.path.isfile(vcruntime):
return
if os.path.basename(vcruntime).lower() in _BUNDLED_DLLS:
return
log.debug('Copying "%s"', vcruntime)
vcruntime = shutil.copy(vcruntime, output_dir)
os.chmod(vcruntime, stat.S_IWRITE)
def spawn(self, cmd):
old_path = os.getenv('path')
try:
os.environ['path'] = self._paths
return super().spawn(cmd)
finally:
os.environ['path'] = old_path
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.isfile(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
| apache-2.0 | -3,225,636,429,154,730,000 | 36.373754 | 155 | 0.557491 | false |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py | 54 | 9345 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.platform import test
sge = stochastic_gradient_estimators
st = stochastic_tensor_impl
class StochasticTensorTest(test.TestCase):
def testConstructionAndValue(self):
with self.test_session() as sess:
mu = [0.0, 0.1, 0.2]
sigma = constant_op.constant([1.1, 1.2, 1.3])
sigma2 = constant_op.constant([0.1, 0.2, 0.3])
prior_default = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior_default.value_type, st.SampleValue))
prior_0 = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma),
dist_value_type=st.SampleValue())
self.assertTrue(isinstance(prior_0.value_type, st.SampleValue))
with st.value_type(st.SampleValue()):
prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior.value_type, st.SampleValue))
likelihood = st.StochasticTensor(
normal.Normal(loc=prior, scale=sigma2))
self.assertTrue(isinstance(likelihood.value_type, st.SampleValue))
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [prior_default, prior_0, prior, likelihood])
# Also works: tf.convert_to_tensor(prior)
prior_default = array_ops.identity(prior_default)
prior_0 = array_ops.identity(prior_0)
prior = array_ops.identity(prior)
likelihood = array_ops.identity(likelihood)
# Mostly a smoke test for now...
prior_0_val, prior_val, prior_default_val, _ = sess.run(
[prior_0, prior, prior_default, likelihood])
self.assertEqual(prior_0_val.shape, prior_val.shape)
self.assertEqual(prior_default_val.shape, prior_val.shape)
# These are different random samples from the same distribution,
# so the values should differ.
self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)
self.assertGreater(np.abs(prior_default_val - prior_val).sum(), 1e-6)
def testMeanValue(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.MeanValue()):
prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior.value_type, st.MeanValue))
prior_mean = prior.mean()
prior_value = prior.value()
prior_mean_val, prior_value_val = sess.run([prior_mean, prior_value])
self.assertAllEqual(prior_mean_val, mu)
self.assertAllEqual(prior_mean_val, prior_value_val)
def testSampleValueScalar(self):
with self.test_session() as sess:
mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
sigma = constant_op.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
with st.value_type(st.SampleValue()):
prior_single = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma))
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (2, 3))
with st.value_type(st.SampleValue(1)):
prior_single = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior_single.value_type, st.SampleValue))
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (1, 2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (1, 2, 3))
with st.value_type(st.SampleValue(2)):
prior_double = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma))
prior_double_value = prior_double.value()
self.assertEqual(prior_double_value.get_shape(), (2, 2, 3))
prior_double_value_val = sess.run([prior_double_value])[0]
self.assertEqual(prior_double_value_val.shape, (2, 2, 3))
def testDistributionEntropy(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.MeanValue()):
prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
entropy = prior.entropy()
deep_entropy = prior.distribution.entropy()
expected_deep_entropy = normal.Normal(
loc=mu, scale=sigma).entropy()
entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
self.assertAllEqual(entropies[2], entropies[0])
self.assertAllEqual(entropies[1], entropies[0])
def testSurrogateLoss(self):
with self.test_session():
mu = [[3.0, -4.0, 5.0], [6.0, -7.0, 8.0]]
sigma = constant_op.constant(1.0)
# With default
with st.value_type(st.MeanValue(stop_gradient=True)):
dt = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
loss = dt.loss([constant_op.constant(2.0)])
self.assertTrue(loss is not None)
self.assertAllClose(
dt.distribution.log_prob(mu).eval() * 2.0, loss.eval())
# With passed-in loss_fn.
dt = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma),
dist_value_type=st.MeanValue(stop_gradient=True),
loss_fn=sge.get_score_function_with_constant_baseline(
baseline=constant_op.constant(8.0)))
loss = dt.loss([constant_op.constant(2.0)])
self.assertTrue(loss is not None)
self.assertAllClose((dt.distribution.log_prob(mu) * (2.0 - 8.0)).eval(),
loss.eval())
class ValueTypeTest(test.TestCase):
def testValueType(self):
type_mean = st.MeanValue()
type_reshape = st.SampleValue()
type_full = st.SampleValue()
with st.value_type(type_mean):
self.assertEqual(st.get_current_value_type(), type_mean)
with st.value_type(type_reshape):
self.assertEqual(st.get_current_value_type(), type_reshape)
with st.value_type(type_full):
self.assertEqual(st.get_current_value_type(), type_full)
self.assertEqual(st.get_current_value_type(), type_mean)
with self.assertRaisesRegexp(ValueError, "No value type currently set"):
st.get_current_value_type()
class ObservedStochasticTensorTest(test.TestCase):
def testConstructionAndValue(self):
with self.test_session() as sess:
mu = [0.0, 0.1, 0.2]
sigma = constant_op.constant([1.1, 1.2, 1.3])
obs = array_ops.zeros((2, 3))
z = st.ObservedStochasticTensor(
normal.Normal(loc=mu, scale=sigma), value=obs)
[obs_val, z_val] = sess.run([obs, z.value()])
self.assertAllEqual(obs_val, z_val)
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [z])
def testConstructionWithUnknownShapes(self):
mu = array_ops.placeholder(dtypes.float32)
sigma = array_ops.placeholder(dtypes.float32)
obs = array_ops.placeholder(dtypes.float32)
z = st.ObservedStochasticTensor(
normal.Normal(loc=mu, scale=sigma), value=obs)
mu2 = array_ops.placeholder(dtypes.float32, shape=[None])
sigma2 = array_ops.placeholder(dtypes.float32, shape=[None])
obs2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
z2 = st.ObservedStochasticTensor(
normal.Normal(loc=mu2, scale=sigma2), value=obs2)
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [z, z2])
def testConstructionErrors(self):
mu = [0., 0.]
sigma = [1., 1.]
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
normal.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((3,)))
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
normal.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((3, 1)))
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
normal.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((1, 2), dtype=dtypes.int32))
if __name__ == "__main__":
test.main()
| mit | 644,858,043,063,645,400 | 38.100418 | 82 | 0.661209 | false |
Tokyo-Buffalo/tokyosouth | env/lib/python3.6/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 713 | 9596 | from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
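    # For instance, Timeout.from_float(5.0) behaves like
    # Timeout(connect=5.0, read=5.0) with no overall cap (total=None).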
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
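    # For example (illustrative numbers): with Timeout(total=10, read=7) and
    # roughly 4 seconds already spent connecting, read_timeout evaluates
    # min(10 - 4, 7) == 6; without a total it simply returns the read value.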
| mit | 532,520,070,062,713,150 | 38.652893 | 82 | 0.63516 | false |
duhzecca/cinder | cinder/volume/drivers/vmware/volumeops.py | 7 | 68492 | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements operations on volumes residing on VMware datastores.
"""
from oslo_log import log as logging
from oslo_utils import units
from oslo_vmware import exceptions
from oslo_vmware import pbm
from oslo_vmware import vim_util
import six
from six.moves import urllib
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
LOG = logging.getLogger(__name__)
LINKED_CLONE_TYPE = 'linked'
FULL_CLONE_TYPE = 'full'
def split_datastore_path(datastore_path):
"""Split the datastore path to components.
return the datastore name, relative folder path and the file name
E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns
(datastore1, my_volume/, my_volume.vmdk)
:param datastore_path: Datastore path of a file
:return: Parsed datastore name, relative folder path and file name
"""
splits = datastore_path.split('[', 1)[1].split(']', 1)
datastore_name = None
folder_path = None
file_name = None
if len(splits) == 1:
datastore_name = splits[0]
else:
datastore_name, path = splits
# Path will be of form my_volume/my_volume.vmdk
        # we need to split it into my_volume/ and my_volume.vmdk
splits = path.split('/')
file_name = splits[len(splits) - 1]
folder_path = path[:-len(file_name)]
return (datastore_name.strip(), folder_path.strip(), file_name.strip())
class VirtualDiskPath(object):
"""Class representing paths of files comprising a virtual disk."""
def __init__(self, ds_name, folder_path, disk_name):
"""Creates path object for the given disk.
:param ds_name: name of the datastore where disk is stored
:param folder_path: absolute path of the folder containing the disk
:param disk_name: name of the virtual disk
"""
self._descriptor_file_path = "%s%s.vmdk" % (folder_path, disk_name)
self._descriptor_ds_file_path = self.get_datastore_file_path(
ds_name, self._descriptor_file_path)
def get_datastore_file_path(self, ds_name, file_path):
"""Get datastore path corresponding to the given file path.
:param ds_name: name of the datastore containing the file represented
by the given file path
:param file_path: absolute path of the file
:return: datastore file path
"""
return "[%s] %s" % (ds_name, file_path)
def get_descriptor_file_path(self):
"""Get absolute file path of the virtual disk descriptor."""
return self._descriptor_file_path
def get_descriptor_ds_file_path(self):
"""Get datastore file path of the virtual disk descriptor."""
return self._descriptor_ds_file_path
class FlatExtentVirtualDiskPath(VirtualDiskPath):
"""Paths of files in a non-monolithic disk with a single flat extent."""
def __init__(self, ds_name, folder_path, disk_name):
"""Creates path object for the given disk.
:param ds_name: name of the datastore where disk is stored
:param folder_path: absolute path of the folder containing the disk
:param disk_name: name of the virtual disk
"""
super(FlatExtentVirtualDiskPath, self).__init__(
ds_name, folder_path, disk_name)
self._flat_extent_file_path = "%s%s-flat.vmdk" % (folder_path,
disk_name)
self._flat_extent_ds_file_path = self.get_datastore_file_path(
ds_name, self._flat_extent_file_path)
def get_flat_extent_file_path(self):
"""Get absolute file path of the flat extent."""
return self._flat_extent_file_path
def get_flat_extent_ds_file_path(self):
"""Get datastore file path of the flat extent."""
return self._flat_extent_ds_file_path
class MonolithicSparseVirtualDiskPath(VirtualDiskPath):
"""Paths of file comprising a monolithic sparse disk."""
pass
class VirtualDiskType(object):
"""Supported virtual disk types."""
EAGER_ZEROED_THICK = "eagerZeroedThick"
PREALLOCATED = "preallocated"
THIN = "thin"
# thick in extra_spec means lazy-zeroed thick disk
EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK,
'thick': PREALLOCATED,
'thin': THIN
}
@staticmethod
def is_valid(extra_spec_disk_type):
"""Check if the given disk type in extra_spec is valid.
:param extra_spec_disk_type: disk type in extra_spec
:return: True if valid
"""
return (extra_spec_disk_type in
VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT)
@staticmethod
def validate(extra_spec_disk_type):
"""Validate the given disk type in extra_spec.
This method throws an instance of InvalidDiskTypeException if the given
disk type is invalid.
:param extra_spec_disk_type: disk type in extra_spec
:raises: InvalidDiskTypeException
"""
if not VirtualDiskType.is_valid(extra_spec_disk_type):
raise vmdk_exceptions.InvalidDiskTypeException(
disk_type=extra_spec_disk_type)
@staticmethod
def get_virtual_disk_type(extra_spec_disk_type):
"""Return disk type corresponding to the extra_spec disk type.
:param extra_spec_disk_type: disk type in extra_spec
:return: virtual disk type
:raises: InvalidDiskTypeException
"""
VirtualDiskType.validate(extra_spec_disk_type)
return (VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT[
extra_spec_disk_type])
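# For example, the extra_spec value 'thick' maps to the vCenter disk type
# 'preallocated' (lazy-zeroed thick), while an unknown value such as
# 'sparse' is rejected:
#
#   VirtualDiskType.get_virtual_disk_type('thick')  -> 'preallocated'
#   VirtualDiskType.is_valid('sparse')              -> False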
class VirtualDiskAdapterType(object):
"""Supported virtual disk adapter types."""
LSI_LOGIC = "lsiLogic"
BUS_LOGIC = "busLogic"
LSI_LOGIC_SAS = "lsiLogicsas"
IDE = "ide"
@staticmethod
def is_valid(adapter_type):
"""Check if the given adapter type is valid.
:param adapter_type: adapter type to check
:return: True if valid
"""
return adapter_type in [VirtualDiskAdapterType.LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS,
VirtualDiskAdapterType.IDE]
@staticmethod
def validate(extra_spec_adapter_type):
"""Validate the given adapter type in extra_spec.
This method throws an instance of InvalidAdapterTypeException if the
given adapter type is invalid.
:param extra_spec_adapter_type: adapter type in extra_spec
:raises: InvalidAdapterTypeException
"""
if not VirtualDiskAdapterType.is_valid(extra_spec_adapter_type):
raise vmdk_exceptions.InvalidAdapterTypeException(
invalid_type=extra_spec_adapter_type)
@staticmethod
def get_adapter_type(extra_spec_adapter_type):
"""Get the adapter type to be used in VirtualDiskSpec.
:param extra_spec_adapter_type: adapter type in the extra_spec
:return: adapter type to be used in VirtualDiskSpec
"""
VirtualDiskAdapterType.validate(extra_spec_adapter_type)
# We set the adapter type as lsiLogic for lsiLogicsas since it is not
# supported by VirtualDiskManager APIs. This won't be a problem because
# we attach the virtual disk to the correct controller type and the
# disk adapter type is always resolved using its controller key.
if extra_spec_adapter_type == VirtualDiskAdapterType.LSI_LOGIC_SAS:
return VirtualDiskAdapterType.LSI_LOGIC
return extra_spec_adapter_type
class ControllerType(object):
"""Encapsulate various controller types."""
LSI_LOGIC = 'VirtualLsiLogicController'
BUS_LOGIC = 'VirtualBusLogicController'
LSI_LOGIC_SAS = 'VirtualLsiLogicSASController'
IDE = 'VirtualIDEController'
CONTROLLER_TYPE_DICT = {
VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS,
VirtualDiskAdapterType.IDE: IDE}
@staticmethod
def get_controller_type(adapter_type):
"""Get the disk controller type based on the given adapter type.
:param adapter_type: disk adapter type
:return: controller type corresponding to the given adapter type
:raises: InvalidAdapterTypeException
"""
if adapter_type in ControllerType.CONTROLLER_TYPE_DICT:
return ControllerType.CONTROLLER_TYPE_DICT[adapter_type]
raise vmdk_exceptions.InvalidAdapterTypeException(
invalid_type=adapter_type)
@staticmethod
def is_scsi_controller(controller_type):
"""Check if the given controller is a SCSI controller.
:param controller_type: controller type
:return: True if the controller is a SCSI controller
"""
return controller_type in [ControllerType.LSI_LOGIC,
ControllerType.BUS_LOGIC,
ControllerType.LSI_LOGIC_SAS]
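# Illustrative sketch (not part of the original module): adapter types resolve
# to controller classes, and the SCSI check drives the sharedBus setting in
# the controller config spec built later in this module.
#
#   ControllerType.get_controller_type('lsiLogicsas')
#       # -> 'VirtualLsiLogicSASController'
#   ControllerType.is_scsi_controller('VirtualLsiLogicSASController')  # -> True
#   ControllerType.is_scsi_controller('VirtualIDEController')          # -> False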
class VMwareVolumeOps(object):
"""Manages volume operations."""
def __init__(self, session, max_objects):
self._session = session
self._max_objects = max_objects
self._folder_cache = {}
def get_backing(self, name):
"""Get the backing based on name.
:param name: Name of the backing
:return: Managed object reference to the backing
"""
retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'VirtualMachine',
self._max_objects)
while retrieve_result:
vms = retrieve_result.objects
for vm in vms:
if vm.propSet[0].val == name:
# We got the result, so cancel further retrieval.
self.cancel_retrieval(retrieve_result)
return vm.obj
# Result not obtained, continue retrieving results.
retrieve_result = self.continue_retrieval(retrieve_result)
LOG.debug("Did not find any backing with name: %s", name)
def delete_backing(self, backing):
"""Delete the backing.
:param backing: Managed object reference to the backing
"""
LOG.debug("Deleting the VM backing: %s.", backing)
task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
backing)
LOG.debug("Initiated deletion of VM backing: %s.", backing)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted the VM backing: %s."), backing)
# TODO(kartikaditya) Keep the methods not specific to volume in
# a different file
def get_host(self, instance):
"""Get host under which instance is present.
:param instance: Managed object reference of the instance VM
:return: Host managing the instance VM
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, instance,
'runtime.host')
def is_host_usable(self, host):
"""Check if the given ESX host is usable.
A host is usable if it is connected to vCenter server and not in
maintenance mode.
:param host: Managed object reference to the ESX host
:return: True if host is usable, False otherwise
"""
runtime_info = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
host,
'runtime')
return (runtime_info.connectionState == 'connected' and
not runtime_info.inMaintenanceMode)
def get_hosts(self):
"""Get all host from the inventory.
:return: All the hosts from the inventory
"""
return self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'HostSystem', self._max_objects)
def continue_retrieval(self, retrieve_result):
"""Continue retrieval of results if necessary.
        :param retrieve_result: Result from RetrievePropertiesEx
        :return: Next batch of results, if any
        """
return self._session.invoke_api(vim_util, 'continue_retrieval',
self._session.vim, retrieve_result)
def cancel_retrieval(self, retrieve_result):
"""Cancel retrieval of results if necessary.
:param retrieve_result: Result from RetrievePropertiesEx
"""
self._session.invoke_api(vim_util, 'cancel_retrieval',
self._session.vim, retrieve_result)
def _is_usable(self, mount_info):
"""Check if a datastore is usable as per the given mount info.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param mount_info: Host mount information
:return: True if datastore is usable
"""
writable = mount_info.accessMode == 'readWrite'
# If mounted attribute is not set, then default is True
mounted = getattr(mount_info, 'mounted', True)
# If accessible attribute is not set, then default is False
accessible = getattr(mount_info, 'accessible', False)
return writable and mounted and accessible
def get_connected_hosts(self, datastore):
"""Get all the hosts to which the datastore is connected and usable.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param datastore: Reference to the datastore entity
:return: List of managed object references of all connected
hosts
"""
summary = self.get_summary(datastore)
if not summary.accessible:
return []
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
if not hasattr(host_mounts, 'DatastoreHostMount'):
return []
connected_hosts = []
for host_mount in host_mounts.DatastoreHostMount:
if self._is_usable(host_mount.mountInfo):
connected_hosts.append(host_mount.key.value)
return connected_hosts
def is_datastore_accessible(self, datastore, host):
"""Check if the datastore is accessible to the given host.
        :param datastore: datastore reference
        :param host: host reference
        :return: True if the datastore is accessible to the given host
"""
hosts = self.get_connected_hosts(datastore)
return host.value in hosts
def _in_maintenance(self, summary):
"""Check if a datastore is entering maintenance or in maintenance.
:param summary: Summary information about the datastore
:return: True if the datastore is entering maintenance or in
maintenance
"""
if hasattr(summary, 'maintenanceMode'):
return summary.maintenanceMode in ['enteringMaintenance',
'inMaintenance']
return False
def _is_valid(self, datastore, host):
"""Check if the datastore is valid for the given host.
A datastore is considered valid for a host only if the datastore is
writable, mounted and accessible. Also, the datastore should not be
in maintenance mode.
:param datastore: Reference to the datastore entity
:param host: Reference to the host entity
:return: True if datastore can be used for volume creation
"""
summary = self.get_summary(datastore)
in_maintenance = self._in_maintenance(summary)
if not summary.accessible or in_maintenance:
return False
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
for host_mount in host_mounts.DatastoreHostMount:
if host_mount.key.value == host.value:
return self._is_usable(host_mount.mountInfo)
return False
def get_dss_rp(self, host):
"""Get accessible datastores and resource pool of the host.
:param host: Managed object reference of the host
:return: Datastores accessible to the host and resource pool to which
the host belongs to
"""
props = self._session.invoke_api(vim_util, 'get_object_properties',
self._session.vim, host,
['datastore', 'parent'])
# Get datastores and compute resource or cluster compute resource
datastores = []
compute_resource = None
for elem in props:
for prop in elem.propSet:
if prop.name == 'datastore' and prop.val:
# Consider only if datastores are present under host
datastores = prop.val.ManagedObjectReference
elif prop.name == 'parent':
compute_resource = prop.val
LOG.debug("Datastores attached to host %(host)s are: %(ds)s.",
{'host': host, 'ds': datastores})
        # Keep only the datastores that are accessible, mounted and writable.
valid_dss = []
for datastore in datastores:
if self._is_valid(datastore, host):
valid_dss.append(datastore)
# Get resource pool from compute resource or cluster compute resource
resource_pool = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
compute_resource,
'resourcePool')
if not valid_dss:
msg = _("There are no valid datastores attached to %s.") % host
LOG.error(msg)
raise exceptions.VimException(msg)
else:
LOG.debug("Valid datastores are: %s", valid_dss)
return (valid_dss, resource_pool)
def _get_parent(self, child, parent_type):
"""Get immediate parent of given type via 'parent' property.
:param child: Child entity reference
:param parent_type: Entity type of the parent
:return: Immediate parent of specific type up the hierarchy via
'parent' property
"""
if not child:
return None
if child._type == parent_type:
return child
parent = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, child, 'parent')
return self._get_parent(parent, parent_type)
def get_dc(self, child):
"""Get parent datacenter up the hierarchy via 'parent' property.
:param child: Reference of the child entity
:return: Parent Datacenter of the param child entity
"""
return self._get_parent(child, 'Datacenter')
def get_vmfolder(self, datacenter):
"""Get the vmFolder.
:param datacenter: Reference to the datacenter entity
:return: vmFolder property of the datacenter
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datacenter,
'vmFolder')
def _get_child_folder(self, parent_folder, child_folder_name):
# Get list of child entities for the parent folder
prop_val = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, parent_folder,
'childEntity')
if prop_val and hasattr(prop_val, 'ManagedObjectReference'):
child_entities = prop_val.ManagedObjectReference
# Return if the child folder with input name is already present
for child_entity in child_entities:
if child_entity._type != 'Folder':
continue
child_entity_name = self.get_entity_name(child_entity)
if (child_entity_name
and (urllib.parse.unquote(child_entity_name)
== child_folder_name)):
LOG.debug("Child folder: %s exists.", child_folder_name)
return child_entity
def create_folder(self, parent_folder, child_folder_name):
"""Creates child folder with given name under the given parent folder.
        The method first checks whether the child folder already exists; if it
        does, the folder's moref is returned, otherwise the folder is created
        and the new moref is returned.
:param parent_folder: Reference to the folder entity
:param child_folder_name: Name of the child folder
:return: Reference to the child folder with input name if it already
exists, else create one and return the reference
"""
LOG.debug("Creating folder: %(child_folder_name)s under parent "
"folder: %(parent_folder)s.",
{'child_folder_name': child_folder_name,
'parent_folder': parent_folder})
child_folder = self._get_child_folder(parent_folder, child_folder_name)
if not child_folder:
# Need to create the child folder.
try:
child_folder = self._session.invoke_api(self._session.vim,
'CreateFolder',
parent_folder,
name=child_folder_name)
LOG.debug("Created child folder: %s.", child_folder)
except exceptions.DuplicateName:
# Another thread is trying to create the same folder, ignore
# the exception.
child_folder = self._get_child_folder(parent_folder,
child_folder_name)
return child_folder
def create_vm_inventory_folder(self, datacenter, path_comp):
"""Create and return a VM inventory folder.
This method caches references to inventory folders returned.
:param datacenter: Reference to datacenter
:param path_comp: Path components as a list
"""
LOG.debug("Creating inventory folder: %(path_comp)s under VM folder "
"of datacenter: %(datacenter)s.",
{'path_comp': path_comp,
'datacenter': datacenter})
path = "/" + datacenter.value
parent = self._folder_cache.get(path)
if not parent:
parent = self.get_vmfolder(datacenter)
self._folder_cache[path] = parent
folder = None
for folder_name in path_comp:
path = "/".join([path, folder_name])
folder = self._folder_cache.get(path)
if not folder:
folder = self.create_folder(parent, folder_name)
self._folder_cache[path] = folder
parent = folder
LOG.debug("Inventory folder for path: %(path)s is %(folder)s.",
{'path': path,
'folder': folder})
return folder
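    # Illustrative sketch (not part of the original module): assuming a
    # datacenter moref whose value is 'datacenter-2' (hypothetical), a call
    # like
    #   ops.create_vm_inventory_folder(dc_ref, ['OpenStack', 'Project-a', 'Volumes'])
    # caches folder references under the cumulative keys '/datacenter-2',
    # '/datacenter-2/OpenStack', '/datacenter-2/OpenStack/Project-a' and
    # '/datacenter-2/OpenStack/Project-a/Volumes', and returns the reference
    # of the last folder.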
def extend_virtual_disk(self, requested_size_in_gb, name, dc_ref,
eager_zero=False):
"""Extend the virtual disk to the requested size.
:param requested_size_in_gb: Size of the volume in GB
:param name: Name of the backing
:param dc_ref: Reference to datacenter
:param eager_zero: Boolean determining if the free space
is zeroed out
"""
LOG.debug("Extending the volume %(name)s to %(size)s GB.",
{'name': name, 'size': requested_size_in_gb})
diskMgr = self._session.vim.service_content.virtualDiskManager
        # The VMware API expects the capacity in KB, so convert the requested
        # size from GB to KB.
size_in_kb = requested_size_in_gb * units.Mi
task = self._session.invoke_api(self._session.vim,
"ExtendVirtualDisk_Task",
diskMgr,
name=name,
datacenter=dc_ref,
newCapacityKb=size_in_kb,
eagerZero=eager_zero)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully extended the volume %(name)s to "
"%(size)s GB."),
{'name': name, 'size': requested_size_in_gb})
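    # Worked example (added for clarity): units.Mi is 1024 * 1024, so a
    # requested_size_in_gb of 2 becomes 2 * 1048576 = 2097152 KB, which is the
    # newCapacityKb value passed to ExtendVirtualDisk_Task above.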
def _create_controller_config_spec(self, adapter_type):
"""Returns config spec for adding a disk controller."""
cf = self._session.vim.client.factory
controller_type = ControllerType.get_controller_type(adapter_type)
controller_device = cf.create('ns0:%s' % controller_type)
controller_device.key = -100
controller_device.busNumber = 0
if ControllerType.is_scsi_controller(controller_type):
controller_device.sharedBus = 'noSharing'
controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
controller_spec.operation = 'add'
controller_spec.device = controller_device
return controller_spec
def _create_disk_backing(self, disk_type, vmdk_ds_file_path):
"""Creates file backing for virtual disk."""
cf = self._session.vim.client.factory
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == VirtualDiskType.EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == VirtualDiskType.THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = vmdk_ds_file_path or ''
disk_device_bkng.diskMode = 'persistent'
return disk_device_bkng
def _create_virtual_disk_config_spec(self, size_kb, disk_type,
controller_key, vmdk_ds_file_path):
"""Returns config spec for adding a virtual disk."""
cf = self._session.vim.client.factory
disk_device = cf.create('ns0:VirtualDisk')
# disk size should be at least 1024KB
disk_device.capacityInKB = max(units.Ki, int(size_kb))
if controller_key < 0:
disk_device.key = controller_key - 1
else:
disk_device.key = -101
disk_device.unitNumber = 0
disk_device.controllerKey = controller_key
disk_device.backing = self._create_disk_backing(disk_type,
vmdk_ds_file_path)
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'add'
if vmdk_ds_file_path is None:
disk_spec.fileOperation = 'create'
disk_spec.device = disk_device
return disk_spec
def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type,
vmdk_ds_file_path=None):
"""Create controller and disk config specs for adding a new disk.
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: Optional datastore file path of an existing
virtual disk. If specified, file backing is
not created for the virtual disk.
:return: list containing controller and disk config specs
"""
controller_spec = None
if adapter_type == 'ide':
# For IDE disks, use one of the default IDE controllers (with keys
# 200 and 201) created as part of backing VM creation.
controller_key = 200
else:
controller_spec = self._create_controller_config_spec(adapter_type)
controller_key = controller_spec.device.key
disk_spec = self._create_virtual_disk_config_spec(size_kb,
disk_type,
controller_key,
vmdk_ds_file_path)
specs = [disk_spec]
if controller_spec is not None:
specs.append(controller_spec)
return specs
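    # Illustrative sketch (not part of the original module), assuming 'ops' is
    # a VMwareVolumeOps instance: for a SCSI adapter the disk and controller
    # specs are linked through temporary negative keys, while 'ide' reuses the
    # pre-existing controller with key 200.
    #
    #   specs = ops._create_specs_for_disk_add(units.Mi, 'thin', 'lsiLogic')
    #   specs[0].device.key            # -> -101
    #   specs[0].device.controllerKey  # -> -100, i.e. specs[1].device.key
    #   ops._create_specs_for_disk_add(units.Mi, 'thin', 'ide')  # single spec,
    #                                                            # controllerKey 200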
def _get_extra_config_option_values(self, extra_config):
cf = self._session.vim.client.factory
option_values = []
for key, value in six.iteritems(extra_config):
opt = cf.create('ns0:OptionValue')
opt.key = key
opt.value = value
option_values.append(opt)
return option_values
def _get_create_spec_disk_less(self, name, ds_name, profileId=None,
extra_config=None):
"""Return spec for creating disk-less backing.
:param name: Name of the backing
:param ds_name: Datastore name where the disk is to be provisioned
:param profileId: Storage profile ID for the backing
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Spec for creation
"""
cf = self._session.vim.client.factory
vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = '[%s]' % ds_name
create_spec = cf.create('ns0:VirtualMachineConfigSpec')
create_spec.name = name
create_spec.guestId = 'otherGuest'
create_spec.numCPUs = 1
create_spec.memoryMB = 128
create_spec.files = vm_file_info
# Set the hardware version to a compatible version supported by
# vSphere 5.0. This will ensure that the backing VM can be migrated
# without any incompatibility issues in a mixed cluster of ESX hosts
# with versions 5.0 or above.
create_spec.version = "vmx-08"
if profileId:
vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vmProfile.profileId = profileId
create_spec.vmProfile = [vmProfile]
if extra_config:
create_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
return create_spec
def get_create_spec(self, name, size_kb, disk_type, ds_name,
profileId=None, adapter_type='lsiLogic',
extra_config=None):
"""Return spec for creating backing with a single disk.
:param name: name of the backing
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param ds_name: datastore name where the disk is to be provisioned
:param profileId: storage profile ID for the backing
:param adapter_type: disk adapter type
:param extra_config: key-value pairs to be written to backing's
extra-config
:return: spec for creation
"""
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profileId, extra_config=extra_config)
create_spec.deviceChange = self._create_specs_for_disk_add(
size_kb, disk_type, adapter_type)
return create_spec
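    # Illustrative sketch (not part of the original module): a call such as
    #   ops.get_create_spec('volume-1', units.Mi, 'thin', 'datastore-1')
    # ('volume-1' and 'datastore-1' are hypothetical names) returns a
    # VirtualMachineConfigSpec with vmPathName '[datastore-1]', hardware
    # version 'vmx-08' and a deviceChange list holding the disk and controller
    # specs built above.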
def _create_backing_int(self, folder, resource_pool, host, create_spec):
"""Helper for create backing methods."""
LOG.debug("Creating volume backing with spec: %s.", create_spec)
task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
folder, config=create_spec,
pool=resource_pool, host=host)
task_info = self._session.wait_for_task(task)
backing = task_info.result
LOG.info(_LI("Successfully created volume backing: %s."), backing)
return backing
def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
host, ds_name, profileId=None, adapter_type='lsiLogic',
extra_config=None):
"""Create backing for the volume.
Creates a VM with one VMDK based on the given inputs.
:param name: Name of the backing
:param size_kb: Size in KB of the backing
:param disk_type: VMDK type for the disk
:param folder: Folder, where to create the backing under
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Datastore name where the disk is to be provisioned
:param profileId: Storage profile ID to be associated with backing
:param adapter_type: Disk adapter type
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating volume backing with name: %(name)s "
"disk_type: %(disk_type)s size_kb: %(size_kb)s "
"adapter_type: %(adapter_type)s profileId: %(profile)s at "
"folder: %(folder)s resource_pool: %(resource_pool)s "
"host: %(host)s datastore_name: %(ds_name)s.",
{'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
'folder': folder, 'resource_pool': resource_pool,
'ds_name': ds_name, 'profile': profileId, 'host': host,
'adapter_type': adapter_type})
create_spec = self.get_create_spec(
name, size_kb, disk_type, ds_name, profileId=profileId,
adapter_type=adapter_type, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def create_backing_disk_less(self, name, folder, resource_pool,
host, ds_name, profileId=None,
extra_config=None):
"""Create disk-less volume backing.
        This type of backing is useful for creating a volume from an image. The
        image downloaded from the image service can be copied to a virtual
        disk of the desired provisioning type and added to the backing VM.
:param name: Name of the backing
:param folder: Folder where the backing is created
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Name of the datastore used for VM storage
:param profileId: Storage profile ID to be associated with backing
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating disk-less volume backing with name: %(name)s "
"profileId: %(profile)s at folder: %(folder)s "
"resource pool: %(resource_pool)s host: %(host)s "
"datastore_name: %(ds_name)s.",
{'name': name, 'profile': profileId, 'folder': folder,
'resource_pool': resource_pool, 'host': host,
'ds_name': ds_name})
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profileId, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def get_datastore(self, backing):
"""Get datastore where the backing resides.
:param backing: Reference to the backing
:return: Datastore reference to which the backing belongs
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'datastore').ManagedObjectReference[0]
def get_summary(self, datastore):
"""Get datastore summary.
:param datastore: Reference to the datastore
:return: 'summary' property of the datastore
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'summary')
def _create_relocate_spec_disk_locator(self, datastore, disk_type,
disk_device):
"""Creates spec for disk type conversion during relocate."""
cf = self._session.vim.client.factory
disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator")
disk_locator.datastore = datastore
disk_locator.diskId = disk_device.key
disk_locator.diskBackingInfo = self._create_disk_backing(disk_type,
None)
return disk_locator
def _get_relocate_spec(self, datastore, resource_pool, host,
disk_move_type, disk_type=None, disk_device=None):
"""Return spec for relocating volume backing.
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_move_type: Disk move type option
:param disk_type: Destination disk type
:param disk_device: Virtual device corresponding to the disk
:return: Spec for relocation
"""
cf = self._session.vim.client.factory
relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec')
relocate_spec.datastore = datastore
relocate_spec.pool = resource_pool
relocate_spec.host = host
relocate_spec.diskMoveType = disk_move_type
if disk_type is not None and disk_device is not None:
disk_locator = self._create_relocate_spec_disk_locator(datastore,
disk_type,
disk_device)
relocate_spec.disk = [disk_locator]
LOG.debug("Spec for relocating the backing: %s.", relocate_spec)
return relocate_spec
def relocate_backing(
self, backing, datastore, resource_pool, host, disk_type=None):
"""Relocates backing to the input datastore and resource pool.
The implementation uses moveAllDiskBackingsAndAllowSharing disk move
type.
:param backing: Reference to the backing
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_type: destination disk type
"""
LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s "
"and resource pool: %(rp)s with destination disk type: "
"%(disk_type)s.",
{'backing': backing,
'ds': datastore,
'rp': resource_pool,
'disk_type': disk_type})
# Relocate the volume backing
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
disk_device = None
if disk_type is not None:
disk_device = self._get_disk_device(backing)
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
backing, spec=relocate_spec)
LOG.debug("Initiated relocation of volume backing: %s.", backing)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully relocated volume backing: %(backing)s "
"to datastore: %(ds)s and resource pool: %(rp)s."),
{'backing': backing, 'ds': datastore, 'rp': resource_pool})
def move_backing_to_folder(self, backing, folder):
"""Move the volume backing to the folder.
:param backing: Reference to the backing
:param folder: Reference to the folder
"""
LOG.debug("Moving backing: %(backing)s to folder: %(fol)s.",
{'backing': backing, 'fol': folder})
task = self._session.invoke_api(self._session.vim,
'MoveIntoFolder_Task', folder,
list=[backing])
LOG.debug("Initiated move of volume backing: %(backing)s into the "
"folder: %(fol)s.", {'backing': backing, 'fol': folder})
self._session.wait_for_task(task)
LOG.info(_LI("Successfully moved volume "
"backing: %(backing)s into the "
"folder: %(fol)s."), {'backing': backing, 'fol': folder})
def create_snapshot(self, backing, name, description, quiesce=False):
"""Create snapshot of the backing with given name and description.
:param backing: Reference to the backing entity
:param name: Snapshot name
:param description: Snapshot description
:param quiesce: Whether to quiesce the backing when taking snapshot
:return: Created snapshot entity reference
"""
LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s.",
{'backing': backing, 'name': name})
task = self._session.invoke_api(self._session.vim,
'CreateSnapshot_Task',
backing, name=name,
description=description,
memory=False, quiesce=quiesce)
LOG.debug("Initiated snapshot of volume backing: %(backing)s "
"named: %(name)s.", {'backing': backing, 'name': name})
task_info = self._session.wait_for_task(task)
snapshot = task_info.result
LOG.info(_LI("Successfully created snapshot: %(snap)s for volume "
"backing: %(backing)s."),
{'snap': snapshot, 'backing': backing})
return snapshot
@staticmethod
def _get_snapshot_from_tree(name, root):
"""Get snapshot by name from the snapshot tree root.
:param name: Snapshot name
:param root: Current root node in the snapshot tree
        :return: Snapshot in the tree with the given name, or None if not found
"""
if not root:
return None
if root.name == name:
return root.snapshot
if (not hasattr(root, 'childSnapshotList') or
not root.childSnapshotList):
            # When root has no children, the childSnapshotList attribute is
            # sometimes missing, so check for it explicitly before recursing.
return None
for node in root.childSnapshotList:
snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node)
if snapshot:
return snapshot
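    # Illustrative sketch (not part of the original module): the helper walks
    # the snapshot tree depth-first. Given a hypothetical tree
    #   root(name='base') -> child(name='snap-1') -> grandchild(name='snap-2')
    # _get_snapshot_from_tree('snap-2', root) returns grandchild.snapshot,
    # while an unknown name falls through every branch and yields None.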
def get_snapshot(self, backing, name):
"""Get snapshot of the backing with given name.
:param backing: Reference to the backing entity
:param name: Snapshot name
:return: Snapshot entity of the backing with given name
"""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if not snapshot or not snapshot.rootSnapshotList:
return None
for root in snapshot.rootSnapshotList:
return VMwareVolumeOps._get_snapshot_from_tree(name, root)
def snapshot_exists(self, backing):
"""Check if the given backing contains snapshots."""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if snapshot is None or snapshot.rootSnapshotList is None:
return False
return len(snapshot.rootSnapshotList) != 0
def delete_snapshot(self, backing, name):
"""Delete a given snapshot from volume backing.
:param backing: Reference to the backing entity
:param name: Snapshot name
"""
LOG.debug("Deleting the snapshot: %(name)s from backing: "
"%(backing)s.",
{'name': name, 'backing': backing})
snapshot = self.get_snapshot(backing, name)
if not snapshot:
LOG.info(_LI("Did not find the snapshot: %(name)s for backing: "
"%(backing)s. Need not delete anything."),
{'name': name, 'backing': backing})
return
task = self._session.invoke_api(self._session.vim,
'RemoveSnapshot_Task',
snapshot, removeChildren=False)
LOG.debug("Initiated snapshot: %(name)s deletion for backing: "
"%(backing)s.",
{'name': name, 'backing': backing})
self._session.wait_for_task(task)
LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: "
"%(backing)s."), {'backing': backing, 'name': name})
def _get_folder(self, backing):
"""Get parent folder of the backing.
:param backing: Reference to the backing entity
:return: Reference to parent folder of the backing entity
"""
return self._get_parent(backing, 'Folder')
def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
disk_type, host=None, resource_pool=None,
extra_config=None):
"""Get the clone spec.
:param datastore: Reference to datastore
:param disk_move_type: Disk move type
:param snapshot: Reference to snapshot
:param backing: Source backing VM
:param disk_type: Disk type of clone
:param host: Target host
:param resource_pool: Target resource pool
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Clone spec
"""
if disk_type is not None:
disk_device = self._get_disk_device(backing)
else:
disk_device = None
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
cf = self._session.vim.client.factory
clone_spec = cf.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = relocate_spec
clone_spec.powerOn = False
clone_spec.template = False
clone_spec.snapshot = snapshot
if extra_config:
config_spec = cf.create('ns0:VirtualMachineConfigSpec')
config_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
clone_spec.config = config_spec
LOG.debug("Spec for cloning the backing: %s.", clone_spec)
return clone_spec
def clone_backing(self, name, backing, snapshot, clone_type, datastore,
disk_type=None, host=None, resource_pool=None,
extra_config=None):
"""Clone backing.
If the clone_type is 'full', then a full clone of the source volume
backing will be created. Else, if it is 'linked', then a linked clone
of the source volume backing will be created.
:param name: Name for the clone
:param backing: Reference to the backing entity
:param snapshot: Snapshot point from which the clone should be done
:param clone_type: Whether a full clone or linked clone is to be made
:param datastore: Reference to the datastore entity
:param disk_type: Disk type of the clone
:param host: Target host
:param resource_pool: Target resource pool
:param extra_config: Key-value pairs to be written to backing's
extra-config
"""
LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
"resource pool: %(resource_pool)s, host: %(host)s, "
"datastore: %(ds)s with disk type: %(disk_type)s.",
{'back': backing, 'name': name, 'type': clone_type,
'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
'host': host, 'resource_pool': resource_pool})
folder = self._get_folder(backing)
if clone_type == LINKED_CLONE_TYPE:
disk_move_type = 'createNewChildDiskBacking'
else:
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
clone_spec = self._get_clone_spec(
datastore, disk_move_type, snapshot, backing, disk_type, host=host,
resource_pool=resource_pool, extra_config=extra_config)
task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
backing, folder=folder, name=name,
spec=clone_spec)
LOG.debug("Initiated clone of backing: %s.", name)
task_info = self._session.wait_for_task(task)
new_backing = task_info.result
LOG.info(_LI("Successfully created clone: %s."), new_backing)
return new_backing
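    # Illustrative sketch (not part of the original module), assuming
    # LINKED_CLONE_TYPE is the 'linked' constant defined earlier in this
    # module and the morefs below are hypothetical:
    #
    #   ops.clone_backing('volume-2', backing, snapshot, LINKED_CLONE_TYPE,
    #                     ds_ref)
    #       # CloneVM_Task with diskMoveType 'createNewChildDiskBacking'
    #   ops.clone_backing('volume-2', backing, None, 'full', ds_ref,
    #                     disk_type='thin')
    #       # CloneVM_Task with diskMoveType
    #       # 'moveAllDiskBackingsAndDisallowSharing' plus a disk locator that
    #       # converts the disk to thin provisioning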
def _reconfigure_backing(self, backing, reconfig_spec):
"""Reconfigure backing VM with the given spec."""
LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.",
{'backing': backing,
'spec': reconfig_spec})
reconfig_task = self._session.invoke_api(self._session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
LOG.debug("Task: %s created for reconfiguring backing VM.",
reconfig_task)
self._session.wait_for_task(reconfig_task)
def attach_disk_to_backing(self, backing, size_in_kb, disk_type,
adapter_type, vmdk_ds_file_path):
"""Attach an existing virtual disk to the backing VM.
:param backing: reference to the backing VM
:param size_in_kb: disk size in KB
:param disk_type: virtual disk type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: datastore file path of the virtual disk to
be attached
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: "
"%(path)s with size (KB): %(size)d and adapter type: "
"%(adapter_type)s.",
{'backing': backing,
'path': vmdk_ds_file_path,
'size': size_in_kb,
'adapter_type': adapter_type})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
specs = self._create_specs_for_disk_add(size_in_kb,
disk_type,
adapter_type,
vmdk_ds_file_path)
reconfig_spec.deviceChange = specs
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %s reconfigured with new disk.", backing)
def rename_backing(self, backing, new_name):
"""Rename backing VM.
:param backing: VM to be renamed
:param new_name: new VM name
"""
LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
rename_task = self._session.invoke_api(self._session.vim,
"Rename_Task",
backing,
newName=new_name)
LOG.debug("Task: %s created for renaming VM.", rename_task)
self._session.wait_for_task(rename_task)
LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
def change_backing_profile(self, backing, profile_id):
"""Change storage profile of the backing VM.
The current profile is removed if the new profile is None.
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:"
" %(profile)s.",
{'backing': backing,
'profile': profile_id})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
if profile_id is None:
vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec')
vm_profile.dynamicType = 'profile'
else:
vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vm_profile.profileId = profile_id.uniqueId
reconfig_spec.vmProfile = [vm_profile]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new profile: "
"%(profile)s.",
{'backing': backing,
'profile': profile_id})
def update_backing_disk_uuid(self, backing, disk_uuid):
"""Update backing VM's disk UUID.
:param backing: Reference to backing VM
:param disk_uuid: New disk UUID
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change disk UUID "
"to: %(disk_uuid)s.",
{'backing': backing,
'disk_uuid': disk_uuid})
disk_device = self._get_disk_device(backing)
disk_device.backing.uuid = disk_uuid
cf = self._session.vim.client.factory
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.device = disk_device
disk_spec.operation = 'edit'
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
reconfig_spec.deviceChange = [disk_spec]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new disk UUID: "
"%(disk_uuid)s.",
{'backing': backing,
'disk_uuid': disk_uuid})
def delete_file(self, file_path, datacenter=None):
"""Delete file or folder on the datastore.
        :param file_path: Datastore path of the file or folder
        :param datacenter: Reference to the datacenter containing the file
        """
LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s.",
{'file': file_path, 'dc': datacenter})
fileManager = self._session.vim.service_content.fileManager
task = self._session.invoke_api(self._session.vim,
'DeleteDatastoreFile_Task',
fileManager,
name=file_path,
datacenter=datacenter)
LOG.debug("Initiated deletion via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully deleted file: %s."), file_path)
def create_datastore_folder(self, ds_name, folder_path, datacenter):
"""Creates a datastore folder.
This method returns silently if the folder already exists.
:param ds_name: datastore name
:param folder_path: path of folder to create
:param datacenter: datacenter of target datastore
"""
fileManager = self._session.vim.service_content.fileManager
ds_folder_path = "[%s] %s" % (ds_name, folder_path)
LOG.debug("Creating datastore folder: %s.", ds_folder_path)
try:
self._session.invoke_api(self._session.vim,
'MakeDirectory',
fileManager,
name=ds_folder_path,
datacenter=datacenter)
LOG.info(_LI("Created datastore folder: %s."), folder_path)
except exceptions.FileAlreadyExistsException:
LOG.debug("Datastore folder: %s already exists.", folder_path)
def get_path_name(self, backing):
"""Get path name of the backing.
:param backing: Reference to the backing entity
:return: Path name of the backing
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'config.files').vmPathName
def get_entity_name(self, entity):
"""Get name of the managed entity.
:param entity: Reference to the entity
:return: Name of the managed entity
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, entity, 'name')
def _get_disk_device(self, backing):
"""Get the virtual device corresponding to disk."""
hardware_devices = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
backing,
'config.hardware.device')
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
return device
LOG.error(_LE("Virtual disk device of "
"backing: %s not found."), backing)
raise vmdk_exceptions.VirtualDiskNotFoundException()
def get_vmdk_path(self, backing):
"""Get the vmdk file name of the backing.
The vmdk file path of the backing returned is of the form:
"[datastore1] my_folder/my_vm.vmdk"
:param backing: Reference to the backing
:return: VMDK file path of the backing
"""
disk_device = self._get_disk_device(backing)
backing = disk_device.backing
if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo":
msg = _("Invalid disk backing: %s.") % backing.__class__.__name__
LOG.error(msg)
raise AssertionError(msg)
return backing.fileName
def get_disk_size(self, backing):
"""Get disk size of the backing.
:param backing: backing VM reference
:return: disk size in bytes
"""
disk_device = self._get_disk_device(backing)
return disk_device.capacityInKB * units.Ki
def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type,
disk_type):
"""Return spec for file-backed virtual disk creation."""
cf = self._session.vim.client.factory
spec = cf.create('ns0:FileBackedVirtualDiskSpec')
spec.capacityKb = size_in_kb
spec.adapterType = VirtualDiskAdapterType.get_adapter_type(
adapter_type)
spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type)
return spec
def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb,
adapter_type='busLogic', disk_type='preallocated'):
"""Create virtual disk with the given settings.
:param dc_ref: datacenter reference
:param vmdk_ds_file_path: datastore file path of the virtual disk
:param size_in_kb: disk size in KB
:param adapter_type: disk adapter type
:param disk_type: vmdk type
"""
virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb,
adapter_type,
disk_type)
LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec)
disk_manager = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CreateVirtualDisk_Task',
disk_manager,
name=vmdk_ds_file_path,
datacenter=dc_ref,
spec=virtual_disk_spec)
LOG.debug("Task: %s created for virtual disk creation.", task)
self._session.wait_for_task(task)
LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec)
def create_flat_extent_virtual_disk_descriptor(
self, dc_ref, path, size_in_kb, adapter_type, disk_type):
"""Create descriptor for a single flat extent virtual disk.
To create the descriptor, we create a virtual disk and delete its flat
extent.
:param dc_ref: reference to the datacenter
:param path: descriptor datastore file path
:param size_in_kb: size of the virtual disk in KB
:param adapter_type: virtual disk adapter type
:param disk_type: type of the virtual disk
"""
LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, "
"adapter_type: %(adapter_type)s and disk_type: "
"%(disk_type)s.",
{'path': path.get_descriptor_ds_file_path(),
'size': size_in_kb,
'adapter_type': adapter_type,
'disk_type': disk_type
})
self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(),
size_in_kb, adapter_type, disk_type)
self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref)
LOG.debug("Created descriptor: %s.",
path.get_descriptor_ds_file_path())
def copy_vmdk_file(self, src_dc_ref, src_vmdk_file_path,
dest_vmdk_file_path, dest_dc_ref=None):
"""Copy contents of the src vmdk file to dest vmdk file.
:param src_dc_ref: Reference to datacenter containing src datastore
:param src_vmdk_file_path: Source vmdk file path
:param dest_vmdk_file_path: Destination vmdk file path
:param dest_dc_ref: Reference to datacenter of dest datastore.
If unspecified, source datacenter is used.
"""
LOG.debug('Copying disk: %(src)s to %(dest)s.',
{'src': src_vmdk_file_path,
'dest': dest_vmdk_file_path})
dest_dc_ref = dest_dc_ref or src_dc_ref
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CopyVirtualDisk_Task',
diskMgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=src_dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=dest_dc_ref,
force=True)
LOG.debug("Initiated copying disk data via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s."),
{'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
def delete_vmdk_file(self, vmdk_file_path, dc_ref):
"""Delete given vmdk files.
:param vmdk_file_path: VMDK file path to be deleted
:param dc_ref: Reference to datacenter that contains this VMDK file
"""
LOG.debug("Deleting vmdk file: %s.", vmdk_file_path)
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'DeleteVirtualDisk_Task',
diskMgr,
name=vmdk_file_path,
datacenter=dc_ref)
LOG.debug("Initiated deleting vmdk file via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted vmdk file: %s."), vmdk_file_path)
def get_profile(self, backing):
"""Query storage profile associated with the given backing.
:param backing: backing reference
:return: profile name
"""
profile_ids = pbm.get_profiles(self._session, backing)
if profile_ids:
return pbm.get_profiles_by_ids(self._session, profile_ids)[0].name
def _get_all_clusters(self):
clusters = {}
retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'ClusterComputeResource',
self._max_objects)
while retrieve_result:
if retrieve_result.objects:
for cluster in retrieve_result.objects:
name = urllib.parse.unquote(cluster.propSet[0].val)
clusters[name] = cluster.obj
retrieve_result = self.continue_retrieval(retrieve_result)
return clusters
def get_cluster_refs(self, names):
"""Get references to given clusters.
:param names: list of cluster names
:return: Dictionary of cluster names to references
"""
clusters = self._get_all_clusters()
for name in names:
if name not in clusters:
LOG.error(_LE("Compute cluster: %s not found."), name)
raise vmdk_exceptions.ClusterNotFoundException(cluster=name)
return {name: clusters[name] for name in names}
def get_cluster_hosts(self, cluster):
"""Get hosts in the given cluster.
:param cluster: cluster reference
:return: references to hosts in the cluster
"""
hosts = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
cluster,
'host')
host_refs = []
if hosts and hosts.ManagedObjectReference:
host_refs.extend(hosts.ManagedObjectReference)
return host_refs
| apache-2.0 | 7,172,195,736,493,397,000 | 42.653282 | 79 | 0.567891 | false |
dirn/Simon | tests/test_query.py | 1 | 14495 | try:
import unittest2 as unittest
except ImportError:
import unittest
import collections
try:
from unittest import mock
except ImportError:
import mock
from pymongo.cursor import Cursor
from simon import connection, query
from simon._compat import range
from .utils import AN_OBJECT_ID, ModelFactory
DefaultModel = ModelFactory('DefaultModel')
MappedModel = ModelFactory('MappedModel', field_map={'fake': 'real'})
class TestQ(unittest.TestCase):
"""Test the `Q` class."""
def test___init__(self):
"""Test the `__init__()` method."""
q = query.Q(a=1)
self.assertEqual(q._filter, {'a': 1})
q = query.Q(a=1, b=2)
self.assertEqual(q._filter, {'a': 1, 'b': 2})
def test___and__(self):
"""Test the `__and__()` method."""
q1 = query.Q(a=1)
q1._add_filter = mock.Mock()
q2 = query.Q(b=2)
q1.__and__(q2)
q1._add_filter.assert_called_with(q2, '$and')
def test___or__(self):
"""Test the `__or__()` method."""
q1 = query.Q(a=1)
q1._add_filter = mock.Mock()
q2 = query.Q(b=2)
q1.__or__(q2)
q1._add_filter.assert_called_with(q2, '$or')
def test__add_filter(self):
"""Test the `_add_filter()` method."""
q1 = query.Q(a=1)
q2 = query.Q(a=1)
expected = {'a': 1}
actual = q1._add_filter(q2, query.Q.AND)._filter
self.assertEqual(actual, expected)
q1 = query.Q(a=1)
q2 = query.Q(b=2)
expected = {'$and': [{'a': 1}, {'b': 2}]}
actual = q1._add_filter(q2, query.Q.AND)._filter
self.assertEqual(actual, expected)
expected = {'$or': [{'a': 1}, {'b': 2}]}
actual = q1._add_filter(q2, query.Q.OR)._filter
self.assertEqual(actual, expected)
def test__add_filter_combine_conditions(self):
"""Test the `_add_filter()` method with different conditions."""
q1 = query.Q(a=1)
q2 = query.Q(b=2)
q3 = query.Q(c=3)
expected = {'$or': [{'$and': [{'a': 1}, {'b': 2}]}, {'c': 3}]}
tmp = q1._add_filter(q2, query.Q.AND)
actual = tmp._add_filter(q3, query.Q.OR)._filter
self.assertEqual(actual, expected)
expected = {'$and': [{'$or': [{'a': 1}, {'b': 2}]}, {'c': 3}]}
tmp = q1._add_filter(q2, query.Q.OR)
actual = tmp._add_filter(q3, query.Q.AND)._filter
self.assertEqual(actual, expected)
def test__add_filter_filter_doesnt_exist(self):
"""Test the `_add_filter()` method with a new filter."""
q1 = query.Q(a=1)
q2 = query.Q(b=2)
q3 = query.Q(c=3)
expected = {'$and': [{'a': 1}, {'b': 2}, {'c': 3}]}
tmp = q1._add_filter(q2, query.Q.AND)
actual = tmp._add_filter(q3, query.Q.AND)._filter
self.assertEqual(actual, expected)
expected = {'$or': [{'a': 1}, {'b': 2}, {'c': 3}]}
tmp = q1._add_filter(q2, query.Q.OR)
actual = tmp._add_filter(q3, query.Q.OR)._filter
self.assertEqual(actual, expected)
def test__add_filter_filter_exists(self):
"""Test the `_add_filter()` method with a filter that exists."""
q1 = query.Q(a=1)
q2 = query.Q(b=2)
expected = {'$and': [{'a': 1}, {'b': 2}]}
tmp = q1._add_filter(q2, query.Q.AND)
actual = tmp._add_filter(q2, query.Q.AND)._filter
self.assertEqual(actual, expected)
expected = {'$or': [{'a': 1}, {'b': 2}]}
tmp = q1._add_filter(q2, query.Q.OR)
actual = tmp._add_filter(q2, query.Q.OR)._filter
self.assertEqual(actual, expected)
def test__add_filter_typeerror(self):
"""Test that `_add_filter()` raises `TypeError`."""
q = query.Q(a=1)
with self.assertRaises(TypeError):
q._add_filter(1, query.Q.AND)
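# Illustrative summary (added for clarity, not part of the original tests):
# the behaviour exercised above boils down to Q objects composing into nested
# MongoDB filter documents, e.g.
#
#   (Q(a=1) & Q(b=2))._filter            # {'$and': [{'a': 1}, {'b': 2}]}
#   ((Q(a=1) & Q(b=2)) | Q(c=3))._filter # {'$or': [{'$and': [{'a': 1},
#                                        #                    {'b': 2}]},
#                                        #          {'c': 3}]}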
class TestQuerySet(unittest.TestCase):
"""Test :class:`~simon.query.QuerySet` functionality"""
@classmethod
def setUpClass(cls):
with mock.patch('simon.connection.MongoClient'):
cls.connection = connection.connect('localhost', name='test-simon')
def setUp(cls):
cls.cursor = mock.MagicMock(spec=Cursor)
cls.qs = query.QuerySet(cursor=cls.cursor)
cls.model_qs = query.QuerySet(cursor=cls.cursor, cls=DefaultModel)
def test_count(self):
"""Test the `count()` method."""
self.qs.count()
self.cursor.count.assert_called_with(with_limit_and_skip=True)
# cursor.count() should get cached as qs._count, so it should
# only be called once by qs.count()
        self.qs.count()
        self.assertEqual(self.cursor.count.call_count, 1)
def test_count_typeerror(self):
"""Test that `count()` raises `TypeError`."""
qs = query.QuerySet()
with self.assertRaises(TypeError):
qs.count()
def test_distinct(self):
"""Test the `distinct()` method."""
self.qs.distinct('a')
self.cursor.distinct.assert_called_with('a')
def test_distinct_field_map(self):
"""Test the `distinct()` method with a name in `field_map`."""
self.model_qs._cls = MappedModel
self.model_qs.distinct('fake')
self.cursor.distinct.assert_called_with('real')
def test_distinct_nested_field(self):
"""Test the `distinct()` method with a nested field."""
self.model_qs.distinct('a__b')
self.cursor.distinct.assert_called_with('a.b')
def test_limit(self):
"""Test the `limit()` method."""
self.qs.limit(1)
self.cursor.clone.assert_called_with()
self.cursor.clone().limit.assert_called_with(1)
self.qs.limit(2)
self.cursor.clone.assert_called_with()
self.cursor.clone().limit.assert_called_with(2)
def test_skip(self):
"""Test the `skip()` method."""
self.qs.skip(1)
self.cursor.clone.assert_called_with()
self.cursor.clone().skip.assert_called_with(1)
self.qs.skip(2)
self.cursor.clone.assert_called_with()
self.cursor.clone().skip.assert_called_with(2)
def test_sort(self):
"""Test the `sort()` method."""
qs = self.qs.sort('_id')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('_id', 1)])
qs._cursor.sort.assert_not_called()
qs = self.qs.sort('-_id')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('_id', -1)])
qs._cursor.sort.assert_not_called()
def test_sort_field_map(self):
"""Test the `sort()` method with a name in `field_map`."""
self.model_qs._cls = MappedModel
qs = self.model_qs.sort('fake')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('real', 1)])
qs._cursor.sort.assert_not_called()
def test_sort_multiple_ascending(self):
"""Test the `sort()` method for multiple ascending keys."""
qs = self.qs.sort('a', 'b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a', 1), ('b', 1)])
qs._cursor.sort.assert_not_called()
def test_sort_multiple_descending(self):
"""Test the `sort()` method for multiple descending keys."""
qs = self.qs.sort('-a', '-b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a', -1), ('b', -1)])
qs._cursor.sort.assert_not_called()
def test_sort_multiple_ascending_then_descending(self):
"""Test the `sort()` method for multiple keys ascending first."""
qs = self.qs.sort('a', '-b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a', 1), ('b', -1)])
qs._cursor.sort.assert_not_called()
def test_sort_multiple_descending_then_ascending(self):
"""Test the `sort()` method for multiple keys descending first."""
qs = self.qs.sort('-a', 'b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a', -1), ('b', 1)])
qs._cursor.sort.assert_not_called()
def test_sort_nested_field(self):
"""Test the `sort()` method with a nested field."""
qs = self.model_qs.sort('a__b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a.b', 1)])
qs._cursor.sort.assert_not_called()
def test__fill_to(self):
"""Test the `_fill_to()` method."""
self.cursor.count.return_value = 3
self.qs._fill_to(2)
self.assertEqual(len(self.qs._items), 3)
def test__fill_to_as_documents(self):
"""Test that `_fill_to()` stores documents."""
if hasattr(self.cursor, 'next'):
self.cursor.next.return_value = {'_id': AN_OBJECT_ID}
else:
self.cursor.__next__.return_value = {'_id': AN_OBJECT_ID}
self.cursor.count.return_value = 1
self.qs._fill_to(0)
self.assertIsInstance(self.qs._items[0], dict)
def test__fill_to_as_model(self):
"""Test that `_fill_to()` stores model instances."""
if hasattr(self.cursor, 'next'):
self.cursor.next.return_value = {'_id': AN_OBJECT_ID}
else:
self.cursor.__next__.return_value = {'_id': AN_OBJECT_ID}
self.cursor.count.return_value = 1
self.model_qs._fill_to(0)
self.assertIsInstance(self.model_qs._items[0], self.model_qs._cls)
def test__fill_to_indexes(self):
("Test that `_fill_to()` property fills to the specified "
"index.")
self.cursor.count.return_value = 3
for x in range(3):
self.qs._fill_to(x)
self.assertEqual(len(self.qs._items), x + 1)
def test__fill_to_overfill(self):
("Test that `_fill_to()` correctly handles indexes greater than"
" the maximum index of the result cache.")
self.cursor.count.return_value = 3
self.qs._fill_to(3)
self.assertEqual(len(self.qs._items), 3)
def test__fill_to_sort(self):
"""Test that `_fill_to()` correctly handles sorting."""
self.cursor.count.return_value = 3
self.qs._sorting = [('a', 1)]
self.qs._fill_to(0)
self.cursor.sort.assert_called_with([('a', 1)])
self.assertIsNone(self.qs._sorting)
def test__fill_to_twice(self):
"""Test that `_fill_to()` can be called multiple times."""
self.cursor.count.return_value = 3
self.qs._fill_to(0)
self.assertEqual(len(self.qs._items), 1)
self.qs._fill_to(0)
self.assertEqual(len(self.qs._items), 1)
self.qs._fill_to(3)
self.assertEqual(len(self.qs._items), 3)
self.qs._fill_to(3)
self.assertEqual(len(self.qs._items), 3)
def test___getitem__(self):
"""Test the `__getitem__()` method."""
self.cursor.count.return_value = 3
# qs._fill_to() would normally populate qs._items
self.qs._items = range(3)
with mock.patch.object(self.qs, '_fill_to') as _fill_to:
for x in range(3):
self.assertEqual(self.qs[x], self.qs._items[x])
_fill_to.assert_called_with(x)
def test___getitem___slice(self):
"""Test the `__getitem__()` method with slices."""
self.cursor.count.return_value = 3
# qs._fill_to() would normally populate qs._items
self.qs._items = [0, 1, 2]
with mock.patch.object(self.qs, '_fill_to') as _fill_to:
self.assertEqual(self.qs[1:], self.qs._items[1:])
_fill_to.assert_called_with(2)
self.assertEqual(self.qs[:1], self.qs._items[:1])
_fill_to.assert_called_with(0)
self.assertEqual(self.qs[1:2], self.qs._items[1:2])
_fill_to.assert_called_with(1)
self.assertEqual(self.qs[::2], self.qs._items[::2])
_fill_to.assert_called_with(2)
self.assertEqual(self.qs[1::2], self.qs._items[1::2])
_fill_to.assert_called_with(2)
self.assertEqual(self.qs[::], self.qs._items[::])
_fill_to.assert_called_with(2)
def test___getitem___indexerror(self):
"""Test that `__getitem__()` raises `IndexError`."""
self.cursor.count.return_value = 3
with self.assertRaises(IndexError) as e:
self.model_qs[3]
expected = "No such item in 'QuerySet' for 'DefaultModel' object"
actual = str(e.exception)
self.assertEqual(actual, expected)
with self.assertRaises(IndexError) as e:
self.qs[3]
expected = "No such item in 'QuerySet'"
actual = str(e.exception)
self.assertEqual(actual, expected)
def test___getitem___typeerror(self):
"""Test that `__getitem__()` raises `TypeError`."""
with self.assertRaises(TypeError):
self.qs[-1]
def test___iter__(self):
"""Test the `__iter__()` method."""
self.assertIsInstance(self.qs.__iter__(), collections.Iterable)
def test___iter___fills_cache(self):
"""Test that `__iter__()` fills the result cache."""
self.cursor.count.return_value = 3
def append_to_cache(v):
self.qs._items.append(v)
with mock.patch.object(self.qs, '_fill_to') as _fill_to:
_fill_to.side_effect = append_to_cache
i = 0
for x in self.qs:
_fill_to.assert_called_with(i)
i += 1
self.assertEqual(len(self.qs._items), 3)
def test__iter___fills_cache_partial(self):
"""Test that `__iter__()` fills the rest of the result cache."""
self.cursor.count.return_value = 3
self.qs._items = [0]
def append_to_cache(v):
self.qs._items.append(v)
with mock.patch.object(self.qs, '_fill_to') as _fill_to:
_fill_to.side_effect = append_to_cache
i = 0
for x in self.qs:
if i == 0:
# qs._fill_to(0) will already have been called
_fill_to.assert_not_called()
else:
_fill_to.assert_called_with(i)
i += 1
self.assertEqual(len(self.qs._items), 3)
def test___len__(self):
"""Test the `__len__()` method."""
self.cursor.count.return_value = 3
self.assertEqual(len(self.qs), self.cursor.count())
| bsd-3-clause | -364,584,956,808,677,700 | 29.197917 | 79 | 0.556192 | false |
sharad/calibre | src/calibre/utils/config_base.py | 1 | 19477 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import os, re, cPickle, traceback
from functools import partial
from collections import defaultdict
from copy import deepcopy
from calibre.utils.lock import LockError, ExclusiveFile
from calibre.constants import config_dir, CONFIG_DIR_MODE
plugin_dir = os.path.join(config_dir, 'plugins')
def make_config_dir():
if not os.path.exists(plugin_dir):
os.makedirs(plugin_dir, mode=CONFIG_DIR_MODE)
class Option(object):
def __init__(self, name, switches=[], help='', type=None, choices=None,
check=None, group=None, default=None, action=None, metavar=None):
if choices:
type = 'choice'
self.name = name
self.switches = switches
self.help = help.replace('%default', repr(default)) if help else None
self.type = type
if self.type is None and action is None and choices is None:
if isinstance(default, float):
self.type = 'float'
elif isinstance(default, int) and not isinstance(default, bool):
self.type = 'int'
self.choices = choices
self.check = check
self.group = group
self.default = default
self.action = action
self.metavar = metavar
def __eq__(self, other):
return self.name == getattr(other, 'name', other)
def __repr__(self):
return 'Option: '+self.name
def __str__(self):
return repr(self)
class OptionValues(object):
def copy(self):
return deepcopy(self)
class OptionSet(object):
OVERRIDE_PAT = re.compile(r'#{3,100} Override Options #{15}(.*?)#{3,100} End Override #{3,100}',
re.DOTALL|re.IGNORECASE)
def __init__(self, description=''):
self.description = description
self.defaults = {}
self.preferences = []
self.group_list = []
self.groups = {}
self.set_buffer = {}
def has_option(self, name_or_option_object):
if name_or_option_object in self.preferences:
return True
for p in self.preferences:
if p.name == name_or_option_object:
return True
return False
def get_option(self, name_or_option_object):
idx = self.preferences.index(name_or_option_object)
if idx > -1:
return self.preferences[idx]
for p in self.preferences:
if p.name == name_or_option_object:
return p
def add_group(self, name, description=''):
if name in self.group_list:
raise ValueError('A group by the name %s already exists in this set'%name)
self.groups[name] = description
self.group_list.append(name)
return partial(self.add_opt, group=name)
def update(self, other):
for name in other.groups.keys():
self.groups[name] = other.groups[name]
if name not in self.group_list:
self.group_list.append(name)
for pref in other.preferences:
if pref in self.preferences:
self.preferences.remove(pref)
self.preferences.append(pref)
def smart_update(self, opts1, opts2):
'''
Updates the preference values in opts1 using only the non-default preference values in opts2.
'''
for pref in self.preferences:
new = getattr(opts2, pref.name, pref.default)
if new != pref.default:
setattr(opts1, pref.name, new)
def remove_opt(self, name):
if name in self.preferences:
self.preferences.remove(name)
def add_opt(self, name, switches=[], help=None, type=None, choices=None,
group=None, default=None, action=None, metavar=None):
'''
Add an option to this section.
:param name: The name of this option. Must be a valid Python identifier.
Must also be unique in this OptionSet and all its subsets.
:param switches: List of command line switches for this option
(as supplied to :module:`optparse`). If empty, this
option will not be added to the command line parser.
:param help: Help text.
:param type: Type checking of option values. Supported types are:
`None, 'choice', 'complex', 'float', 'int', 'string'`.
:param choices: List of strings or `None`.
:param group: Group this option belongs to. You must previously
have created this group with a call to :method:`add_group`.
:param default: The default value for this option.
:param action: The action to pass to optparse. Supported values are:
`None, 'count'`. For choices and boolean options,
action is automatically set correctly.
'''
pref = Option(name, switches=switches, help=help, type=type, choices=choices,
                group=group, default=default, action=action, metavar=metavar)
if group is not None and group not in self.groups.keys():
raise ValueError('Group %s has not been added to this section'%group)
if pref in self.preferences:
raise ValueError('An option with the name %s already exists in this set.'%name)
self.preferences.append(pref)
self.defaults[name] = default
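    # Illustrative usage sketch (the option set, group and option names below
    # are invented, not part of calibre):
    #
    #   opts = OptionSet('Example preferences')
    #   add = opts.add_group('output', 'Options controlling output')
    #   add('verbose', ['--verbose'], default=False,
    #       help='Emit more diagnostic output')
    #   opts.add_opt('retries', ['--retries'], default=3, type='int',
    #                help='Number of retries. Default: %default')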
def retranslate_help(self):
t = _
for opt in self.preferences:
if opt.help:
opt.help = t(opt.help)
def option_parser(self, user_defaults=None, usage='', gui_mode=False):
from calibre.utils.config import OptionParser
parser = OptionParser(usage, gui_mode=gui_mode)
groups = defaultdict(lambda : parser)
for group, desc in self.groups.items():
groups[group] = parser.add_option_group(group.upper(), desc)
for pref in self.preferences:
if not pref.switches:
continue
g = groups[pref.group]
action = pref.action
if action is None:
action = 'store'
if pref.default is True or pref.default is False:
action = 'store_' + ('false' if pref.default else 'true')
args = dict(
dest=pref.name,
help=pref.help,
metavar=pref.metavar,
type=pref.type,
choices=pref.choices,
default=getattr(user_defaults, pref.name, pref.default),
action=action,
)
g.add_option(*pref.switches, **args)
return parser
def get_override_section(self, src):
match = self.OVERRIDE_PAT.search(src)
if match:
return match.group()
return ''
def parse_string(self, src):
options = {'cPickle':cPickle}
if src is not None:
try:
if not isinstance(src, unicode):
src = src.decode('utf-8')
src = src.replace(u'PyQt%d.QtCore' % 4, u'PyQt5.QtCore')
exec src in options
except:
print 'Failed to parse options string:'
print repr(src)
traceback.print_exc()
opts = OptionValues()
for pref in self.preferences:
val = options.get(pref.name, pref.default)
formatter = __builtins__.get(pref.type, None)
if callable(formatter):
val = formatter(val)
setattr(opts, pref.name, val)
return opts
def render_group(self, name, desc, opts):
prefs = [pref for pref in self.preferences if pref.group == name]
lines = ['### Begin group: %s'%(name if name else 'DEFAULT')]
if desc:
lines += map(lambda x: '# '+x, desc.split('\n'))
lines.append(' ')
for pref in prefs:
lines.append('# '+pref.name.replace('_', ' '))
if pref.help:
lines += map(lambda x: '# ' + x, pref.help.split('\n'))
lines.append('%s = %s'%(pref.name,
self.serialize_opt(getattr(opts, pref.name, pref.default))))
lines.append(' ')
return '\n'.join(lines)
def serialize_opt(self, val):
        if val is True or val is False or val is None or \
isinstance(val, (int, float, long, basestring)):
return repr(val)
pickle = cPickle.dumps(val, -1)
return 'cPickle.loads(%s)'%repr(pickle)
def serialize(self, opts):
src = '# %s\n\n'%(self.description.replace('\n', '\n# '))
groups = [self.render_group(name, self.groups.get(name, ''), opts)
for name in [None] + self.group_list]
return src + '\n\n'.join(groups)
class ConfigInterface(object):
def __init__(self, description):
self.option_set = OptionSet(description=description)
self.add_opt = self.option_set.add_opt
self.add_group = self.option_set.add_group
self.remove_opt = self.remove = self.option_set.remove_opt
self.parse_string = self.option_set.parse_string
self.get_option = self.option_set.get_option
self.preferences = self.option_set.preferences
def update(self, other):
self.option_set.update(other.option_set)
def option_parser(self, usage='', gui_mode=False):
return self.option_set.option_parser(user_defaults=self.parse(),
usage=usage, gui_mode=gui_mode)
def smart_update(self, opts1, opts2):
self.option_set.smart_update(opts1, opts2)
class Config(ConfigInterface):
'''
A file based configuration.
'''
def __init__(self, basename, description=''):
ConfigInterface.__init__(self, description)
self.config_file_path = os.path.join(config_dir, basename+'.py')
def parse(self):
src = ''
if os.path.exists(self.config_file_path):
try:
with ExclusiveFile(self.config_file_path) as f:
try:
src = f.read().decode('utf-8')
except ValueError:
print "Failed to parse", self.config_file_path
traceback.print_exc()
except LockError:
raise IOError('Could not lock config file: %s'%self.config_file_path)
return self.option_set.parse_string(src)
def as_string(self):
if not os.path.exists(self.config_file_path):
return ''
try:
with ExclusiveFile(self.config_file_path) as f:
return f.read().decode('utf-8')
except LockError:
raise IOError('Could not lock config file: %s'%self.config_file_path)
def set(self, name, val):
if not self.option_set.has_option(name):
raise ValueError('The option %s is not defined.'%name)
try:
if not os.path.exists(config_dir):
make_config_dir()
with ExclusiveFile(self.config_file_path) as f:
src = f.read()
opts = self.option_set.parse_string(src)
setattr(opts, name, val)
footer = self.option_set.get_override_section(src)
src = self.option_set.serialize(opts)+ '\n\n' + footer + '\n'
f.seek(0)
f.truncate()
if isinstance(src, unicode):
src = src.encode('utf-8')
f.write(src)
except LockError:
raise IOError('Could not lock config file: %s'%self.config_file_path)
class StringConfig(ConfigInterface):
'''
A string based configuration
'''
def __init__(self, src, description=''):
ConfigInterface.__init__(self, description)
self.src = src
def parse(self):
return self.option_set.parse_string(self.src)
def set(self, name, val):
if not self.option_set.has_option(name):
raise ValueError('The option %s is not defined.'%name)
opts = self.option_set.parse_string(self.src)
setattr(opts, name, val)
footer = self.option_set.get_override_section(self.src)
self.src = self.option_set.serialize(opts)+ '\n\n' + footer + '\n'
class ConfigProxy(object):
'''
A Proxy to minimize file reads for widely used config settings
'''
def __init__(self, config):
self.__config = config
self.__opts = None
@property
def defaults(self):
return self.__config.option_set.defaults
def refresh(self):
self.__opts = self.__config.parse()
def retranslate_help(self):
self.__config.option_set.retranslate_help()
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, val):
return self.set(key, val)
def __delitem__(self, key):
self.set(key, self.defaults[key])
def get(self, key):
if self.__opts is None:
self.refresh()
return getattr(self.__opts, key)
def set(self, key, val):
if self.__opts is None:
self.refresh()
setattr(self.__opts, key, val)
return self.__config.set(key, val)
def help(self, key):
return self.__config.get_option(key).help
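# Illustrative usage sketch: a ConfigProxy behaves like a mapping and writes
# straight through to its file-backed Config. The `prefs` proxy created below
# is the canonical instance, and the key used here is defined in _prefs().
#
#   timeout = prefs['network_timeout']   # parses the file on first access
#   prefs['network_timeout'] = 10        # persists the new value
#   del prefs['network_timeout']         # restores the default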
def _prefs():
c = Config('global', 'calibre wide preferences')
c.add_opt('database_path',
default=os.path.expanduser('~/library1.db'),
help=_('Path to the database in which books are stored'))
c.add_opt('filename_pattern', default=ur'(?P<title>.+) - (?P<author>[^_]+)',
help=_('Pattern to guess metadata from filenames'))
c.add_opt('isbndb_com_key', default='',
help=_('Access key for isbndb.com'))
c.add_opt('network_timeout', default=5,
help=_('Default timeout for network operations (seconds)'))
c.add_opt('library_path', default=None,
help=_('Path to directory in which your library of books is stored'))
c.add_opt('language', default=None,
help=_('The language in which to display the user interface'))
c.add_opt('output_format', default='EPUB',
help=_('The default output format for ebook conversions.'))
c.add_opt('input_format_order', default=['EPUB', 'AZW3', 'MOBI', 'LIT', 'PRC',
'FB2', 'HTML', 'HTM', 'XHTM', 'SHTML', 'XHTML', 'ZIP', 'ODT', 'RTF', 'PDF',
'TXT'],
help=_('Ordered list of formats to prefer for input.'))
c.add_opt('read_file_metadata', default=True,
help=_('Read metadata from files'))
c.add_opt('worker_process_priority', default='normal',
help=_('The priority of worker processes. A higher priority '
'means they run faster and consume more resources. '
'Most tasks like conversion/news download/adding books/etc. '
'are affected by this setting.'))
c.add_opt('swap_author_names', default=False,
help=_('Swap author first and last names when reading metadata'))
c.add_opt('add_formats_to_existing', default=False,
help=_('Add new formats to existing book records'))
c.add_opt('check_for_dupes_on_ctl', default=False,
help=_('Check for duplicates when copying to another library'))
c.add_opt('installation_uuid', default=None, help='Installation UUID')
c.add_opt('new_book_tags', default=[], help=_('Tags to apply to books added to the library'))
c.add_opt('mark_new_books', default=False, help=_(
'Mark newly added books. The mark is a temporary mark that is automatically removed when calibre is restarted.'))
# these are here instead of the gui preferences because calibredb and
# calibre server can execute searches
c.add_opt('saved_searches', default={}, help=_('List of named saved searches'))
c.add_opt('user_categories', default={}, help=_('User-created tag browser categories'))
c.add_opt('manage_device_metadata', default='manual',
help=_('How and when calibre updates metadata on the device.'))
c.add_opt('limit_search_columns', default=False,
help=_('When searching for text without using lookup '
'prefixes, as for example, Red instead of title:Red, '
'limit the columns searched to those named below.'))
c.add_opt('limit_search_columns_to',
default=['title', 'authors', 'tags', 'series', 'publisher'],
help=_('Choose columns to be searched when not using prefixes, '
'as for example, when searching for Red instead of '
'title:Red. Enter a list of search/lookup names '
'separated by commas. Only takes effect if you set the option '
'to limit search columns above.'))
c.add_opt('use_primary_find_in_search', default=True,
help=_(u'Characters typed in the search box will match their '
'accented versions, based on the language you have chosen '
'for the calibre interface. For example, in '
u' English, searching for n will match %s and n, but if '
'your language is Spanish it will only match n. Note that '
'this is much slower than a simple search on very large '
'libraries.')%u'\xf1')
c.add_opt('migrated', default=False, help='For Internal use. Don\'t modify.')
return c
prefs = ConfigProxy(_prefs())
if prefs['installation_uuid'] is None:
import uuid
prefs['installation_uuid'] = str(uuid.uuid4())
# Read tweaks
def read_raw_tweaks():
make_config_dir()
default_tweaks = P('default_tweaks.py', data=True,
allow_user_override=False)
tweaks_file = os.path.join(config_dir, 'tweaks.py')
if not os.path.exists(tweaks_file):
with open(tweaks_file, 'wb') as f:
f.write(default_tweaks)
with open(tweaks_file, 'rb') as f:
return default_tweaks, f.read()
def read_tweaks():
default_tweaks, tweaks = read_raw_tweaks()
l, g = {}, {}
try:
exec tweaks in g, l
except:
import traceback
print 'Failed to load custom tweaks file'
traceback.print_exc()
dl, dg = {}, {}
exec default_tweaks in dg, dl
dl.update(l)
return dl
def write_tweaks(raw):
make_config_dir()
tweaks_file = os.path.join(config_dir, 'tweaks.py')
with open(tweaks_file, 'wb') as f:
f.write(raw)
tweaks = read_tweaks()
def reset_tweaks_to_default():
default_tweaks = P('default_tweaks.py', data=True,
allow_user_override=False)
dl, dg = {}, {}
exec default_tweaks in dg, dl
tweaks.clear()
tweaks.update(dl)
class Tweak(object):
def __init__(self, name, value):
self.name, self.value = name, value
def __enter__(self):
self.origval = tweaks[self.name]
tweaks[self.name] = self.value
def __exit__(self, *args):
tweaks[self.name] = self.origval
| gpl-3.0 | -2,838,697,234,434,908,700 | 37.72167 | 121 | 0.573497 | false |
Lawrence-Liu/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient, so that the segmentation is close to a Voronoi
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause | 2,322,426,478,103,312,400 | 33.040816 | 79 | 0.653777 | false |
vlachoudis/sl4a | python/src/Lib/bsddb/db.py | 194 | 2730 | #----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and Andrew Kuchling. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# o Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the disclaimer that follows.
#
# o Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# o Neither the name of Digital Creations nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------
# This module is just a placeholder for possible future expansion, in
# case we ever want to augment the stuff in _db in any way. For now
# it just simply imports everything from _db.
import sys
absolute_import = (sys.version_info[0] >= 3)
if not absolute_import :
if __name__.startswith('bsddb3.') :
# import _pybsddb binary as it should be the more recent version from
# a standalone pybsddb addon package than the version included with
# python as bsddb._bsddb.
from _pybsddb import *
from _pybsddb import __version__
else:
from _bsddb import *
from _bsddb import __version__
else :
# Because this syntaxis is not valid before Python 2.5
if __name__.startswith('bsddb3.') :
exec("from ._pybsddb import *")
exec("from ._pybsddb import __version__")
else :
exec("from ._bsddb import *")
exec("from ._bsddb import __version__")
| apache-2.0 | 9,187,898,677,825,794,000 | 44.5 | 77 | 0.677656 | false |
agentxan/nzbToMedia | libs/beets/plugins.py | 4 | 15698 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Support for beets plugins."""
from __future__ import division, absolute_import, print_function
import inspect
import traceback
import re
from collections import defaultdict
from functools import wraps
import beets
from beets import logging
from beets import mediafile
PLUGIN_NAMESPACE = 'beetsplug'
# Plugins using the Last.fm API can share the same API key.
LASTFM_KEY = '2dc3914abf35f0d9c92d97d8f8e42b43'
# Global logger.
log = logging.getLogger('beets')
class PluginConflictException(Exception):
"""Indicates that the services provided by one plugin conflict with
those of another.
For example two plugins may define different types for flexible fields.
"""
class PluginLogFilter(logging.Filter):
"""A logging filter that identifies the plugin that emitted a log
message.
"""
def __init__(self, plugin):
self.prefix = u'{0}: '.format(plugin.name)
def filter(self, record):
if hasattr(record.msg, 'msg') and isinstance(record.msg.msg,
basestring):
# A _LogMessage from our hacked-up Logging replacement.
record.msg.msg = self.prefix + record.msg.msg
elif isinstance(record.msg, basestring):
record.msg = self.prefix + record.msg
return True
# Managing the plugins themselves.
class BeetsPlugin(object):
"""The base class for all beets plugins. Plugins provide
functionality by defining a subclass of BeetsPlugin and overriding
the abstract methods defined here.
"""
def __init__(self, name=None):
"""Perform one-time plugin setup.
"""
self.name = name or self.__module__.split('.')[-1]
self.config = beets.config[self.name]
if not self.template_funcs:
self.template_funcs = {}
if not self.template_fields:
self.template_fields = {}
if not self.album_template_fields:
self.album_template_fields = {}
self.import_stages = []
self._log = log.getChild(self.name)
self._log.setLevel(logging.NOTSET) # Use `beets` logger level.
if not any(isinstance(f, PluginLogFilter) for f in self._log.filters):
self._log.addFilter(PluginLogFilter(self))
def commands(self):
"""Should return a list of beets.ui.Subcommand objects for
commands that should be added to beets' CLI.
"""
return ()
def get_import_stages(self):
"""Return a list of functions that should be called as importer
        pipeline stages.
The callables are wrapped versions of the functions in
`self.import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return [self._set_log_level_and_params(logging.WARNING, import_stage)
for import_stage in self.import_stages]
def _set_log_level_and_params(self, base_log_level, func):
"""Wrap `func` to temporarily set this plugin's logger level to
`base_log_level` + config options (and restore it to its previous
value after the function returns). Also determines which params may not
be sent for backwards-compatibility.
"""
argspec = inspect.getargspec(func)
@wraps(func)
def wrapper(*args, **kwargs):
assert self._log.level == logging.NOTSET
verbosity = beets.config['verbose'].get(int)
log_level = max(logging.DEBUG, base_log_level - 10 * verbosity)
self._log.setLevel(log_level)
try:
try:
return func(*args, **kwargs)
except TypeError as exc:
if exc.args[0].startswith(func.__name__):
# caused by 'func' and not stuff internal to 'func'
kwargs = dict((arg, val) for arg, val in kwargs.items()
if arg in argspec.args)
return func(*args, **kwargs)
else:
raise
finally:
self._log.setLevel(logging.NOTSET)
return wrapper
def queries(self):
"""Should return a dict mapping prefixes to Query subclasses.
"""
return {}
def track_distance(self, item, info):
"""Should return a Distance object to be added to the
distance for every track comparison.
"""
return beets.autotag.hooks.Distance()
def album_distance(self, items, album_info, mapping):
"""Should return a Distance object to be added to the
distance for every album-level comparison.
"""
return beets.autotag.hooks.Distance()
def candidates(self, items, artist, album, va_likely):
"""Should return a sequence of AlbumInfo objects that match the
album whose items are provided.
"""
return ()
def item_candidates(self, item, artist, title):
"""Should return a sequence of TrackInfo objects that match the
item provided.
"""
return ()
def album_for_id(self, album_id):
"""Return an AlbumInfo object or None if no matching release was
found.
"""
return None
def track_for_id(self, track_id):
"""Return a TrackInfo object or None if no matching release was
found.
"""
return None
def add_media_field(self, name, descriptor):
"""Add a field that is synchronized between media files and items.
When a media field is added ``item.write()`` will set the name
property of the item's MediaFile to ``item[name]`` and save the
changes. Similarly ``item.read()`` will set ``item[name]`` to
the value of the name property of the media file.
``descriptor`` must be an instance of ``mediafile.MediaField``.
"""
        # Defer import to prevent circular dependency
from beets import library
mediafile.MediaFile.add_field(name, descriptor)
library.Item._media_fields.add(name)
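    # Illustrative sketch (the 'mood' field and the storage styles are only an
    # example of the pattern, not something this module defines): a plugin
    # would typically call this from its __init__, e.g.
    #
    #   field = mediafile.MediaField(
    #       mediafile.MP3DescStorageStyle(u'mood'),
    #       mediafile.StorageStyle(u'MOOD'))
    #   self.add_media_field('mood', field)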
_raw_listeners = None
listeners = None
def register_listener(self, event, func):
"""Add a function as a listener for the specified event.
"""
wrapped_func = self._set_log_level_and_params(logging.WARNING, func)
cls = self.__class__
if cls.listeners is None or cls._raw_listeners is None:
cls._raw_listeners = defaultdict(list)
cls.listeners = defaultdict(list)
if func not in cls._raw_listeners[event]:
cls._raw_listeners[event].append(func)
cls.listeners[event].append(wrapped_func)
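    # Illustrative sketch (plugin, event and handler names are examples): a
    # subclass usually hooks events from its __init__, and the handler is
    # called with the keyword arguments passed to send() for that event.
    #
    #   class MyPlugin(BeetsPlugin):
    #       def __init__(self):
    #           super(MyPlugin, self).__init__()
    #           self.register_listener('write', self.on_write)
    #
    #       def on_write(self, item, path, tags):
    #           self._log.debug(u'about to write {0}', path)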
template_funcs = None
template_fields = None
album_template_fields = None
@classmethod
def template_func(cls, name):
"""Decorator that registers a path template function. The
function will be invoked as ``%name{}`` from path format
strings.
"""
def helper(func):
if cls.template_funcs is None:
cls.template_funcs = {}
cls.template_funcs[name] = func
return func
return helper
@classmethod
def template_field(cls, name):
"""Decorator that registers a path template field computation.
The value will be referenced as ``$name`` from path format
strings. The function must accept a single parameter, the Item
being formatted.
"""
def helper(func):
if cls.template_fields is None:
cls.template_fields = {}
cls.template_fields[name] = func
return func
return helper
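# Illustrative sketch of the two decorators above (plugin, function and field
# names are invented):
#
#   class MyPlugin(BeetsPlugin):
#       pass
#
#   @MyPlugin.template_func('initial')
#   def _tmpl_initial(text):
#       # usable as %initial{$artist} in path format strings
#       return text[:1].upper() if text else u''
#
#   @MyPlugin.template_field('disc_and_track')
#   def _tmpl_disc_and_track(item):
#       # usable as $disc_and_track in path format strings
#       return u'{0:02d}.{1:02d}'.format(item.disc or 0, item.track or 0)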
_classes = set()
def load_plugins(names=()):
"""Imports the modules for a sequence of plugin names. Each name
must be the name of a Python module under the "beetsplug" namespace
package in sys.path; the module indicated should contain the
BeetsPlugin subclasses desired.
"""
for name in names:
modname = '{0}.{1}'.format(PLUGIN_NAMESPACE, name)
try:
try:
namespace = __import__(modname, None, None)
except ImportError as exc:
# Again, this is hacky:
if exc.args[0].endswith(' ' + name):
log.warn(u'** plugin {0} not found', name)
else:
raise
else:
for obj in getattr(namespace, name).__dict__.values():
if isinstance(obj, type) and issubclass(obj, BeetsPlugin) \
and obj != BeetsPlugin and obj not in _classes:
_classes.add(obj)
except:
log.warn(
u'** error loading plugin {}:\n{}',
name,
traceback.format_exc(),
)
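# Illustrative sketch (assumes the plugin list comes from the user's
# configuration, as the CLI does; the exact call is an example only):
#
#   load_plugins(names=beets.config['plugins'].as_str_seq())
#   for plugin in find_plugins():
#       plugin.commands()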
_instances = {}
def find_plugins():
"""Returns a list of BeetsPlugin subclass instances from all
currently loaded beets plugins. Loads the default plugin set
first.
"""
load_plugins()
plugins = []
for cls in _classes:
# Only instantiate each plugin class once.
if cls not in _instances:
_instances[cls] = cls()
plugins.append(_instances[cls])
return plugins
# Communication with plugins.
def commands():
"""Returns a list of Subcommand objects from all loaded plugins.
"""
out = []
for plugin in find_plugins():
out += plugin.commands()
return out
def queries():
"""Returns a dict mapping prefix strings to Query subclasses all loaded
plugins.
"""
out = {}
for plugin in find_plugins():
out.update(plugin.queries())
return out
def types(model_cls):
# Gives us `item_types` and `album_types`
attr_name = '{0}_types'.format(model_cls.__name__.lower())
types = {}
for plugin in find_plugins():
plugin_types = getattr(plugin, attr_name, {})
for field in plugin_types:
if field in types and plugin_types[field] != types[field]:
raise PluginConflictException(
u'Plugin {0} defines flexible field {1} '
u'which has already been defined with '
u'another type.'.format(plugin.name, field)
)
types.update(plugin_types)
return types
def track_distance(item, info):
"""Gets the track distance calculated by all loaded plugins.
Returns a Distance object.
"""
from beets.autotag.hooks import Distance
dist = Distance()
for plugin in find_plugins():
dist.update(plugin.track_distance(item, info))
return dist
def album_distance(items, album_info, mapping):
"""Returns the album distance calculated by plugins."""
from beets.autotag.hooks import Distance
dist = Distance()
for plugin in find_plugins():
dist.update(plugin.album_distance(items, album_info, mapping))
return dist
def candidates(items, artist, album, va_likely):
"""Gets MusicBrainz candidates for an album from each plugin.
"""
out = []
for plugin in find_plugins():
out.extend(plugin.candidates(items, artist, album, va_likely))
return out
def item_candidates(item, artist, title):
"""Gets MusicBrainz candidates for an item from the plugins.
"""
out = []
for plugin in find_plugins():
out.extend(plugin.item_candidates(item, artist, title))
return out
def album_for_id(album_id):
"""Get AlbumInfo objects for a given ID string.
"""
out = []
for plugin in find_plugins():
res = plugin.album_for_id(album_id)
if res:
out.append(res)
return out
def track_for_id(track_id):
"""Get TrackInfo objects for a given ID string.
"""
out = []
for plugin in find_plugins():
res = plugin.track_for_id(track_id)
if res:
out.append(res)
return out
def template_funcs():
"""Get all the template functions declared by plugins as a
dictionary.
"""
funcs = {}
for plugin in find_plugins():
if plugin.template_funcs:
funcs.update(plugin.template_funcs)
return funcs
def import_stages():
"""Get a list of import stage functions defined by plugins."""
stages = []
for plugin in find_plugins():
stages += plugin.get_import_stages()
return stages
# New-style (lazy) plugin-provided fields.
def item_field_getters():
"""Get a dictionary mapping field names to unary functions that
compute the field's value.
"""
funcs = {}
for plugin in find_plugins():
if plugin.template_fields:
funcs.update(plugin.template_fields)
return funcs
def album_field_getters():
"""As above, for album fields.
"""
funcs = {}
for plugin in find_plugins():
if plugin.album_template_fields:
funcs.update(plugin.album_template_fields)
return funcs
# Event dispatch.
def event_handlers():
"""Find all event handlers from plugins as a dictionary mapping
event names to sequences of callables.
"""
all_handlers = defaultdict(list)
for plugin in find_plugins():
if plugin.listeners:
for event, handlers in plugin.listeners.items():
all_handlers[event] += handlers
return all_handlers
def send(event, **arguments):
"""Send an event to all assigned event listeners.
`event` is the name of the event to send, all other named arguments
are passed along to the handlers.
Return a list of non-None values returned from the handlers.
"""
log.debug(u'Sending event: {0}', event)
results = []
for handler in event_handlers()[event]:
result = handler(**arguments)
if result is not None:
results.append(result)
return results
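# Illustrative sketch (event name and arguments are examples): handlers
# registered for the event receive the keyword arguments, and their non-None
# return values are collected.
#
#   results = send('before_item_moved', item=item, source=source,
#                  destination=destination)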
def feat_tokens(for_artist=True):
"""Return a regular expression that matches phrases like "featuring"
that separate a main artist or a song title from secondary artists.
The `for_artist` option determines whether the regex should be
suitable for matching artist fields (the default) or title fields.
"""
feat_words = ['ft', 'featuring', 'feat', 'feat.', 'ft.']
if for_artist:
feat_words += ['with', 'vs', 'and', 'con', '&']
return '(?<=\s)(?:{0})(?=\s)'.format(
'|'.join(re.escape(x) for x in feat_words)
)
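# Illustrative sketch (artist string is invented): the returned pattern can be
# used with the module-level `re` to split off a featured artist; the
# lookaround groups leave the surrounding whitespace in place.
#
#   parts = re.split(feat_tokens(), u'Some Artist feat. Other Artist', 1)
#   # -> [u'Some Artist ', u' Other Artist']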
def sanitize_choices(choices, choices_all):
"""Clean up a stringlist configuration attribute: keep only choices
elements present in choices_all, remove duplicate elements, expand '*'
wildcard while keeping original stringlist order.
"""
seen = set()
others = [x for x in choices_all if x not in choices]
res = []
for s in choices:
if s in list(choices_all) + ['*']:
if not (s in seen or seen.add(s)):
res.extend(list(others) if s == '*' else [s])
return res
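# Illustrative examples of the behaviour (values are invented):
#
#   sanitize_choices(['b', '*', 'b'], ['a', 'b', 'c'])  # -> ['b', 'a', 'c']
#   sanitize_choices(['x', 'a'], ['a', 'b', 'c'])       # -> ['a']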
| gpl-3.0 | -2,953,152,942,254,646,000 | 31.102249 | 79 | 0.612052 | false |
mulkieran/pyblk | tests/test_traversal.py | 1 | 4103 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <[email protected]>
"""
tests.test_traversal
====================
Tests traversing the sysfs hierarchy.
.. moduleauthor:: mulhern <[email protected]>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pyblk
import pytest
from hypothesis import given
from hypothesis import strategies
from hypothesis import Settings
from ._constants import BOTHS
from ._constants import CONTEXT
from ._constants import EITHERS
from ._constants import HOLDERS
from ._constants import SLAVES
NUM_TESTS = 5
# Use conditional to avoid processing tests if number of examples is too small.
# pytest.mark.skipif allows the test to be built, resulting in a hypothesis
# error if SLAVES or HOLDERS is empty.
if len(BOTHS) == 0:
@pytest.mark.skipif(
True,
reason="no slaves or holders data for tests"
)
class TestTraversal(object):
# pylint: disable=too-few-public-methods
"""
An empty test class which is always skipped.
"""
def test_dummy(self):
"""
A dummy test, for which pytest can show a skip message.
"""
pass
else:
class TestTraversal(object):
"""
A class for testing sysfs traversals.
"""
@given(
strategies.sampled_from(SLAVES),
settings=Settings(max_examples=NUM_TESTS)
)
def test_slaves(self, device):
"""
Verify slaves do not contain originating device.
"""
assert device not in pyblk.slaves(CONTEXT, device)
@given(
strategies.sampled_from(HOLDERS),
settings=Settings(max_examples=NUM_TESTS)
)
def test_holders(self, device):
"""
Verify holders do not contain originating device.
"""
assert device not in pyblk.holders(CONTEXT, device)
@given(
strategies.sampled_from(EITHERS),
strategies.booleans(),
settings=Settings(max_examples=2 * NUM_TESTS)
)
def test_inverse(self, device, recursive):
"""
Verify that a round-trip traversal will encounter the original
device.
:param device: the device to test
:param bool recursive: if True, test recursive relationship
If recursive is True, test ancestor/descendant relationship.
If recursive is False, tests parent/child relationship.
"""
# pylint: disable=too-many-function-args
slaves = list(pyblk.slaves(CONTEXT, device, recursive))
for slave in slaves:
assert device in list(
pyblk.holders(CONTEXT, slave, recursive)
)
holders = list(pyblk.holders(CONTEXT, device, recursive))
for holder in holders:
assert device in list(
pyblk.slaves(CONTEXT, holder, recursive)
)
| gpl-2.0 | -3,320,666,480,792,319,500 | 32.909091 | 79 | 0.63807 | false |
zaina/nova | nova/tests/unit/api/ec2/test_ec2utils.py | 84 | 2549 | # Copyright 2014 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.ec2 import ec2utils
from nova import context
from nova import objects
from nova import test
class EC2UtilsTestCase(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
ec2utils.reset_cache()
super(EC2UtilsTestCase, self).setUp()
def test_get_int_id_from_snapshot_uuid(self):
smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
smap.create()
smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
'fake-uuid')
self.assertEqual(smap.id, smap_id)
def test_get_int_id_from_snapshot_uuid_creates_mapping(self):
smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
'fake-uuid')
smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id)
self.assertEqual('fake-uuid', smap.uuid)
def test_get_snapshot_uuid_from_int_id(self):
smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
smap.create()
smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id)
self.assertEqual(smap.uuid, smap_uuid)
def test_id_to_glance_id(self):
s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
s3imap.create()
uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id)
self.assertEqual(uuid, s3imap.uuid)
def test_glance_id_to_id(self):
s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
s3imap.create()
s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid)
self.assertEqual(s3imap_id, s3imap.id)
def test_glance_id_to_id_creates_mapping(self):
s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid')
s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id)
self.assertEqual('fake-uuid', s3imap.uuid)
| apache-2.0 | -389,551,313,306,299,900 | 40.786885 | 78 | 0.651628 | false |
thedemz/M101P | chapter2/hw2-2.py | 1 | 1698 | import sys
import os
file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
lib_path = os.path.join(dir_path, "lib")
sys.path.insert(0, lib_path)
import pymongo
# connnecto to the db on standard port
connection = pymongo.MongoClient("mongodb://localhost")
db = connection.students # attach to db
def get_students():
collection = db.grades # specify the colllection
print("There are 200 students")
print("There should be 800 grades")
grades = collection.find().count()
print("Counted Grades:", grades)
result = collection.find({'type': 'homework'}, {"student_id": 1, "score": 1, "type":1, "_id": 1}).sort(
[("student_id", 1), ("score", 1)]
)
print("Counted With type Homework:", result.count())
return result
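# Relies on the (student_id, score) ascending sort above: the first grade seen
# for each student is that student's lowest homework score.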
def mark_lowest_score( grades ):
collection = db.grades # specify the colllection
student_id = None
for ix in grades:
ix["lowest"] = False
print(student_id, ix["student_id"])
if student_id != ix["student_id"]:
student_id = ix["student_id"]
ix["lowest"] = True
print("True")
else:
print("False")
collection.save( ix )
def delete_lowest():
collection = db.grades # specify the colllection
grades = collection.find().count()
if grades == 800:
print("Removing lowest grades from total", grades)
collection.remove({"lowest": True})
else:
print("Already deleted!", grades)
if __name__ == "__main__":
print("import the data with:")
grades = get_students()
mark_lowest_score(grades)
delete_lowest()
| apache-2.0 | -5,790,267,279,438,200,000 | 19.962963 | 107 | 0.590106 | false |
chiffa/numpy | numpy/distutils/conv_template.py | 38 | 9684 | #!/usr/bin/python
"""
takes templated file .xxx.src and produces .xxx file where .xxx is
.i or .c or .h, using the following template rules
/**begin repeat -- on a line by itself marks the start of a repeated code
segment
/**end repeat**/ -- on a line by itself marks it's end
After the /**begin repeat and before the */, all the named templates are placed
these should all have the same number of replacements
Repeat blocks can be nested, with each nested block labeled with its depth,
i.e.
/**begin repeat1
*....
*/
/**end repeat1**/
When using nested loops, you can optionally exclude particular
combinations of the variables using (inside the comment portion of the inner loop):
:exclude: var1=value1, var2=value2, ...
This will exclude the pattern where var1 is value1 and var2 is value2 when
the result is being generated.
In the main body each replace will use one entry from the list of named replacements
Note that all #..# forms in a block must have the same number of
comma-separated entries.
Example:
An input file containing
/**begin repeat
* #a = 1,2,3#
* #b = 1,2,3#
*/
/**begin repeat1
* #c = ted, jim#
*/
@a@, @b@, @c@
/**end repeat1**/
/**end repeat**/
produces
line 1 "template.c.src"
/*
*********************************************************************
** This file was autogenerated from a template DO NOT EDIT!!**
** Changes should be made to the original source (.src) file **
*********************************************************************
*/
#line 9
1, 1, ted
#line 9
1, 1, jim
#line 9
2, 2, ted
#line 9
2, 2, jim
#line 9
3, 3, ted
#line 9
3, 3, jim
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
from numpy.distutils.compat import get_exception
# names for replacement that are already global.
global_names = {}
# header placed at the front of head processed file
header =\
"""
/*
*****************************************************************************
** This file was autogenerated from a template DO NOT EDIT!!!! **
** Changes should be made to the original source (.src) file **
*****************************************************************************
*/
"""
# Parse string for repeat loops
def parse_structure(astr, level):
"""
The returned line number is from the beginning of the string, starting
at zero. Returns an empty list if no loops found.
"""
if level == 0 :
loopbeg = "/**begin repeat"
loopend = "/**end repeat**/"
else :
loopbeg = "/**begin repeat%d" % level
loopend = "/**end repeat%d**/" % level
ind = 0
line = 0
spanlist = []
while True:
start = astr.find(loopbeg, ind)
if start == -1:
break
start2 = astr.find("*/", start)
start2 = astr.find("\n", start2)
fini1 = astr.find(loopend, start2)
fini2 = astr.find("\n", fini1)
line += astr.count("\n", ind, start2+1)
spanlist.append((start, start2+1, fini1, fini2+1, line))
line += astr.count("\n", start2+1, fini2)
ind = fini2
spanlist.sort()
return spanlist
def paren_repl(obj):
torep = obj.group(1)
numrep = obj.group(2)
return ','.join([torep]*int(numrep))
parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)")
plainrep = re.compile(r"([^*]+)\*(\d+)")
def parse_values(astr):
# replaces all occurrences of '(a,b,c)*4' in astr
    # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty parentheses generate
# empty values, i.e., ()*4 yields ',,,'. The result is
# split at ',' and a list of values returned.
astr = parenrep.sub(paren_repl, astr)
# replaces occurrences of xxx*3 with xxx, xxx, xxx
astr = ','.join([plainrep.sub(paren_repl, x.strip())
for x in astr.split(',')])
return astr.split(',')
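# Illustrative example of the expansion performed above (values are invented):
#
#   parse_values("1, (2,3)*2, 4*2")  ->  ['1', '2', '3', '2', '3', '4', '4']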
stripast = re.compile(r"\n\s*\*?")
named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
exclude_vars_re = re.compile(r"(\w*)=(\w*)")
exclude_re = re.compile(":exclude:")
def parse_loop_header(loophead) :
"""Find all named replacements in the header
Returns a list of dictionaries, one for each loop iteration,
where each key is a name to be substituted and the corresponding
value is the replacement string.
Also return a list of exclusions. The exclusions are dictionaries
of key value pairs. There can be more than one exclusion.
[{'var1':'value1', 'var2', 'value2'[,...]}, ...]
"""
# Strip out '\n' and leading '*', if any, in continuation lines.
# This should not effect code previous to this change as
# continuation lines were not allowed.
loophead = stripast.sub("", loophead)
# parse out the names and lists of values
names = []
reps = named_re.findall(loophead)
nsub = None
for rep in reps:
name = rep[0]
vals = parse_values(rep[1])
size = len(vals)
if nsub is None :
nsub = size
elif nsub != size :
msg = "Mismatch in number of values:\n%s = %s" % (name, vals)
raise ValueError(msg)
names.append((name, vals))
# Find any exclude variables
excludes = []
for obj in exclude_re.finditer(loophead):
span = obj.span()
# find next newline
endline = loophead.find('\n', span[1])
substr = loophead[span[1]:endline]
ex_names = exclude_vars_re.findall(substr)
excludes.append(dict(ex_names))
# generate list of dictionaries, one for each template iteration
dlist = []
if nsub is None :
raise ValueError("No substitution variables found")
for i in range(nsub) :
tmp = {}
for name, vals in names :
tmp[name] = vals[i]
dlist.append(tmp)
return dlist
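# Illustrative example (names and values are invented): a loop header such as
#
#   /**begin repeat
#    * #a = 1, 2#
#    * #b = x, y#
#    */
#
# yields one substitution dictionary per iteration:
#
#   [{'a': '1', 'b': 'x'}, {'a': '2', 'b': 'y'}]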
replace_re = re.compile(r"@([\w]+)@")
def parse_string(astr, env, level, line) :
lineno = "#line %d\n" % line
# local function for string replacement, uses env
def replace(match):
name = match.group(1)
try :
val = env[name]
except KeyError:
msg = 'line %d: no definition of key "%s"'%(line, name)
raise ValueError(msg)
return val
code = [lineno]
struct = parse_structure(astr, level)
if struct :
# recurse over inner loops
oldend = 0
newlevel = level + 1
for sub in struct:
pref = astr[oldend:sub[0]]
head = astr[sub[0]:sub[1]]
text = astr[sub[1]:sub[2]]
oldend = sub[3]
newline = line + sub[4]
code.append(replace_re.sub(replace, pref))
try :
envlist = parse_loop_header(head)
except ValueError:
e = get_exception()
msg = "line %d: %s" % (newline, e)
raise ValueError(msg)
for newenv in envlist :
newenv.update(env)
newcode = parse_string(text, newenv, newlevel, newline)
code.extend(newcode)
suff = astr[oldend:]
code.append(replace_re.sub(replace, suff))
else :
# replace keys
code.append(replace_re.sub(replace, astr))
code.append('\n')
return ''.join(code)
def process_str(astr):
code = [header]
code.extend(parse_string(astr, global_names, 0, 1))
return ''.join(code)
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
sourcefile = os.path.normcase(source).replace("\\", "\\\\")
try:
code = process_str(''.join(lines))
except ValueError:
e = get_exception()
raise ValueError('In "%s" loop at %s' % (sourcefile, e))
return '#line 1 "%s"\n%s' % (sourcefile, code)
def unique_key(adict):
# this obtains a unique key given a dictionary
# currently it works by appending together n of the letters of the
# current keys and increasing n until a unique key is found
# -- not particularly quick
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = "".join([x[:n] for x in allkeys])
if newkey in allkeys:
n += 1
else:
done = True
return newkey
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
try:
writestr = process_str(allstr)
except ValueError:
e = get_exception()
raise ValueError("In %s loop at %s" % (file, e))
outfile.write(writestr)
| bsd-3-clause | -2,417,040,942,528,632,000 | 27.735905 | 84 | 0.545539 | false |
spisneha25/django | tests/sitemaps_tests/test_https.py | 205 | 3608 | from __future__ import unicode_literals
from datetime import date
from django.test import ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango110Warning
from .base import SitemapTestsBase
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.https')
class HTTPSSitemapTests(SitemapTestsBase):
protocol = 'https'
@ignore_warnings(category=RemovedInDjango110Warning)
def test_secure_sitemap_index(self):
"A secure sitemap index can be rendered"
# The URL for views.sitemap in tests/urls/https.py has been updated
# with a name but since reversing by Python path is tried first
# before reversing by name and works since we're giving
# name='django.contrib.sitemaps.views.sitemap', we need to silence
# the erroneous warning until reversing by dotted path is removed.
# The test will work without modification when it's removed.
response = self.client.get('/secure/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/secure/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_secure_sitemap_section(self):
"A secure sitemap section can be rendered"
response = self.client.get('/secure/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(SECURE_PROXY_SSL_HEADER=False)
class HTTPSDetectionSitemapTests(SitemapTestsBase):
extra = {'wsgi.url_scheme': 'https'}
@ignore_warnings(category=RemovedInDjango110Warning)
def test_sitemap_index_with_https_request(self):
"A sitemap index requested in HTTPS is rendered with HTTPS links"
# The URL for views.sitemap in tests/urls/https.py has been updated
# with a name but since reversing by Python path is tried first
# before reversing by name and works since we're giving
# name='django.contrib.sitemaps.views.sitemap', we need to silence
# the erroneous warning until reversing by dotted path is removed.
# The test will work without modification when it's removed.
response = self.client.get('/simple/index.xml', **self.extra)
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url.replace('http://', 'https://')
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_sitemap_section_with_https_request(self):
"A sitemap section requested in HTTPS is rendered with HTTPS links"
response = self.client.get('/simple/sitemap-simple.xml', **self.extra)
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url.replace('http://', 'https://'), date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
| bsd-3-clause | 6,530,611,526,291,074,000 | 49.111111 | 109 | 0.703714 | false |
catapult-project/catapult | third_party/gsutil/third_party/pyu2f/pyu2f/tests/hardware_test.py | 7 | 7337 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pyu2f.hardware."""
import sys
import mock
from pyu2f import errors
from pyu2f import hardware
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest # pylint: disable=g-import-not-at-top
else:
import unittest # pylint: disable=g-import-not-at-top
class HardwareTest(unittest.TestCase):
def testSimpleCommands(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
sk.CmdBlink(5)
mock_transport.SendBlink.assert_called_once_with(5)
sk.CmdWink()
mock_transport.SendWink.assert_called_once_with()
sk.CmdPing(bytearray(b'foo'))
mock_transport.SendPing.assert_called_once_with(bytearray(b'foo'))
def testRegisterInvalidParams(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
self.assertRaises(errors.InvalidRequestError, sk.CmdRegister, '1234',
'1234')
def testRegisterSuccess(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
mock_transport.SendMsgBytes.return_value = bytearray(
[0x01, 0x02, 0x90, 0x00])
reply = sk.CmdRegister(challenge_param, app_param)
self.assertEquals(reply, bytearray([0x01, 0x02]))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x01, 0x03, 0x00]))
self.assertEquals(sent_msg[7:-2], bytearray(challenge_param + app_param))
def testRegisterTUPRequired(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
mock_transport.SendMsgBytes.return_value = bytearray([0x69, 0x85])
self.assertRaises(errors.TUPRequiredError, sk.CmdRegister, challenge_param,
app_param)
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
def testVersion(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
mock_transport.SendMsgBytes.return_value = bytearray(b'U2F_V2\x90\x00')
reply = sk.CmdVersion()
self.assertEquals(reply, bytearray(b'U2F_V2'))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args
self.assertEquals(sent_msg, bytearray(
[0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00]))
def testVersionFallback(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
mock_transport.SendMsgBytes.side_effect = [
bytearray([0x67, 0x00]),
bytearray(b'U2F_V2\x90\x00')]
reply = sk.CmdVersion()
self.assertEquals(reply, bytearray(b'U2F_V2'))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 2)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args_list[0]
self.assertEquals(len(sent_msg), 7)
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x03, 0x00, 0x00]))
self.assertEquals(sent_msg[4:7], bytearray([0x00, 0x00, 0x00])) # Le
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args_list[1]
self.assertEquals(len(sent_msg), 9)
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x03, 0x00, 0x00]))
self.assertEquals(sent_msg[4:7], bytearray([0x00, 0x00, 0x00])) # Lc
self.assertEquals(sent_msg[7:9], bytearray([0x00, 0x00])) # Le
def testVersionErrors(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
mock_transport.SendMsgBytes.return_value = bytearray([0xfa, 0x05])
self.assertRaises(errors.ApduError, sk.CmdVersion)
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
def testAuthenticateSuccess(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
key_handle = b'\x01\x02\x03\x04'
mock_transport.SendMsgBytes.return_value = bytearray(
[0x01, 0x02, 0x90, 0x00])
reply = sk.CmdAuthenticate(challenge_param, app_param, key_handle)
self.assertEquals(reply, bytearray([0x01, 0x02]))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x02, 0x03, 0x00]))
self.assertEquals(
sent_msg[7:-2],
bytearray(challenge_param + app_param + bytearray([4, 1, 2, 3, 4])))
def testAuthenticateCheckOnly(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
key_handle = b'\x01\x02\x03\x04'
mock_transport.SendMsgBytes.return_value = bytearray(
[0x01, 0x02, 0x90, 0x00])
reply = sk.CmdAuthenticate(challenge_param,
app_param,
key_handle,
check_only=True)
self.assertEquals(reply, bytearray([0x01, 0x02]))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x02, 0x07, 0x00]))
self.assertEquals(
sent_msg[7:-2],
bytearray(challenge_param + app_param + bytearray([4, 1, 2, 3, 4])))
def testAuthenticateTUPRequired(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
key_handle = b'\x01\x02\x03\x04'
mock_transport.SendMsgBytes.return_value = bytearray([0x69, 0x85])
self.assertRaises(errors.TUPRequiredError, sk.CmdAuthenticate,
challenge_param, app_param, key_handle)
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
def testAuthenticateInvalidKeyHandle(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
key_handle = b'\x01\x02\x03\x04'
mock_transport.SendMsgBytes.return_value = bytearray([0x6a, 0x80])
self.assertRaises(errors.InvalidKeyHandleError, sk.CmdAuthenticate,
challenge_param, app_param, key_handle)
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 3,535,769,689,450,709,500 | 36.055556 | 79 | 0.702194 | false |
ASlave2Audio/Restaurant-App | mingw/bin/lib/xdrlib.py | 197 | 5563 | """Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
"""Exception class for this module. Use:
except xdrlib.Error, var:
# var has the Error instance for the exception
Public ivars:
msg -- contains the message
"""
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return repr(self.msg)
def __str__(self):
return str(self.msg)
class ConversionError(Error):
pass
class Packer:
"""Pack various data representations into a buffer."""
def __init__(self):
self.reset()
def reset(self):
self.__buf = _StringIO()
def get_buffer(self):
return self.__buf.getvalue()
# backwards compatibility
get_buf = get_buffer
def pack_uint(self, x):
self.__buf.write(struct.pack('>L', x))
def pack_int(self, x):
self.__buf.write(struct.pack('>l', x))
pack_enum = pack_int
def pack_bool(self, x):
if x: self.__buf.write('\0\0\0\1')
else: self.__buf.write('\0\0\0\0')
def pack_uhyper(self, x):
self.pack_uint(x>>32 & 0xffffffffL)
self.pack_uint(x & 0xffffffffL)
pack_hyper = pack_uhyper
def pack_float(self, x):
try: self.__buf.write(struct.pack('>f', x))
except struct.error, msg:
raise ConversionError, msg
def pack_double(self, x):
try: self.__buf.write(struct.pack('>d', x))
except struct.error, msg:
raise ConversionError, msg
def pack_fstring(self, n, s):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
data = s[:n]
n = ((n+3)//4)*4
data = data + (n - len(data)) * '\0'
self.__buf.write(data)
pack_fopaque = pack_fstring
def pack_string(self, s):
n = len(s)
self.pack_uint(n)
self.pack_fstring(n, s)
pack_opaque = pack_string
pack_bytes = pack_string
def pack_list(self, list, pack_item):
for item in list:
self.pack_uint(1)
pack_item(item)
self.pack_uint(0)
def pack_farray(self, n, list, pack_item):
if len(list) != n:
raise ValueError, 'wrong array size'
for item in list:
pack_item(item)
def pack_array(self, list, pack_item):
n = len(list)
self.pack_uint(n)
self.pack_farray(n, list, pack_item)
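# Illustrative usage of Packer (editor's sketch, not part of the original
# module; the values below are arbitrary examples):
#
#   import xdrlib
#   p = xdrlib.Packer()
#   p.pack_uint(42)
#   p.pack_string('hello')
#   p.pack_list([1, 2, 3], p.pack_int)
#   encoded = p.get_buffer()   # XDR-encoded byte string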
class Unpacker:
"""Unpacks various data representations from the given buffer."""
def __init__(self, data):
self.reset(data)
def reset(self, data):
self.__buf = data
self.__pos = 0
def get_position(self):
return self.__pos
def set_position(self, position):
self.__pos = position
def get_buffer(self):
return self.__buf
def done(self):
if self.__pos < len(self.__buf):
raise Error('unextracted data remains')
def unpack_uint(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
x = struct.unpack('>L', data)[0]
try:
return int(x)
except OverflowError:
return x
def unpack_int(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>l', data)[0]
unpack_enum = unpack_int
def unpack_bool(self):
return bool(self.unpack_int())
def unpack_uhyper(self):
hi = self.unpack_uint()
lo = self.unpack_uint()
return long(hi)<<32 | lo
def unpack_hyper(self):
x = self.unpack_uhyper()
if x >= 0x8000000000000000L:
x = x - 0x10000000000000000L
return x
def unpack_float(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>f', data)[0]
def unpack_double(self):
i = self.__pos
self.__pos = j = i+8
data = self.__buf[i:j]
if len(data) < 8:
raise EOFError
return struct.unpack('>d', data)[0]
def unpack_fstring(self, n):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
i = self.__pos
j = i + (n+3)//4*4
if j > len(self.__buf):
raise EOFError
self.__pos = j
return self.__buf[i:i+n]
unpack_fopaque = unpack_fstring
def unpack_string(self):
n = self.unpack_uint()
return self.unpack_fstring(n)
unpack_opaque = unpack_string
unpack_bytes = unpack_string
def unpack_list(self, unpack_item):
list = []
while 1:
x = self.unpack_uint()
if x == 0: break
if x != 1:
raise ConversionError, '0 or 1 expected, got %r' % (x,)
item = unpack_item()
list.append(item)
return list
def unpack_farray(self, n, unpack_item):
list = []
for i in range(n):
list.append(unpack_item())
return list
def unpack_array(self, unpack_item):
n = self.unpack_uint()
return self.unpack_farray(n, unpack_item)
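# Illustrative round trip with Unpacker (editor's sketch, not part of the
# original module; continues the Packer example above):
#
#   u = xdrlib.Unpacker(encoded)   # 'encoded' comes from Packer.get_buffer()
#   assert u.unpack_uint() == 42
#   assert u.unpack_string() == 'hello'
#   assert u.unpack_list(u.unpack_int) == [1, 2, 3]
#   u.done()                       # raises Error if unread bytes remain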
| mit | 4,179,870,690,447,247,000 | 23.082251 | 71 | 0.53712 | false |
dhhjx880713/GPy | GPy/likelihoods/mixed_noise.py | 7 | 3266 | # Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats, special
from . import link_functions
from .likelihood import Likelihood
from .gaussian import Gaussian
from ..core.parameterization import Param
from paramz.transformations import Logexp
from ..core.parameterization import Parameterized
import itertools
class MixedNoise(Likelihood):
def __init__(self, likelihoods_list, name='mixed_noise'):
# NOTE: at the moment this likelihood only works with a list of Gaussian likelihoods
super(Likelihood, self).__init__(name=name)
self.link_parameters(*likelihoods_list)
self.likelihoods_list = likelihoods_list
self.log_concave = False
def gaussian_variance(self, Y_metadata):
assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
ind = Y_metadata['output_index'].flatten()
variance = np.zeros(ind.size)
for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))):
variance[ind==j] = lik.variance
return variance
def betaY(self,Y,Y_metadata):
#TODO not here.
return Y/self.gaussian_variance(Y_metadata=Y_metadata)[:,None]
def update_gradients(self, gradients):
self.gradient = gradients
def exact_inference_gradients(self, dL_dKdiag, Y_metadata):
assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
ind = Y_metadata['output_index'].flatten()
return np.array([dL_dKdiag[ind==i].sum() for i in range(len(self.likelihoods_list))])
def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
ind = Y_metadata['output_index'].flatten()
_variance = np.array([self.likelihoods_list[j].variance for j in ind ])
if full_cov:
var += np.eye(var.shape[0])*_variance
else:
var += _variance
return mu, var
def predictive_variance(self, mu, sigma, Y_metadata):
_variance = self.gaussian_variance(Y_metadata)
return _variance + sigma**2
def predictive_quantiles(self, mu, var, quantiles, Y_metadata):
ind = Y_metadata['output_index'].flatten()
outputs = np.unique(ind)
Q = np.zeros( (mu.size,len(quantiles)) )
for j in outputs:
q = self.likelihoods_list[j].predictive_quantiles(mu[ind==j,:],
var[ind==j,:],quantiles,Y_metadata=None)
Q[ind==j,:] = np.hstack(q)
return [q[:,None] for q in Q.T]
def samples(self, gp, Y_metadata):
"""
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
"""
N1, N2 = gp.shape
Ysim = np.zeros((N1,N2))
ind = Y_metadata['output_index'].flatten()
for j in np.unique(ind):
flt = ind==j
gp_filtered = gp[flt,:]
n1 = gp_filtered.shape[0]
lik = self.likelihoods_list[j]
_ysim = np.array([np.random.normal(lik.gp_link.transf(gpj), scale=np.sqrt(lik.variance), size=1) for gpj in gp_filtered.flatten()])
Ysim[flt,:] = _ysim.reshape(n1,N2)
return Ysim
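# Illustrative construction (editor's sketch, not part of the original file;
# the import path and shapes are assumptions for the example): MixedNoise
# pairs each output of a multi-output model with its own Gaussian likelihood,
# selected per data row through Y_metadata['output_index'].
#
#   import numpy as np
#   from GPy.likelihoods import Gaussian, MixedNoise
#   lik = MixedNoise([Gaussian(variance=1.0), Gaussian(variance=0.1)])
#   Y_metadata = {'output_index': np.vstack([np.zeros((50, 1), dtype=int),
#                                            np.ones((30, 1), dtype=int)])}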
| bsd-3-clause | 8,159,442,841,256,426,000 | 38.829268 | 143 | 0.630435 | false |
sdgathman/cjdns | node_build/dependencies/libuv/build/gyp/test/ninja/action_dependencies/gyptest-action-dependencies.py | 54 | 1972 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that building an object file correctly depends on running actions in
dependent targets, but not the targets themselves.
"""
import os
import sys
import TestGyp
# NOTE(piman): This test will not work with other generators because:
# - it explicitly tests the optimization, which is not implemented (yet?) on
# other generators
# - it relies on the exact path to output object files, which is generator
# dependent, and actually, relies on the ability to build only that object file,
# which I don't think is available on all generators.
# TODO(piman): Extend to other generators when possible.
test = TestGyp.TestGyp(formats=['ninja'])
# xcode-ninja doesn't support building single object files by design.
if test.format == 'xcode-ninja':
test.skip_test()
test.run_gyp('action_dependencies.gyp', chdir='src')
chdir = 'relocate/src'
test.relocate('src', chdir)
objext = '.obj' if sys.platform == 'win32' else '.o'
test.build('action_dependencies.gyp',
os.path.join('obj', 'b.b' + objext),
chdir=chdir)
# The 'a' actions should be run (letting b.c compile), but the a static library
# should not be built.
test.built_file_must_not_exist('a', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_exist(os.path.join('obj', 'b.b' + objext), chdir=chdir)
test.build('action_dependencies.gyp',
os.path.join('obj', 'c.c' + objext),
chdir=chdir)
# 'a' and 'b' should be built, so that the 'c' action succeeds, letting c.c
# compile
test.built_file_must_exist('a', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_exist('b', type=test.EXECUTABLE, chdir=chdir)
test.built_file_must_exist(os.path.join('obj', 'c.c' + objext), chdir=chdir)
test.pass_test()
| gpl-3.0 | -7,519,894,939,436,061,000 | 34.214286 | 80 | 0.712982 | false |
Distrotech/bzr | bzrlib/tests/test_reconcile.py | 2 | 2802 | # Copyright (C) 2006, 2008-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for reconiliation behaviour that is repository independent."""
from bzrlib import (
bzrdir,
errors,
tests,
)
from bzrlib.reconcile import reconcile, Reconciler
from bzrlib.tests import per_repository
class TestWorksWithSharedRepositories(per_repository.TestCaseWithRepository):
def test_reweave_empty(self):
# we want a repo capable format
parent = bzrdir.BzrDirMetaFormat1().initialize('.')
parent.create_repository(shared=True)
parent.root_transport.mkdir('child')
child = bzrdir.BzrDirMetaFormat1().initialize('child')
self.assertRaises(errors.NoRepositoryPresent, child.open_repository)
reconciler = Reconciler(child)
reconciler.reconcile()
# smoke test for reconcile appears to work too.
reconcile(child)
# no inconsistent parents should have been found
# but the values should have been set.
self.assertEqual(0, reconciler.inconsistent_parents)
# and no garbage inventories
self.assertEqual(0, reconciler.garbage_inventories)
class TestReconciler(tests.TestCaseWithTransport):
def test_reconciler_with_no_branch(self):
repo = self.make_repository('repo')
reconciler = Reconciler(repo.bzrdir)
reconciler.reconcile()
# no inconsistent parents should have been found
# but the values should have been set.
self.assertEqual(0, reconciler.inconsistent_parents)
# and no garbage inventories
self.assertEqual(0, reconciler.garbage_inventories)
self.assertIs(None, reconciler.fixed_branch_history)
def test_reconciler_finds_branch(self):
a_branch = self.make_branch('a_branch')
reconciler = Reconciler(a_branch.bzrdir)
reconciler.reconcile()
# It should have checked the repository, and the branch
self.assertEqual(0, reconciler.inconsistent_parents)
self.assertEqual(0, reconciler.garbage_inventories)
self.assertIs(False, reconciler.fixed_branch_history)
| gpl-2.0 | 5,502,516,384,254,209,000 | 39.028571 | 78 | 0.717345 | false |
ryfx/modrana | modules/mod_cron.py | 1 | 13149 | # -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# A timing and scheduling module for modRana.
#----------------------------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from __future__ import with_statement # for python 2.5
from modules.base_module import RanaModule
import threading
# only import GTK libs if the GTK GUI is used
from core import gs
if gs.GUIString == "GTK":
import gobject
elif gs.GUIString.lower() == "qt5":
import pyotherside
elif gs.GUIString.lower() == "qml":
from PySide import QtCore
def getModule(*args, **kwargs):
"""
return module version corresponding to the currently used toolkit
(e.g. one that uses the timers provided by the toolkit
- gobject.timeout_add, QTimer, etc.
"""
if gs.GUIString.lower() == 'qt5':
return CronQt5(*args, **kwargs)
if gs.GUIString == 'QML':
return CronQt(*args, **kwargs)
elif gs.GUIString == 'GTK': # GTK for now
return CronGTK(*args, **kwargs)
else:
return Cron(*args, **kwargs)
class Cron(RanaModule):
"""A timing and scheduling module for modRana"""
# -> this is an abstract class
# that specifies and interface for concrete implementations
#
# Why is there a special module for timing ?
# The reason is twofold:
# Toolkit independence and power saving/monitoring.
#
# If all timing calls go through this module,
# the underlying engine (currently glibs gobject)
# can be more easily changed than rewriting code everywhere.
#
# Also, modRana targets mobile devices with limited power budget.
# If all timing goes through this module, rogue modules with many frequent
# timers can be easily identified.
# It might be also possible to stop or pause some/all of the timers
# after a period of inactivity, or some such.
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
def addIdle(self, callback, args):
"""add a callback that is called once the main loop becomes idle"""
pass
def addTimeout(self, callback, timeout, caller, description, args=None):
"""the callback will be called timeout + time needed to execute the callback
and other events"""
if not args: args = []
pass
def _doTimeout(self, timeoutId, callback, args):
"""wrapper about the timeout function, which makes it possible to check
if a timeout is still in progress from the "outside"
- like this, the underlying timer should also be easily replaceable
"""
if callback(*args) == False:
# the callback returned False,
# that means it wants to quit the timeout
# stop tracking
self.removeTimeout(timeoutId)
# propagate the quit signal
return False
else:
return True # just run the loop
def removeTimeout(self, timeoutId):
"""remove timeout with a given id"""
pass
def modifyTimeout(self, timeoutId, newTimeout):
"""modify the duration of a timeout in progress"""
pass
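# Illustrative caller-side usage of this API (editor's sketch, not part of the
# original module; the way the cron module is looked up is hypothetical):
#
#   def _update(self):
#       ...                 # periodic work
#       return True         # returning False cancels the timeout
#
#   cron = self.m.get('cron', None)   # hypothetical module lookup
#   if cron:
#       timeout_id = cron.addTimeout(self._update, 2000, self, "2 s update")
#       cron.modifyTimeout(timeout_id, 5000)   # slow the timer down to 5 s
#       cron.removeTimeout(timeout_id)         # stop it entirely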
class CronGTK(Cron):
"""A GTK timing and scheduling module for modRana"""
def __init__(self, *args, **kwargs):
Cron.__init__(self, *args, **kwargs)
gui = self.modrana.gui
self.nextId = 0
# cronTab and activeIds should be in sync
self.cronTab = {"idle": {}, "timeout": {}}
self.dataLock = threading.RLock()
def _getID(self):
"""get an unique id for timing related request that can be
returned to the callers and used as a handle
TODO: can int overflow in Python ?"""
timeoutId = self.nextId
self.nextId += 1
return timeoutId
def addIdle(self, callback, args):
"""add a callback that is called once the main loop becomes idle"""
gobject.idle_add(callback, *args)
def addTimeout(self, callback, timeout, caller, description, args=None):
"""the callback will be called timeout + time needed to execute the callback
and other events"""
if not args: args = []
timeoutId = self._getID()
realId = gobject.timeout_add(timeout, self._doTimeout, timeoutId, callback, args)
timeoutTuple = (callback, args, timeout, caller, description, realId)
with self.dataLock:
self.cronTab['timeout'][timeoutId] = timeoutTuple
return timeoutId
def removeTimeout(self, timeoutId):
"""remove timeout with a given id"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
(callback, args, timeout, caller, description, realId) = self.cronTab['timeout'][timeoutId]
del self.cronTab['timeout'][timeoutId]
gobject.source_remove(realId)
else:
self.log.error("can't remove timeout, wrong id: %s", timeoutId)
def modifyTimeout(self, timeoutId, newTimeout):
"""modify the duration of a timeout in progress"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
# load the timeout description
(callback, args, timeout, caller, description, realId) = self.cronTab['timeout'][timeoutId]
gobject.source_remove(realId) # remove the old timeout
realId = gobject.timeout_add(newTimeout, self._doTimeout, timeoutId, callback, args) # new timeout
# update the timeout description
self.cronTab['timeout'][timeoutId] = (callback, args, newTimeout, caller, description, realId)
else:
self.log.error("can't modify timeout, wrong id: %s", timeoutId)
class CronQt(Cron):
"""A Qt timing and scheduling module for modRana"""
def __init__(self, *args, **kwargs):
Cron.__init__(self, *args, **kwargs)
self.nextId = 0
# cronTab and activeIds should be in sync
self.cronTab = {"idle": {}, "timeout": {}}
self.dataLock = threading.RLock()
def _getID(self):
"""get an unique id for timing related request that can be
returned to the callers and used as a handle
TODO: can int overflow in Python ?
TODO: id recycling ?"""
with self.dataLock:
timeoutId = self.nextId
self.nextId += 1
return timeoutId
def addIdle(self, callback, args):
"""add a callback that is called once the main loop becomes idle"""
pass
def addTimeout(self, callback, timeout, caller, description, args=None):
"""the callback will be called timeout + time needed to execute the callback
and other events
"""
if not args: args = []
# create and configure the timer
timer = QtCore.QTimer()
# timer.setInterval(timeout)
timeoutId = self._getID()
# create a new function that calls the callback processing function
# with the provided arguments
handleThisTimeout = lambda: self._doTimeout(timeoutId, callback, args)
# connect this function to the timeout
timer.timeout.connect(handleThisTimeout)
# store timer data
timeoutTuple = (callback, args, timeout, caller, description, timeoutId, timer)
with self.dataLock:
self.cronTab['timeout'][timeoutId] = timeoutTuple
# start the timer
timer.start(timeout)
# return the id
return timeoutId
def removeTimeout(self, timeoutId):
"""remove timeout with a given id"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
(callback, args, timeout, caller, description, timeoutId, timer) = self.cronTab['timeout'][timeoutId]
timer.stop()
del self.cronTab['timeout'][timeoutId]
else:
self.log.error("can't remove timeout, wrong id: %s", timeoutId)
def modifyTimeout(self, timeoutId, newTimeout):
"""modify the duration of a timeout in progress"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
# load the timeout data
(callback, args, timeout, caller, description, timeoutId, timer) = self.cronTab['timeout'][timeoutId]
# reset the timeout duration
timer.setInterval(newTimeout)
# update the timeout data
self.cronTab['timeout'][timeoutId] = (callback, args, newTimeout, caller, description, timeoutId, timer)
else:
self.log.error("can't modify timeout, wrong id: %s", timeoutId)
class CronQt5(Cron):
"""A Qt 5 timing and scheduling module for modRana"""
def __init__(self, *args, **kwargs):
Cron.__init__(self, *args, **kwargs)
self.nextId = 0
# cronTab and activeIds should be in sync
self.cronTab = {"idle": {}, "timeout": {}}
self.dataLock = threading.RLock()
def _timerTriggered(self, timerId):
with self.dataLock:
timerTuple = self.cronTab['timeout'].get(timerId)
if timerTuple:
call = timerTuple[6]
call()
else:
self.log.error("unknown timer triggered: %s", timerId)
def _getID(self):
"""get an unique id for timing related request that can be
returned to the callers and used as a handle
TODO: can int overflow in Python ?
TODO: id recycling ?"""
with self.dataLock:
timeoutId = self.nextId
self.nextId += 1
return timeoutId
def addTimeout(self, callback, timeout, caller, description, args=None):
"""the callback will be called timeout + time needed to execute the callback
and other events
"""
if not args: args = []
timeoutId = self._getID()
self.log.debug("qt5: adding a %s ms timeout from %s as %s", timeout, caller, timeoutId)
# create a new function that calls the callback processing function
# with thh provided arguments
handleThisTimeout = lambda: self._doTimeout(timeoutId, callback, args)
# store timer data
# - we don't actually have a Python-side timer object, so we just store
# the callback function and tell QML to add the timer
timeoutTuple = (callback, args, timeout, caller, description, timeoutId, handleThisTimeout)
with self.dataLock:
self.cronTab['timeout'][timeoutId] = timeoutTuple
pyotherside.send("addTimer", timeoutId, timeout)
# return the id
return timeoutId
def removeTimeout(self, timeoutId):
"""remove timeout with a given id"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
caller = self.cronTab['timeout'][timeoutId][3]
del self.cronTab['timeout'][timeoutId]
pyotherside.send("removeTimer", timeoutId)
self.log.debug("qt5: timeout %s from %s has been removed", timeoutId, caller)
else:
self.log.error("can't remove timeout, wrong id: %s", timeoutId)
def modifyTimeout(self, timeoutId, newTimeout):
"""modify the duration of a timeout in progress"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
# we don't store the timeout value Python-side,
# so we just notify QML about the change
pyotherside.send("modifyTimerTimeout", timeoutId, newTimeout)
else:
self.log.error("can't modify timeout, wrong id: %s", timeoutId)
# def _addInfo(self, id, info):
# """add a message for a timeout handler to read"""
# with self.dataLock:
# if id in self.info:
# self.info[id].append(info) # add message to queue
# else:
# self.info[id] = [info] # create message queue
#
# def _popInfo(self, id):
# with self.dataLock:
# if id in self.info:
# try:
# return self.info[id].pop() # try to return the message
# except IndexError:
# del self.info[id] # message queue empty, delete it
# return None
# else:
# return None
| gpl-3.0 | -5,857,624,237,907,751,000 | 39.334356 | 120 | 0.605673 | false |
samueldotj/TeeRISC-Simulator | util/o3-pipeview.py | 58 | 15788 | #! /usr/bin/env python
# Copyright (c) 2011 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Giacomo Gabrielli
# Pipeline activity viewer for the O3 CPU model.
import optparse
import os
import sys
import copy
# Temporary storage for instructions. The queue is filled in out-of-order
# until it reaches 'max_threshold' number of instructions. It is then
# sorted out and instructions are printed out until their number drops to
# 'min_threshold'.
# It is assumed that the instructions are not out of order for more than
# 'min_threshold' places - otherwise they will appear out of order.
insts = {
'queue': [] , # Instructions to print.
'max_threshold':2000, # Instructions are sorted out and printed when
# their number reaches this threshold.
'min_threshold':1000, # Printing stops when this number is reached.
'sn_start':0, # The first instruction seq. number to be printed.
'sn_stop':0, # The last instruction seq. number to be printed.
'tick_start':0, # The first tick to be printed
'tick_stop':0, # The last tick to be printed
'tick_drift':2000, # Used to calculate the start and the end of main
# loop. We assume here that the instructions are not
# out of order for more than 2000 CPU ticks,
# otherwise the print may not start/stop
# at the time specified by tick_start/stop.
'only_committed':0, # Set if only committed instructions are printed.
}
def process_trace(trace, outfile, cycle_time, width, color, timestamps,
committed_only, store_completions, start_tick, stop_tick, start_sn, stop_sn):
global insts
insts['sn_start'] = start_sn
insts['sn_stop'] = stop_sn
insts['tick_start'] = start_tick
insts['tick_stop'] = stop_tick
insts['tick_drift'] = insts['tick_drift'] * cycle_time
insts['only_committed'] = committed_only
line = None
fields = None
# Skip lines up to the starting tick
if start_tick != 0:
while True:
line = trace.readline()
if not line: return
fields = line.split(':')
if fields[0] != 'O3PipeView': continue
if int(fields[2]) >= start_tick: break
elif start_sn != 0:
while True:
line = trace.readline()
if not line: return
fields = line.split(':')
if fields[0] != 'O3PipeView': continue
if fields[1] == 'fetch' and int(fields[5]) >= start_sn: break
else:
line = trace.readline()
if not line: return
fields = line.split(':')
# Skip lines up to next instruction fetch
while fields[0] != 'O3PipeView' or fields[1] != 'fetch':
line = trace.readline()
if not line: return
fields = line.split(':')
# Print header
outfile.write('// f = fetch, d = decode, n = rename, p = dispatch, '
'i = issue, c = complete, r = retire')
if store_completions:
outfile.write(', s = store-complete')
outfile.write('\n\n')
outfile.write(' ' + 'timeline'.center(width) +
' ' + 'tick'.center(15) +
' ' + 'pc.upc'.center(12) +
' ' + 'disasm'.ljust(25) +
' ' + 'seq_num'.center(10))
if timestamps:
outfile.write('timestamps'.center(25))
outfile.write('\n')
# Region of interest
curr_inst = {}
while True:
if fields[0] == 'O3PipeView':
curr_inst[fields[1]] = int(fields[2])
if fields[1] == 'fetch':
if ((stop_tick > 0 and int(fields[2]) > stop_tick+insts['tick_drift']) or
(stop_sn > 0 and int(fields[5]) > (stop_sn+insts['max_threshold']))):
print_insts(outfile, cycle_time, width, color, timestamps, store_completions, 0)
return
(curr_inst['pc'], curr_inst['upc']) = fields[3:5]
curr_inst['sn'] = int(fields[5])
curr_inst['disasm'] = ' '.join(fields[6][:-1].split())
elif fields[1] == 'retire':
if curr_inst['retire'] == 0:
curr_inst['disasm'] = '-----' + curr_inst['disasm']
if store_completions:
curr_inst[fields[3]] = int(fields[4])
queue_inst(outfile, curr_inst, cycle_time, width, color, timestamps, store_completions)
line = trace.readline()
if not line:
print_insts(outfile, cycle_time, width, color, timestamps, store_completions, 0)
return
fields = line.split(':')
#Sorts out instructions according to sequence number
def compare_by_sn(a, b):
return cmp(a['sn'], b['sn'])
# Puts new instruction into the print queue.
# Sorts out and prints instructions when their number reaches threshold value
def queue_inst(outfile, inst, cycle_time, width, color, timestamps, store_completions):
global insts
l_copy = copy.deepcopy(inst)
insts['queue'].append(l_copy)
if len(insts['queue']) > insts['max_threshold']:
print_insts(outfile, cycle_time, width, color, timestamps, store_completions, insts['min_threshold'])
# Sorts out and prints instructions in print queue
def print_insts(outfile, cycle_time, width, color, timestamps, store_completions, lower_threshold):
global insts
insts['queue'].sort(compare_by_sn)
while len(insts['queue']) > lower_threshold:
print_item=insts['queue'].pop(0)
# As the instructions are processed out of order the main loop starts
# earlier than specified by start_sn/tick and finishes later than what
# is defined in stop_sn/tick.
# Therefore, here we have to filter out instructions that reside out of
# the specified boundaries.
if (insts['sn_start'] > 0 and print_item['sn'] < insts['sn_start']):
continue; # earlier than the starting sequence number
if (insts['sn_stop'] > 0 and print_item['sn'] > insts['sn_stop']):
continue; # later than the ending sequence number
if (insts['tick_start'] > 0 and print_item['fetch'] < insts['tick_start']):
continue; # earlier than the starting tick number
if (insts['tick_stop'] > 0 and print_item['fetch'] > insts['tick_stop']):
continue; # later than the ending tick number
if (insts['only_committed'] != 0 and print_item['retire'] == 0):
continue; # retire is set to zero if it hasn't been completed
print_inst(outfile, print_item, cycle_time, width, color, timestamps, store_completions)
# Prints a single instruction
def print_inst(outfile, inst, cycle_time, width, color, timestamps, store_completions):
if color:
from m5.util.terminal import termcap
else:
from m5.util.terminal import no_termcap as termcap
# Pipeline stages
stages = [{'name': 'fetch',
'color': termcap.Blue + termcap.Reverse,
'shorthand': 'f'},
{'name': 'decode',
'color': termcap.Yellow + termcap.Reverse,
'shorthand': 'd'},
{'name': 'rename',
'color': termcap.Magenta + termcap.Reverse,
'shorthand': 'n'},
{'name': 'dispatch',
'color': termcap.Green + termcap.Reverse,
'shorthand': 'p'},
{'name': 'issue',
'color': termcap.Red + termcap.Reverse,
'shorthand': 'i'},
{'name': 'complete',
'color': termcap.Cyan + termcap.Reverse,
'shorthand': 'c'},
{'name': 'retire',
'color': termcap.Blue + termcap.Reverse,
'shorthand': 'r'}
]
if store_completions:
stages.append(
{'name': 'store',
'color': termcap.Yellow + termcap.Reverse,
'shorthand': 's'})
# Print
time_width = width * cycle_time
base_tick = (inst['fetch'] / time_width) * time_width
# Find out the time of the last event - it may not
# be 'retire' if the instruction is not completed.
last_event_time = max(inst['fetch'], inst['decode'],inst['rename'],
inst['dispatch'],inst['issue'], inst['complete'], inst['retire'])
if store_completions:
last_event_time = max(last_event_time, inst['store'])
# Timeline shorter than time_width is printed in compact form where
# the print continues at the start of the same line.
if ((last_event_time - inst['fetch']) < time_width):
num_lines = 1 # compact form
else:
num_lines = ((last_event_time - base_tick) / time_width) + 1
curr_color = termcap.Normal
# This will visually distinguish completed and abandoned instructions.
if inst['retire'] == 0: dot = '=' # abandoned instruction
else: dot = '.' # completed instruction
for i in range(num_lines):
start_tick = base_tick + i * time_width
end_tick = start_tick + time_width
if num_lines == 1: # compact form
end_tick += (inst['fetch'] - base_tick)
events = []
for stage_idx in range(len(stages)):
tick = inst[stages[stage_idx]['name']]
if tick != 0:
if tick >= start_tick and tick < end_tick:
events.append((tick % time_width,
stages[stage_idx]['name'],
stage_idx, tick))
events.sort()
outfile.write('[')
pos = 0
if num_lines == 1 and events[0][2] != 0: # event is not fetch
curr_color = stages[events[0][2] - 1]['color']
for event in events:
if (stages[event[2]]['name'] == 'dispatch' and
inst['dispatch'] == inst['issue']):
continue
outfile.write(curr_color + dot * ((event[0] / cycle_time) - pos))
outfile.write(stages[event[2]]['color'] +
stages[event[2]]['shorthand'])
if event[3] != last_event_time: # event is not the last one
curr_color = stages[event[2]]['color']
else:
curr_color = termcap.Normal
pos = (event[0] / cycle_time) + 1
outfile.write(curr_color + dot * (width - pos) + termcap.Normal +
']-(' + str(base_tick + i * time_width).rjust(15) + ') ')
if i == 0:
outfile.write('%s.%s %s [%s]' % (
inst['pc'].rjust(10),
inst['upc'],
inst['disasm'].ljust(25),
str(inst['sn']).rjust(10)))
if timestamps:
outfile.write(' f=%s, r=%s' % (inst['fetch'], inst['retire']))
outfile.write('\n')
else:
outfile.write('...'.center(12) + '\n')
def validate_range(my_range):
my_range = [int(i) for i in my_range.split(':')]
if (len(my_range) != 2 or
my_range[0] < 0 or
my_range[1] > 0 and my_range[0] >= my_range[1]):
return None
return my_range
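# Illustrative invocation (editor's note, not part of the original script):
# the -t and -i options take "start:stop" ranges where -1 means "no upper
# bound", e.g.
#
#   ./o3-pipeview.py -c 500 -w 100 --color -t 1000000:1100000 trace.out
#
# where trace.out is assumed to be a trace generated by a gem5 run with the
# O3PipeView debug flag enabled.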
def main():
# Parse options
usage = ('%prog [OPTION]... TRACE_FILE')
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'-o',
dest='outfile',
default=os.path.join(os.getcwd(), 'o3-pipeview.out'),
help="output file (default: '%default')")
parser.add_option(
'-t',
dest='tick_range',
default='0:-1',
help="tick range (default: '%default'; -1 == inf.)")
parser.add_option(
'-i',
dest='inst_range',
default='0:-1',
help="instruction range (default: '%default'; -1 == inf.)")
parser.add_option(
'-w',
dest='width',
type='int', default=80,
help="timeline width (default: '%default')")
parser.add_option(
'--color',
action='store_true', default=False,
help="enable colored output (default: '%default')")
parser.add_option(
'-c', '--cycle-time',
type='int', default=1000,
help="CPU cycle time in ticks (default: '%default')")
parser.add_option(
'--timestamps',
action='store_true', default=False,
help="print fetch and retire timestamps (default: '%default')")
parser.add_option(
'--only_committed',
action='store_true', default=False,
help="display only committed (completed) instructions (default: '%default')")
parser.add_option(
'--store_completions',
action='store_true', default=False,
help="additionally display store completion ticks (default: '%default')")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
sys.exit(1)
tick_range = validate_range(options.tick_range)
if not tick_range:
parser.error('invalid range')
sys.exit(1)
inst_range = validate_range(options.inst_range)
if not inst_range:
parser.error('invalid range')
sys.exit(1)
# Process trace
print 'Processing trace... ',
with open(args[0], 'r') as trace:
with open(options.outfile, 'w') as out:
process_trace(trace, out, options.cycle_time, options.width,
options.color, options.timestamps,
options.only_committed, options.store_completions,
*(tick_range + inst_range))
print 'done!'
if __name__ == '__main__':
sys.path.append(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'src', 'python'))
main()
| bsd-3-clause | 5,734,665,356,773,524,000 | 40.877984 | 109 | 0.585381 | false |
drewp/tahoe-lafs | setuptools-0.6c16dev3.egg/setuptools/command/develop.py | 7 | 5478 | from setuptools.command.easy_install import easy_install
from distutils.util import convert_path
from pkg_resources import Distribution, PathMetadata, normalize_path
from distutils import log
from distutils.errors import *
import sys, os, setuptools, glob
class develop(easy_install):
"""Set up package for development"""
description = "install package in 'development mode'"
user_options = easy_install.user_options + [
("uninstall", "u", "Uninstall this source package"),
("egg-path=", None, "Set the path to be used in the .egg-link file"),
]
boolean_options = easy_install.boolean_options + ['uninstall']
command_consumes_arguments = False # override base
def run(self):
self.old_run()
if sys.platform == "win32":
from setuptools.command.scriptsetup import do_scriptsetup
do_scriptsetup()
def old_run(self):
if self.uninstall:
self.multi_version = True
self.uninstall_link()
else:
self.install_for_development()
self.warn_deprecated_options()
def initialize_options(self):
self.uninstall = None
self.egg_path = None
easy_install.initialize_options(self)
self.setup_path = None
self.always_copy_from = '.' # always copy eggs installed in curdir
def finalize_options(self):
ei = self.get_finalized_command("egg_info")
if ei.broken_egg_info:
raise DistutilsError(
"Please rename %r to %r before using 'develop'"
% (ei.egg_info, ei.broken_egg_info)
)
self.args = [ei.egg_name]
easy_install.finalize_options(self)
# pick up setup-dir .egg files only: no .egg-info
self.package_index.scan(glob.glob('*.egg'))
self.egg_link = os.path.join(self.install_dir, ei.egg_name+'.egg-link')
self.egg_base = ei.egg_base
if self.egg_path is None:
self.egg_path = os.path.abspath(ei.egg_base)
target = normalize_path(self.egg_base)
if normalize_path(os.path.join(self.install_dir, self.egg_path)) != target:
raise DistutilsOptionError(
"--egg-path must be a relative path from the install"
" directory to "+target
)
# Make a distribution for the package's source
self.dist = Distribution(
target,
PathMetadata(target, os.path.abspath(ei.egg_info)),
project_name = ei.egg_name
)
p = self.egg_base.replace(os.sep,'/')
if p!= os.curdir:
p = '../' * (p.count('/')+1)
self.setup_path = p
p = normalize_path(os.path.join(self.install_dir, self.egg_path, p))
if p != normalize_path(os.curdir):
raise DistutilsOptionError(
"Can't get a consistent path to setup script from"
" installation directory", p, normalize_path(os.curdir))
def install_for_development(self):
# Ensure metadata is up-to-date
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
self.install_site_py() # ensure that target dir is site-safe
if setuptools.bootstrap_install_from:
self.easy_install(setuptools.bootstrap_install_from)
setuptools.bootstrap_install_from = None
# create an .egg-link in the installation dir, pointing to our egg
log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
if not self.dry_run:
f = open(self.egg_link,"w")
f.write(self.egg_path + "\n" + self.setup_path)
f.close()
# postprocess the installed distro, fixing up .pth, installing scripts,
# and handling requirements
self.process_distribution(None, self.dist, not self.no_deps)
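# Editor's note (illustrative, not part of the original source): the
# .egg-link file written above normally holds just two lines -- the value of
# self.egg_path followed by self.setup_path -- for example (paths are
# hypothetical):
#
#   /home/user/src/myproject
#   .
#
# where the second line becomes e.g. '../' when the egg metadata lives in a
# subdirectory of the project.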
def uninstall_link(self):
if os.path.exists(self.egg_link):
log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
contents = [line.rstrip() for line in file(self.egg_link)]
if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
log.warn("Link points to %s: uninstall aborted", contents)
return
if not self.dry_run:
os.unlink(self.egg_link)
if not self.dry_run:
self.update_pth(self.dist) # remove any .pth link to us
if self.distribution.scripts:
# XXX should also check for entry point scripts!
log.warn("Note: you must uninstall or replace scripts manually!")
def install_egg_scripts(self, dist):
if dist is not self.dist:
# Installing a dependency, so fall back to normal behavior
return easy_install.install_egg_scripts(self,dist)
# create wrapper scripts in the script dir, pointing to dist.scripts
# new-style...
self.install_wrapper_scripts(dist)
# ...and old-style
for script_name in self.distribution.scripts or []:
script_path = os.path.abspath(convert_path(script_name))
script_name = os.path.basename(script_path)
f = open(script_path,'rU')
script_text = f.read()
f.close()
self.install_script(dist, script_name, script_text, script_path)
| gpl-2.0 | 6,660,655,729,136,031,000 | 32.2 | 83 | 0.599489 | false |
odoousers2014/LibrERP | account_financial_report_webkit/wizard/general_ledger_wizard.py | 2 | 6208 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright Camptocamp SA 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import fields, osv
class AccountReportGeneralLedgerWizard(osv.osv_memory):
"""Will launch general ledger report and pass requiered args"""
_inherit = "account.common.account.report"
_name = "general.ledger.webkit"
_description = "General Ledger Report"
def _get_account_ids(self, cr, uid, context=None):
res = False
if context.get('active_model', False) == 'account.account' and context.get('active_ids', False):
res = context['active_ids']
return res
_columns = {
'amount_currency': fields.boolean("With Currency",
help="It adds the currency column"),
'display_account': fields.selection([('bal_all', 'All'),
('bal_mix', 'With transactions or non zero balance')],
'Display accounts',
required=True),
'account_ids': fields.many2many('account.account', string='Filter on accounts',
help="""Only selected accounts will be printed. Leave empty to print all accounts."""),
'centralize': fields.boolean('Activate Centralization', help='Uncheck to display all the details of centralized accounts.')
}
_defaults = {
'amount_currency': False,
'display_account': 'bal_mix',
'account_ids': _get_account_ids,
'centralize': True,
}
def _check_fiscalyear(self, cr, uid, ids, context=None):
obj = self.read(cr, uid, ids[0], ['fiscalyear_id', 'filter'], context=context)
if not obj['fiscalyear_id'] and obj['filter'] == 'filter_no':
return False
return True
_constraints = [
(_check_fiscalyear, 'When no Fiscal year is selected, you must choose to filter by periods or by date.', ['filter']),
]
def pre_print_report(self, cr, uid, ids, data, context=None):
data = super(AccountReportGeneralLedgerWizard, self).pre_print_report(cr, uid, ids, data, context)
if context is None:
context = {}
# will be used to attach the report on the main account
data['ids'] = [data['form']['chart_account_id']]
vals = self.read(cr, uid, ids,
['amount_currency',
'display_account',
'account_ids',
'centralize'],
context=context)[0]
data['form'].update(vals)
return data
def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
res = {}
if filter == 'filter_no':
res['value'] = {'period_from': False, 'period_to': False, 'date_from': False ,'date_to': False}
if filter == 'filter_date':
if fiscalyear_id:
fyear = self.pool.get('account.fiscalyear').browse(cr, uid, fiscalyear_id, context=context)
date_from = fyear.date_start
date_to = fyear.date_stop > time.strftime('%Y-%m-%d') and time.strftime('%Y-%m-%d') or fyear.date_stop
else:
date_from, date_to = time.strftime('%Y-01-01'), time.strftime('%Y-%m-%d')
res['value'] = {'period_from': False, 'period_to': False, 'date_from': date_from, 'date_to': date_to}
if filter == 'filter_period' and fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND COALESCE(p.special, FALSE) = FALSE
ORDER BY p.date_start ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
AND COALESCE(p.special, FALSE) = FALSE
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods:
start_period = end_period = periods[0]
if len(periods) > 1:
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period, 'date_from': False, 'date_to': False}
return res
def _print_report(self, cursor, uid, ids, data, context=None):
context = context or {}
# we update form with display account value
data = self.pre_print_report(cursor, uid, ids, data, context=context)
return {'type': 'ir.actions.report.xml',
'report_name': 'account.account_report_general_ledger_webkit',
'datas': data}
AccountReportGeneralLedgerWizard()
| agpl-3.0 | 2,558,250,226,260,745,000 | 46.030303 | 131 | 0.534311 | false |
john-wang-metro/metro-openerp | metro_mrp/__openerp__.py | 2 | 2231 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Metro MRP',
'version': '1.0',
'category': 'Metro',
'description': """
Metro MRP Extension:
1. Add CNC Work Order
(Ported to OpenERP v 7.0 by Metro Tower Trucks.)
""",
'author': 'Metro Tower Trucks',
'website': 'http://www.metrotowtrucks.com',
'depends': ["metro", "sale", "metro_stock", "product_manufacturer", "document", "mrp_operations", "procurement", "mrp","project"],
'data': [
'security/ir.model.access.csv',
'security/mrp_security.xml',
'res_config_view.xml',
'wizard/work_order_cnc_line_done_view.xml',
'wizard/wo_material_request_view.xml',
'wizard/mo_actions_view.xml',
'work_order_cnc_view.xml',
'mrp_view.xml',
'mrp_sequence.xml',
'wizard/add_common_bom_view.xml',
'wizard/bom_import_view.xml',
'mrp_workflow.xml',
'pdm.xml',
'procurement_view.xml'
],
'test': [],
'demo': [],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,604,554,904,541,281,000 | 34.412698 | 134 | 0.566114 | false |
thispc/download-manager | module/plugins/crypter/CryptCat.py | 8 | 1949 | # -*- coding: utf-8 -*-
import re
from ..internal.SimpleCrypter import SimpleCrypter
class CryptCat(SimpleCrypter):
__name__ = "CryptCat"
__type__ = "crypter"
__version__ = "0.04"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?crypt\.cat/\w+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No",
"Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """crypt.cat decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
OFFLINE_PATTERN = r'Folder not available!'
LINK_PATTERN = r'<input .+?readonly="" value="\s*(.+?)" type="text">'
def get_links(self):
baseurl = self.req.http.lastEffectiveURL
url, inputs = self.parse_html_form()
if ">Enter your password.<" in self.data:
password = self.get_password()
if not password:
self.fail(_("Password required"))
inputs['Pass1'] = password
elif "Enter Captcha" in self.data:
m = re.search(r'<img src="(.+?)"', self.data)
if m is not None:
captcha_code = self.captcha.decrypt(
m.group(1), input_type="jpeg")
inputs['security_code'] = captcha_code
else:
return []
else:
return []
self.data = self.load(baseurl, post=inputs, ref=baseurl)
if "You have entered an incorrect password." in self.data:
self.fail(_("Wrong password"))
elif "Your filled the captcha wrongly!" in self.data:
self.retry_captcha()
return re.findall(self.LINK_PATTERN, self.data)
| gpl-3.0 | 6,248,309,538,033,200,000 | 30.95082 | 95 | 0.541303 | false |
simonmonk/squid | build/lib.linux-armv6l-2.7/squid.py | 1 | 1424 | #squid.py Library
import RPi.GPIO as GPIO
import time
WHITE = (30, 30, 30)
OFF = (0, 0, 0)
RED = (100, 0, 0)
GREEN = (0, 100, 0)
BLUE = (0, 0, 100)
YELLOW = (50, 50, 0)
PURPLE = (50, 0, 50)
CYAN = (0, 50, 50)
class Squid:
RED_PIN = 0
GREEN_PIN = 0
BLUE_PIN = 0
red_pwm = 0
green_pwm = 0
blue_pwm = 0
def __init__(self, red_pin, green_pin, blue_pin):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
self.RED_PIN, self.GREEN_PIN, self.BLUE_PIN = red_pin, green_pin, blue_pin
GPIO.setup(self.RED_PIN, GPIO.OUT)
self.red_pwm = GPIO.PWM(self.RED_PIN, 500)
self.red_pwm.start(0)
GPIO.setup(self.GREEN_PIN, GPIO.OUT)
self.green_pwm = GPIO.PWM(self.GREEN_PIN, 500)
self.green_pwm.start(0)
GPIO.setup(self.BLUE_PIN, GPIO.OUT)
self.blue_pwm = GPIO.PWM(self.BLUE_PIN, 500)
self.blue_pwm.start(0)
def set_red(self, brightness):
self.red_pwm.ChangeDutyCycle(brightness)
def set_green(self, brightness):
self.green_pwm.ChangeDutyCycle(brightness)
def set_blue(self, brightness):
self.blue_pwm.ChangeDutyCycle(brightness)
def set_color(self, (r, g, b), brightness = 100):
self.set_red(r * brightness / 100)
self.set_green(g * brightness / 100)
self.set_blue(b * brightness / 100)
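# Illustrative usage (editor's sketch, not part of the original library; the
# GPIO pin numbers are hypothetical and depend on how the RGB LED is wired --
# the library uses BCM numbering):
#
#   import time
#   from squid import Squid, RED, PURPLE
#   rgb = Squid(18, 23, 24)   # red, green and blue GPIO pins
#   rgb.set_color(RED)
#   time.sleep(1)
#   rgb.set_color(PURPLE, brightness=50)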
| mit | 5,289,138,572,080,418,000 | 24.428571 | 82 | 0.571629 | false |
hyowon/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_mock.py | 496 | 5168 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for mock module."""
import Queue
import threading
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import mock
class MockConnTest(unittest.TestCase):
"""A unittest for MockConn class."""
def setUp(self):
self._conn = mock.MockConn('ABC\r\nDEFG\r\n\r\nHIJK')
def test_readline(self):
self.assertEqual('ABC\r\n', self._conn.readline())
self.assertEqual('DEFG\r\n', self._conn.readline())
self.assertEqual('\r\n', self._conn.readline())
self.assertEqual('HIJK', self._conn.readline())
self.assertEqual('', self._conn.readline())
def test_read(self):
self.assertEqual('ABC\r\nD', self._conn.read(6))
self.assertEqual('EFG\r\n\r\nHI', self._conn.read(9))
self.assertEqual('JK', self._conn.read(10))
self.assertEqual('', self._conn.read(10))
def test_read_and_readline(self):
self.assertEqual('ABC\r\nD', self._conn.read(6))
self.assertEqual('EFG\r\n', self._conn.readline())
self.assertEqual('\r\nHIJK', self._conn.read(9))
self.assertEqual('', self._conn.readline())
def test_write(self):
self._conn.write('Hello\r\n')
self._conn.write('World\r\n')
self.assertEqual('Hello\r\nWorld\r\n', self._conn.written_data())
class MockBlockingConnTest(unittest.TestCase):
"""A unittest for MockBlockingConn class."""
def test_read(self):
"""Tests that data put to MockBlockingConn by put_bytes method can be
read from it.
"""
class LineReader(threading.Thread):
"""A test class that launches a thread, calls readline on the
specified conn repeatedly and puts the read data to the specified
queue.
"""
def __init__(self, conn, queue):
threading.Thread.__init__(self)
self._queue = queue
self._conn = conn
self.setDaemon(True)
self.start()
def run(self):
while True:
data = self._conn.readline()
self._queue.put(data)
conn = mock.MockBlockingConn()
queue = Queue.Queue()
reader = LineReader(conn, queue)
self.failUnless(queue.empty())
conn.put_bytes('Foo bar\r\n')
read = queue.get()
self.assertEqual('Foo bar\r\n', read)
class MockTableTest(unittest.TestCase):
"""A unittest for MockTable class."""
def test_create_from_dict(self):
table = mock.MockTable({'Key': 'Value'})
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_create_from_list(self):
table = mock.MockTable([('Key', 'Value')])
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_create_from_tuple(self):
table = mock.MockTable((('Key', 'Value'),))
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_set_and_get(self):
table = mock.MockTable()
self.assertEqual(None, table.get('Key'))
table['Key'] = 'Value'
self.assertEqual('Value', table.get('Key'))
self.assertEqual('Value', table.get('key'))
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['Key'])
self.assertEqual('Value', table['key'])
self.assertEqual('Value', table['KEY'])
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 | 1,818,418,664,186,237,200 | 34.641379 | 77 | 0.642995 | false |
hlieberman/debian-ansible | docsite/build-site.py | 35 | 3249 | #!/usr/bin/env python
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of the Ansible Documentation
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__docformat__ = 'restructuredtext'
import os
import sys
import traceback
try:
from sphinx.application import Sphinx
except ImportError:
print "#################################"
print "Dependency missing: Python Sphinx"
print "#################################"
sys.exit(1)
class SphinxBuilder(object):
"""
Creates HTML documentation using Sphinx.
"""
def __init__(self):
"""
Run the DocCommand.
"""
print "Creating html documentation ..."
try:
buildername = 'html'
outdir = os.path.abspath(os.path.join(os.getcwd(), "htmlout"))
# Create the output directory if it doesn't exist
if not os.access(outdir, os.F_OK):
os.mkdir(outdir)
doctreedir = os.path.join('./', '.doctrees')
confdir = os.path.abspath('./')
srcdir = os.path.abspath('rst')
freshenv = True
# Create the builder
app = Sphinx(srcdir,
confdir,
outdir,
doctreedir,
buildername,
{},
sys.stdout,
sys.stderr,
freshenv)
            self.app = app  # keep a reference so build_docs() can rebuild later
            app.builder.build_all()
except ImportError, ie:
traceback.print_exc()
except Exception, ex:
print >> sys.stderr, "FAIL! exiting ... (%s)" % ex
def build_docs(self):
self.app.builder.build_all()
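# Clarifying note (added; not part of the original script): the Sphinx() call
# above is roughly equivalent to invoking the sphinx-build CLI from the docsite
# directory with flags mirroring the __init__ arguments.  The exact command is
# an illustration, not something this script runs:
#
#     sphinx-build -E -b html -c . -d ./.doctrees rst htmlout
#
# (-E forces a fresh environment, matching freshenv=True.)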
def build_rst_docs():
docgen = SphinxBuilder()
if __name__ == '__main__':
if '-h' in sys.argv or '--help' in sys.argv:
print "This script builds the html documentation from rst/asciidoc sources.\n"
print " Run 'make docs' to build everything."
print " Run 'make viewdocs' to build and then preview in a web browser."
sys.exit(0)
    # The 'htmldocs' make target will call this script with the 'rst'
    # parameter. We don't need to run the 'htmlman' target then.
if "rst" in sys.argv:
build_rst_docs()
else:
# By default, preform the rst->html transformation and then
# the asciidoc->html trasnformation
build_rst_docs()
if "view" in sys.argv:
import webbrowser
if not webbrowser.open('htmlout/index.html'):
print >> sys.stderr, "Could not open on your webbrowser."
| gpl-3.0 | 3,547,201,197,303,134,000 | 30.543689 | 86 | 0.569406 | false |
Maximilian-Reuter/SickRage-1 | lib/twilio/rest/resources/pricing/voice.py | 35 | 4195 | from .. import NextGenInstanceResource, NextGenListResource
class Voice(object):
"""Holds references to the Voice pricing resources."""
name = "Voice"
key = "voice"
def __init__(self, base_uri, auth, timeout):
self.uri = "%s/Voice" % base_uri
self.countries = VoiceCountries(self.uri, auth, timeout)
self.numbers = VoiceNumbers(self.uri, auth, timeout)
class VoiceCountry(NextGenInstanceResource):
"""Pricing information for Twilio Voice services in a specific country.
.. attribute:: country
The full name of the country.
.. attribute:: iso_country
The country's 2-character ISO code.
.. attribute:: price_unit
        The currency in which prices are measured, in ISO 4217 format
(e.g. 'usd', 'eur', 'jpy').
.. attribute:: outbound_prefix_prices
A list of dicts containing pricing information as follows:
- prefix_list: a list of number prefixes in the requested country
that have the same pricing
- friendly_name: a descriptive name for this prefix set
- call_base_price: the base price per minute for calls to numbers
matching any of these prefixes
- call_current_price: the current price per minute (including
volume discounts, etc.) for your account to make calls to
numbers matching these prefixes
.. attribute:: inbound_call_prices
A list of dicts containing pricing information for inbound calls:
- number_type: 'local', 'mobile', 'national', or 'toll_free'
- call_base_price: the base price per minute to receive a call
to this number type
- call_current_price: the current price per minute (including
volume discounts, etc.) for your account to receive a call
to this number type
"""
id_key = "iso_country"
class VoiceCountries(NextGenListResource):
instance = VoiceCountry
key = "countries"
name = "Countries"
def get(self, iso_country):
"""Retrieve pricing information for Twilio Voice in the specified
country.
:param iso_country: The two-letter ISO code for the country
"""
return self.get_instance(iso_country)
def list(self):
"""Retrieve the list of countries in which Twilio Voice is
available."""
resp, page = self.request("GET", self.uri)
return [self.load_instance(i) for i in page[self.key]]
class VoiceNumber(NextGenInstanceResource):
"""Pricing information for Twilio Voice services to and from a given
phone number.
.. attribute:: phone_number
The E.164-formatted phone number this pricing information applies to
.. attribute:: country
The name of the country this phone number belongs to
.. attribute:: iso_country
The two-character ISO code for the country
.. attribute:: outbound_call_price
A dict containing pricing information for outbound calls to this
number:
- base_price: the base price per minute for a call to this number
- current_price: the current price per minute (including discounts,
etc.) for a call to this number
.. attribute:: inbound_call_price
        A dict containing pricing information for inbound calls to this number,
or null if this number is not Twilio-hosted.
- number_type: "local", "mobile", "national", or "toll_free"
- call_base_price: the base price per minute to receive a call to
this number
- call_current_price: the current price per minute (including
discounts, etc.) to receive a call to this number
"""
id_key = "number"
class VoiceNumbers(NextGenListResource):
instance = VoiceNumber
key = "numbers"
name = "Numbers"
def get(self, phone_number):
""" Retrieve pricing information for a specific phone number.
:param phone_number: the E.164-formatted number to retrieve info for
:return: a :class:`VoiceNumber` instance
"""
return self.get_instance(phone_number)
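def _example_usage():  # pragma: no cover
    """Hedged sketch (added; not part of the original module) showing how the
    resources above fit together.  The base URI, credential tuple and timeout
    are assumptions for illustration only."""
    voice = Voice('https://pricing.twilio.com/v1', ('ACCOUNT_SID', 'AUTH_TOKEN'), 30)
    us = voice.countries.get('US')              # VoiceCountry with price_unit, prices
    all_countries = voice.countries.list()      # [VoiceCountry, ...]
    number = voice.numbers.get('+15105551234')  # VoiceNumber for one E.164 number
    return us, all_countries, number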
| gpl-3.0 | -8,683,439,944,656,884,000 | 31.022901 | 79 | 0.64267 | false |
taedori81/django-oscar | src/oscar/apps/address/migrations/0001_initial.py | 58 | 4480 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('iso_3166_1_a2', models.CharField(primary_key=True, max_length=2, verbose_name='ISO 3166-1 alpha-2', serialize=False)),
('iso_3166_1_a3', models.CharField(max_length=3, verbose_name='ISO 3166-1 alpha-3', blank=True)),
('iso_3166_1_numeric', models.CharField(max_length=3, verbose_name='ISO 3166-1 numeric', blank=True)),
('printable_name', models.CharField(max_length=128, verbose_name='Country name')),
('name', models.CharField(max_length=128, verbose_name='Official name')),
('display_order', models.PositiveSmallIntegerField(default=0, verbose_name='Display order', db_index=True, help_text='Higher the number, higher the country in the list.')),
('is_shipping_country', models.BooleanField(default=False, db_index=True, verbose_name='Is shipping country')),
],
options={
'ordering': ('-display_order', 'printable_name'),
'verbose_name_plural': 'Countries',
'verbose_name': 'Country',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(verbose_name='Title', max_length=64, blank=True, choices=[('Mr', 'Mr'), ('Miss', 'Miss'), ('Mrs', 'Mrs'), ('Ms', 'Ms'), ('Dr', 'Dr')])),
('first_name', models.CharField(max_length=255, verbose_name='First name', blank=True)),
('last_name', models.CharField(max_length=255, verbose_name='Last name', blank=True)),
('line1', models.CharField(max_length=255, verbose_name='First line of address')),
('line2', models.CharField(max_length=255, verbose_name='Second line of address', blank=True)),
('line3', models.CharField(max_length=255, verbose_name='Third line of address', blank=True)),
('line4', models.CharField(max_length=255, verbose_name='City', blank=True)),
('state', models.CharField(max_length=255, verbose_name='State/County', blank=True)),
('postcode', oscar.models.fields.UppercaseCharField(max_length=64, verbose_name='Post/Zip-code', blank=True)),
('search_text', models.TextField(editable=False, verbose_name='Search text - used only for searching addresses')),
('phone_number', oscar.models.fields.PhoneNumberField(verbose_name='Phone number', help_text='In case we need to call you about your order', blank=True)),
('notes', models.TextField(verbose_name='Instructions', help_text='Tell us anything we should know when delivering your order.', blank=True)),
('is_default_for_shipping', models.BooleanField(default=False, verbose_name='Default shipping address?')),
('is_default_for_billing', models.BooleanField(default=False, verbose_name='Default billing address?')),
('num_orders', models.PositiveIntegerField(default=0, verbose_name='Number of Orders')),
('hash', models.CharField(max_length=255, editable=False, db_index=True, verbose_name='Address Hash')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('country', models.ForeignKey(verbose_name='Country', to='address.Country')),
('user', models.ForeignKey(verbose_name='User', related_name='addresses', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-num_orders'],
'verbose_name_plural': 'User addresses',
'verbose_name': 'User address',
'abstract': False,
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='useraddress',
unique_together=set([('user', 'hash')]),
),
]
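# Usage note (added; not part of the generated migration): this initial
# migration is applied like any other Django migration, typically with
#
#     python manage.py migrate address
#
# where the 'address' app label is inferred from the to='address.Country'
# references above.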
| bsd-3-clause | -5,734,636,698,089,433,000 | 62.098592 | 188 | 0.599554 | false |