repo_name (string, 5–100 chars) | path (string, 4–299 chars) | copies (990 classes) | size (string, 4–7 chars) | content (string, 666–1.03M chars) | license (15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
pfmoore/pip | src/pip/_internal/cache.py | 6 | 9958 | """Cache Management
"""
import hashlib
import json
import logging
import os
from typing import Any, Dict, List, Optional, Set
from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.exceptions import InvalidWheelFilename
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.models.wheel import Wheel
from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
from pip._internal.utils.urls import path_to_url
logger = logging.getLogger(__name__)
def _hash_dict(d):
# type: (Dict[str, str]) -> str
"""Return a stable sha224 of a dictionary."""
s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
return hashlib.sha224(s.encode("ascii")).hexdigest()
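# Illustration (not part of the original pip source): because the dict is
# serialized with sort_keys=True before hashing, key order never changes the
# digest, e.g. _hash_dict({"b": "2", "a": "1"}) == _hash_dict({"a": "1", "b": "2"}),
# and the result is always a 56-character sha224 hex string.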
class Cache:
"""An abstract class - provides cache directories for data from links
:param cache_dir: The root of the cache.
:param format_control: An object of FormatControl class to limit
binaries being read from the cache.
:param allowed_formats: which formats of files the cache should store.
('binary' and 'source' are the only allowed values)
"""
def __init__(self, cache_dir, format_control, allowed_formats):
# type: (str, FormatControl, Set[str]) -> None
super().__init__()
assert not cache_dir or os.path.isabs(cache_dir)
self.cache_dir = cache_dir or None
self.format_control = format_control
self.allowed_formats = allowed_formats
_valid_formats = {"source", "binary"}
assert self.allowed_formats.union(_valid_formats) == _valid_formats
def _get_cache_path_parts(self, link):
# type: (Link) -> List[str]
"""Get parts of part that must be os.path.joined with cache_dir
"""
# We want to generate a URL to use as our cache key; we don't want to
# just re-use the URL because it might have other items in the fragment
# and we don't care about those.
key_parts = {"url": link.url_without_fragment}
if link.hash_name is not None and link.hash is not None:
key_parts[link.hash_name] = link.hash
if link.subdirectory_fragment:
key_parts["subdirectory"] = link.subdirectory_fragment
# Include interpreter name, major and minor version in cache key
# to cope with ill-behaved sdists that build a different wheel
# depending on the python version their setup.py is being run on,
# and don't encode the difference in compatibility tags.
# https://github.com/pypa/pip/issues/7296
key_parts["interpreter_name"] = interpreter_name()
key_parts["interpreter_version"] = interpreter_version()
# Hash our key dict with sha224: it has security properties similar to
# sha256 but a shorter digest, and that difference does not matter for
# our use case here.
hashed = _hash_dict(key_parts)
# Nest the directories to avoid creating a huge number of top-level
# directories, which could exhaust the per-directory entry limit on
# some filesystems.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
return parts
def _get_candidates(self, link, canonical_package_name):
# type: (Link, str) -> List[Any]
can_not_cache = (
not self.cache_dir or
not canonical_package_name or
not link
)
if can_not_cache:
return []
formats = self.format_control.get_allowed_formats(
canonical_package_name
)
if not self.allowed_formats.intersection(formats):
return []
candidates = []
path = self.get_path_for_link(link)
if os.path.isdir(path):
for candidate in os.listdir(path):
candidates.append((candidate, path))
return candidates
def get_path_for_link(self, link):
# type: (Link) -> str
"""Return a directory to store cached items in for link.
"""
raise NotImplementedError()
def get(
self,
link, # type: Link
package_name, # type: Optional[str]
supported_tags, # type: List[Tag]
):
# type: (...) -> Link
"""Returns a link to a cached item if it exists, otherwise returns the
passed link.
"""
raise NotImplementedError()
class SimpleWheelCache(Cache):
"""A cache of wheels for future installs.
"""
def __init__(self, cache_dir, format_control):
# type: (str, FormatControl) -> None
super().__init__(cache_dir, format_control, {"binary"})
def get_path_for_link(self, link):
# type: (Link) -> str
"""Return a directory to store cached wheels for link
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were
not unique. E.g. ./package might have dozens of installs done for it
and build a version of 0.0...and if we built and cached a wheel, we'd
end up using the same wheel even if the source has been edited.
:param link: The link of the sdist for which this will cache wheels.
"""
parts = self._get_cache_path_parts(link)
assert self.cache_dir
# Store wheels within the root cache_dir
return os.path.join(self.cache_dir, "wheels", *parts)
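# Illustration (not part of the original pip source): with the 2/2/2/rest split
# produced by _get_cache_path_parts, a cached wheel directory looks roughly like
#   <cache_dir>/wheels/ab/cd/ef/<remaining 50 hex chars>/
# and the files inside it are the candidates listed by _get_candidates.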
def get(
self,
link, # type: Link
package_name, # type: Optional[str]
supported_tags, # type: List[Tag]
):
# type: (...) -> Link
candidates = []
if not package_name:
return link
canonical_package_name = canonicalize_name(package_name)
for wheel_name, wheel_dir in self._get_candidates(
link, canonical_package_name
):
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if canonicalize_name(wheel.name) != canonical_package_name:
logger.debug(
"Ignoring cached wheel %s for %s as it "
"does not match the expected distribution name %s.",
wheel_name, link, package_name,
)
continue
if not wheel.supported(supported_tags):
# Built for a different python/arch/etc
continue
candidates.append(
(
wheel.support_index_min(supported_tags),
wheel_name,
wheel_dir,
)
)
if not candidates:
return link
_, wheel_name, wheel_dir = min(candidates)
return Link(path_to_url(os.path.join(wheel_dir, wheel_name)))
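# Illustration (not part of the original pip source): candidates are
# (support_index_min, wheel_name, wheel_dir) tuples, so min() prefers the wheel
# whose best-matching tag appears earliest in supported_tags, e.g.
#   (0, 'pkg-1.0-cp38-cp38-manylinux1_x86_64.whl', d)
# beats
#   (5, 'pkg-1.0-py3-none-any.whl', d).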
class EphemWheelCache(SimpleWheelCache):
"""A SimpleWheelCache that creates it's own temporary cache directory
"""
def __init__(self, format_control):
# type: (FormatControl) -> None
self._temp_dir = TempDirectory(
kind=tempdir_kinds.EPHEM_WHEEL_CACHE,
globally_managed=True,
)
super().__init__(self._temp_dir.path, format_control)
class CacheEntry:
def __init__(
self,
link, # type: Link
persistent, # type: bool
):
self.link = link
self.persistent = persistent
class WheelCache(Cache):
"""Wraps EphemWheelCache and SimpleWheelCache into a single Cache
This Cache allows for graceful degradation: the ephemeral wheel cache is
used when a link is not found in the simple wheel cache first.
"""
def __init__(self, cache_dir, format_control):
# type: (str, FormatControl) -> None
super().__init__(cache_dir, format_control, {'binary'})
self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
self._ephem_cache = EphemWheelCache(format_control)
def get_path_for_link(self, link):
# type: (Link) -> str
return self._wheel_cache.get_path_for_link(link)
def get_ephem_path_for_link(self, link):
# type: (Link) -> str
return self._ephem_cache.get_path_for_link(link)
def get(
self,
link, # type: Link
package_name, # type: Optional[str]
supported_tags, # type: List[Tag]
):
# type: (...) -> Link
cache_entry = self.get_cache_entry(link, package_name, supported_tags)
if cache_entry is None:
return link
return cache_entry.link
def get_cache_entry(
self,
link, # type: Link
package_name, # type: Optional[str]
supported_tags, # type: List[Tag]
):
# type: (...) -> Optional[CacheEntry]
"""Returns a CacheEntry with a link to a cached item if it exists or
None. The cache entry indicates if the item was found in the persistent
or ephemeral cache.
"""
retval = self._wheel_cache.get(
link=link,
package_name=package_name,
supported_tags=supported_tags,
)
if retval is not link:
return CacheEntry(retval, persistent=True)
retval = self._ephem_cache.get(
link=link,
package_name=package_name,
supported_tags=supported_tags,
)
if retval is not link:
return CacheEntry(retval, persistent=False)
return None
| mit | 8,222,681,920,759,544,000 | 33.696864 | 81 | 0.597911 | false |
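The cache.py sample above derives its on-disk layout from a sha224 digest of the link metadata. Below is a minimal standalone sketch of that scheme, using only the Python standard library; the function name and example values are illustrative and not part of pip.

import hashlib
import json
import os

def cache_path_parts(key_parts):
    # Serialize deterministically (sorted keys), hash with sha224, then split the
    # digest into 2/2/2/rest directory levels, as Cache._get_cache_path_parts does.
    s = json.dumps(key_parts, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
    hashed = hashlib.sha224(s.encode("ascii")).hexdigest()
    return [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]

parts = cache_path_parts({
    "url": "https://example.com/pkg-1.0.tar.gz",  # illustrative URL
    "interpreter_name": "cp",
    "interpreter_version": "38",
})
print(os.path.join("wheels", *parts))  # e.g. wheels/ab/cd/ef/<remaining 50 hex chars>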
LIS/lis-tempest | tempest/api/data_processing/test_cluster_templates.py | 6 | 5123 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.data_processing import base as dp_base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class ClusterTemplateTest(dp_base.BaseDataProcessingTest):
# Link to the API documentation is http://docs.openstack.org/developer/
# sahara/restapi/rest_api_v1.0.html#cluster-templates
@classmethod
def skip_checks(cls):
super(ClusterTemplateTest, cls).skip_checks()
if cls.default_plugin is None:
raise cls.skipException("No Sahara plugins configured")
@classmethod
def resource_setup(cls):
super(ClusterTemplateTest, cls).resource_setup()
# pre-define a node group template
node_group_template_w = cls.get_node_group_template('worker1')
if node_group_template_w is None:
raise exceptions.InvalidConfiguration(
message="No known Sahara plugin was found")
node_group_template_w['name'] = data_utils.rand_name(
'sahara-ng-template')
resp_body = cls.create_node_group_template(**node_group_template_w)
node_group_template_id = resp_body['id']
configured_node_group_templates = {'worker1': node_group_template_id}
cls.full_cluster_template = cls.get_cluster_template(
configured_node_group_templates)
# create cls.cluster_template variable to use for comparison to cluster
# template response body. The 'node_groups' field in the response body
# has some extra info that post body does not have. The 'node_groups'
# field in the response body is something like this
#
# 'node_groups': [
# {
# 'count': 3,
# 'name': 'worker-node',
# 'volume_mount_prefix': '/volumes/disk',
# 'created_at': '2014-05-21 14:31:37',
# 'updated_at': None,
# 'floating_ip_pool': None,
# ...
# },
# ...
# ]
cls.cluster_template = cls.full_cluster_template.copy()
del cls.cluster_template['node_groups']
def _create_cluster_template(self, template_name=None):
"""Creates Cluster Template with optional name specified.
It creates the template, verifies the template name and response body,
and returns the id and name of the created template.
"""
if not template_name:
# generate random name if it's not specified
template_name = data_utils.rand_name('sahara-cluster-template')
# create cluster template
resp_body = self.create_cluster_template(template_name,
**self.full_cluster_template)
# ensure that template created successfully
self.assertEqual(template_name, resp_body['name'])
self.assertDictContainsSubset(self.cluster_template, resp_body)
return resp_body['id'], template_name
@test.attr(type='smoke')
@test.idempotent_id('3525f1f1-3f9c-407d-891a-a996237e728b')
def test_cluster_template_create(self):
self._create_cluster_template()
@test.attr(type='smoke')
@test.idempotent_id('7a161882-e430-4840-a1c6-1d928201fab2')
def test_cluster_template_list(self):
template_info = self._create_cluster_template()
# check for cluster template in list
templates = self.client.list_cluster_templates()['cluster_templates']
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
@test.attr(type='smoke')
@test.idempotent_id('2b75fe22-f731-4b0f-84f1-89ab25f86637')
def test_cluster_template_get(self):
template_id, template_name = self._create_cluster_template()
# check cluster template fetch by id
template = self.client.get_cluster_template(template_id)
template = template['cluster_template']
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.cluster_template, template)
@test.attr(type='smoke')
@test.idempotent_id('ff1fd989-171c-4dd7-91fd-9fbc71b09675')
def test_cluster_template_delete(self):
template_id, _ = self._create_cluster_template()
# delete the cluster template by id
self.client.delete_cluster_template(template_id)
# TODO(ylobankov): check that cluster template is really deleted
| apache-2.0 | -2,413,352,540,128,404,500 | 40.314516 | 79 | 0.644935 | false |
tafaRU/odoo | openerp/addons/base/ir/ir_model.py | 10 | 61098 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
# breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for model in self.browse(cr, uid, ids, context=context):
res[model.id] = []
inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
if inherited_models:
res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
help="The list of models that extends the current model."),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out link operations (4, id) on field_id, because openerp-web
# always writes (4, id, False) even for non-dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
if context and context.get('manual'):
vals['state']='manual'
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','base')=='manual':
self.instanciate(cr, user, vals['model'], context)
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
self.pool.setup_models(cr, partial=(not self.pool.ready))
RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
if isinstance(model, unicode):
model = model.encode('utf-8')
class CustomModel(models.Model):
_name = model
_module = False
_custom = True
obj = CustomModel._build_model(self.pool, cr)
obj._rec_name = CustomModel._rec_name = (
'x_name' if 'x_name' in obj._columns else
list(obj._columns)[0] if obj._columns else
'id'
)
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
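# Illustration (not part of the original module), showing which literal strings
# _check_selection accepts, assuming these example values:
#   "[('blue','Blue'),('yellow','Yellow')]"  -> accepted
#   "[('blue','Blue'), ('red',)]"            -> rejected (items must be 2-tuples)
#   "'blue'"                                 -> rejected (not a non-empty list)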
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 0 !')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = model._fields[field.name].relation
cr.execute('DROP table "%s"' % (rel_name))
model._pop_field(field.name)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
cr.commit()
self.pool.setup_models(cr, partial=(not self.pool.ready))
RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','base') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
if vals['model'] in self.pool:
model = self.pool[vals['model']]
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
model._rec_name = 'x_name'
if self.pool.fields_by_model is not None:
cr.execute('SELECT * FROM ir_model_fields WHERE id=%s', (res,))
self.pool.fields_by_model.setdefault(vals['model'], []).append(cr.dictfetchone())
model.__init__(self.pool, cr)
# Pass extra context to _auto_init for special treatment of the custom field's select_level
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
self.pool.setup_models(cr, partial=(not self.pool.ready))
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
column_rename = None # if set, *one* column can be renamed here
models_patch = {} # structs of (obj, [(field, prop, change_to),..])
# data to be updated on the orm model
# static table of properties
model_props = [ # (our-name, fields.prop, set_fn)
('field_description', 'string', tools.ustr),
('required', 'required', bool),
('readonly', 'readonly', bool),
('domain', '_domain', eval),
('size', 'size', int),
('on_delete', 'ondelete', str),
('translate', 'translate', bool),
('selectable', 'selectable', bool),
('select_level', 'select', int),
('selection', 'selection', eval),
]
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id.id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None:
models_patch.setdefault(obj._name, (obj,[]))
# find out which properties (per model) we need to update
for field_name, field_property, set_fn in model_props:
if field_name in vals:
property_value = set_fn(vals[field_name])
if getattr(obj._columns[item.name], field_property) != property_value:
models_patch[obj._name][1].append((final_name, field_property, property_value))
# our dict is ready here, but no properties are changed so far
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
if column_rename:
obj, rename = column_rename
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
# This is VERY risky, but let us have this feature:
# we want to change the key of field in obj._fields and obj._columns
field = obj._pop_field(rename[1])
obj._add_field(rename[2], field)
if models_patch:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context, select=vals.get('select_level', '0'),
update_custom_fields=True)
for __, patch_struct in models_patch.items():
obj = patch_struct[0]
# TODO: update new-style fields accordingly
for col_name, col_prop, val in patch_struct[1]:
setattr(obj._columns[col_name], col_prop, val)
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
if column_rename or models_patch:
self.pool.setup_models(cr, partial=(not self.pool.ready))
RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and not name in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
# The root user has all access rights
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return r or False
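# Worked example (illustrative, not part of the original module): if uid 7
# belongs to a group that has an ir.model.access row on 'res.partner' with
# perm_write=True, check(cr, 7, 'res.partner', 'write') returns 1 from the
# group-specific query; if the user's groups have no row for that model, the
# fallback query over global rules (group_id IS NULL) decides, and if that
# also grants nothing, an AccessError is raised when raise_exception is True.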
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
bymodel = defaultdict(dict)
names = {}
for res in self.browse(cr, uid, ids, context=context):
bymodel[res.model][res.res_id] = res
names[res.id] = res.complete_name
#result[res.model][res.res_id] = res.id
for model, id_map in bymodel.iteritems():
try:
ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
except Exception:
pass
else:
for r in id_map.itervalues():
names[r.id] = ng.get(r.res_id, r.complete_name)
return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
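# Illustration (hypothetical external ID, not part of the original module):
# for a record created as <module>.<name>,
#   xmlid_lookup(cr, uid, 'my_module.partner_acme')
# returns (ir_model_data_id, 'res.partner', res_id); the wrappers below simply
# drop or reuse parts of that tuple.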
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object; otherwise raises a ValueError if raise_on_access_error is True, or returns (model, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
If not found, raise a ValueError (the lookup is performed with
raise_if_not_found=True).
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
except:
id = False
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots! These are used to refer to other modules' data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
# In update mode, do not update a record if its ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, uid, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
if not res:
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
ir_values_obj.invalidate_cache(cr, uid, ['value'])
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database-backed elements
(dropping tables, columns, FKs, etc.), as long as no other
ir.model.data entry holds a reference to them (which would indicate
that they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
        It is meant to remove records that are no longer present in the
        updated data. Such records are recognised as the ones with an xml id
and a module in ir_model_data and noupdate set to false, but not
present in self.loads.
"""
if not modules:
return True
to_unlink = []
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC""",
(tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module,name) not in self.loads:
to_unlink.append((model,res_id))
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if model in self.pool:
_logger.info('Deleting %s@%s', res_id, model)
self.pool[model].unlink(cr, uid, [res_id])
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,110,409,172,991,243,000 | 47.528991 | 207 | 0.557285 | false |
ClonedOne/pandalog_investigator | pandaloginvestigator/cli/cmds/detect.py | 1 | 1643 | from pandaloginvestigator.core.detection import suspect_builder
from pandaloginvestigator.core.utils import string_utils
import logging
import os
logger = logging.getLogger(__name__)
def detect_command(app):
try:
core_num = app.config.get('pandaloginvestigator', 'core_num')
except:
logger.error('core_num not set in configuration file')
return
try:
created_dirs_path = app.config.get('pandaloginvestigator', 'created_dirs_path')
except:
logger.error('created_dirs_path not set in configuration file')
return
try:
dir_redpills_path = app.config.get('pandaloginvestigator', 'dir_redpills_path')
except:
logger.error('dir_redpills_path not set in configuration file')
return
dir_results_path = os.path.join(created_dirs_path, string_utils.dir_results_path)
if not os.path.exists(dir_results_path):
os.makedirs(dir_results_path)
dir_analyzed_path = os.path.join(created_dirs_path, string_utils.dir_analyzed_path)
if not os.path.exists(dir_analyzed_path):
os.makedirs(dir_analyzed_path)
dir_clues_path = os.path.join(created_dirs_path, string_utils.dir_clues_path)
if not os.path.exists(dir_clues_path):
os.makedirs(dir_clues_path)
logger.debug(
'Detect command with parameters: {}, {}, {}, {}, {}'.format(
dir_results_path,
dir_redpills_path,
dir_analyzed_path,
dir_clues_path,
core_num,
)
)
suspect_builder.build_suspects(dir_results_path, dir_redpills_path, dir_analyzed_path, dir_clues_path, core_num)
| mit | 6,249,602,253,184,543,000 | 32.530612 | 116 | 0.65916 | false |
eXistenZNL/SickRage | sickbeard/providers/generic.py | 3 | 22456 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import os
import re
import itertools
import urllib
import random
import sickbeard
import requests
from sickbeard import helpers, classes, logger, db
from sickbeard.common import MULTI_EP_RESULT, SEASON_RESULT, USER_AGENT
from sickbeard import tvcache
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard.common import Quality
from hachoir_parser import createParser
from base64 import b16encode, b32decode
class GenericProvider:
NZB = "nzb"
TORRENT = "torrent"
def __init__(self, name):
# these need to be set in the subclass
self.providerType = None
self.name = name
self.proxy = ProviderProxy()
self.proxyGlypeProxySSLwarning = None
self.urls = {}
self.url = ''
self.show = None
self.supportsBacklog = False
self.supportsAbsoluteNumbering = False
self.anime_only = False
self.search_mode = None
self.search_fallback = False
self.enabled = False
self.enable_daily = False
self.enable_backlog = False
self.cache = tvcache.TVCache(self)
self.session = requests.Session()
self.headers = {'User-Agent': USER_AGENT}
self.btCacheURLS = [
'http://torcache.net/torrent/{torrent_hash}.torrent',
'http://thetorrent.org/torrent/{torrent_hash}.torrent',
'http://btdig.com/torrent/{torrent_hash}.torrent',
#'http://torrage.com/torrent/{torrent_hash}.torrent',
#'http://itorrents.org/torrent/{torrent_hash}.torrent',
]
random.shuffle(self.btCacheURLS)
def getID(self):
return GenericProvider.makeID(self.name)
@staticmethod
def makeID(name):
return re.sub("[^\w\d_]", "_", name.strip().lower())
def imageName(self):
return self.getID() + '.png'
def _checkAuth(self):
return True
def _doLogin(self):
return True
def isActive(self):
if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
return self.isEnabled()
elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
return self.isEnabled()
else:
return False
def isEnabled(self):
"""
        This should be overridden and should return the config setting, e.g. sickbeard.MYPROVIDER
"""
return False
def getResult(self, episodes):
"""
Returns a result of the correct type for this provider
"""
if self.providerType == GenericProvider.NZB:
result = classes.NZBSearchResult(episodes)
elif self.providerType == GenericProvider.TORRENT:
result = classes.TorrentSearchResult(episodes)
else:
result = classes.SearchResult(episodes)
result.provider = self
return result
def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
"""
By default this is just a simple urlopen call but this method should be overridden
for providers with special URL requirements (like cookies)
"""
# check for auth
if not self._doLogin():
return
if self.proxy.isEnabled():
self.headers.update({'Referer': self.proxy.getProxyURL()})
self.proxyGlypeProxySSLwarning = self.proxy.getProxyURL() + 'includes/process.php?action=sslagree&submit=Continue anyway...'
else:
if 'Referer' in self.headers:
self.headers.pop('Referer')
self.proxyGlypeProxySSLwarning = None
return helpers.getURL(self.proxy._buildURL(url), post_data=post_data, params=params, headers=self.headers, timeout=timeout,
session=self.session, json=json, proxyGlypeProxySSLwarning=self.proxyGlypeProxySSLwarning)
def _makeURL(self, result):
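        # Build the candidate download URLs for a result plus the local file
        # name to save it under: magnet links are mapped onto the btCacheURLS
        # mirrors via their info-hash (32-character base32 hashes are
        # re-encoded as hex), while non-magnet results are used as-is.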
urls = []
filename = u''
if result.url.startswith('magnet'):
try:
torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
try:
torrent_name = re.findall('dn=([^&]+)', result.url)[0]
except:
torrent_name = 'NO_DOWNLOAD_NAME'
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)).upper()
if not torrent_hash:
logger.log("Unable to extract torrent hash from magnet: " + ex(result.url), logger.ERROR)
return (urls, filename)
urls = [x.format(torrent_hash=torrent_hash, torrent_name=torrent_name) for x in self.btCacheURLS]
except:
logger.log("Unable to extract torrent hash or name from magnet: " + ex(result.url), logger.ERROR)
return (urls, filename)
else:
urls = [result.url]
if self.providerType == GenericProvider.TORRENT:
filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
elif self.providerType == GenericProvider.NZB:
filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
helpers.sanitizeFileName(result.name) + '.' + self.providerType)
return (urls, filename)
def downloadResult(self, result):
"""
Save the result to disk.
"""
# check for auth
if not self._doLogin():
return False
urls, filename = self._makeURL(result)
if self.proxy.isEnabled():
self.headers.update({'Referer': self.proxy.getProxyURL()})
elif 'Referer' in self.headers:
self.headers.pop('Referer')
for url in urls:
if 'NO_DOWNLOAD_NAME' in url:
continue
logger.log(u"Downloading a result from " + self.name + " at " + url)
if helpers.download_file(self.proxy._buildURL(url), filename, session=self.session, headers=self.headers):
if self._verify_download(filename):
logger.log(u"Saved result to " + filename, logger.INFO)
return True
else:
logger.log(u"Could not download %s" % url, logger.WARNING)
helpers._remove_file_failed(filename)
if len(urls):
logger.log(u"Failed to download any results", logger.WARNING)
return False
def _verify_download(self, file_name=None):
"""
        Checks the saved file to see if it is actually valid; if not, the download is considered a failure.
"""
# primitive verification of torrents, just make sure we didn't get a text file or something
if self.providerType == GenericProvider.TORRENT:
try:
parser = createParser(file_name)
if parser:
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log(u"Failed to validate torrent file: " + ex(e), logger.DEBUG)
logger.log(u"Result is not a valid torrent file", logger.DEBUG)
return False
return True
def searchRSS(self, episodes):
return self.cache.findNeededEpisodes(episodes)
def getQuality(self, item, anime=False):
"""
Figures out the quality of the given RSS item node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns a Quality value obtained from the node's data
"""
(title, url) = self._get_title_and_url(item)
quality = Quality.sceneQuality(title, anime)
return quality
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
return []
def _get_season_search_strings(self, episode):
return []
def _get_episode_search_strings(self, eb_obj, add_string=''):
return []
def _get_title_and_url(self, item):
"""
Retrieves the title and URL data from the item XML node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns: A tuple containing two strings representing title and URL respectively
"""
title = item.get('title')
if title:
title = u'' + title.replace(' ', '.')
url = item.get('link')
if url:
url = url.replace('&', '&')
return title, url
def _get_size(self, item):
"""Gets the size from the item"""
if self.providerType != GenericProvider.NZB:
logger.log(u"Torrent Generic providers doesn't have _get_size() implemented yet", logger.DEBUG)
return -1
else:
size = item.get('links')[1].get('length')
if size:
size = int(size)
return size
else:
logger.log(u"Size was not found in your provider response", logger.DEBUG)
return -1
def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
self._checkAuth()
self.show = show
results = {}
itemList = []
searched_scene_season = None
for epObj in episodes:
# search cache for episode result
cacheResult = self.cache.searchCache(epObj, manualSearch, downCurQuality)
if cacheResult:
if epObj.episode not in results:
results[epObj.episode] = cacheResult
else:
results[epObj.episode].extend(cacheResult)
# found result, search next episode
continue
# skip if season already searched
if len(episodes) > 1 and search_mode == 'sponly' and searched_scene_season == epObj.scene_season:
continue
# mark season searched for season pack searches so we can skip later on
searched_scene_season = epObj.scene_season
search_strings = []
if len(episodes) > 1 and search_mode == 'sponly':
# get season search results
search_strings = self._get_season_search_strings(epObj)
elif search_mode == 'eponly':
# get single episode search results
search_strings = self._get_episode_search_strings(epObj)
if search_strings:
logger.log(u'search_strings = %s' % repr(search_strings), logger.DEBUG)
first = search_strings and isinstance(search_strings[0], dict) and 'rid' in search_strings[0]
if first:
logger.log(u'First search_string has rid', logger.DEBUG)
for curString in search_strings:
itemList += self._doSearch(curString, search_mode, len(episodes), epObj=epObj)
if first:
first = False
if itemList:
logger.log(u'First search_string had rid, and returned results, skipping query by string', logger.DEBUG)
break
else:
logger.log(u'First search_string had rid, but returned no results, searching with string query', logger.DEBUG)
# if we found what we needed already from cache then return results and exit
if len(results) == len(episodes):
return results
# sort list by quality
if len(itemList):
items = {}
itemsUnknown = []
for item in itemList:
quality = self.getQuality(item, anime=show.is_anime)
if quality == Quality.UNKNOWN:
itemsUnknown += [item]
else:
if quality not in items:
items[quality] = [item]
else:
items[quality].append(item)
itemList = list(itertools.chain(*[v for (k, v) in sorted(items.iteritems(), reverse=True)]))
itemList += itemsUnknown if itemsUnknown else []
# filter results
cl = []
for item in itemList:
(title, url) = self._get_title_and_url(item)
# parse the file name
try:
myParser = NameParser(False)
parse_result = myParser.parse(title)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.DEBUG)
continue
except InvalidShowException:
logger.log(u"Unable to parse the filename " + title + " into a valid show", logger.DEBUG)
continue
showObj = parse_result.show
quality = parse_result.quality
release_group = parse_result.release_group
version = parse_result.version
addCacheEntry = False
if not (showObj.air_by_date or showObj.sports):
if search_mode == 'sponly':
if len(parse_result.episode_numbers):
logger.log(
u"This is supposed to be a season pack search but the result " + title + " is not a valid season pack, skipping it",
logger.DEBUG)
addCacheEntry = True
if len(parse_result.episode_numbers) and (
parse_result.season_number not in set([ep.season for ep in episodes]) or not [ep for ep in episodes if
ep.scene_episode in parse_result.episode_numbers]):
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
else:
if not len(parse_result.episode_numbers) and parse_result.season_number and not [ep for ep in
episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid season that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
elif len(parse_result.episode_numbers) and not [ep for ep in episodes if
ep.season == parse_result.season_number and ep.episode in parse_result.episode_numbers]:
logger.log(
u"The result " + title + " doesn't seem to be a valid episode that we are trying to snatch, ignoring",
logger.DEBUG)
addCacheEntry = True
if not addCacheEntry:
# we just use the existing info for normal searches
actual_season = parse_result.season_number
actual_episodes = parse_result.episode_numbers
else:
if not (parse_result.is_air_by_date):
logger.log(
u"This is supposed to be a date search but the result " + title + " didn't parse as one, skipping it",
logger.DEBUG)
addCacheEntry = True
else:
airdate = parse_result.air_date.toordinal()
myDB = db.DBConnection()
sql_results = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?",
[showObj.indexerid, airdate])
if len(sql_results) != 1:
logger.log(
u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it",
logger.WARNING)
addCacheEntry = True
if not addCacheEntry:
actual_season = int(sql_results[0]["season"])
actual_episodes = [int(sql_results[0]["episode"])]
# add parsed result to cache for usage later on
if addCacheEntry:
logger.log(u"Adding item from search to cache: " + title, logger.DEBUG)
ci = self.cache._addCacheEntry(title, url, parse_result=parse_result)
if ci is not None:
cl.append(ci)
continue
# make sure we want the episode
wantEp = True
for epNo in actual_episodes:
if not showObj.wantEpisode(actual_season, epNo, quality, manualSearch, downCurQuality):
wantEp = False
break
if not wantEp:
logger.log(
u"Ignoring result " + title + " because we don't want an episode that is " +
Quality.qualityStrings[
quality], logger.INFO)
continue
logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
# make a result object
epObj = []
for curEp in actual_episodes:
epObj.append(showObj.getEpisode(actual_season, curEp))
result = self.getResult(epObj)
result.show = showObj
result.url = url
result.name = title
result.quality = quality
result.release_group = release_group
result.version = version
result.content = None
result.size = self._get_size(item)
if len(epObj) == 1:
epNum = epObj[0].episode
logger.log(u"Single episode result.", logger.DEBUG)
elif len(epObj) > 1:
epNum = MULTI_EP_RESULT
logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(
parse_result.episode_numbers), logger.DEBUG)
elif len(epObj) == 0:
epNum = SEASON_RESULT
logger.log(u"Separating full season result to check for later", logger.DEBUG)
if epNum not in results:
results[epNum] = [result]
else:
results[epNum].append(result)
# check if we have items to add to cache
if len(cl) > 0:
myDB = self.cache._getDB()
myDB.mass_action(cl)
return results
def findPropers(self, search_date=None):
results = self.cache.listPropers(search_date)
return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time']), self.show) for x in
results]
def seedRatio(self):
'''
        Providers should override this method if a custom seed ratio is enabled.
        It should return the value of the provider seed ratio.
'''
return ''
class NZBProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.NZB
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.TORRENT
def _clean_title_from_provider(self, title):
if title:
title = u'' + title.replace(' ', '.')
return title
class ProviderProxy:
def __init__(self):
self.Type = 'GlypeProxy'
self.param = 'browse.php?u='
self.option = '&b=32&f=norefer'
self.enabled = False
self.url = None
self.urls = {
'getprivate.eu (NL)': 'http://getprivate.eu/',
'hideme.nl (NL)': 'http://hideme.nl/',
'proxite.eu (DE)': 'http://proxite.eu/',
'interproxy.net (EU)': 'http://interproxy.net/',
}
def isEnabled(self):
""" Return True if we Choose to call TPB via Proxy """
return self.enabled
def getProxyURL(self):
""" Return the Proxy URL Choosen via Provider Setting """
return str(self.url)
def _buildURL(self, url):
""" Return the Proxyfied URL of the page """
if self.isEnabled():
url = self.getProxyURL() + self.param + urllib.quote_plus(url.encode('UTF-8')) + self.option
logger.log(u"Proxified URL: " + url, logger.DEBUG)
return url
def _buildRE(self, regx):
""" Return the Proxyfied RE string """
if self.isEnabled():
regx = re.sub('//1', self.option, regx).replace('&', '&')
logger.log(u"Proxified REGEX: " + regx, logger.DEBUG)
else:
regx = re.sub('//1', '', regx)
return regx
| gpl-3.0 | 3,091,738,021,392,681,500 | 36.741176 | 189 | 0.553571 | false |
fangxingli/hue | desktop/core/src/desktop/management/commands/test.py | 4 | 3715 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Desktop-aware test runner.
Django's "test" command merely executes the test_runner,
so we circumvent it entirely and create our own.
"""
from django.test.utils import setup_test_environment
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.utils import get_runner
from django_nose import runner
import south.management.commands
import sys
import textwrap
import logging
from desktop import appmanager
from desktop.management.commands import test_windmill
class Command(BaseCommand):
help = textwrap.dedent("""\
Use the following arguments:
all Runs tests for all desktop applications and libraries
Additional arguments are passed to nose.
fast Runs the "fast" tests, namely those that don't start Hadoop.
specific Explicitly run specific tests using nose.
For example, to run all the filebrower tests or
to run a specific test function, use
test specific filebrowser
test specific useradmin.tests:test_user_admin
All additional arguments are passed directly to nose.
windmill Runs windmill tests
list_modules List test modules for all desktop applications and libraries
Common useful extra arguments for nose:
--nologcapture
--nocapture (-s)
--pdb-failures
--pdb
--with-xunit
""")
def run_from_argv(self, argv):
"""
Runs the tests.
This management command is unusual in that it doesn't
use Django's normal argument handling. (If it did, this
        method would be called handle().) We do so to more
easily pass arbitrary arguments to nose.
"""
args = argv[2:] # First two are "desktop" and "test"
# Patch South things in
south.management.commands.patch_for_test_db_setup()
south_logger = logging.getLogger('south')
south_logger.setLevel(logging.INFO)
if len(args) == 0:
print self.help
sys.exit(1)
nose_args = None
all_apps = [ app.module.__name__ for app in appmanager.DESKTOP_MODULES ]
if args[0] == "all":
nose_args = args + all_apps
elif args[0] == "fast":
nose_args = args + all_apps + ["-a", "!requires_hadoop"]
elif args[0] == "windmill":
args = args[1:]
ret = test_windmill.Command().handle(*args)
elif args[0] in ("specific", "nose"):
nose_args = args
elif args[0] == "list_modules":
print '\n'.join(all_apps)
sys.exit(0)
else:
print self.help
sys.exit(1)
if nose_args:
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=False)
nose_args.remove(args[0])
ret = test_runner.run_tests(nose_args)
logging.info("Tests (%s) returned %s" % (' '.join(nose_args), ret))
if ret != 0:
sys.exit(1)
| apache-2.0 | -1,227,436,018,042,384,600 | 31.876106 | 80 | 0.667833 | false |
Giladx/cwiid | wmdemo/wmdemo.py | 8 | 6220 | #!/usr/bin/python
import cwiid
import sys
menu = '''1: toggle LED 1
2: toggle LED 2
3: toggle LED 3
4: toggle LED 4
5: toggle rumble
a: toggle accelerometer reporting
b: toggle button reporting
c: enable motionplus, if connected
e: toggle extension reporting
i: toggle ir reporting
m: toggle messages
p: print this menu
r: request status message ((t) enables callback output)
s: print current state
t: toggle status reporting
x: exit'''
def main():
led = 0
rpt_mode = 0
rumble = 0
mesg = False
#Connect to address given on command-line, if present
print 'Put Wiimote in discoverable mode now (press 1+2)...'
global wiimote
if len(sys.argv) > 1:
wiimote = cwiid.Wiimote(sys.argv[1])
else:
wiimote = cwiid.Wiimote()
wiimote.mesg_callback = callback
print menu
exit = 0
while not exit:
c = sys.stdin.read(1)
if c == '1':
led ^= cwiid.LED1_ON
wiimote.led = led
elif c == '2':
led ^= cwiid.LED2_ON
wiimote.led = led
elif c == '3':
led ^= cwiid.LED3_ON
wiimote.led = led
elif c == '4':
led ^= cwiid.LED4_ON
wiimote.led = led
elif c == '5':
rumble ^= 1
wiimote.rumble = rumble
elif c == 'a':
rpt_mode ^= cwiid.RPT_ACC
wiimote.rpt_mode = rpt_mode
elif c == 'b':
rpt_mode ^= cwiid.RPT_BTN
wiimote.rpt_mode = rpt_mode
elif c == 'c':
wiimote.enable(cwiid.FLAG_MOTIONPLUS)
elif c == 'e':
rpt_mode ^= cwiid.RPT_EXT
wiimote.rpt_mode = rpt_mode
elif c == 'i':
rpt_mode ^= cwiid.RPT_IR
wiimote.rpt_mode = rpt_mode
elif c == 'm':
mesg = not mesg
if mesg:
wiimote.enable(cwiid.FLAG_MESG_IFC);
else:
wiimote.disable(cwiid.FLAG_MESG_IFC);
elif c == 'p':
print menu
elif c == 'r':
wiimote.request_status()
elif c == 's':
print_state(wiimote.state)
elif c == 't':
rpt_mode ^= cwiid.RPT_STATUS
wiimote.rpt_mode = rpt_mode
elif c == 'x':
exit = -1;
elif c == '\n':
pass
else:
print 'invalid option'
wiimote.close()
def print_state(state):
print 'Report Mode:',
for r in ['STATUS', 'BTN', 'ACC', 'IR', 'NUNCHUK', 'CLASSIC', 'BALANCE', 'MOTIONPLUS']:
if state['rpt_mode'] & eval('cwiid.RPT_' + r):
print r,
print
print 'Active LEDs:',
for led in ['1','2','3','4']:
if state['led'] & eval('cwiid.LED' + led + '_ON'):
print led,
print
print 'Rumble:', state['rumble'] and 'On' or 'Off'
print 'Battery:', int(100.0 * state['battery'] / cwiid.BATTERY_MAX)
if 'buttons' in state:
print 'Buttons:', state['buttons']
if 'acc' in state:
print 'Acc: x=%d y=%d z=%d' % (state['acc'][cwiid.X],
state['acc'][cwiid.Y],
state['acc'][cwiid.Z])
if 'ir_src' in state:
valid_src = False
print 'IR:',
for src in state['ir_src']:
if src:
valid_src = True
print src['pos'],
if not valid_src:
print 'no sources detected'
else:
print
if state['ext_type'] == cwiid.EXT_NONE:
print 'No extension'
elif state['ext_type'] == cwiid.EXT_UNKNOWN:
print 'Unknown extension attached'
elif state['ext_type'] == cwiid.EXT_NUNCHUK:
if state.has_key('nunchuk'):
print 'Nunchuk: btns=%.2X stick=%r acc.x=%d acc.y=%d acc.z=%d' % \
(state['nunchuk']['buttons'], state['nunchuk']['stick'],
state['nunchuk']['acc'][cwiid.X],
state['nunchuk']['acc'][cwiid.Y],
state['nunchuk']['acc'][cwiid.Z])
elif state['ext_type'] == cwiid.EXT_CLASSIC:
if state.has_key('classic'):
print 'Classic: btns=%.4X l_stick=%r r_stick=%r l=%d r=%d' % \
(state['classic']['buttons'],
state['classic']['l_stick'], state['classic']['r_stick'],
state['classic']['l'], state['classic']['r'])
elif state['ext_type'] == cwiid.EXT_BALANCE:
if state.has_key('balance'):
print 'Balance: right_top=%d right_bottom=%d left_top=%d left_bottom=%d' % \
(state['balance']['right_top'], state['balance']['right_bottom'],
state['balance']['left_top'], state['balance']['left_bottom'])
elif state['ext_type'] == cwiid.EXT_MOTIONPLUS:
if state.has_key('motionplus'):
print 'MotionPlus: angle_rate=(%d,%d,%d)' % state['motionplus']['angle_rate']
def callback(mesg_list, time):
print 'time: %f' % time
for mesg in mesg_list:
if mesg[0] == cwiid.MESG_STATUS:
print 'Status Report: battery=%d extension=' % \
mesg[1]['battery'],
if mesg[1]['ext_type'] == cwiid.EXT_NONE:
print 'none'
elif mesg[1]['ext_type'] == cwiid.EXT_NUNCHUK:
print 'Nunchuk'
elif mesg[1]['ext_type'] == cwiid.EXT_CLASSIC:
print 'Classic Controller'
elif mesg[1]['ext_type'] == cwiid.EXT_BALANCE:
print 'Balance Board'
elif mesg[1]['ext_type'] == cwiid.EXT_MOTIONPLUS:
print 'MotionPlus'
else:
print 'Unknown Extension'
elif mesg[0] == cwiid.MESG_BTN:
print 'Button Report: %.4X' % mesg[1]
elif mesg[0] == cwiid.MESG_ACC:
print 'Acc Report: x=%d, y=%d, z=%d' % \
(mesg[1][cwiid.X], mesg[1][cwiid.Y], mesg[1][cwiid.Z])
elif mesg[0] == cwiid.MESG_IR:
valid_src = False
print 'IR Report: ',
for src in mesg[1]:
if src:
valid_src = True
print src['pos'],
if not valid_src:
print 'no sources detected'
else:
print
elif mesg[0] == cwiid.MESG_NUNCHUK:
print ('Nunchuk Report: btns=%.2X stick=%r ' + \
'acc.x=%d acc.y=%d acc.z=%d') % \
(mesg[1]['buttons'], mesg[1]['stick'],
mesg[1]['acc'][cwiid.X], mesg[1]['acc'][cwiid.Y],
mesg[1]['acc'][cwiid.Z])
elif mesg[0] == cwiid.MESG_CLASSIC:
print ('Classic Report: btns=%.4X l_stick=%r ' + \
'r_stick=%r l=%d r=%d') % \
(mesg[1]['buttons'], mesg[1]['l_stick'],
mesg[1]['r_stick'], mesg[1]['l'], mesg[1]['r'])
elif mesg[0] == cwiid.MESG_BALANCE:
print ('Balance Report: right_top=%d right_bottom=%d ' + \
'left_top=%d left_bottom=%d') % \
(mesg[1]['right_top'], mesg[1]['right_bottom'],
mesg[1]['left_top'], mesg[1]['left_bottom'])
elif mesg[0] == cwiid.MESG_MOTIONPLUS:
print 'MotionPlus Report: angle_rate=(%d,%d,%d)' % \
mesg[1]['angle_rate']
elif mesg[0] == cwiid.MESG_ERROR:
print "Error message received"
global wiimote
wiimote.close()
exit(-1)
else:
print 'Unknown Report'
main()
| gpl-2.0 | -2,666,027,691,027,644,000 | 26.522124 | 88 | 0.590997 | false |
lewischeng-ms/pox | tools/pox-log.py | 33 | 1697 | #!/usr/bin/env python
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
import json
import sys
import socket
decoder = json.JSONDecoder()
host = "localhost"
port = 7790
if len(sys.argv) >= 2: host = sys.argv[1]
if len(sys.argv) >= 3: port = int(sys.argv[2])
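# Usage sketch (host and port values are examples): "python pox-log.py 10.0.0.1 7790"
# connects to POX's JSON log service and prints each log record's message field.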
while True:
try:
sock = socket.socket()
sock.connect((host,port))
print "== Connected =="
buf = ''
try:
sock.send('{"hello":"logger","format":"%(levelname)-8s | %(name)-15s | %(message)s"}')
while True:
d = sock.recv(1024)
if len(d) == 0: raise RuntimeError()
if len(buf) == 0: d = d.lstrip() # protect from whitespace
buf += d
try:
while len(buf) > 0:
o = decoder.raw_decode(buf)
buf = buf[o[1]:].lstrip() # protect from whitespace
print o[0]['message']
except:
pass
except KeyboardInterrupt:
break
except:
print "== Disconnected =="
try:
sock.close()
except:
pass
except KeyboardInterrupt:
break
except:
pass
| gpl-3.0 | 7,504,908,176,214,537,000 | 25.515625 | 92 | 0.624632 | false |
JaDogg/__py_playground | reference/sketchbook/regex/nfa_rpn_vm.py | 1 | 4074 | """
The Thompson algorithm again but starting from a regex in RPN, as
Thompson did. In a more C-like style now, compiling to a virtual
machine. We differ from Thompson in parsing from right to left, and so we
need fewer jump instructions and backpatch only for loops. We also detect
loops at match time -- so we should always terminate, if this is correct.
"""
def match(re, s):
return run(prepare(re), s)
def prepare(re):
re = list(re)
insns = []
start = parse(insns, re, emit(insns, expect, EOF, -1))
assert not re, "Syntax error"
return insns, start
def show(insns, pc):
for k, (operator, operand) in enumerate(insns):
print '%2d %s %-6s %s' % (k,
'*' if k == pc else ' ',
operator.__name__,
operand)
def parse(insns, re, k):
if not re:
return k
else:
c = re.pop()
if c == '.': # (Concatenation operator)
rhs = parse(insns, re, k)
return parse(insns, re, rhs)
elif c == '|':
rhs = parse(insns, re, k)
return emit(insns, alt, rhs, parse(insns, re, k))
elif c == '*':
fork = emit_fork(insns, k)
patch(insns, fork, parse(insns, re, fork))
return fork
elif c == '?':
return emit(insns, alt, k, parse(insns, re, k))
elif c == '+':
fork = emit_fork(insns, k)
plus = parse(insns, re, fork)
patch(insns, fork, plus)
return plus
else:
return emit(insns, expect, c, k)
def emit_fork(insns, k): return emit(insns, alt, None, k)
def patch(insns, k, target): insns[k][1] = target
def emit(insns, operation, operand, k):
if len(insns) - 1 != k:
insns.append([jump, k])
insns.append([operation, operand])
return len(insns) - 1
def run((insns, start), s):
agenda = set([start])
for c in s:
agenda = step(insns, agenda, c)
if not agenda: break # Redundant test, can speed it
return ACCEPTED in step(insns, agenda, EOF)
def step(insns, agenda, c):
done, next = set(), set()
while agenda:
pc = agenda.pop()
while pc is not None:
done.add(pc) # TODO: we could get away with only adding loop headers
operator, operand = insns[pc]
pc = operator(done, agenda, next, pc, c, operand)
return next
def jump(done, agenda, next, pc, c, k):
return (k not in done) and k
def expect(done, agenda, next, pc, c, literal):
if c == literal: next.add(pc - 1)
return None
def alt(done, agenda, next, pc, c, k):
if k not in done: agenda.add(k)
return pc-1
EOF, ACCEPTED = 'EOF', -1
## match('', '')
#. True
## match('', 'A')
#. False
## match('x', '')
#. False
## match('x', 'y')
#. False
## match('x', 'x')
#. True
## match('x', 'xx')
#. False
## match('xx.', 'xx')
#. True
## match('ab.', '')
#. False
## match('ab.', 'ab')
#. True
## match('ab|', '')
#. False
## match('ab|', 'a')
#. True
## match('ab|', 'b')
#. True
## match('ab|', 'x')
#. False
## match('a*', '')
#. True
## match('a*', 'a')
#. True
## match('a*', 'x')
#. False
## match('a*', 'aa')
#. True
## match('a*', 'ax')
#. False
## match('a*', 'aaa')
#. True
## complicated = 'ab.axy..|*z.'
## match(complicated, '')
#. False
## match(complicated, 'z')
#. True
## match(complicated, 'abz')
#. True
## match(complicated, 'ababaxyab')
#. False
## match(complicated, 'ababaxyabz')
#. True
## match(complicated, 'ababaxyaxz')
#. False
tests = """\
ab.c.d.e.f.g. 1 abcdefg
ab|*a. 0 ababababab
ab|*a. 1 aaaaaaaaba
ab|*a. 0 aaaaaabac
abc|*.d. 1 abccbcccd
abc|*.d. 0 abccbcccde
ab?.c. 0 abbc
ab?.c. 1 abc
ab?.c. 1 ac
ab.+c. 0 c
ab.+c. 1 abc
ab.+c. 1 ababc
a**x. 1 aaax
""".splitlines()
for line in tests:
re, should_match, s = line.split()
assert int(should_match) == match(re, s), 'match(%r, %r)' % (re, s)
| mit | 3,458,246,283,264,082,000 | 24.622642 | 80 | 0.527982 | false |
benpatterson/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_reindex_library.py | 15 | 7173 | """ Tests for library reindex command """
import sys
import contextlib
import ddt
from django.core.management import call_command, CommandError
import mock
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, LibraryFactory
from opaque_keys import InvalidKeyError
from contentstore.management.commands.reindex_library import Command as ReindexCommand
from contentstore.courseware_index import SearchIndexingError
@contextlib.contextmanager
def nostderr():
"""
ContextManager to suppress stderr messages
http://stackoverflow.com/a/1810086/882918
"""
savestderr = sys.stderr
class Devnull(object):
""" /dev/null incarnation as output-stream-like object """
def write(self, _):
""" Write method - just does nothing"""
pass
sys.stderr = Devnull()
try:
yield
finally:
sys.stderr = savestderr
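# Typical use in the tests below: "with self.assertRaises(SystemExit), nostderr():"
# keeps the expected command-error output from cluttering the test run.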
@ddt.ddt
class TestReindexLibrary(ModuleStoreTestCase):
""" Tests for library reindex command """
def setUp(self):
""" Setup method - create libraries and courses """
super(TestReindexLibrary, self).setUp()
self.store = modulestore()
self.first_lib = LibraryFactory.create(
org="test", library="lib1", display_name="run1", default_store=ModuleStoreEnum.Type.split
)
self.second_lib = LibraryFactory.create(
org="test", library="lib2", display_name="run2", default_store=ModuleStoreEnum.Type.split
)
self.first_course = CourseFactory.create(
org="test", course="course1", display_name="run1", default_store=ModuleStoreEnum.Type.split
)
self.second_course = CourseFactory.create(
org="test", course="course2", display_name="run1", default_store=ModuleStoreEnum.Type.split
)
REINDEX_PATH_LOCATION = 'contentstore.management.commands.reindex_library.LibrarySearchIndexer.do_library_reindex'
MODULESTORE_PATCH_LOCATION = 'contentstore.management.commands.reindex_library.modulestore'
YESNO_PATCH_LOCATION = 'contentstore.management.commands.reindex_library.query_yes_no'
def _get_lib_key(self, library):
""" Get's library key as it is passed to indexer """
return library.location.library_key
def _build_calls(self, *libraries):
""" BUilds a list of mock.call instances representing calls to reindexing method """
return [mock.call(self.store, self._get_lib_key(lib)) for lib in libraries]
def test_given_no_arguments_raises_command_error(self):
""" Test that raises CommandError for incorrect arguments """
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* requires one or more arguments .*"):
call_command('reindex_library')
@ddt.data('qwerty', 'invalid_key', 'xblock-v1:qwe+rty')
def test_given_invalid_lib_key_raises_not_found(self, invalid_key):
""" Test that raises InvalidKeyError for invalid keys """
with self.assertRaises(InvalidKeyError):
call_command('reindex_library', invalid_key)
def test_given_course_key_raises_command_error(self):
""" Test that raises CommandError if course key is passed """
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
call_command('reindex_library', unicode(self.first_course.id))
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
call_command('reindex_library', unicode(self.second_course.id))
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
call_command(
'reindex_library',
unicode(self.second_course.id),
unicode(self._get_lib_key(self.first_lib))
)
def test_given_id_list_indexes_libraries(self):
""" Test that reindexes libraries when given single library key or a list of library keys """
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_library', unicode(self._get_lib_key(self.first_lib)))
self.assertEqual(patched_index.mock_calls, self._build_calls(self.first_lib))
patched_index.reset_mock()
call_command('reindex_library', unicode(self._get_lib_key(self.second_lib)))
self.assertEqual(patched_index.mock_calls, self._build_calls(self.second_lib))
patched_index.reset_mock()
call_command(
'reindex_library',
unicode(self._get_lib_key(self.first_lib)),
unicode(self._get_lib_key(self.second_lib))
)
expected_calls = self._build_calls(self.first_lib, self.second_lib)
self.assertEqual(patched_index.mock_calls, expected_calls)
def test_given_all_key_prompts_and_reindexes_all_libraries(self):
""" Test that reindexes all libraries when --all key is given and confirmed """
with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
patched_yes_no.return_value = True
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_library', all=True)
patched_yes_no.assert_called_once_with(ReindexCommand.CONFIRMATION_PROMPT, default='no')
expected_calls = self._build_calls(self.first_lib, self.second_lib)
self.assertItemsEqual(patched_index.mock_calls, expected_calls)
def test_given_all_key_prompts_and_reindexes_all_libraries_cancelled(self):
""" Test that does not reindex anything when --all key is given and cancelled """
with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
patched_yes_no.return_value = False
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_library', all=True)
patched_yes_no.assert_called_once_with(ReindexCommand.CONFIRMATION_PROMPT, default='no')
patched_index.assert_not_called()
def test_fail_fast_if_reindex_fails(self):
""" Test that fails on first reindexing exception """
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index:
patched_index.side_effect = SearchIndexingError("message", [])
with self.assertRaises(SearchIndexingError):
call_command('reindex_library', unicode(self._get_lib_key(self.second_lib)))
| agpl-3.0 | 1,158,595,123,801,546,500 | 46.190789 | 118 | 0.663321 | false |
poldrack/openfmri | pipeline/run_ANTS.py | 1 | 1827 | """ run_ANTS.py - run ANTS stripping and registration
"""
## Copyright 2011, Russell Poldrack. All rights reserved.
## Redistribution and use in source and binary forms, with or without modification, are
## permitted provided that the following conditions are met:
## 1. Redistributions of source code must retain the above copyright notice, this list of
## conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice, this list
## of conditions and the following disclaimer in the documentation and/or other materials
## provided with the distribution.
## THIS SOFTWARE IS PROVIDED BY RUSSELL POLDRACK ``AS IS'' AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
## FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RUSSELL POLDRACK OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
#dataset='ds001'
basedir='/corral/utexas/poldracklab/openfmri/shared/'
outfile=open('run_ANTS.sh','w')
for root,dirs,files in os.walk(basedir):
# if root.find(dataset)>-1:
for f in files:
if f.rfind('highres001.nii.gz')>-1:
outfile.write('python register_and_mask.py %s/%s\n'%(root,f))
outfile.close()
print 'now launch using: launch -s run_ANTS.sh -n ANTS -r 01:00:00'
| bsd-2-clause | 7,154,718,467,068,391,000 | 41.488372 | 95 | 0.736727 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/schema.py | 2 | 4144 | from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.db.backends.utils import truncate_name
class OracleGISSchemaEditor(DatabaseSchemaEditor):
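    # Oracle Spatial expects every geometry column to be registered in
    # USER_SDO_GEOM_METADATA (dimension extents, tolerance and SRID) before a
    # spatial index can be created on it; the statements below add and clear
    # that metadata alongside the usual schema operations.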
sql_add_geometry_metadata = ("""
INSERT INTO USER_SDO_GEOM_METADATA
("TABLE_NAME", "COLUMN_NAME", "DIMINFO", "SRID")
VALUES (
%(table)s,
%(column)s,
MDSYS.SDO_DIM_ARRAY(
MDSYS.SDO_DIM_ELEMENT('LONG', %(dim0)s, %(dim2)s, %(tolerance)s),
MDSYS.SDO_DIM_ELEMENT('LAT', %(dim1)s, %(dim3)s, %(tolerance)s)
),
%(srid)s
)""")
sql_add_spatial_index = 'CREATE INDEX %(index)s ON %(table)s(%(column)s) INDEXTYPE IS MDSYS.SPATIAL_INDEX'
sql_drop_spatial_index = 'DROP INDEX %(index)s'
sql_clear_geometry_table_metadata = 'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s'
sql_clear_geometry_field_metadata = (
'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s '
'AND COLUMN_NAME = %(column)s'
)
def __init__(self, *args, **kwargs):
super(OracleGISSchemaEditor, self).__init__(*args, **kwargs)
self.geometry_sql = []
def geo_quote_name(self, name):
return self.connection.ops.geo_quote_name(name)
def column_sql(self, model, field, include_default=False):
column_sql = super(OracleGISSchemaEditor, self).column_sql(model, field, include_default)
if isinstance(field, GeometryField):
db_table = model._meta.db_table
self.geometry_sql.append(
self.sql_add_geometry_metadata % {
'table': self.geo_quote_name(db_table),
'column': self.geo_quote_name(field.column),
'dim0': field._extent[0],
'dim1': field._extent[1],
'dim2': field._extent[2],
'dim3': field._extent[3],
'tolerance': field._tolerance,
'srid': field.srid,
}
)
if field.spatial_index:
self.geometry_sql.append(
self.sql_add_spatial_index % {
'index': self.quote_name(self._create_spatial_index_name(model, field)),
'table': self.quote_name(db_table),
'column': self.quote_name(field.column),
}
)
return column_sql
def create_model(self, model):
super(OracleGISSchemaEditor, self).create_model(model)
self.run_geometry_sql()
def delete_model(self, model):
super(OracleGISSchemaEditor, self).delete_model(model)
self.execute(self.sql_clear_geometry_table_metadata % {
'table': self.geo_quote_name(model._meta.db_table),
})
def add_field(self, model, field):
super(OracleGISSchemaEditor, self).add_field(model, field)
self.run_geometry_sql()
def remove_field(self, model, field):
if isinstance(field, GeometryField):
self.execute(self.sql_clear_geometry_field_metadata % {
'table': self.geo_quote_name(model._meta.db_table),
'column': self.geo_quote_name(field.column),
})
if field.spatial_index:
self.execute(self.sql_drop_spatial_index % {
'index': self.quote_name(self._create_spatial_index_name(model, field)),
})
super(OracleGISSchemaEditor, self).remove_field(model, field)
def run_geometry_sql(self):
for sql in self.geometry_sql:
self.execute(sql)
self.geometry_sql = []
def _create_spatial_index_name(self, model, field):
# Oracle doesn't allow object names > 30 characters. Use this scheme
# instead of self._create_index_name() for backwards compatibility.
return truncate_name('%s_%s_id' % (model._meta.db_table, field.column), 30)
| mit | 8,120,818,192,315,130,000 | 42.085106 | 110 | 0.561052 | false |
songyi199111/sentry | src/sentry/search/django/backend.py | 21 | 4201 | """
sentry.search.django.backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.db.models import Q
from sentry.api.paginator import Paginator
from sentry.search.base import SearchBackend
from sentry.search.django.constants import (
SORT_CLAUSES, SQLITE_SORT_CLAUSES, MYSQL_SORT_CLAUSES, MSSQL_SORT_CLAUSES,
MSSQL_ENGINES, ORACLE_SORT_CLAUSES
)
from sentry.utils.db import get_db_engine
class DjangoSearchBackend(SearchBackend):
def index(self, event):
pass
def query(self, project, query=None, status=None, tags=None,
bookmarked_by=None, assigned_to=None, first_release=None,
sort_by='date', date_filter='last_seen', date_from=None,
date_to=None, cursor=None, limit=100):
from sentry.models import Group
queryset = Group.objects.filter(project=project)
if query:
# TODO(dcramer): if we want to continue to support search on SQL
# we should at least optimize this in Postgres so that it does
# the query filter **after** the index filters, and restricts the
# result set
queryset = queryset.filter(
Q(message__icontains=query) |
Q(culprit__icontains=query)
)
if status is not None:
queryset = queryset.filter(status=status)
if bookmarked_by:
queryset = queryset.filter(
bookmark_set__project=project,
bookmark_set__user=bookmarked_by,
)
if assigned_to:
queryset = queryset.filter(
assignee_set__project=project,
assignee_set__user=assigned_to,
)
if first_release:
queryset = queryset.filter(
first_release__project=project,
first_release__version=first_release,
)
if tags:
for k, v in tags.iteritems():
queryset = queryset.filter(**dict(
grouptag__key=k,
grouptag__value=v,
))
if date_filter == 'first_seen':
if date_from and date_to:
queryset = queryset.filter(
first_seen__gte=date_from,
first_seen__lte=date_to,
)
elif date_from:
queryset = queryset.filter(first_seen__gte=date_from)
elif date_to:
queryset = queryset.filter(first_seen__lte=date_to)
elif date_filter == 'last_seen':
if date_from and date_to:
queryset = queryset.filter(
first_seen__gte=date_from,
last_seen__lte=date_to,
)
elif date_from:
queryset = queryset.filter(last_seen__gte=date_from)
elif date_to:
queryset = queryset.filter(last_seen__lte=date_to)
engine = get_db_engine('default')
if engine.startswith('sqlite'):
score_clause = SQLITE_SORT_CLAUSES[sort_by]
elif engine.startswith('mysql'):
score_clause = MYSQL_SORT_CLAUSES[sort_by]
elif engine.startswith('oracle'):
score_clause = ORACLE_SORT_CLAUSES[sort_by]
elif engine in MSSQL_ENGINES:
score_clause = MSSQL_SORT_CLAUSES[sort_by]
else:
score_clause = SORT_CLAUSES[sort_by]
if sort_by == 'tottime':
queryset = queryset.filter(time_spent_count__gt=0)
elif sort_by == 'avgtime':
queryset = queryset.filter(time_spent_count__gt=0)
queryset = queryset.extra(
select={'sort_value': score_clause},
)
# HACK: don't sort by the same column twice
if sort_by == 'date':
queryset = queryset.order_by('-sort_value')
else:
queryset = queryset.order_by('-sort_value', '-last_seen')
paginator = Paginator(queryset, '-sort_value')
return paginator.get_result(limit, cursor)
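# Illustrative usage (the project object and search terms are assumed, not
# defined here):
#     backend = DjangoSearchBackend()
#     page = backend.query(project, query='DatabaseError')
# which returns a paginated set of matching Group rows for that project.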
| bsd-3-clause | 329,046,697,419,729,700 | 34.008333 | 78 | 0.561057 | false |
shahar-stratoscale/nova | nova/paths.py | 36 | 2182 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from oslo.config import cfg
path_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory where the nova python module is installed'),
cfg.StrOpt('bindir',
default=os.path.join(sys.prefix, 'local', 'bin'),
help='Directory where nova binaries are installed'),
cfg.StrOpt('state_path',
default='$pybasedir',
help="Top-level directory for maintaining nova's state"),
]
CONF = cfg.CONF
CONF.register_opts(path_opts)
def basedir_def(*args):
"""Return an uninterpolated path relative to $pybasedir."""
return os.path.join('$pybasedir', *args)
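# For example, basedir_def('some.conf') (an arbitrary file name) returns
# '$pybasedir/some.conf'; the placeholder is interpolated by oslo.config when
# the value is used as an option default, whereas the *_rel helpers below join
# against the already-resolved CONF values.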
def bindir_def(*args):
"""Return an uninterpolated path relative to $bindir."""
return os.path.join('$bindir', *args)
def state_path_def(*args):
"""Return an uninterpolated path relative to $state_path."""
return os.path.join('$state_path', *args)
def basedir_rel(*args):
"""Return a path relative to $pybasedir."""
return os.path.join(CONF.pybasedir, *args)
def bindir_rel(*args):
"""Return a path relative to $bindir."""
return os.path.join(CONF.bindir, *args)
def state_path_rel(*args):
"""Return a path relative to $state_path."""
return os.path.join(CONF.state_path, *args)
| apache-2.0 | -1,007,451,602,583,617,500 | 31.567164 | 78 | 0.658112 | false |
hslee16/ansible-modules-extras | system/known_hosts.py | 6 | 10636 | #!/usr/bin/python
"""
Ansible module to manage the ssh known_hosts file.
Copyright(c) 2014, Matthew Vernon <[email protected]>
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this module. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: known_hosts
short_description: Add or remove a host from the C(known_hosts) file
description:
    - The M(known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
- Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
This is useful if you're going to want to use the M(git) module over ssh, for example.
- If you have a very large number of host keys to manage, you will find the M(template) module more useful.
version_added: "1.9"
options:
name:
aliases: [ 'host' ]
description:
- The host to add or remove (must match a host specified in key)
required: true
default: null
key:
description:
- The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed). The key must be in the right format for ssh (see ssh(1), section "SSH_KNOWN_HOSTS FILE FORMAT")
required: false
default: null
path:
description:
- The known_hosts file to edit
required: no
default: "(homedir)+/.ssh/known_hosts"
state:
description:
- I(present) to add the host key, I(absent) to remove it.
choices: [ "present", "absent" ]
required: no
default: present
requirements: [ ]
author: "Matthew Vernon (@mcv21)"
'''
EXAMPLES = '''
# Example using with_file to set the system known_hosts file
- name: tell the host about our servers it might want to ssh to
known_hosts: path='/etc/ssh/ssh_known_hosts'
name='foo.com.invalid'
key="{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
'''
# Makes sure public host keys are present or absent in the given known_hosts
# file.
#
# Arguments
# =========
# name = hostname whose key should be added (alias: host)
# key = line(s) to add to known_hosts file
# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
# state = absent|present (default: present)
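# A key handled here is a standard ssh known_hosts line, for example
# (illustrative values): "example.invalid ssh-rsa AAAAB3NzaC1yc2E...".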
import os
import os.path
import tempfile
import errno
import re
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
def enforce_state(module, params):
"""
Add or remove key.
"""
host = params["name"]
key = params.get("key",None)
port = params.get("port",None)
path = params.get("path")
state = params.get("state")
#Find the ssh-keygen binary
sshkeygen = module.get_bin_path("ssh-keygen",True)
# Trailing newline in files gets lost, so re-add if necessary
if key and key[-1] != '\n':
key+='\n'
if key is None and state != "absent":
module.fail_json(msg="No key specified when adding a host")
sanity_check(module,host,key,sshkeygen)
found,replace_or_add,found_line=search_for_host_key(module,host,key,path,sshkeygen)
#We will change state if found==True & state!="present"
#or found==False & state=="present"
#i.e found XOR (state=="present")
#Alternatively, if replace is true (i.e. key present, and we must change it)
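    #For clarity, the resulting decisions are:
    #  found=True,  state=present, key unchanged -> no change (unless replace_or_add)
    #  found=False, state=present                -> add the key (changed)
    #  found=True,  state=absent                 -> remove it (changed)
    #  found=False, state=absent                 -> no change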
if module.check_mode:
module.exit_json(changed = replace_or_add or (state=="present") != found)
#Now do the work.
#Only remove whole host if found and no key provided
if found and key is None and state=="absent":
module.run_command([sshkeygen,'-R',host,'-f',path], check_rc=True)
params['changed'] = True
#Next, add a new (or replacing) entry
if replace_or_add or found != (state=="present"):
try:
inf=open(path,"r")
except IOError:
            e = get_exception()
if e.errno == errno.ENOENT:
inf=None
else:
module.fail_json(msg="Failed to read %s: %s" % \
(path,str(e)))
try:
outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path))
if inf is not None:
for line_number, line in enumerate(inf, start=1):
if found_line==line_number and (replace_or_add or state=='absent'):
continue # skip this line to replace its key
outf.write(line)
inf.close()
if state == 'present':
outf.write(key)
outf.flush()
module.atomic_move(outf.name,path)
except (IOError,OSError):
e = get_exception()
module.fail_json(msg="Failed to write to file %s: %s" % \
(path,str(e)))
try:
outf.close()
except:
pass
params['changed'] = True
return params
def sanity_check(module,host,key,sshkeygen):
'''Check supplied key is sensible
host and key are parameters provided by the user; If the host
provided is inconsistent with the key supplied, then this function
quits, providing an error to the user.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
#If no key supplied, we're doing a removal, and have nothing to check here.
if key is None:
return
#Rather than parsing the key ourselves, get ssh-keygen to do it
#(this is essential for hashed keys, but otherwise useful, as the
#key question is whether ssh-keygen thinks the key matches the host).
#The approach is to write the key to a temporary file,
#and then attempt to look up the specified host in that file.
try:
outf=tempfile.NamedTemporaryFile()
outf.write(key)
outf.flush()
except IOError:
e = get_exception()
module.fail_json(msg="Failed to write to temporary file %s: %s" % \
(outf.name,str(e)))
rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,
'-f',outf.name],
check_rc=True)
try:
outf.close()
except:
pass
if stdout=='': #host not found
module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
def search_for_host_key(module,host,key,path,sshkeygen):
'''search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line)
Looks up host and keytype in the known_hosts file path; if it's there, looks to see
if one of those entries matches key. Returns:
found (Boolean): is host found in path?
replace_or_add (Boolean): is the key in path different to that supplied by user?
found_line (int or None): the line where a key of the same type was found
    if found is False, then replace_or_add is always False.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
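    # Summarising the possible return values, as implemented below:
    #   (False, False, None)    -> path missing or host not found in it
    #   (True,  False, None)    -> host present, but no key supplied by the user
    #   (True,  False, line_no) -> exactly the same key already on that line
    #   (True,  True,  line_no) -> a different key of the same type on that line
    #   (True,  True,  None)    -> host present, but no key of this type yet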
    if not os.path.exists(path):
return False, False, None
#openssh >=6.4 has changed ssh-keygen behaviour such that it returns
#1 if no host is found, whereas previously it returned 0
rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path],
check_rc=False)
if stdout=='' and stderr=='' and (rc==0 or rc==1):
return False, False, None #host not found, no other errors
if rc!=0: #something went wrong
module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr))
#If user supplied no key, we don't want to try and replace anything with it
if key is None:
return True, False, None
lines=stdout.split('\n')
new_key = normalize_known_hosts_key(key, host)
for l in lines:
if l=='':
continue
elif l[0]=='#': # info output from ssh-keygen; contains the line number where key was found
try:
# This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
# It always outputs the non-localized comment before the found key
found_line = int(re.search(r'found: line (\d+)', l).group(1))
            except AttributeError:
e = get_exception()
module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
else:
found_key = normalize_known_hosts_key(l,host)
if new_key==found_key: #found a match
return True, False, found_line #found exactly the same key, don't replace
elif new_key['type'] == found_key['type']: # found a different key for the same key type
return True, True, found_line
#No match found, return found and replace, but no line
return True, True, None
def normalize_known_hosts_key(key, host):
'''
Transform a key, either taken from a known_host file or provided by the
user, into a normalized form.
The host part (which might include multiple hostnames or be hashed) gets
replaced by the provided host. Also, any spurious information gets removed
from the end (like the username@host tag usually present in hostkeys, but
absent in known_hosts files)
'''
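    # Illustrative example (hypothetical key material): with
    #   key  = 'example.com ssh-rsa AAAAB3Nza... user@host'
    #   host = 'example.com'
    # the function returns
    #   {'host': 'example.com', 'type': 'ssh-rsa', 'key': 'AAAAB3Nza...'}
    # and a leading '@cert-authority' or '@revoked' marker would be kept in 'options'.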
    k=key.strip() #trim trailing newline
    k=k.split()
d = dict()
#The optional "marker" field, used for @cert-authority or @revoked
if k[0][0] == '@':
d['options'] = k[0]
d['host']=host
d['type']=k[2]
d['key']=k[3]
else:
d['host']=host
d['type']=k[1]
d['key']=k[2]
return d
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['host']),
key = dict(required=False, type='str'),
path = dict(default="~/.ssh/known_hosts", type='path'),
state = dict(default='present', choices=['absent','present']),
),
supports_check_mode = True
)
results = enforce_state(module,module.params)
module.exit_json(**results)
main()
| gpl-3.0 | 6,375,780,330,987,241,000 | 36.583039 | 246 | 0.622602 | false |
mazaclub/mazabot-core | plugins/MessageParser/__init__.py | 2 | 2684 | ###
# Copyright (c) 2010, Daniel Folkinshteyn
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Add a description of the plugin (to be presented to the user inside the wizard)
here. This should describe *what* the plugin does.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.unknown
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = '' # 'http://supybot.com/Members/yourname/MessageParser/download'
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| bsd-3-clause | -7,568,856,396,968,353,000 | 39.666667 | 79 | 0.762668 | false |
c0r73x/neotags.nvim | rplugin/python3/neotags/__init__.py | 1 | 2079 | # ============================================================================
# File: __init__.py
# Author: Christian Persson <[email protected]>
# Repository: https://github.com/c0r73x/neotags.nvim
# Released under the MIT license
# ============================================================================
import pynvim
from neotags.neotags import Neotags
@pynvim.plugin
class NeotagsHandlers(object):
def __init__(self, vim):
self.__vim = vim
self.__neotags = Neotags(self.__vim)
@pynvim.function('NeotagsInit')
def init(self, args):
self.__vim.async_call(self.__neotags.init)
@pynvim.function('NeotagsHighlight')
def highlight(self, args):
self.__vim.async_call(self.__neotags.update, False)
@pynvim.function('NeotagsRehighlight')
def rehighlight(self, args):
# self.__vim.async_call(self.__neotags.highlight, True)
self.__vim.async_call(self.__neotags.update, True)
@pynvim.function('NeotagsUpdate')
def update(self, args):
self.__vim.async_call(self.__neotags.update, True)
@pynvim.function('NeotagsToggle')
def toggle(self, args):
self.__vim.async_call(self.__neotags.toggle)
@pynvim.function('NeotagsAddProject')
def setbase(self, args):
self.__vim.async_call(self.__neotags.setBase, args)
@pynvim.function('NeotagsRemoveProject')
def removebase(self, args):
self.__vim.async_call(self.__neotags.removeBase, args)
@pynvim.function('NeotagsAddProjectDir')
def adddir(self, args):
self.__vim.async_call(self.__neotags.addExtraDir, args)
@pynvim.function('NeotagsRemoveProjectDir')
def removedir(self, args):
self.__vim.async_call(self.__neotags.removeExtraDir, args)
@pynvim.function('Neotags_Toggle_C_Binary')
def toggle_C_bin(self, args):
self.__vim.async_call(self.__neotags.toggle_C_bin)
@pynvim.function('Neotags_Toggle_Verbosity')
def toggle_verbosity(self, args):
self.__vim.async_call(self.__neotags.toggle_verbosity)
| mit | -902,618,798,874,772,000 | 33.081967 | 78 | 0.608466 | false |
cslzchen/osf.io | api_tests/users/views/test_user_files_list.py | 14 | 4794 | # -*- coding: utf-8 -*-
import pytest
from osf_tests.factories import AuthUserFactory
from api.base import utils
from api.base.settings.defaults import API_BASE
from osf.models import QuickFilesNode
from addons.osfstorage.models import OsfStorageFile
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestUserQuickFiles:
@pytest.fixture
def user(self):
return AuthUserFactory()
@pytest.fixture
def quickfiles(self, user):
return QuickFilesNode.objects.get(creator=user)
@pytest.fixture(autouse=True)
def add_quickfiles(self, quickfiles):
osfstorage = quickfiles.get_addon('osfstorage')
root = osfstorage.get_root()
root.append_file('Follow.txt')
root.append_file('The.txt')
root.append_file('Buzzards.txt')
@pytest.fixture()
def url(self, user):
return '/{}users/{}/quickfiles/'.format(API_BASE, user._id)
def test_authorized_gets_200(self, app, user, url):
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
def test_anonymous_gets_200(self, app, url):
res = app.get(url)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
def test_get_files_logged_in(self, app, user, url):
res = app.get(url, auth=user.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert len(ids) == OsfStorageFile.objects.count()
def test_get_files_not_logged_in(self, app, url):
res = app.get(url)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert len(ids) == OsfStorageFile.objects.count()
def test_get_files_logged_in_as_different_user(self, app, user, url):
user_two = AuthUserFactory()
res = app.get(url, auth=user_two.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert len(ids) == OsfStorageFile.objects.count()
def test_get_files_me(self, app, user, quickfiles):
user_two = AuthUserFactory()
quickfiles_two = QuickFilesNode.objects.get(creator=user_two)
osf_storage_two = quickfiles_two.get_addon('osfstorage')
root_two = osf_storage_two.get_root()
# these files should not be included in the users/me/files results
root_two.append_file('Sister.txt')
root_two.append_file('Abigail.txt')
url = '/{}users/me/quickfiles/'.format(API_BASE)
res = app.get(url, auth=user.auth)
node_json = res.json['data']
ids_returned = [each['id'] for each in node_json]
ids_from_files = quickfiles.files.all().values_list('_id', flat=True)
user_two_file_ids = quickfiles_two.files.all().values_list('_id', flat=True)
assert sorted(ids_returned) == sorted(ids_from_files)
for ident in user_two_file_ids:
assert ident not in ids_returned
def test_get_files_detail_has_user_relationship(self, app, user, quickfiles):
file_id = quickfiles.files.all().values_list('_id', flat=True).first()
url = '/{}files/{}/'.format(API_BASE, file_id)
res = app.get(url, auth=user.auth)
file_detail_json = res.json['data']
assert 'user' in file_detail_json['relationships']
assert 'node' not in file_detail_json['relationships']
assert file_detail_json['relationships']['user']['links']['related']['href'].split(
'/')[-2] == user._id
def test_get_files_has_links(self, app, user, url, quickfiles):
res = app.get(url, auth=user.auth)
file_detail_json = res.json['data'][0]
waterbutler_url = utils.waterbutler_api_url_for(
quickfiles._id,
'osfstorage',
file_detail_json['attributes']['path']
)
assert 'delete' in file_detail_json['links']
assert file_detail_json['links']['delete'] == waterbutler_url
assert 'download' in file_detail_json['links']
assert file_detail_json['links']['download'] == waterbutler_url
assert 'info' in file_detail_json['links']
assert 'move' in file_detail_json['links']
assert file_detail_json['links']['move'] == waterbutler_url
assert 'self' in file_detail_json['links']
assert 'upload' in file_detail_json['links']
assert file_detail_json['links']['upload'] == waterbutler_url
def test_disabled_users_quickfiles_gets_410(self, app, user, quickfiles, url):
user.is_disabled = True
user.save()
res = app.get(url, expect_errors=True)
assert res.status_code == 410
assert res.content_type == 'application/vnd.api+json'
| apache-2.0 | -7,323,185,194,422,189,000 | 35.59542 | 91 | 0.626825 | false |
KennyCandy/HAR | module123/HAR_v4_3.py | 2 | 17610 | # Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get current file_name as [0] of array
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG to know that whether this is traning process or not.
FLAG = 'train'
N_HIDDEN_CONFIG = 8
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                    row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [7352, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Subtract 1 to each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
self.n_steps = len(X_train[0]) # 128 time_steps per series
# Training
self.learning_rate = 0.0025
self.lambda_loss_amount = 0.0015
self.training_epochs = 3
self.batch_size = 1000
# LSTM structure
self.n_inputs = len(X_train[0][0]) # Features count is of 9: three 3D sensors features over time
self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
        # returns a random value drawn from a truncated normal distribution (tf.truncated_normal)
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_varibale(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
        # For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1] `.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, it is a full connet layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([3, 3, 1, 4])
b_conv1 = bias_varibale([4])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second Convolutional Layer
W_conv2 = weight_variable([3, 3, 4, 16])
b_conv2 = weight_variable([16])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = h_conv2
# Third Convolutional Layer
W_conv3 = weight_variable([3, 3, 16, 1])
b_conv3 = weight_variable([1])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = h_conv3
h_pool3 = tf.reshape(h_pool3, shape=[-1, 16, 18])
feature_mat = h_pool3
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_varibale([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_varibale([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
# Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
# New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9
# New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
# New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
print("--n_steps--")
print(config.n_steps)
print("--hidden--")
print(hidden)
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps/4, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers, both layers has the same shape
lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here
outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
        # outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if (FLAG == 'train') : # If it is the training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500)
range(config.batch_size, config.train_count + 1,
config.batch_size)): # (1500, 7353, 1500)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
# Save the model in this session
save_path = saver.save(sess, file_name + "/model.ckpt")
print("Model saved in file: %s" % save_path)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close() | mit | -4,966,475,290,769,681,000 | 36.630342 | 116 | 0.535945 | false |
noelbk/neutron-juniper | neutron/common/utils.py | 5 | 7252 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011, Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
# @author: Somik Behera, Nicira Networks, Inc.
"""Utilities and helper functions."""
import logging as std_logging
import os
import signal
import socket
from eventlet.green import subprocess
from oslo.config import cfg
from neutron.common import constants as q_const
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG = logging.getLogger(__name__)
synchronized = lockutils.synchronized_with_prefix('neutron-')
def read_cached_file(filename, cache_info, reload_func=None):
"""Read from a file if it has been modified.
:param cache_info: dictionary to hold opaque cache.
:param reload_func: optional function to be called with data when
file is reloaded due to a modification.
:returns: data from file
"""
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
LOG.debug(_("Reloading cached file %s"), filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
def find_config_file(options, config_file):
"""Return the first config file found.
We search for the paste config file in the following order:
* If --config-file option is used, use that
* Search for the configuration files via common cfg directories
:retval Full path to config file, or None if no config file found
"""
fix_path = lambda p: os.path.abspath(os.path.expanduser(p))
if options.get('config_file'):
if os.path.exists(options['config_file']):
return fix_path(options['config_file'])
dir_to_common = os.path.dirname(os.path.abspath(__file__))
root = os.path.join(dir_to_common, '..', '..', '..', '..')
# Handle standard directory search for the config file
config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')),
fix_path(os.path.join('~', '.neutron-venv', 'etc',
'neutron')),
fix_path('~'),
os.path.join(cfg.CONF.state_path, 'etc'),
os.path.join(cfg.CONF.state_path, 'etc', 'neutron'),
fix_path(os.path.join('~', '.local',
'etc', 'neutron')),
'/usr/etc/neutron',
'/usr/local/etc/neutron',
'/etc/neutron/',
# TODO(markmcclain) remove in Icehouse
'/usr/etc/quantum',
'/usr/local/etc/quantum',
'/etc/quantum/',
'/etc']
if 'plugin' in options:
config_file_dirs = [
os.path.join(x, 'neutron', 'plugins', options['plugin'])
for x in config_file_dirs
]
if os.path.exists(os.path.join(root, 'plugins')):
plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc'))
for p in os.listdir(os.path.join(root, 'plugins'))]
plugins = [p for p in plugins if os.path.isdir(p)]
config_file_dirs.extend(plugins)
for cfg_dir in config_file_dirs:
cfg_file = os.path.join(cfg_dir, config_file)
if os.path.exists(cfg_file):
return cfg_file
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
env=None):
return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
stderr=stderr, preexec_fn=_subprocess_setup,
close_fds=True, env=env)
def parse_mappings(mapping_list, unique_values=True):
"""Parse a list of of mapping strings into a dictionary.
:param mapping_list: a list of strings of the form '<key>:<value>'
:param unique_values: values must be unique if True
:returns: a dict mapping keys to values
"""
mappings = {}
for mapping in mapping_list:
mapping = mapping.strip()
if not mapping:
continue
split_result = mapping.split(':')
if len(split_result) != 2:
raise ValueError(_("Invalid mapping: '%s'") % mapping)
key = split_result[0].strip()
if not key:
raise ValueError(_("Missing key in mapping: '%s'") % mapping)
value = split_result[1].strip()
if not value:
raise ValueError(_("Missing value in mapping: '%s'") % mapping)
if key in mappings:
raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not "
"unique") % {'key': key, 'mapping': mapping})
if unique_values and value in mappings.itervalues():
raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' "
"not unique") % {'value': value,
'mapping': mapping})
mappings[key] = value
return mappings
def get_hostname():
return socket.gethostname()
def compare_elements(a, b):
"""Compare elements if a and b have same elements.
This method doesn't consider ordering
"""
if a is None:
a = []
if b is None:
b = []
return set(a) == set(b)
def dict2str(dic):
return ','.join("%s=%s" % (key, val)
for key, val in sorted(dic.iteritems()))
def str2dict(string):
res_dict = {}
for keyvalue in string.split(','):
(key, value) = keyvalue.split('=', 1)
res_dict[key] = value
return res_dict
def diff_list_of_dict(old_list, new_list):
new_set = set([dict2str(l) for l in new_list])
old_set = set([dict2str(l) for l in old_list])
added = new_set - old_set
removed = old_set - new_set
return [str2dict(a) for a in added], [str2dict(r) for r in removed]
def is_extension_supported(plugin, ext_alias):
return ext_alias in getattr(
plugin, "supported_extension_aliases", [])
def log_opt_values(log):
cfg.CONF.log_opt_values(log, std_logging.DEBUG)
def is_valid_vlan_tag(vlan):
return q_const.MIN_VLAN_TAG <= vlan <= q_const.MAX_VLAN_TAG
| apache-2.0 | -3,548,617,166,378,592,000 | 34.033816 | 78 | 0.591423 | false |
moijes12/oh-mainline | vendor/packages/celery/celery/tests/test_utils/test_utils_timeutils.py | 16 | 2257 | from __future__ import absolute_import
from datetime import datetime, timedelta
from celery.utils import timeutils
from celery.tests.utils import unittest
class test_timeutils(unittest.TestCase):
def test_delta_resolution(self):
D = timeutils.delta_resolution
dt = datetime(2010, 3, 30, 11, 50, 58, 41065)
deltamap = ((timedelta(days=2), datetime(2010, 3, 30, 0, 0)),
(timedelta(hours=2), datetime(2010, 3, 30, 11, 0)),
(timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)),
(timedelta(seconds=2), dt))
for delta, shoulda in deltamap:
self.assertEqual(D(dt, delta), shoulda)
def test_timedelta_seconds(self):
deltamap = ((timedelta(seconds=1), 1),
(timedelta(seconds=27), 27),
(timedelta(minutes=3), 3 * 60),
(timedelta(hours=4), 4 * 60 * 60),
(timedelta(days=3), 3 * 86400))
for delta, seconds in deltamap:
self.assertEqual(timeutils.timedelta_seconds(delta), seconds)
def test_timedelta_seconds_returns_0_on_negative_time(self):
delta = timedelta(days=-2)
self.assertEqual(timeutils.timedelta_seconds(delta), 0)
def test_humanize_seconds(self):
t = ((4 * 60 * 60 * 24, "4 days"),
(1 * 60 * 60 * 24, "1 day"),
(4 * 60 * 60, "4 hours"),
(1 * 60 * 60, "1 hour"),
(4 * 60, "4 minutes"),
(1 * 60, "1 minute"),
(4, "4.00 seconds"),
(1, "1.00 second"),
(4.3567631221, "4.36 seconds"),
(0, "now"))
for seconds, human in t:
self.assertEqual(timeutils.humanize_seconds(seconds), human)
self.assertEqual(timeutils.humanize_seconds(4, prefix="about "),
"about 4.00 seconds")
def test_maybe_iso8601_datetime(self):
now = datetime.now()
self.assertIs(timeutils.maybe_iso8601(now), now)
def test_maybe_timdelta(self):
D = timeutils.maybe_timedelta
for i in (30, 30.6):
self.assertEqual(D(i), timedelta(seconds=i))
self.assertEqual(D(timedelta(days=2)), timedelta(days=2))
| agpl-3.0 | -4,155,436,085,156,951,000 | 34.825397 | 74 | 0.550288 | false |
defaultnamehere/grr | gui/urls.py | 7 | 1084 | #!/usr/bin/env python
"""URL definitions for GRR Admin Server."""
import mimetypes
import os
from django.conf import urls
from grr import gui
from grr.lib import registry
document_root = os.path.join(os.path.dirname(gui.__file__), "static")
help_root = os.path.join(os.path.dirname(os.path.dirname(gui.__file__)), "docs")
django_base = "django."
view_base = "grr.gui.views."
handler404 = "urls.handler404"
handler500 = view_base + "ServerError"
static_handler = django_base + "views.static.serve"
urlpatterns = urls.patterns(
"",
(r"^$", view_base + "Homepage"),
# Automatic rendering is done here
(r"^api/.+", view_base + "RenderApi"),
(r"^render/[^/]+/.*", view_base + "RenderGenericRenderer"),
(r"^download/[^/]+/.*", view_base + "RenderBinaryDownload"),
(r"^static/(.*)$", static_handler,
{"document_root": document_root}),
(r"^help/(.*)$", view_base + "RenderHelp")
)
class UrlsInit(registry.InitHook):
pre = []
def RunOnce(self):
"""Run this once on init."""
mimetypes.add_type("application/font-woff", ".woff", True)
| apache-2.0 | 3,967,850,197,345,033,700 | 25.439024 | 80 | 0.643911 | false |
wxgeo/geophar | wxgeometrie/sympy/printing/tests/test_rcode.py | 4 | 14177 | from sympy.core import (S, pi, oo, Symbol, symbols, Rational, Integer,
GoldenRatio, EulerGamma, Catalan, Lambda, Dummy, Eq)
from sympy.functions import (Piecewise, sin, cos, Abs, exp, ceiling, sqrt,
gamma, sign, Max, Min, factorial, beta)
from sympy.sets import Range
from sympy.logic import ITE
from sympy.codegen import For, aug_assign, Assignment
from sympy.utilities.pytest import raises
from sympy.printing.rcode import RCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
from sympy.matrices import Matrix, MatrixSymbol
from sympy import rcode
from difflib import Differ
from pprint import pprint
x, y, z = symbols('x,y,z')
def test_printmethod():
class fabs(Abs):
def _rcode(self, printer):
return "abs(%s)" % printer._print(self.args[0])
assert rcode(fabs(x)) == "abs(x)"
def test_rcode_sqrt():
assert rcode(sqrt(x)) == "sqrt(x)"
assert rcode(x**0.5) == "sqrt(x)"
assert rcode(sqrt(x)) == "sqrt(x)"
def test_rcode_Pow():
assert rcode(x**3) == "x^3"
assert rcode(x**(y**3)) == "x^(y^3)"
g = implemented_function('g', Lambda(x, 2*x))
assert rcode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"(3.5*2*x)^(-x + y^x)/(x^2 + y)"
assert rcode(x**-1.0) == '1.0/x'
assert rcode(x**Rational(2, 3)) == 'x^(2.0/3.0)'
_cond_cfunc = [(lambda base, exp: exp.is_integer, "dpowi"),
(lambda base, exp: not exp.is_integer, "pow")]
assert rcode(x**3, user_functions={'Pow': _cond_cfunc}) == 'dpowi(x, 3)'
assert rcode(x**3.2, user_functions={'Pow': _cond_cfunc}) == 'pow(x, 3.2)'
def test_rcode_Max():
# Test for gh-11926
assert rcode(Max(x,x*x),user_functions={"Max":"my_max", "Pow":"my_pow"}) == 'my_max(x, my_pow(x, 2))'
def test_rcode_constants_mathh():
p=rcode(exp(1))
assert rcode(exp(1)) == "exp(1)"
assert rcode(pi) == "pi"
assert rcode(oo) == "Inf"
assert rcode(-oo) == "-Inf"
def test_rcode_constants_other():
assert rcode(2*GoldenRatio) == "GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
assert rcode(
2*Catalan) == "Catalan = 0.915965594177219;\n2*Catalan"
assert rcode(2*EulerGamma) == "EulerGamma = 0.577215664901533;\n2*EulerGamma"
def test_rcode_Rational():
assert rcode(Rational(3, 7)) == "3.0/7.0"
assert rcode(Rational(18, 9)) == "2"
assert rcode(Rational(3, -7)) == "-3.0/7.0"
assert rcode(Rational(-3, -7)) == "3.0/7.0"
assert rcode(x + Rational(3, 7)) == "x + 3.0/7.0"
assert rcode(Rational(3, 7)*x) == "(3.0/7.0)*x"
def test_rcode_Integer():
assert rcode(Integer(67)) == "67"
assert rcode(Integer(-1)) == "-1"
def test_rcode_functions():
assert rcode(sin(x) ** cos(x)) == "sin(x)^cos(x)"
assert rcode(factorial(x) + gamma(y)) == "factorial(x) + gamma(y)"
assert rcode(beta(Min(x, y), Max(x, y))) == "beta(min(x, y), max(x, y))"
def test_rcode_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert rcode(g(x)) == "2*x"
g = implemented_function('g', Lambda(x, 2*x/Catalan))
assert rcode(
g(x)) == "Catalan = %s;\n2*x/Catalan" % Catalan.n()
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
res=rcode(g(A[i]), assign_to=A[i])
ref=(
"for (i in 1:n){\n"
" A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
"}"
)
assert res == ref
def test_rcode_exceptions():
assert rcode(ceiling(x)) == "ceiling(x)"
assert rcode(Abs(x)) == "abs(x)"
assert rcode(gamma(x)) == "gamma(x)"
def test_rcode_user_functions():
x = symbols('x', integer=False)
n = symbols('n', integer=True)
custom_functions = {
"ceiling": "myceil",
"Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
}
assert rcode(ceiling(x), user_functions=custom_functions) == "myceil(x)"
assert rcode(Abs(x), user_functions=custom_functions) == "fabs(x)"
assert rcode(Abs(n), user_functions=custom_functions) == "abs(n)"
def test_rcode_boolean():
assert rcode(True) == "True"
assert rcode(S.true) == "True"
assert rcode(False) == "False"
assert rcode(S.false) == "False"
assert rcode(x & y) == "x & y"
assert rcode(x | y) == "x | y"
assert rcode(~x) == "!x"
assert rcode(x & y & z) == "x & y & z"
assert rcode(x | y | z) == "x | y | z"
assert rcode((x & y) | z) == "z | x & y"
assert rcode((x | y) & z) == "z & (x | y)"
def test_rcode_Relational():
from sympy import Eq, Ne, Le, Lt, Gt, Ge
assert rcode(Eq(x, y)) == "x == y"
assert rcode(Ne(x, y)) == "x != y"
assert rcode(Le(x, y)) == "x <= y"
assert rcode(Lt(x, y)) == "x < y"
assert rcode(Gt(x, y)) == "x > y"
assert rcode(Ge(x, y)) == "x >= y"
def test_rcode_Piecewise():
expr = Piecewise((x, x < 1), (x**2, True))
res=rcode(expr)
ref="ifelse(x < 1,x,x^2)"
assert res == ref
tau=Symbol("tau")
res=rcode(expr,tau)
ref="tau = ifelse(x < 1,x,x^2);"
assert res == ref
expr = 2*Piecewise((x, x < 1), (x**2, x<2), (x**3,True))
assert rcode(expr) == "2*ifelse(x < 1,x,ifelse(x < 2,x^2,x^3))"
res = rcode(expr, assign_to='c')
assert res == "c = 2*ifelse(x < 1,x,ifelse(x < 2,x^2,x^3));"
# Check that Piecewise without a True (default) condition error
#expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
#raises(ValueError, lambda: rcode(expr))
expr = 2*Piecewise((x, x < 1), (x**2, x<2))
assert(rcode(expr))== "2*ifelse(x < 1,x,ifelse(x < 2,x^2,NA))"
def test_rcode_sinc():
from sympy import sinc
expr = sinc(x)
res = rcode(expr)
ref = "ifelse(x != 0,sin(x)/x,1)"
assert res == ref
def test_rcode_Piecewise_deep():
p = rcode(2*Piecewise((x, x < 1), (x + 1, x < 2), (x**2, True)))
assert p == "2*ifelse(x < 1,x,ifelse(x < 2,x + 1,x^2))"
expr = x*y*z + x**2 + y**2 + Piecewise((0, x < 0.5), (1, True)) + cos(z) - 1
p = rcode(expr)
ref="x^2 + x*y*z + y^2 + ifelse(x < 0.5,0,1) + cos(z) - 1"
assert p == ref
ref="c = x^2 + x*y*z + y^2 + ifelse(x < 0.5,0,1) + cos(z) - 1;"
p = rcode(expr, assign_to='c')
assert p == ref
def test_rcode_ITE():
expr = ITE(x < 1, y, z)
p = rcode(expr)
ref="ifelse(x < 1,y,z)"
assert p == ref
def test_rcode_settings():
raises(TypeError, lambda: rcode(sin(x), method="garbage"))
def test_rcode_Indexed():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o = symbols('n m o', integer=True)
i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
p = RCodePrinter()
p._not_r = set()
x = IndexedBase('x')[j]
assert p._print_Indexed(x) == 'x[j]'
A = IndexedBase('A')[i, j]
assert p._print_Indexed(A) == 'A[i, j]'
B = IndexedBase('B')[i, j, k]
assert p._print_Indexed(B) == 'B[i, j, k]'
assert p._not_r == set()
def test_rcode_Indexed_without_looking_for_contraction():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
code0 = rcode(e.rhs, assign_to=e.lhs, contract=False)
assert code0 == 'Dy[i] = (y[%s] - y[i])/(x[%s] - x[i]);' % (i + 1, i + 1)
def test_rcode_loops_matrix_vector():
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = A[i, j]*x[j] + y[i];\n'
' }\n'
'}'
)
c = rcode(A[i, j]*x[j], assign_to=y[i])
assert c == s
def test_dummy_loops():
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'for (i_%(icount)i in 1:m_%(mcount)i){\n'
' y[i_%(icount)i] = x[i_%(icount)i];\n'
'}'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = rcode(x[i], assign_to=y[i])
assert code == expected
def test_rcode_loops_add():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (i in 1:m){\n'
' y[i] = x[i] + z[i];\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = A[i, j]*x[j] + y[i];\n'
' }\n'
'}'
)
c = rcode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
assert c == s
def test_rcode_loops_multiple_contractions():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' for (l in 1:p){\n'
' y[i] = a[i, j, k, l]*b[j, k, l] + y[i];\n'
' }\n'
' }\n'
' }\n'
'}'
)
c = rcode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
assert c == s
def test_rcode_loops_addfactor():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' for (l in 1:p){\n'
' y[i] = (a[i, j, k, l] + b[i, j, k, l])*c[j, k, l] + y[i];\n'
' }\n'
' }\n'
' }\n'
'}'
)
c = rcode((a[i, j, k, l] + b[i, j, k, l])*c[j, k, l], assign_to=y[i])
assert c == s
def test_rcode_loops_multiple_terms():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
s0 = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
)
s1 = (
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' y[i] = b[j]*b[k]*c[i, j, k] + y[i];\n'
' }\n'
' }\n'
'}\n'
)
s2 = (
'for (i in 1:m){\n'
' for (k in 1:o){\n'
' y[i] = a[i, k]*b[k] + y[i];\n'
' }\n'
'}\n'
)
s3 = (
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = a[i, j]*b[j] + y[i];\n'
' }\n'
'}\n'
)
c = rcode(
b[j]*a[i, j] + b[k]*a[i, k] + b[j]*b[k]*c[i, j, k], assign_to=y[i])
ref=dict()
ref[0] = s0 + s1 + s2 + s3[:-1]
ref[1] = s0 + s1 + s3 + s2[:-1]
ref[2] = s0 + s2 + s1 + s3[:-1]
ref[3] = s0 + s2 + s3 + s1[:-1]
ref[4] = s0 + s3 + s1 + s2[:-1]
ref[5] = s0 + s3 + s2 + s1[:-1]
assert (c == ref[0] or
c == ref[1] or
c == ref[2] or
c == ref[3] or
c == ref[4] or
c == ref[5])
def test_dereference_printing():
expr = x + y + sin(z) + z
assert rcode(expr, dereference=[z]) == "x + y + (*z) + sin((*z))"
def test_Matrix_printing():
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
p = rcode(mat, A)
assert p == (
"A[0] = x*y;\n"
"A[1] = ifelse(y > 0,x + 2,y);\n"
"A[2] = sin(z);")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
p = rcode(expr)
assert p == ("ifelse(x > 0,2*A[2],A[2]) + sin(A[1]) + A[0]")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert rcode(m, M) == (
"M[0] = sin(q[1]);\n"
"M[1] = 0;\n"
"M[2] = cos(q[2]);\n"
"M[3] = q[1] + q[2];\n"
"M[4] = q[3];\n"
"M[5] = 5;\n"
"M[6] = 2*q[4]/q[1];\n"
"M[7] = sqrt(q[0]) + 4;\n"
"M[8] = 0;")
def test_rcode_sgn():
expr = sign(x) * y
assert rcode(expr) == 'y*sign(x)'
p = rcode(expr, 'z')
assert p == 'z = y*sign(x);'
p = rcode(sign(2 * x + x**2) * x + x**2)
assert p == "x^2 + x*sign(x^2 + 2*x)"
expr = sign(cos(x))
p = rcode(expr)
assert p == 'sign(cos(x))'
def test_rcode_Assignment():
assert rcode(Assignment(x, y + z)) == 'x = y + z;'
assert rcode(aug_assign(x, '+', y + z)) == 'x += y + z;'
def test_rcode_For():
f = For(x, Range(0, 10, 2), [aug_assign(y, '*', x)])
sol = rcode(f)
assert sol == ("for (x = 0; x < 10; x += 2) {\n"
" y *= x;\n"
"}")
def test_MatrixElement_printing():
# test cases for issue #11821
A = MatrixSymbol("A", 1, 3)
B = MatrixSymbol("B", 1, 3)
C = MatrixSymbol("C", 1, 3)
assert(rcode(A[0, 0]) == "A[0]")
assert(rcode(3 * A[0, 0]) == "3*A[0]")
F = C[0, 0].subs(C, A - B)
assert(rcode(F) == "(-B + A)[0]")
| gpl-2.0 | -7,460,385,559,712,265,000 | 27.873727 | 105 | 0.48226 | false |
ozburo/youtube-dl | youtube_dl/extractor/americastestkitchen.py | 4 | 6349 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
try_get,
unified_strdate,
unified_timestamp,
)
class AmericasTestKitchenIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:americastestkitchen|cooks(?:country|illustrated))\.com/(?P<resource_type>episode|videos)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.americastestkitchen.com/episode/582-weeknight-japanese-suppers',
'md5': 'b861c3e365ac38ad319cfd509c30577f',
'info_dict': {
'id': '5b400b9ee338f922cb06450c',
'title': 'Japanese Suppers',
'ext': 'mp4',
'description': 'md5:64e606bfee910627efc4b5f050de92b3',
'thumbnail': r're:^https?://',
'timestamp': 1523318400,
'upload_date': '20180410',
'release_date': '20180410',
'series': "America's Test Kitchen",
'season_number': 18,
'episode': 'Japanese Suppers',
'episode_number': 15,
},
'params': {
'skip_download': True,
},
}, {
# Metadata parsing behaves differently for newer episodes (705) as opposed to older episodes (582 above)
'url': 'https://www.americastestkitchen.com/episode/705-simple-chicken-dinner',
'md5': '06451608c57651e985a498e69cec17e5',
'info_dict': {
'id': '5fbe8c61bda2010001c6763b',
'title': 'Simple Chicken Dinner',
'ext': 'mp4',
'description': 'md5:eb68737cc2fd4c26ca7db30139d109e7',
'thumbnail': r're:^https?://',
'timestamp': 1610755200,
'upload_date': '20210116',
'release_date': '20210116',
'series': "America's Test Kitchen",
'season_number': 21,
'episode': 'Simple Chicken Dinner',
'episode_number': 3,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.americastestkitchen.com/videos/3420-pan-seared-salmon',
'only_matching': True,
}, {
'url': 'https://www.cookscountry.com/episode/564-when-only-chocolate-will-do',
'only_matching': True,
}, {
'url': 'https://www.cooksillustrated.com/videos/4478-beef-wellington',
'only_matching': True,
}]
def _real_extract(self, url):
resource_type, video_id = re.match(self._VALID_URL, url).groups()
is_episode = resource_type == 'episode'
if is_episode:
resource_type = 'episodes'
resource = self._download_json(
'https://www.americastestkitchen.com/api/v6/%s/%s' % (resource_type, video_id), video_id)
video = resource['video'] if is_episode else resource
episode = resource if is_episode else resource.get('episode') or {}
return {
'_type': 'url_transparent',
'url': 'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ' % video['zypeId'],
'ie_key': 'Zype',
'description': clean_html(video.get('description')),
'timestamp': unified_timestamp(video.get('publishDate')),
'release_date': unified_strdate(video.get('publishDate')),
'episode_number': int_or_none(episode.get('number')),
'season_number': int_or_none(episode.get('season')),
'series': try_get(episode, lambda x: x['show']['title']),
'episode': episode.get('title'),
}
class AmericasTestKitchenSeasonIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|cookscountry)\.com/episodes/browse/season_(?P<id>\d+)'
_TESTS = [{
# ATK Season
'url': 'https://www.americastestkitchen.com/episodes/browse/season_1',
'info_dict': {
'id': 'season_1',
'title': 'Season 1',
},
'playlist_count': 13,
}, {
# Cooks Country Season
'url': 'https://www.cookscountry.com/episodes/browse/season_12',
'info_dict': {
'id': 'season_12',
'title': 'Season 12',
},
'playlist_count': 13,
}]
def _real_extract(self, url):
show_name, season_number = re.match(self._VALID_URL, url).groups()
season_number = int(season_number)
slug = 'atk' if show_name == 'americastestkitchen' else 'cco'
season = 'Season %d' % season_number
season_search = self._download_json(
'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug,
season, headers={
'Origin': 'https://www.%s.com' % show_name,
'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
'X-Algolia-Application-Id': 'Y1FNZXUI30',
}, query={
'facetFilters': json.dumps([
'search_season_list:' + season,
'search_document_klass:episode',
'search_show_slug:' + slug,
]),
'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title' % slug,
'attributesToHighlight': '',
'hitsPerPage': 1000,
})
def entries():
for episode in (season_search.get('hits') or []):
search_url = episode.get('search_url')
if not search_url:
continue
yield {
'_type': 'url',
'url': 'https://www.%s.com%s' % (show_name, search_url),
'id': try_get(episode, lambda e: e['objectID'].split('_')[-1]),
'title': episode.get('title'),
'description': episode.get('description'),
'timestamp': unified_timestamp(episode.get('search_document_date')),
'season_number': season_number,
'episode_number': int_or_none(episode.get('search_%s_episode_number' % slug)),
'ie_key': AmericasTestKitchenIE.ie_key(),
}
return self.playlist_result(
entries(), 'season_%d' % season_number, season)
| unlicense | 7,919,049,104,063,309,000 | 38.930818 | 148 | 0.544495 | false |
gregomni/swift | utils/swift_build_support/swift_build_support/products/benchmarks.py | 5 | 3523 | # swift_build_support/products/benchmarks.py --------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
import os
import platform
from . import cmark
from . import foundation
from . import libcxx
from . import libdispatch
from . import libicu
from . import llbuild
from . import llvm
from . import product
from . import swift
from . import swiftpm
from . import xctest
from .. import shell
from .. import targets
# Build against the current installed toolchain.
class Benchmarks(product.Product):
@classmethod
def product_source_name(cls):
return "benchmarks"
@classmethod
def is_build_script_impl_product(cls):
return False
def should_build(self, host_target):
return True
def build(self, host_target):
run_build_script_helper(host_target, self, self.args)
def should_test(self, host_target):
return self.args.test_toolchainbenchmarks
def test(self, host_target):
"""Just run a single instance of the command for both .debug and
.release.
"""
cmdline = ['--num-iters=1', 'XorLoop']
bench_Onone = os.path.join(self.build_dir, 'bin', 'Benchmark_Onone')
shell.call([bench_Onone] + cmdline)
bench_O = os.path.join(self.build_dir, 'bin', 'Benchmark_O')
shell.call([bench_O] + cmdline)
bench_Osize = os.path.join(self.build_dir, 'bin', 'Benchmark_Osize')
shell.call([bench_Osize] + cmdline)
def should_install(self, host_target):
return False
def install(self, host_target):
pass
@classmethod
def get_dependencies(cls):
return [cmark.CMark,
llvm.LLVM,
libcxx.LibCXX,
libicu.LibICU,
swift.Swift,
libdispatch.LibDispatch,
foundation.Foundation,
xctest.XCTest,
llbuild.LLBuild,
swiftpm.SwiftPM]
def run_build_script_helper(host_target, product, args):
toolchain_path = args.install_destdir
if platform.system() == 'Darwin':
# The prefix is an absolute path, so concatenate without os.path.
toolchain_path += \
targets.darwin_toolchain_prefix(args.install_prefix)
    # Our source_dir is expected to be './$SOURCE_ROOT/benchmarks'. That is due to
    # the assumption that each product is in its own build directory. This product
    # is laid out differently: its package and tools live in
    # ./$SOURCE_ROOT/swift/benchmark instead.
package_path = os.path.join(product.source_dir, '..', 'swift', 'benchmark')
package_path = os.path.abspath(package_path)
# We use a separate python helper to enable quicker iteration when working
# on this by avoiding going through build-script to test small changes.
helper_path = os.path.join(package_path, 'scripts',
'build_script_helper.py')
build_cmd = [
helper_path,
'--verbose',
'--package-path', package_path,
'--build-path', product.build_dir,
'--toolchain', toolchain_path,
]
shell.call(build_cmd)
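# For reference, a hypothetical expansion of build_cmd as assembled above (every
# path is invented; on Darwin the toolchain prefix gets appended to install_destdir):
#
#   /src/swift/benchmark/scripts/build_script_helper.py \
#       --verbose \
#       --package-path /src/swift/benchmark \
#       --build-path /build/benchmarks-macosx-x86_64 \
#       --toolchain /tmp/install/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain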
| apache-2.0 | 6,749,485,384,416,161,000 | 31.027273 | 79 | 0.624468 | false |
DolphinDream/sverchok | nodes/modifier_change/extrude_multi_alt.py | 2 | 10156 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import random
from random import gauss
from math import radians
import bpy
import bmesh
from bpy.types import Operator
from mathutils import Euler, Vector
from bpy.props import FloatProperty, IntProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_bmesh_utils import bmesh_from_pydata, pydata_from_bmesh
sv_info = {
"author": "liero, Jimmy Hazevoet",
"converted_to_sverchok": "zeffii 2017",
"original": "mesh_extra_tools/mesh_mextrude_plus.py"
}
def gloc(self, r):
return Vector((self.offx, self.offy, self.offz))
def vloc(self, r):
random.seed(self.ran + r)
return self.off * (1 + gauss(0, self.var1 / 3))
def nrot(self, n):
return Euler((radians(self.nrotx) * n[0],
radians(self.nroty) * n[1],
radians(self.nrotz) * n[2]), 'XYZ')
def vrot(self, r):
random.seed(self.ran + r)
return Euler((radians(self.rotx) + gauss(0, self.var2 / 3),
radians(self.roty) + gauss(0, self.var2 / 3),
radians(self.rotz) + gauss(0, self.var2 / 3)), 'XYZ')
def vsca(self, r):
random.seed(self.ran + r)
return self.sca * (1 + gauss(0, self.var3 / 3))
class SvMExtrudeProps():
off: FloatProperty(
soft_min=0.001, soft_max=10, min=-100, max=100, default=1.0,
name="Offset", description="Translation", update=updateNode)
offx: FloatProperty(
soft_min=-10.0, soft_max=10.0, min=-100.0, max=100.0, default=0.0,
name="Loc X", description="Global Translation X", update=updateNode)
offy: FloatProperty(
soft_min=-10.0, soft_max=10.0, min=-100.0, max=100.0, default=0.0,
name="Loc Y", description="Global Translation Y", update=updateNode)
offz: FloatProperty(
soft_min=-10.0, soft_max=10.0, min=-100.0, max=100.0, default=0.0,
name="Loc Z", description="Global Translation Z", update=updateNode)
rotx: FloatProperty(
min=-85, max=85, soft_min=-30, soft_max=30, default=0,
name="Rot X", description="X Rotation", update=updateNode)
roty: FloatProperty(
min=-85, max=85, soft_min=-30, soft_max=30, default=0,
name="Rot Y", description="Y Rotation", update=updateNode)
rotz: FloatProperty(
min=-85, max=85, soft_min=-30, soft_max=30, default=-0,
name="Rot Z", description="Z Rotation", update=updateNode)
nrotx: FloatProperty(
min=-85, max=85, soft_min=-30, soft_max=30, default=0,
name="N Rot X", description="Normal X Rotation", update=updateNode)
nroty: FloatProperty(
min=-85, max=85, soft_min=-30, soft_max=30, default=0,
name="N Rot Y", description="Normal Y Rotation", update=updateNode)
nrotz: FloatProperty(
min=-85, max=85, soft_min=-30, soft_max=30, default=-0,
name="N Rot Z", description="Normal Z Rotation", update=updateNode)
sca: FloatProperty(
min=0.01, max=10, soft_min=0.5, soft_max=1.5, default=1.0,
name="Scale", description="Scaling of the selected faces after extrusion", update=updateNode)
var1: FloatProperty(
soft_min=-1, soft_max=1, default=0, min=-10, max=10,
name="Offset Var", description="Offset variation", update=updateNode)
var2: FloatProperty(
min=-10, max=10, soft_min=-1, soft_max=1, default=0,
name="Rotation Var", description="Rotation variation", update=updateNode)
var3: FloatProperty(
min=-10, max=10, soft_min=-1, soft_max=1, default=0,
name="Scale Noise", description="Scaling noise", update=updateNode)
var4: IntProperty(
min=0, max=100, default=100,
name="Probability", description="Probability, chance of extruding a face", update=updateNode)
num: IntProperty(
min=1, max=500, soft_max=100, default=5,
name="Repeat", description="Repetitions", update=updateNode)
ran: IntProperty(
min=-9999, max=9999, default=0,
name="Seed", description="Seed to feed random values", update=updateNode)
opt1: BoolProperty(
default=True, name="Polygon coordinates",
description="Polygon coordinates, Object coordinates", update=updateNode)
opt2: BoolProperty(
default=False, name="Proportional offset",
description="Scale * Offset", update=updateNode)
opt3: BoolProperty(
default=False, name="Per step rotation noise",
description="Per step rotation noise, Initial rotation noise", update=updateNode)
opt4: BoolProperty(
default=False, name="Per step scale noise",
description="Per step scale noise, Initial scale noise", update=updateNode)
def draw_ui(self, context, layout):
col = layout.column(align=True)
col.label(text="Transformations:")
col.prop(self, "off", slider=True)
col.prop(self, "offx", slider=True)
col.prop(self, "offy", slider=True)
col.prop(self, "offz", slider=True)
col = layout.column(align=True)
col.prop(self, "rotx", slider=True)
col.prop(self, "roty", slider=True)
col.prop(self, "rotz", slider=True)
col.prop(self, "nrotx", slider=True)
col.prop(self, "nroty", slider=True)
col.prop(self, "nrotz", slider=True)
col = layout.column(align=True)
col.prop(self, "sca", slider=True)
col = layout.column(align=True)
col.label(text="Variation settings:")
col.prop(self, "var1", slider=True)
col.prop(self, "var2", slider=True)
col.prop(self, "var3", slider=True)
col.prop(self, "var4", slider=True)
col.prop(self, "ran")
col = layout.column(align=False)
col.prop(self, 'num')
col = layout.column(align=True)
col.label(text="Options:")
col.prop(self, "opt1")
col.prop(self, "opt2")
col.prop(self, "opt3")
col.prop(self, "opt4")
def perform_mextrude(self, bm, sel):
after = []
origin = Vector([0.0, 0.0, 0.0])
# faces loop
for i, of in enumerate(sel):
nro = nrot(self, of.normal)
off = vloc(self, i)
loc = gloc(self, i)
of.normal_update()
# initial rotation noise
if not self.opt3:
rot = vrot(self, i)
# initial scale noise
if not self.opt4:
s = vsca(self, i)
# extrusion loop
for r in range(self.num):
# random probability % for extrusions
if self.var4 > int(random.random() * 100):
nf = of.copy()
nf.normal_update()
no = nf.normal.copy()
ce = nf.calc_center_bounds() if self.opt1 else origin
# per step rotation noise
if self.opt3:
rot = vrot(self, i + r)
# per step scale noise
if self.opt4:
s = vsca(self, i + r)
# proportional, scale * offset
if self.opt2:
off = s * off
for v in nf.verts:
v.co -= ce
v.co.rotate(nro)
v.co.rotate(rot)
v.co += ce + loc + no * off
v.co = v.co.lerp(ce, 1 - s)
# extrude code from TrumanBlending
for a, b in zip(of.loops, nf.loops):
sf = bm.faces.new((a.vert, a.link_loop_next.vert, b.link_loop_next.vert, b.vert))
sf.normal_update()
bm.faces.remove(of)
of = nf
bm.verts.index_update()
bm.faces.index_update()
after.append(of)
out_verts, _, out_faces = pydata_from_bmesh(bm)
del bm
return (out_verts, out_faces) or None
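# Minimal sketch of how perform_mextrude is driven (only meaningful inside Blender,
# where bmesh is available; the quad, the selection and the node instance are assumptions):
#
#   verts = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
#   faces = [[0, 1, 2, 3]]
#   bm = bmesh_from_pydata(verts, [], faces, normal_update=True)
#   sel = list(bm.faces)                                      # extrude every face
#   out_verts, out_faces = perform_mextrude(node, bm, sel)    # node: a SvMultiExtrudeAlt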
class SvMultiExtrudeAlt(bpy.types.Node, SverchCustomTreeNode, SvMExtrudeProps):
    '''Extrude selected faces repeatedly with optional offset, rotation, scale and random variation (MultiExtrude Alt, adapted from the MExtrude Plus add-on)'''
bl_idname = 'SvMultiExtrudeAlt'
bl_label = 'MultiExtrude Alt from addons'
sv_icon = 'SV_MULTI_EXTRUDE'
def sv_init(self, context):
self.inputs.new('SvVerticesSocket', 'verts')
self.inputs.new('SvStringsSocket', 'faces')
self.inputs.new('SvStringsSocket', 'face_masks')
self.outputs.new('SvVerticesSocket', 'verts')
self.outputs.new('SvStringsSocket', 'faces')
def draw_buttons(self, context, layout):
draw_ui(self, context, layout)
def process(self):
# bmesh operations
verts = self.inputs['verts'].sv_get()
faces = self.inputs['faces'].sv_get()
face_masks = self.inputs['face_masks'].sv_get()
out_verts, out_faces = [], []
for _verts, _faces, _face_mask in zip(verts, faces, face_masks):
bm = bmesh_from_pydata(_verts, [], _faces, normal_update=True)
sel = []
add_sell = sel.append
for f in (f for f in bm.faces if f.index in set(_face_mask)):
f.select = True
add_sell(f)
generated_data = perform_mextrude(self, bm, sel)
if generated_data:
outv, outf = generated_data
out_verts.append(outv)
out_faces.append(outf)
self.outputs['verts'].sv_set(out_verts)
self.outputs['faces'].sv_set(out_faces)
def register():
bpy.utils.register_class(SvMultiExtrudeAlt)
def unregister():
bpy.utils.unregister_class(SvMultiExtrudeAlt)
| gpl-3.0 | -7,165,845,987,041,847,000 | 32.081433 | 101 | 0.606046 | false |
jimsimon/sky_engine | sky/engine/bindings/scripts/v8_types.py | 10 | 13437 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions for type handling and type conversion (Blink/C++ <-> V8/JS).
Extends IdlType and IdlUnionType with V8-specific properties, methods, and
class methods.
Spec:
http://www.w3.org/TR/WebIDL/#es-type-mapping
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import posixpath
from idl_types import IdlTypeBase, IdlType, IdlUnionType, IdlArrayOrSequenceType, IdlNullableType
import v8_attributes # for IdlType.constructor_type_name
from v8_globals import includes
################################################################################
# V8-specific handling of IDL types
################################################################################
NON_WRAPPER_TYPES = frozenset([
'CompareHow',
'NodeFilter',
'SerializedScriptValue',
])
TYPED_ARRAYS = {
# (cpp_type, v8_type), used by constructor templates
'ArrayBuffer': None,
'ArrayBufferView': None,
'Float32Array': ('float', 'v8::kExternalFloatArray'),
'Float64Array': ('double', 'v8::kExternalDoubleArray'),
'Int8Array': ('signed char', 'v8::kExternalByteArray'),
'Int16Array': ('short', 'v8::kExternalShortArray'),
'Int32Array': ('int', 'v8::kExternalIntArray'),
'Uint8Array': ('unsigned char', 'v8::kExternalUnsignedByteArray'),
'Uint8ClampedArray': ('unsigned char', 'v8::kExternalPixelArray'),
'Uint16Array': ('unsigned short', 'v8::kExternalUnsignedShortArray'),
'Uint32Array': ('unsigned int', 'v8::kExternalUnsignedIntArray'),
}
IdlType.is_typed_array_element_type = property(
lambda self: self.base_type in TYPED_ARRAYS)
IdlType.is_wrapper_type = property(
lambda self: (self.is_interface_type and
self.base_type not in NON_WRAPPER_TYPES))
################################################################################
# C++ types
################################################################################
CPP_TYPE_SAME_AS_IDL_TYPE = set([
'double',
'float',
'long long',
'unsigned long long',
])
CPP_INT_TYPES = set([
'byte',
'long',
'short',
])
CPP_UNSIGNED_TYPES = set([
'octet',
'unsigned int',
'unsigned long',
'unsigned short',
])
CPP_SPECIAL_CONVERSION_RULES = {
'CompareHow': 'Range::CompareHow',
'Date': 'double',
'Promise': 'ScriptPromise',
'ScriptValue': 'ScriptValue',
'boolean': 'bool',
'unrestricted double': 'double',
'unrestricted float': 'float',
}
def cpp_type_initializer(idl_type):
"""Returns a string containing a C++ initialization statement for the
corresponding type.
|idl_type| argument is of type IdlType.
"""
base_idl_type = idl_type.base_type
if idl_type.native_array_element_type:
return ''
if idl_type.is_numeric_type:
return ' = 0'
if base_idl_type == 'boolean':
return ' = false'
if (base_idl_type in NON_WRAPPER_TYPES or
base_idl_type in CPP_SPECIAL_CONVERSION_RULES or
base_idl_type == 'any' or
idl_type.is_string_type or
idl_type.is_enum):
return ''
return ' = nullptr'
def cpp_type_union(idl_type, extended_attributes=None, raw_type=False):
# FIXME: Need to revisit the design of union support.
# http://crbug.com/240176
return None
def cpp_type_initializer_union(idl_type):
return (member_type.cpp_type_initializer for member_type in idl_type.member_types)
# Allow access as idl_type.cpp_type if no arguments
IdlTypeBase.cpp_type_initializer = property(cpp_type_initializer)
IdlUnionType.cpp_type = property(cpp_type_union)
IdlUnionType.cpp_type_initializer = property(cpp_type_initializer_union)
IdlUnionType.cpp_type_args = cpp_type_union
IdlArrayOrSequenceType.native_array_element_type = property(
lambda self: self.element_type)
def cpp_template_type(template, inner_type):
"""Returns C++ template specialized to type, with space added if needed."""
if inner_type.endswith('>'):
format_string = '{template}<{inner_type} >'
else:
format_string = '{template}<{inner_type}>'
return format_string.format(template=template, inner_type=inner_type)
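# Worked examples (illustrative only): the space before the closing bracket is added
# only when the inner type itself ends with '>', which avoids the C++ '>>' token.
#   cpp_template_type('PassRefPtr', 'Node')      -> 'PassRefPtr<Node>'
#   cpp_template_type('Vector', 'RefPtr<Node>')  -> 'Vector<RefPtr<Node> >'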
# [ImplementedAs]
# This handles [ImplementedAs] on interface types, not [ImplementedAs] in the
# interface being generated. e.g., given:
# Foo.idl: interface Foo {attribute Bar bar};
# Bar.idl: [ImplementedAs=Zork] interface Bar {};
# when generating bindings for Foo, the [ImplementedAs] on Bar is needed.
# This data is external to Foo.idl, and hence computed as global information in
# compute_interfaces_info.py to avoid having to parse IDLs of all used interfaces.
IdlType.implemented_as_interfaces = {}
def implemented_as(idl_type):
base_idl_type = idl_type.base_type
if base_idl_type in IdlType.implemented_as_interfaces:
return IdlType.implemented_as_interfaces[base_idl_type]
return base_idl_type
IdlType.implemented_as = property(implemented_as)
IdlType.set_implemented_as_interfaces = classmethod(
lambda cls, new_implemented_as_interfaces:
cls.implemented_as_interfaces.update(new_implemented_as_interfaces))
################################################################################
# Includes
################################################################################
def includes_for_cpp_class(class_name, relative_dir_posix):
return set([posixpath.join('bindings', relative_dir_posix, class_name + '.h')])
INCLUDES_FOR_TYPE = {
'object': set(),
'CompareHow': set(),
'NodeList': set(['bindings/core/v8/V8NodeList.h',
'core/dom/NodeList.h',
'core/dom/StaticNodeList.h']),
'Promise': set(['bindings/core/v8/ScriptPromise.h']),
'SerializedScriptValue': set(['bindings/core/v8/SerializedScriptValue.h']),
'ScriptValue': set(['bindings/core/v8/ScriptValue.h']),
}
def includes_for_type(idl_type):
idl_type = idl_type.preprocessed_type
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in INCLUDES_FOR_TYPE:
return INCLUDES_FOR_TYPE[base_idl_type]
if idl_type.is_basic_type:
return set()
if idl_type.is_typed_array_element_type:
return set(['bindings/core/v8/custom/V8%sCustom.h' % base_idl_type])
if base_idl_type.endswith('ConstructorConstructor'):
# FIXME: rename to NamedConstructor
# FIXME: replace with a [NamedConstructorAttribute] extended attribute
# Ending with 'ConstructorConstructor' indicates a named constructor,
# and these do not have header files, as they are part of the generated
# bindings for the interface
return set()
if base_idl_type.endswith('Constructor'):
# FIXME: replace with a [ConstructorAttribute] extended attribute
base_idl_type = idl_type.constructor_type_name
if base_idl_type not in component_dir:
return set()
return set(['bindings/%s/v8/V8%s.h' % (component_dir[base_idl_type],
base_idl_type)])
IdlType.includes_for_type = property(includes_for_type)
IdlUnionType.includes_for_type = property(
lambda self: set.union(*[member_type.includes_for_type
for member_type in self.member_types]))
IdlArrayOrSequenceType.includes_for_type = property(
lambda self: self.element_type.includes_for_type)
def add_includes_for_type(idl_type):
includes.update(idl_type.includes_for_type)
IdlTypeBase.add_includes_for_type = add_includes_for_type
def includes_for_interface(interface_name):
return IdlType(interface_name).includes_for_type
def add_includes_for_interface(interface_name):
includes.update(includes_for_interface(interface_name))
def impl_should_use_nullable_container(idl_type):
return not(idl_type.cpp_type_has_null_value)
IdlTypeBase.impl_should_use_nullable_container = property(
impl_should_use_nullable_container)
def impl_includes_for_type(idl_type, interfaces_info):
includes_for_type = set()
if idl_type.impl_should_use_nullable_container:
includes_for_type.add('bindings/nullable.h')
idl_type = idl_type.preprocessed_type
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
includes_for_type.update(impl_includes_for_type(
native_array_element_type, interfaces_info))
includes_for_type.add('wtf/Vector.h')
if idl_type.is_string_type:
includes_for_type.add('wtf/text/WTFString.h')
if idl_type.name in interfaces_info:
interface_info = interfaces_info[idl_type.name]
includes_for_type.add(interface_info['include_path'])
return includes_for_type
IdlTypeBase.impl_includes_for_type = impl_includes_for_type
component_dir = {}
def set_component_dirs(new_component_dirs):
component_dir.update(new_component_dirs)
################################################################################
# C++ -> V8
################################################################################
def preprocess_idl_type(idl_type):
if idl_type.is_enum:
# Enumerations are internally DOMStrings
return IdlType('DOMString')
if (idl_type.name == 'Any' or idl_type.is_callback_function):
return IdlType('ScriptValue')
return idl_type
IdlTypeBase.preprocessed_type = property(preprocess_idl_type)
def preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes):
"""Returns IDL type and value, with preliminary type conversions applied."""
idl_type = idl_type.preprocessed_type
if idl_type.name == 'Promise':
idl_type = IdlType('ScriptValue')
if idl_type.base_type in ['long long', 'unsigned long long']:
# long long and unsigned long long are not representable in ECMAScript;
# we represent them as doubles.
is_nullable = idl_type.is_nullable
idl_type = IdlType('double')
if is_nullable:
idl_type = IdlNullableType(idl_type)
cpp_value = 'static_cast<double>(%s)' % cpp_value
return idl_type, cpp_value
IdlType.release = property(lambda self: self.is_interface_type)
IdlUnionType.release = property(
lambda self: [member_type.is_interface_type
for member_type in self.member_types])
def literal_cpp_value(idl_type, idl_literal):
"""Converts an expression that is a valid C++ literal for this type."""
# FIXME: add validation that idl_type and idl_literal are compatible
literal_value = str(idl_literal)
if idl_type.base_type in CPP_UNSIGNED_TYPES:
return literal_value + 'u'
return literal_value
IdlType.literal_cpp_value = literal_cpp_value
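# Example of the mapping (illustrative): unsigned IDL types get a 'u' suffix so the
# emitted C++ literal has the right type.
#   literal_cpp_value(IdlType('octet'), 16)      -> '16u'
#   literal_cpp_value(IdlType('long long'), 16)  -> '16'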
################################################################################
# Utility properties for nullable types
################################################################################
def cpp_type_has_null_value(idl_type):
# - String types (String/AtomicString) represent null as a null string,
# i.e. one for which String::isNull() returns true.
# - Wrapper types (raw pointer or RefPtr/PassRefPtr) represent null as
# a null pointer.
return (idl_type.is_string_type or idl_type.is_wrapper_type)
IdlTypeBase.cpp_type_has_null_value = property(cpp_type_has_null_value)
def is_implicit_nullable(idl_type):
# Nullable type where the corresponding C++ type supports a null value.
return idl_type.is_nullable and idl_type.cpp_type_has_null_value
def is_explicit_nullable(idl_type):
# Nullable type that isn't implicit nullable (see above.) For such types,
# we use Nullable<T> or similar explicit ways to represent a null value.
return idl_type.is_nullable and not idl_type.is_implicit_nullable
IdlTypeBase.is_implicit_nullable = property(is_implicit_nullable)
IdlUnionType.is_implicit_nullable = False
IdlTypeBase.is_explicit_nullable = property(is_explicit_nullable)
| bsd-3-clause | -8,908,105,044,277,456,000 | 35.414634 | 97 | 0.66049 | false |
epfl-lts2/pygsp | pygsp/tests/test_docstrings.py | 1 | 1160 | # -*- coding: utf-8 -*-
"""
Test suite for the docstrings of the pygsp package.
"""
import os
import unittest
import doctest
def gen_recursive_file(root, ext):
for root, _, filenames in os.walk(root):
for name in filenames:
if name.lower().endswith(ext):
yield os.path.join(root, name)
def test_docstrings(root, ext, setup=None):
files = list(gen_recursive_file(root, ext))
return doctest.DocFileSuite(*files, setUp=setup, tearDown=teardown,
module_relative=False)
def setup(doctest):
import numpy
import pygsp
doctest.globs = {
'graphs': pygsp.graphs,
'filters': pygsp.filters,
'utils': pygsp.utils,
'np': numpy,
}
def teardown(doctest):
"""Close matplotlib figures to avoid warning and save memory."""
import pygsp
pygsp.plotting.close_all()
# Docstrings from API reference.
suite_reference = test_docstrings('pygsp', '.py', setup)
# Docstrings from tutorials. No setup to not forget imports.
suite_tutorials = test_docstrings('.', '.rst')
suite = unittest.TestSuite([suite_reference, suite_tutorials])
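# Illustrative entry point, not part of the original module: the suite above is
# normally collected by the project's test runner, but it can also be run directly.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)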
| bsd-3-clause | 7,585,068,807,109,518,000 | 22.673469 | 71 | 0.642241 | false |
lanfker/tdma_imac | bindings/python/ns3modulescan.py | 19 | 13937 | #! /usr/bin/env python
import sys
import os.path
import pybindgen.settings
from pybindgen.gccxmlparser import ModuleParser, PygenClassifier, PygenSection, WrapperWarning
from pybindgen.typehandlers.codesink import FileCodeSink
from pygccxml.declarations import templates
from pygccxml.declarations.class_declaration import class_t
from pygccxml.declarations.calldef import free_function_t, member_function_t, constructor_t, calldef_t
## we need the smart pointer type transformation to be active even
## during gccxml scanning.
import ns3modulegen_core_customizations
## silence gccxmlparser errors; we only want error handling in the
## generated python script, not while scanning.
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, dummy_wrapper, dummy_exception, dummy_traceback_):
return True
pybindgen.settings.error_handler = ErrorHandler()
import warnings
warnings.filterwarnings(category=WrapperWarning, action='ignore')
type_annotations = {
'::ns3::AttributeChecker': {
'automatic_type_narrowing': 'true',
'allow_subclassing': 'false',
},
'::ns3::AttributeValue': {
'automatic_type_narrowing': 'true',
'allow_subclassing': 'false',
},
'::ns3::CommandLine': {
'allow_subclassing': 'true', # needed so that AddValue is able to set attributes on the object
},
'::ns3::NscTcpL4Protocol': {
'ignore': 'true', # this class is implementation detail
},
'ns3::RandomVariable::RandomVariable(ns3::RandomVariableBase const & variable) [constructor]': {
'ignore': None,
},
'ns3::RandomVariableBase * ns3::RandomVariable::Peek() const [member function]': {
'ignore': None,
},
'void ns3::RandomVariable::GetSeed(uint32_t * seed) const [member function]': {
'params': {'seed':{'direction':'out',
'array_length':'6'}}
},
'bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInfo * info) const [member function]': {
'params': {'info':{'transfer_ownership': 'false'}}
},
'static bool ns3::TypeId::LookupByNameFailSafe(std::string name, ns3::TypeId * tid) [member function]': {
        'ignore': None, # manually wrapped
},
'bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]': {
'params': {'obj': {'transfer_ownership':'false'}}
},
'bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]': {
'params': {'obj': {'transfer_ownership':'false'}}
},
'bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]': {
'params': {'obj': {'transfer_ownership':'false'}}
},
'bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]': {
'params': {'obj': {'transfer_ownership':'false'}}
},
'bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]': {
'params': {'object': {'transfer_ownership':'false'}}
},
'ns3::EmpiricalVariable::EmpiricalVariable(ns3::RandomVariableBase const & variable) [constructor]': {
'ignore': None
},
'static ns3::AttributeList * ns3::AttributeList::GetGlobal() [member function]': {
'caller_owns_return': 'false'
},
'void ns3::CommandLine::Parse(int argc, char * * argv) const [member function]': {
'ignore': None # manually wrapped
},
'extern void ns3::PythonCompleteConstruct(ns3::Ptr<ns3::Object> object, ns3::TypeId typeId, ns3::AttributeList const & attributes) [free function]': {
'ignore': None # used transparently by, should not be wrapped
},
'ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4ListRouting::GetRoutingProtocol(uint32_t index, int16_t & priority) const [member function]': {
'params': {'priority':{'direction':'out'}}
},
'ns3::Ipv4RoutingTableEntry * ns3::GlobalRouter::GetInjectedRoute(uint32_t i) [member function]': {
'params': {'return': { 'caller_owns_return': 'false',}},
},
'ns3::Ipv4RoutingTableEntry * ns3::Ipv4GlobalRouting::GetRoute(uint32_t i) const [member function]': {
'params': {'return': { 'caller_owns_return': 'false',}},
},
'::ns3::TestCase': {
'ignore': 'true', # we don't need to write test cases in Python
},
'::ns3::TestRunner': {
'ignore': 'true', # we don't need to write test cases in Python
},
'::ns3::TestSuite': {
'ignore': 'true', # we don't need to write test cases in Python
},
}
def get_ns3_relative_path(path):
l = []
head = path
while head:
head, tail = os.path.split(head)
if tail == 'ns3':
return os.path.join(*l)
l.insert(0, tail)
raise AssertionError("is the path %r inside ns3?!" % path)
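# Worked examples with hypothetical paths: everything below the deepest 'ns3'
# directory component is kept.
#   get_ns3_relative_path('/build/debug/ns3/ptr.h')           -> 'ptr.h'
#   get_ns3_relative_path('/src/ns3/internet/ipv4-header.h')  -> 'internet/ipv4-header.h'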
def pre_scan_hook(dummy_module_parser,
pygccxml_definition,
global_annotations,
parameter_annotations):
ns3_header = get_ns3_relative_path(pygccxml_definition.location.file_name)
## Note: we don't include line numbers in the comments because
## those numbers are very likely to change frequently, which would
## cause needless changes, since the generated python files are
## kept under version control.
#global_annotations['pygen_comment'] = "%s:%i: %s" % \
# (ns3_header, pygccxml_definition.location.line, pygccxml_definition)
global_annotations['pygen_comment'] = "%s: %s" % \
(ns3_header, pygccxml_definition)
## handle ns3::Object::GetObject (left to its own devices,
## pybindgen will generate a mangled name containing the template
## argument type name).
if isinstance(pygccxml_definition, member_function_t) \
and pygccxml_definition.parent.name == 'Object' \
and pygccxml_definition.name == 'GetObject':
template_args = templates.args(pygccxml_definition.demangled_name)
if template_args == ['ns3::Object']:
global_annotations['template_instance_names'] = 'ns3::Object=>GetObject'
## Don't wrap Simulator::Schedule* (manually wrapped)
if isinstance(pygccxml_definition, member_function_t) \
and pygccxml_definition.parent.name == 'Simulator' \
and pygccxml_definition.name.startswith('Schedule'):
global_annotations['ignore'] = None
# manually wrapped
if isinstance(pygccxml_definition, member_function_t) \
and pygccxml_definition.parent.name == 'Simulator' \
and pygccxml_definition.name == 'Run':
global_annotations['ignore'] = True
## http://www.gccxml.org/Bug/view.php?id=9915
if isinstance(pygccxml_definition, calldef_t):
for arg in pygccxml_definition.arguments:
if arg.default_value is None:
continue
if "ns3::MilliSeconds( )" == arg.default_value:
arg.default_value = "ns3::MilliSeconds(0)"
if "ns3::Seconds( )" == arg.default_value:
arg.default_value = "ns3::Seconds(0)"
## classes
if isinstance(pygccxml_definition, class_t):
# no need for helper classes to allow subclassing in Python, I think...
#if pygccxml_definition.name.endswith('Helper'):
# global_annotations['allow_subclassing'] = 'false'
if pygccxml_definition.decl_string.startswith('::ns3::SimpleRefCount<'):
global_annotations['incref_method'] = 'Ref'
global_annotations['decref_method'] = 'Unref'
global_annotations['peekref_method'] = 'GetReferenceCount'
global_annotations['automatic_type_narrowing'] = 'true'
return
if pygccxml_definition.decl_string.startswith('::ns3::Callback<'):
# manually handled in ns3modulegen_core_customizations.py
global_annotations['ignore'] = None
return
if pygccxml_definition.decl_string.startswith('::ns3::TracedCallback<'):
global_annotations['ignore'] = None
return
if pygccxml_definition.decl_string.startswith('::ns3::Ptr<'):
# handled by pybindgen "type transformation"
global_annotations['ignore'] = None
return
# table driven class customization
try:
annotations = type_annotations[pygccxml_definition.decl_string]
except KeyError:
pass
else:
global_annotations.update(annotations)
## free functions
if isinstance(pygccxml_definition, free_function_t):
if pygccxml_definition.name == 'PeekPointer':
global_annotations['ignore'] = None
return
## table driven methods/constructors/functions customization
if isinstance(pygccxml_definition, (free_function_t, member_function_t, constructor_t)):
try:
annotations = type_annotations[str(pygccxml_definition)]
except KeyError:
pass
else:
for key,value in annotations.items():
if key == 'params':
parameter_annotations.update (value)
del annotations['params']
global_annotations.update(annotations)
# def post_scan_hook(dummy_module_parser, dummy_pygccxml_definition, pybindgen_wrapper):
# ## classes
# if isinstance(pybindgen_wrapper, CppClass):
# if pybindgen_wrapper.name.endswith('Checker'):
# print >> sys.stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", pybindgen_wrapper
# #pybindgen_wrapper.set_instance_creation_function(AttributeChecker_instance_creation_function)
def scan_callback_classes(module_parser, callback_classes_file):
callback_classes_file.write("callback_classes = [\n")
for cls in module_parser.module_namespace.classes(function=module_parser.location_filter,
recursive=False):
if not cls.name.startswith("Callback<"):
continue
assert templates.is_instantiation(cls.decl_string), "%s is not a template instantiation" % cls
dummy_cls_name, template_parameters = templates.split(cls.decl_string)
callback_classes_file.write(" %r,\n" % template_parameters)
callback_classes_file.write("]\n")
class MyPygenClassifier(PygenClassifier):
def __init__(self, headers_map, section_precendences):
self.headers_map = headers_map
self.section_precendences = section_precendences
def classify(self, pygccxml_definition):
name = os.path.basename(pygccxml_definition.location.file_name)
try:
return self.headers_map[name]
except KeyError:
return '__main__'
def get_section_precedence(self, section_name):
if section_name == '__main__':
return -1
return self.section_precendences[section_name]
def ns3_module_scan(top_builddir, pygen_file_name, everything_h, cflags):
ns3_modules = eval(sys.stdin.readline())
## do a topological sort on the modules graph
from topsort import topsort
graph = []
module_names = ns3_modules.keys()
module_names.sort()
for ns3_module_name in module_names:
ns3_module_deps = list(ns3_modules[ns3_module_name][0])
ns3_module_deps.sort()
for dep in ns3_module_deps:
graph.append((dep, ns3_module_name))
sorted_ns3_modules = topsort(graph)
#print >> sys.stderr, "******* topological sort: ", sorted_ns3_modules
sections = [PygenSection('__main__', FileCodeSink(open(pygen_file_name, "wt")))]
headers_map = {} # header_name -> section_name
section_precendences = {} # section_name -> precedence
for prec, ns3_module in enumerate(sorted_ns3_modules):
section_name = "ns3_module_%s" % ns3_module.replace('-', '_')
file_name = os.path.join(os.path.dirname(pygen_file_name), "%s.py" % section_name)
sections.append(PygenSection(section_name, FileCodeSink(open(file_name, "wt")),
section_name + "__local"))
for header in ns3_modules[ns3_module][1]:
headers_map[header] = section_name
section_precendences[section_name] = prec
module_parser = ModuleParser('ns3', 'ns3')
module_parser.add_pre_scan_hook(pre_scan_hook)
#module_parser.add_post_scan_hook(post_scan_hook)
gccxml_options = dict(
include_paths=[top_builddir],
define_symbols={
#'NS3_ASSERT_ENABLE': None,
#'NS3_LOG_ENABLE': None,
},
cflags=('--gccxml-cxxflags "%s -DPYTHON_SCAN"' % cflags)
)
module_parser.parse_init([everything_h],
None, whitelist_paths=[top_builddir, os.path.dirname(everything_h)],
#includes=['"ns3/everything.h"'],
pygen_sink=sections,
pygen_classifier=MyPygenClassifier(headers_map, section_precendences),
gccxml_options=gccxml_options)
module_parser.scan_types()
callback_classes_file = open(os.path.join(os.path.dirname(pygen_file_name), "callbacks_list.py"), "wt")
scan_callback_classes(module_parser, callback_classes_file)
callback_classes_file.close()
module_parser.scan_methods()
module_parser.scan_functions()
module_parser.parse_finalize()
for section in sections:
section.code_sink.file.close()
if __name__ == '__main__':
ns3_module_scan(sys.argv[1], sys.argv[3], sys.argv[2], sys.argv[4])
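# Illustrative invocation, matching the argv wiring above (paths and the module map
# are invented). stdin supplies a dict mapping each ns-3 module to
# (list of dependency modules, list of headers belonging to it):
#   echo "{'core': ([], ['object.h']), 'network': (['core'], ['packet.h'])}" | \
#       python ns3modulescan.py <top_builddir> <everything_h> <pygen_file> "<cflags>"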
| gpl-2.0 | 6,564,324,079,077,996,000 | 40.852853 | 154 | 0.62883 | false |
jenmud/ansible-graph | ansible_graph/__init__.py | 1 | 4018 | """
Set up the environment by parsing the command line options and starting
a ruruki http server.
"""
import argparse
import logging
import os
from ansible.inventory import Inventory
from ansible.vars import VariableManager
from ansible.parsing.dataloader import DataLoader
from ansible.playbook import Playbook
from ruruki_eye.server import run
from ansible_graph.scrape import GRAPH
from ansible_graph.scrape import scrape_inventroy, scrape_playbook
__all__ = ["create_playbook", "create_inventory"]
LOADER = DataLoader()
VARIABLE_MANAGER = VariableManager()
def setup_loader(path):
"""
Setup the ansible loader with the base dir path.
:param path: Ansible base directory path.
:type path: :class:`str`
"""
logging.info("Updated ansible loader basedir to %r", path)
LOADER.set_basedir(path)
def create_inventory(inventory_path):
"""
Load the given inventory and return the ansible inventory.
:param inventory_path: Path to the inventory file.
:type inventory_path: :class:`str`
:returns: The loaded ansible inventory.
:rtype: :class:`ansible.inventory.Inventory`
"""
try:
inventory = Inventory(
loader=LOADER,
variable_manager=VARIABLE_MANAGER,
host_list=inventory_path,
)
scrape_inventroy(inventory)
return inventory
except Exception as error: # pylint: disable=broad-except
logging.exception(
"Unexpected error scrapping inventory %r: %r",
inventory_path, error
)
raise argparse.ArgumentTypeError(error)
def create_playbook(playbook_path):
"""
Load the given playbook and return the ansible playbook.
:param playbook_path: Path to the playbook file.
:type playbook_path: :class:`str`
:returns: The loaded ansible playbook.
:rtype: :class:`ansible.playbook.Playbook`
"""
try:
playbook = Playbook.load(
playbook_path,
loader=LOADER,
variable_manager=VARIABLE_MANAGER,
)
scrape_playbook(playbook)
return playbook
except Exception as error: # pylint: disable=broad-except
logging.exception(
"Unexpected error scrapping playbook %r: %r",
playbook_path, error
)
raise argparse.ArgumentTypeError(error)
def parse_arguments():
"""
Parse the command line arguments.
:returns: All the command line arguments.
:rtype: :class:`argparse.Namespace`
"""
parser = argparse.ArgumentParser(
description="Ansible inventory grapher."
)
parser.add_argument(
"-b",
"--base-dir",
default=os.getcwd(),
type=setup_loader,
help="Ansible base directory path (default: %(default)s)",
)
parser.add_argument(
"--runserver",
action="store_true",
help="Start a ruruki http server.",
)
parser.add_argument(
"--address",
default="0.0.0.0",
help="Address to start the web server on. (default: %(default)s)",
)
parser.add_argument(
"--port",
type=int,
default=8000,
help=(
"Port number that the web server will accept connections on. "
"(default: %(default)d)"
),
)
parser.add_argument(
"-i",
"--inventories",
nargs="*",
type=create_inventory,
required=False,
help=(
"One of more inventories to load and scrape."
),
)
parser.add_argument(
"-p",
"--playbooks",
nargs="*",
type=create_playbook,
required=False,
help=(
"One of more playbooks to load and scrape."
),
)
return parser.parse_args()
def main():
"""
Entry point.
"""
logging.basicConfig(level=logging.INFO)
namespace = parse_arguments()
if namespace.runserver is True:
run(namespace.address, namespace.port, False, GRAPH)
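# Hypothetical command line for the parser defined above, assuming main() is exposed
# as a console script (the script name is an assumption):
#   ansible-graph -b /etc/ansible -i hosts -p site.yml --runserver --port 8080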
| mit | -7,022,939,356,492,488,000 | 23.650307 | 74 | 0.612992 | false |
simonpcook/mframetest | printers/mediawiki.py | 1 | 10383 | ###### mediawiki.py - MediaWiki Printer #######################################
##
## MFrameTest
##
## Copyright (C) 2012-2013 Embecosm Limited
##
## This file is part of MFrameTest.
##
## MFrameTest is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## MFrameTest is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with MFrameTest. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
##
## Class for Printing results to a MediaWiki wiki
## (This class is based off the GitHub version)
## This requires the mwclient python package, but should be easily convertable
## to alternatives.
##
###############################################################################
import math, re, sys
import mwclient
""" Class for storing results to MediaWiki """
class mediawiki:
_CONFIGKEY = 'print_mediawiki'
config = None
verbose = False
index = None
key = None
description = None
username = None
password = None
wikiURL = None
site = None
""" Default Wiki Index Page """
_DEFAULT_INDEX = """This page contains the summary of test results for %s
{|
<!-- ## NEXTROW ## -->
|}
<!-- ## NEXTKEY 1 ## --> """
""" Default Test Page """
_DEFAULT_TESTPAGE = """
__NOTOC__
[[%s-Test-%i| « Previous Test]] | [[%s-Test-%i| Next Test »]]
''Note:'' As pass results may be large and push the limits of the wiki,
they are on a separate page, [[%s-Passes-%s|here]]. Lists of newly broken/fixed
tests can be found [[%s-Changed-%s|here]].
== Test Environment ==
%s
== Test Results ==
%s
"""
""" Class Constructor. Loads and parses configuration. """
def __init__(self, config):
# Load config and set variables
self.config = config
try:
if config.get('core', 'verbose') == '1':
self.verbose = True
except:
pass
# If we don't have a test specific config section, raise error
if not self._CONFIGKEY in config._sections:
sys.stderr.write('Error: Config for MediaWiki Printer not found. Is ' + \
'this the correct printer?\n')
sys.exit(1)
# Load config
self.index = self.getConfig('index')
if self.index == None:
sys.stderr.write('Error: MediaWiki config is missing index page name.\n')
sys.exit(1)
self.key = self.getConfig('key')
if self.key == None:
sys.stderr.write('Error: MediaWiki config is missing test key.\n')
sys.exit(1)
self.description = self.getConfig('Description')
if self.description == None:
self.description = 'Unnamed Test Suite'
self.username = self.getConfig('username')
if self.username == None:
sys.stderr.write('Error: MediaWiki config is missing username.\n')
sys.exit(1)
self.password = self.getConfig('password')
if self.key == None:
sys.stderr.write('Error: MediaWiki config is missing password.\n')
sys.exit(1)
self.wikiURL = self.getConfig('wikiurl')
if self.wikiURL == None:
sys.stderr.write('Error: MediaWiki config is missing test key.\n')
sys.exit(1)
if not self.wikiURL.startswith('http://'):
sys.stderr.write('Error: mwclient only supports http://\n')
sys.exit(1)
# Try to connect
url = self.wikiURL.split('/', 3)
print url
self.site = mwclient.Site(url[2], path='/'+url[3])
self.site.login(username=self.username, password=self.password)
""" Helper function to pull class-specific configuration variables """
def getConfig(self, name):
if not self.config:
sys.stderr.write('Error: Tried to load config with no config loaded')
sys.exit(1)
try:
return self.config.get(self._CONFIGKEY, name)
except:
return None
""" If possible, returns the previous set of test results from the wiki. """
def loadLastTest(self):
# If there is no index (i.e. first test), then return an empty set
index = self.site.pages[self.index].edit()
if index == '':
return {}
# Find index number of previous test key, if invalid (or not exist),
# return empty set
try:
nextkey = re.search("<!-- ## NEXTKEY ([0-9]*) ## -->", index)
if nextkey == None:
return {}
nextkey = nextkey.groups()[0]
prevkey = int(nextkey) - 1
page = self.site.pages[self.key + '-Passes-' + str(prevkey)].edit()
if page == '':
return {}
# Load and return set of previous results, if an exception occurs, just
# return an empty set
results = {}
page = page.split('\n')
for line in page:
# New test set
if line.startswith('== '):
test = {}
results[line[3:-3]] = test
elif line.startswith('=== '):
testlist = []
if line == '=== Unexpected Failures ===':
test['FAIL'] = testlist
elif line == '=== Unexpected Passes ===':
test['XPASS'] = testlist
else:
testlist.append(line[4:])
except:
return {}
return results
""" Stores results to wiki. """
def storeResults(self, rundesc, results, env):
# If the index page does not exist, attempt to create a new one
if self.site.pages[self.index].edit() == '':
sys.stderr.write('Warning: No index found, creating new.\n')
index = self._DEFAULT_INDEX % self.description
else:
index = self.site.pages[self.index].edit()
# Find index number of next key, exit if unable to parse
nextkey = re.search("<!-- ## NEXTKEY ([0-9]*) ## -->", index)
if nextkey == None:
sys.stderr.write('Error: Unable to parse index.')
sys.exit(1)
nextkey = nextkey.groups()[0]
# Build testresult row
testtable = self.genResultTable(results, env, 2)
testrow = '<!-- ## NEXTROW ## -->\n|-\n ! '
testrow += '[[%s-Test-%s|Test %s]]<br>\'\'%s\'\'<br>\'\'%s\'\' || %s ' % \
(self.key, nextkey, nextkey, env['Test Date'], rundesc, testtable)
# Update index and next row key
index = index.replace('<!-- ## NEXTROW ## -->', testrow)
index = index.replace('<!-- ## NEXTKEY ' + nextkey + ' ## -->', \
'<!-- ## NEXTKEY ' + str(int(nextkey) + 1) + ' ## -->')
# Build results pages
envtable = self.genEnvTable(env)
testtable = self.genResultTable(results, env, 3)
passtable = self.genPassTable(results)
difftable = self.genDiffTable(results)
testpage = self._DEFAULT_TESTPAGE % (self.key, int(nextkey)-1, self.key,
int(nextkey)+1, self.key, nextkey, self.key, nextkey,
envtable, testtable)
# Write, commit and push new pages, using the key as a directory
if self.verbose:
sys.stderr.write('Updating wiki\n')
logmessage = 'Updated wiki for test ' + self.key + '-' + nextkey
self.site.pages[self.index].save(text=index, summary=logmessage)
self.site.pages[self.key + '-Test-' + nextkey].save(text=testpage, \
summary=logmessage)
self.site.pages[self.key + '-Passes-' + nextkey].save(text=passtable, \
summary=logmessage)
self.site.pages[self.key + '-Changed-' + nextkey].save(text=difftable, \
summary=logmessage)
""" Builds table of newly broken/fixed tests. """
def genDiffTable(self, results):
table = ''
for dataset in sorted(results):
if 'testlist' in results[dataset].keys():
resultset = results[dataset]['testlist']
table += '== ' + dataset + ' ==\n'
table += '=== Newly Broken ===\n'
for test in resultset['NEWFAIL']:
table += ' ' + test + '\n'
table += '=== Newly Fixed ===\n'
for test in resultset['NOTFAIL']:
table += ' ' + test + '\n'
table += '\n'
return table
""" Builds table of environment variables. """
def genEnvTable(self, env):
result = '{|'
for key in sorted(env):
result += '\n|-\n! ' + key + ' || ' + str(env[key])
result += '\n|}'
return result
""" Builds list of unexpected failures. """
def genPassTable(self, results):
table = ''
for dataset in sorted(results):
if 'testlist' in results[dataset].keys():
resultset = results[dataset]['testlist']
table += '== ' + dataset + ' ==\n'
table += '=== Unexpected Failures ===\n'
for test in resultset['FAIL']:
table += ' ' + test + '\n'
table += '=== Unexpected Passes ===\n'
for test in resultset['XPASS']:
table += ' ' + test + '\n'
table += '\n'
return table
""" Builds results table. """
def genResultTable(self, results, env, width):
colid = 0
testrow = '\n{|'
for dataset in sorted(results):
if colid == 0:
testrow += '\n|-'
resultset = results[dataset]['results']
testrow += self.resultFormat(dataset, resultset, env)
colid = (colid + 1) % width
testrow += '\n|}\n'
return testrow
""" Formats one set of summary results. """
def resultFormat(self, name, dataset, env):
# Calculate padding size
pad = int(math.ceil(math.log10(max(dataset))))
retval = '\n|| \'\'\'%s\'\'\'<pre>' % name
if dataset[0] > 0:
retval += '\n%s expected passes ' % str(dataset[0]).rjust(pad)
if dataset[1] > 0:
retval += '\n%s unexpected failures ' % str(dataset[1]).rjust(pad)
if dataset[2] > 0:
retval += '\n%s unexpected successes ' % str(dataset[2]).rjust(pad)
if dataset[3] > 0:
retval += '\n%s expected failures ' % str(dataset[3]).rjust(pad)
if dataset[4] > 0:
retval += '\n%s unresolved testcases ' % str(dataset[4]).rjust(pad)
if dataset[5] > 0:
retval += '\n%s untested testcases ' % str(dataset[5]).rjust(pad)
if dataset[6] > 0:
retval += '\n%s unsupported tests ' % str(dataset[6]).rjust(pad)
retval += '</pre>'
return retval
""" Post-execution cleanup (if required). """
def cleanup(self):
pass
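# Rough usage sketch (illustrative). The section and option names are the ones read
# in __init__; every value is invented, and the constructor logs in to the wiki
# immediately, so a reachable MediaWiki instance is required:
#
#   import ConfigParser
#   cfg = ConfigParser.ConfigParser()
#   cfg.add_section('core')
#   cfg.add_section('print_mediawiki')
#   for key, value in [('index', 'TestIndex'), ('key', 'Nightly'),
#                      ('username', 'testbot'), ('password', 'secret'),
#                      ('wikiurl', 'http://wiki.example.org/w/')]:
#       cfg.set('print_mediawiki', key, value)
#   printer = mediawiki(cfg)
#   printer.storeResults('weekly run', results, env)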
| gpl-3.0 | -6,518,489,005,081,915,000 | 34.558219 | 79 | 0.585284 | false |
thaim/ansible | lib/ansible/module_utils/network/junos/argspec/lag_interfaces/lag_interfaces.py | 23 | 1944 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the junos_lag_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class Lag_interfacesArgs(object):
"""The arg spec for the junos_lag_interfaces module
"""
def __init__(self, **kwargs):
pass
argument_spec = {'config': {'elements': 'dict',
'options': {'members': {'elements': 'dict',
'options': {'link_type': {'choices': ['primary',
'backup']},
'member': {'type': 'str'}},
'type': 'list'},
'mode': {'choices': ['active', 'passive']},
'name': {'required': True, 'type': 'str'},
'link_protection': {'type': 'bool'}},
'type': 'list'},
'state': {'choices': ['merged', 'replaced', 'overridden', 'deleted'],
'default': 'merged',
'type': 'str'}}
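    # Illustrative task arguments accepted by this spec (interface names and values
    # are invented):
    #   config:
    #     - name: ae0
    #       mode: active
    #       members:
    #         - member: ge-0/0/1
    #           link_type: primary
    #         - member: ge-0/0/2
    #   state: merged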
| mit | 5,864,629,116,853,440,000 | 37.117647 | 105 | 0.400206 | false |
Distrotech/yum-utils | plugins/post-transaction-actions/post-transaction-actions.py | 4 | 6748 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2008 Red Hat, Inc
# written by Seth Vidal <[email protected]>
"""
This plugin runs actions following the transaction based on the content of
the transaction.
"""
from yum.plugins import TYPE_CORE
from yum.constants import *
import yum.misc
from yum.parser import varReplace
from yum.packages import parsePackages
import fnmatch
import re
import os
import glob
import shlex
requires_api_version = '2.4'
plugin_type = (TYPE_CORE,)
_just_installed = {} # pkgtup = po
def parse_actions(ddir, conduit):
"""read in .action files from ddir path.
store content in a list of tuples"""
action_tuples = [] # (action key, action_state, shell command)
action_file_list = []
if os.access(ddir, os.R_OK):
action_file_list.extend(glob.glob(ddir + "*.action"))
if action_file_list:
for f in action_file_list:
for line in open(f).readlines():
line = line.strip()
if line and line[0] != "#":
try:
(a_key, a_state, a_command) = line.split(':', 2)
except ValueError,e:
conduit.error(2,'Bad Action Line: %s' % line)
continue
else:
action_tuples.append((a_key, a_state, a_command))
return action_tuples
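# For reference, each *.action file holds one colon-separated triple per line in the
# form "key:state:command"; the key is a package spec or a path/glob and the state is
# install/update/remove/any. The lines below are invented examples, not shipped defaults:
#   tzdata:update:/usr/sbin/tzdata-update
#   /usr/bin/*:install:logger $name-$ver-$rel dropped a file under /usr/bin
#   *:any:logger $name was part of the transaction ($state)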
def _get_installed_po(rpmdb, pkgtup):
(n,a,e,v,r) = pkgtup
if pkgtup in _just_installed:
return _just_installed[pkgtup]
return rpmdb.searchNevra(name=n, arch=a, epoch=e, ver=v, rel=r)[0]
def _convert_vars(txmbr, command):
"""converts %options on the command to their values from the package it
is running it for: takes $name, $arch, $ver, $rel, $epoch,
$state, $repoid"""
state_dict = { TS_INSTALL: 'install',
TS_TRUEINSTALL: 'install',
TS_OBSOLETING: 'obsoleting',
TS_UPDATE: 'updating',
TS_ERASE: 'remove',
TS_OBSOLETED: 'obsoleted',
TS_UPDATED: 'updated'}
try:
state = state_dict[txmbr.output_state]
except KeyError:
state = 'unknown - %s' % txmbr.output_state
vardict = {'name': txmbr.name,
'arch': txmbr.arch,
'ver': txmbr.version,
'rel': txmbr.release,
'epoch': txmbr.epoch,
'repoid': txmbr.repoid,
'state': state }
result = varReplace(command, vardict)
return result
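# Example substitution (values depend on the transaction member; the ones shown are
# invented). For a fresh install of bash from the 'base' repo,
#   _convert_vars(txmbr, 'logger $name-$ver-$rel ($state from $repoid)')
# would return something like
#   'logger bash-4.2.46-34.el7 (install from base)'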
def pretrans_hook(conduit):
# Prefetch filelist for packages to be removed,
# otherwise for updated packages headers will not be available
ts = conduit.getTsInfo()
removes = ts.getMembersWithState(output_states=TS_REMOVE_STATES)
for txmbr in removes:
txmbr.po.filelist
def posttrans_hook(conduit):
# we have provides/requires for everything
# we do not have filelists for erasures
# we have to fetch filelists for the package object for installs/updates
action_dir = conduit.confString('main','actiondir','/etc/yum/post-actions/')
action_tuples = parse_actions(action_dir, conduit)
commands_to_run = {}
ts = conduit.getTsInfo()
rpmdb = conduit.getRpmDB()
all = ts.getMembers()
removes = ts.getMembersWithState(output_states=TS_REMOVE_STATES)
installs = ts.getMembersWithState(output_states=TS_INSTALL_STATES)
updates = ts.getMembersWithState(output_states=[TS_UPDATE, TS_OBSOLETING])
for (a_k, a_s, a_c) in action_tuples:
#print 'if %s in state %s the run %s' %( a_k, a_s, a_c)
if a_s == 'update':
pkgset = updates
elif a_s == 'install':
pkgset = installs
elif a_s == 'remove':
pkgset = removes
elif a_s == 'any':
pkgset = all
else:
# no idea what this is skip it
conduit.error(2,'whaa? %s' % a_s)
continue
if a_k.startswith('/'):
if yum.misc.re_glob(a_k):
restring = fnmatch.translate(a_k)
c_string = re.compile(restring)
for txmbr in pkgset:
matched = False
thispo = txmbr.po
if txmbr.output_state in TS_INSTALL_STATES:
# thispo is AvailablePackage; filelist access could trigger download
# of the filelist. Since it's installed now, use rpmdb data instead.
thispo = _get_installed_po(rpmdb, txmbr.pkgtup)
if not yum.misc.re_glob(a_k):
if a_k in thispo.filelist + thispo.dirlist + thispo.ghostlist:
thiscommand = _convert_vars(txmbr, a_c)
commands_to_run[thiscommand] = 1
matched = True
else:
for name in thispo.filelist + thispo.dirlist + thispo.ghostlist:
if c_string.match(name):
thiscommand = _convert_vars(txmbr, a_c)
commands_to_run[thiscommand] = 1
matched = True
break
if matched:
break
continue
if a_k.find('/') == -1: # pkgspec
pkgs = [ txmbr.po for txmbr in pkgset ]
e,m,u = parsePackages(pkgs, [a_k])
if not u:
for pkg in e+m:
for txmbr in ts.getMembers(pkgtup=pkg.pkgtup):
thiscommand = _convert_vars(txmbr, a_c)
commands_to_run[thiscommand] = 1
continue
for comm in commands_to_run.keys():
try:
args = shlex.split(comm)
except ValueError, e:
conduit.error(2,"command was not parseable: %s" % comm)
continue
#try
conduit.info(2,'Running post transaction command: %s' % comm)
p = os.system(comm)
#except?
| gpl-2.0 | -846,722,321,956,620,400 | 35.085561 | 89 | 0.566686 | false |
nielsvanoch/django | django/db/backends/mysql/introspection.py | 13 | 8710 | import re
from .base import FIELD_TYPE
from django.utils.datastructures import OrderedSet
from django.db.backends import BaseDatabaseIntrospection, FieldInfo, TableInfo
from django.utils.encoding import force_text
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = {
FIELD_TYPE.BLOB: 'TextField',
FIELD_TYPE.CHAR: 'CharField',
FIELD_TYPE.DECIMAL: 'DecimalField',
FIELD_TYPE.NEWDECIMAL: 'DecimalField',
FIELD_TYPE.DATE: 'DateField',
FIELD_TYPE.DATETIME: 'DateTimeField',
FIELD_TYPE.DOUBLE: 'FloatField',
FIELD_TYPE.FLOAT: 'FloatField',
FIELD_TYPE.INT24: 'IntegerField',
FIELD_TYPE.LONG: 'IntegerField',
FIELD_TYPE.LONGLONG: 'BigIntegerField',
FIELD_TYPE.SHORT: 'SmallIntegerField',
FIELD_TYPE.STRING: 'CharField',
FIELD_TYPE.TIME: 'TimeField',
FIELD_TYPE.TIMESTAMP: 'DateTimeField',
FIELD_TYPE.TINY: 'IntegerField',
FIELD_TYPE.TINY_BLOB: 'TextField',
FIELD_TYPE.MEDIUM_BLOB: 'TextField',
FIELD_TYPE.LONG_BLOB: 'TextField',
FIELD_TYPE.VAR_STRING: 'CharField',
}
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SHOW FULL TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
        Returns a description of the table, with the DB-API cursor.description interface.
"""
# varchar length returned by cursor.description is an internal length,
# not visible length (#5725), use information_schema database to fix this
cursor.execute("""
SELECT column_name, character_maximum_length FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()
AND character_maximum_length IS NOT NULL""", [table_name])
length_map = dict(cursor.fetchall())
# Also getting precision and scale from information_schema (see #5014)
cursor.execute("""
SELECT column_name, numeric_precision, numeric_scale FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()
AND data_type='decimal'""", [table_name])
numeric_map = dict((line[0], tuple(int(n) for n in line[1:])) for line in cursor.fetchall())
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),)
+ line[1:3]
+ (length_map.get(line[0], line[3]),)
+ numeric_map.get(line[0], line[4:6])
+ (line[6],)))
for line in cursor.description]
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict((d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name)))
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
my_field_dict = self._name_to_index(cursor, table_name)
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
other_field_index = self._name_to_index(cursor, other_table)[other_field]
my_field_index = my_field_dict[my_fieldname]
relations[my_field_index] = (other_field_index, other_table)
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
# Do a two-pass search for indexes: on first pass check which indexes
# are multicolumn, on second pass check which single-column indexes
# are present.
rows = list(cursor.fetchall())
multicol_indexes = set()
for row in rows:
if row[3] > 1:
multicol_indexes.add(row[2])
indexes = {}
for row in rows:
if row[2] in multicol_indexes:
continue
if row[4] not in indexes:
indexes[row[4]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[2] == 'PRIMARY':
indexes[row[4]]['primary_key'] = True
if not row[1]:
indexes[row[4]]['unique'] = True
return indexes
def get_storage_engine(self, cursor, table_name):
"""
Retrieves the storage engine for a given table.
"""
cursor.execute(
"SELECT engine "
"FROM information_schema.tables "
"WHERE table_name = %s", [table_name])
return cursor.fetchone()[0]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = %s AND
kc.table_name = %s
"""
cursor.execute(name_query, [self.connection.settings_dict['NAME'], table_name])
for constraint, column, ref_table, ref_column in cursor.fetchall():
if constraint not in constraints:
constraints[constraint] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': False,
'check': False,
'foreign_key': (ref_table, ref_column) if ref_column else None,
}
constraints[constraint]['columns'].add(column)
# Now get the constraint types
type_query = """
SELECT c.constraint_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = %s AND
c.table_name = %s
"""
cursor.execute(type_query, [self.connection.settings_dict['NAME'], table_name])
for constraint, kind in cursor.fetchall():
if kind.lower() == "primary key":
constraints[constraint]['primary_key'] = True
constraints[constraint]['unique'] = True
elif kind.lower() == "unique":
constraints[constraint]['unique'] = True
# Now add in the indexes
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
for table, non_unique, index, colseq, column in [x[:5] for x in cursor.fetchall()]:
if index not in constraints:
constraints[index] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': True,
'check': False,
'foreign_key': None,
}
constraints[index]['index'] = True
constraints[index]['columns'].add(column)
# Convert the sorted sets to lists
for constraint in constraints.values():
constraint['columns'] = list(constraint['columns'])
return constraints
| bsd-3-clause | 9,046,981,895,095,311,000 | 42.989899 | 113 | 0.57279 | false |
ypu/virt-test | virttest/qemu_devices_unittest.py | 3 | 45197 | #!/usr/bin/python
"""
This is a unittest for qemu_devices library.
:author: Lukas Doktor <[email protected]>
:copyright: 2012 Red Hat, Inc.
"""
__author__ = """Lukas Doktor ([email protected])"""
import re
import unittest
import os
import common
from autotest.client.shared.test_utils import mock
from qemu_devices import qdevices, qbuses, qcontainer
from qemu_devices.utils import DeviceHotplugError, DeviceRemoveError
import data_dir
import qemu_monitor
UNITTEST_DATA_DIR = os.path.join(
data_dir.get_root_dir(), "virttest", "unittest_data")
# Dummy variables
# qemu-1.5.0 human monitor help output
QEMU_HMP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__hmp_help")).read()
# qemu-1.5.0 QMP monitor commands output
QEMU_QMP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__qmp_help")).read()
# qemu-1.5.0 -help
QEMU_HELP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__help")).read()
# qemu-1.5.0 -devices ?
QEMU_DEVICES = open(
os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__devices_help")).read()
# qemu-1.5.0 -M ?
QEMU_MACHINE = open(
os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__machine_help")).read()
class ParamsDict(dict):
""" params like dictionary """
def objects(self, item):
if self.get(item):
return self.get(item).split(' ')
def object_params(self, obj):
ret = self.copy()
for (param, value) in self.iteritems():
if param.endswith('_%s' % obj):
ret[param[:-len('_%s' % obj)]] = value
return ret
class MockHMPMonitor(qemu_monitor.HumanMonitor):
""" Dummy class inherited from qemu_monitor.HumanMonitor """
def __init__(self): # pylint: disable=W0231
self.debug_log = False
def __del__(self):
pass
class Devices(unittest.TestCase):
""" set of qemu devices tests """
def test_q_base_device(self):
""" QBaseDevice tests """
qdevice = qdevices.QBaseDevice('MyType',
{'ParamA': 'ValueA',
'AUTOREMOVE': None},
'Object1',
{'type': 'pci'})
self.assertEqual(qdevice['ParamA'], 'ValueA', 'Param added during '
'__init__ is corrupted %s != %s' % (qdevice['ParamA'],
'ValueA'))
qdevice['ParamA'] = 'ValueB'
qdevice.set_param('BoolTrue', True)
qdevice.set_param('BoolFalse', 'off', bool)
qdevice['Empty'] = 'EMPTY_STRING'
out = """MyType
aid = None
aobject = Object1
parent_bus = {'type': 'pci'}
child_bus = []
params:
ParamA = ValueB
BoolTrue = on
BoolFalse = off
Empty = ""
"""
self.assertEqual(qdevice.str_long(), out, "Device output doesn't match"
"\n%s\n\n%s" % (qdevice.str_long(), out))
def test_q_string_device(self):
""" QStringDevice tests """
qdevice = qdevices.QStringDevice('MyType', {'addr': '0x7'},
cmdline='-qdevice ahci,addr=%(addr)s')
self.assertEqual(qdevice.cmdline(), '-qdevice ahci,addr=0x7', "Cmdline"
" doesn't match expected one:\n%s\n%s"
% (qdevice.cmdline(), '-qdevice ahci,addr=0x7'))
def test_q_device(self):
""" QDevice tests """
qdevice = qdevices.QDevice('ahci', {'addr': '0x7'})
self.assertEqual(str(qdevice), "a'ahci'", "Alternative name error %s "
"!= %s" % (str(qdevice), "a'ahci'"))
qdevice['id'] = 'ahci1'
self.assertEqual(str(qdevice), "q'ahci1'", "Id name error %s "
"!= %s" % (str(qdevice), "q'ahci1'"))
exp = "device_add ahci,addr=0x7,id=ahci1"
out = qdevice.hotplug_hmp()
self.assertEqual(out, exp, "HMP command corrupted:\n%s\n%s"
% (out, exp))
exp = ("('device_add', OrderedDict([('addr', '0x7'), "
"('driver', 'ahci'), ('id', 'ahci1')]))")
out = str(qdevice.hotplug_qmp())
self.assertEqual(out, exp, "QMP command corrupted:\n%s\n%s"
% (out, exp))
class Buses(unittest.TestCase):
""" Set of bus-representation tests """
def test_q_sparse_bus(self):
""" Sparse bus tests (general bus testing) """
bus = qbuses.QSparseBus('bus',
(['addr1', 'addr2', 'addr3'], [2, 6, 4]),
'my_bus',
'bus_type',
'autotest_bus')
qdevice = qdevices.QDevice
# Correct records
params = {'addr1': '0', 'addr2': '0', 'addr3': '0', 'bus': 'my_bus'}
dev = qdevice('dev1', params, parent_bus={'type': 'bus_type'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
params = {'addr1': '1', 'addr2': '0', 'addr3': '0', 'bus': 'my_bus'}
dev = qdevice('dev2', params, parent_bus={'type': 'bus_type'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
params = {'addr1': '1', 'addr2': '1', 'addr3': '0', 'bus': 'my_bus'}
dev = qdevice('dev3', params, parent_bus={'type': 'bus_type'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
params = {'addr1': '1', 'addr2': '1', 'addr3': '1', 'bus': 'my_bus'}
dev = qdevice('dev4', params, parent_bus={'type': 'bus_type'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
params = {'addr1': '1', 'bus': 'my_bus'}
dev = qdevice('dev5', params, parent_bus={'type': 'bus_type'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
params = {'bus': 'my_bus'}
dev = qdevice('dev6', params, parent_bus={'type': 'bus_type'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
params = {}
dev2 = qdevice('dev7', params, parent_bus={'type': 'bus_type'})
exp = []
out = bus.insert(dev2, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev2.str_long(), bus.str_long()))
# Compare short repr
exp = ("my_bus(bus_type): {0-0-0:a'dev1',0-0-1:a'dev6',0-0-2:a'dev7',"
"1-0-0:a'dev2',1-0-1:a'dev5',1-1-0:a'dev3',1-1-1:a'dev4'}")
out = str(bus.str_short())
self.assertEqual(out, exp, "Short representation corrupted:\n%s\n%s"
"\n\n%s" % (out, exp, bus.str_long()))
# Incorrect records
# Used address
params = {'addr1': '0', 'addr2': '0', 'addr3': '0', 'bus': 'my_bus'}
dev = qdevice('devI1', params, parent_bus={'type': 'bus_type'})
exp = "UsedSlot"
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
# Out of range address
params = {'addr1': '0', 'addr2': '6', 'addr3': '0', 'bus': 'my_bus'}
dev = qdevice('devI2', params, parent_bus={'type': 'bus_type'})
exp = "BadAddr(False)"
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
# Incorrect bus name
params = {'bus': 'other_bus'}
dev = qdevice('devI3', params, parent_bus={'type': 'bus_type'})
exp = "BusId"
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
# Compare short repr
exp = ("my_bus(bus_type): {0-0-0:a'dev1',0-0-1:a'dev6',0-0-2:a'dev7',"
"1-0-0:a'dev2',1-0-1:a'dev5',1-1-0:a'dev3',1-1-1:a'dev4'}")
out = str(bus.str_short())
self.assertEqual(out, exp, "Short representation corrupted:\n%s\n%s"
"\n\n%s" % (out, exp, bus.str_long()))
# Compare long repr
exp = """Bus my_bus, type=bus_type
Slots:
---------------< 1-0-0 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr2 = 0
addr3 = 0
addr1 = 1
driver = dev2
---------------< 1-0-1 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr1 = 1
driver = dev5
---------------< 1-1-1 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr2 = 1
addr3 = 1
addr1 = 1
driver = dev4
---------------< 1-1-0 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr2 = 1
addr3 = 0
addr1 = 1
driver = dev3
---------------< 0-0-1 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
driver = dev6
---------------< 0-0-0 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr2 = 0
addr3 = 0
addr1 = 0
driver = dev1
---------------< 0-0-2 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
driver = dev7
"""
out = str(bus.str_long())
self.assertEqual(out, exp, "Long representation corrupted:\n%s\n%s"
% (repr(out), exp))
# Low level functions
# Get device by object
exp = dev2
out = bus.get(dev2)
self.assertEqual(out, exp, "Failed to get device from bus:\n%s\n%s"
"\n\n%s" % (out, exp, bus.str_long()))
dev2.aid = 'bad_device3'
exp = dev2
out = bus.get('bad_device3')
self.assertEqual(out, exp, "Failed to get device from bus:\n%s\n%s"
"\n\n%s" % (out, exp, bus.str_long()))
exp = None
out = bus.get('missing_bad_device')
self.assertEqual(out, exp, "Got device while expecting None:\n%s\n%s"
"\n\n%s" % (out, exp, bus.str_long()))
# Remove all devices
devs = [dev for dev in bus]
for dev in devs:
bus.remove(dev)
exp = 'Bus my_bus, type=bus_type\nSlots:\n'
out = str(bus.str_long())
self.assertEqual(out, exp, "Long representation corrupted:\n%s\n%s"
% (out, exp))
def test_q_pci_bus(self):
""" PCI bus tests """
bus = qbuses.QPCIBus('pci.0', 'pci', 'my_pci')
qdevice = qdevices.QDevice
# Good devices
params = {'addr': '0'}
dev = qdevice('dev1', params, parent_bus={'type': 'pci'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
params = {'addr': 10, 'bus': 'pci.0'}
dev = qdevice('dev2', params, parent_bus={'type': 'pci'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
params = {'addr': '0x1f'}
dev = qdevice('dev3', params, parent_bus={'type': 'pci'})
exp = []
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
# Compare short repr
exp = ("pci.0(pci): {00-00:a'dev1',0a-00:a'dev2',1f-00:a'dev3'}")
out = str(bus.str_short())
self.assertEqual(out, exp, "Short representation corrupted:\n%s\n%s"
"\n\n%s" % (out, exp, bus.str_long()))
# Incorrect records
# Used address
params = {'addr': 0}
dev = qdevice('devI1', params, parent_bus={'type': 'pci'})
exp = "UsedSlot"
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
# Out of range address
params = {'addr': '0xffff'}
dev = qdevice('devI2', params, parent_bus={'type': 'pci'})
exp = "BadAddr(False)"
out = bus.insert(dev, False)
self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
% (out, exp, dev.str_long(), bus.str_long()))
# Compare short repr
exp = ("pci.0(pci): {00-00:a'dev1',0a-00:a'dev2',1f-00:a'dev3'}")
out = str(bus.str_short())
self.assertEqual(out, exp, "Short representation corrupted:\n%s\n%s"
"\n\n%s" % (out, exp, bus.str_long()))
def test_q_pci_bus_strict(self):
""" PCI bus tests in strict_mode (enforce additional options) """
bus = qbuses.QPCIBus('pci.0', 'pci', 'my_pci')
qdevice = qdevices.QDevice
params = {}
bus.insert(qdevice('dev1', params, parent_bus={'type': 'pci'}), True)
bus.insert(qdevice('dev2', params, parent_bus={'type': 'pci'}), True)
bus.insert(qdevice('dev3', params, parent_bus={'type': 'pci'}), True)
params = {'addr': '0x1f'}
bus.insert(qdevice('dev1', params, parent_bus={'type': 'pci'}), True)
params = {'addr': 30}
bus.insert(qdevice('dev1', params, parent_bus={'type': 'pci'}), True)
params = {'addr': 12}
bus.insert(qdevice('dev1', params, parent_bus={'type': 'pci'}), True)
# All devices will have 'addr' set as we are in the strict mode
exp = """Bus pci.0, type=pci
Slots:
---------------< 1e-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
addr = 1e
driver = dev1
bus = pci.0
---------------< 02-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
driver = dev3
bus = pci.0
addr = 02
---------------< 1f-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
addr = 1f
driver = dev1
bus = pci.0
---------------< 00-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
driver = dev1
bus = pci.0
addr = 00
---------------< 0c-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
addr = 0c
driver = dev1
bus = pci.0
---------------< 01-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
driver = dev2
bus = pci.0
addr = 01
"""
out = str(bus.str_long())
self.assertEqual(out, exp, "Long representation corrupted:\n%s\n%s"
% (out, exp))
def test_usb_bus(self):
""" Tests the specific handlings of QUSBBus """
usbc1 = qbuses.QUSBBus(2, 'usb1.0', 'uhci')
# Insert device into usb controller, default port
dev = qdevices.QDevice('usb-kbd', parent_bus={'type': 'uhci'})
assert usbc1.insert(dev) == []
# Insert usb-hub into usb controller, default port
dev = qdevices.QDevice('usb-hub', parent_bus={'type': 'uhci'})
assert usbc1.insert(dev) == []
hub1 = dev.child_bus[-1]
# Insert usb-hub into usb-hub, exact port
dev = qdevices.QDevice('usb-hub', {'port': '2.4'},
parent_bus={'type': 'uhci'})
assert hub1.insert(dev) == []
hub2 = dev.child_bus[-1]
# Insert usb-hub into usb-hub in usb-hub, exact port
dev = qdevices.QDevice('usb-hub', {'port': '2.4.3'},
parent_bus={'type': 'uhci'})
assert hub2.insert(dev) == []
hub3 = dev.child_bus[-1]
# verify that port is updated correctly
self.assertEqual("2.4.3", dev.get_param("port"))
# Insert usb-device into usb-hub in usb-hub in usb-hub, exact port
dev = qdevices.QDevice('usb-kbd', {'port': '2.4.3.1'},
parent_bus={'type': 'uhci'})
assert hub3.insert(dev) == []
# Insert usb-device into usb-hub in usb-hub in usb-hub, default port
dev = qdevices.QDevice('usb-kbd', parent_bus={'type': 'uhci'})
assert hub3.insert(dev) == []
# Try to insert device into specific port which belongs to inferior bus
out = hub2.insert(qdevices.QDevice('usb-kbd',
{'port': '2.4.3.3'},
parent_bus={'type': 'uhci'}))
assert out == "BusId"
# Try to insert device into specific port which belongs to superior bus
out = hub2.insert(qdevices.QDevice('usb-kbd', {'port': '2.4'},
parent_bus={'type': 'uhci'}))
assert out == "BusId"
# Try to insert device into specific port which belongs to same level
# but different port
out = hub2.insert(qdevices.QDevice('usb-kbd', {'port': '2.3.4'},
parent_bus={'type': 'uhci'}))
assert out == "BusId"
# Force insert device with port which belongs to other hub
dev = qdevices.QDevice('usb-hub', {'port': '2.4.3.4'},
parent_bus={'type': 'uhci'})
# Check the overall buses correctness
self.assertEqual("usb1.0(uhci): {1:a'usb-kbd',2:a'usb-hub'}",
usbc1.str_short())
self.assertEqual("usb1.0(uhci): {4:a'usb-hub'}",
hub1.str_short())
self.assertEqual("usb1.0(uhci): {3:a'usb-hub'}",
hub2.str_short())
self.assertEqual("usb1.0(uhci): {1:a'usb-kbd',2:a'usb-kbd'}",
hub3.str_short())
class Container(unittest.TestCase):
""" Tests related to the abstract representation of qemu machine """
def setUp(self):
self.god = mock.mock_god(ut=self)
self.god.stub_function(qcontainer.utils, "system_output")
def tearDown(self):
self.god.unstub_all()
def create_qdev(self, vm_name='vm1', strict_mode="no",
allow_hotplugged_vm="yes"):
""" :return: Initialized qcontainer.DevContainer object """
qemu_cmd = '/usr/bin/qemu_kvm'
qcontainer.utils.system_output.expect_call('%s -help' % qemu_cmd,
timeout=10, ignore_status=True
).and_return(QEMU_HELP)
qcontainer.utils.system_output.expect_call("%s -device ? 2>&1"
% qemu_cmd, timeout=10,
ignore_status=True
).and_return(QEMU_DEVICES)
qcontainer.utils.system_output.expect_call("%s -M ?" % qemu_cmd,
timeout=10, ignore_status=True
).and_return(QEMU_MACHINE)
cmd = "echo -e 'help\nquit' | %s -monitor stdio -vnc none" % qemu_cmd
qcontainer.utils.system_output.expect_call(cmd, timeout=10,
ignore_status=True
).and_return(QEMU_HMP)
cmd = ('echo -e \'{ "execute": "qmp_capabilities" }\n'
'{ "execute": "query-commands", "id": "RAND91" }\n'
'{ "execute": "quit" }\''
'| %s -qmp stdio -vnc none | grep return |'
' grep RAND91' % qemu_cmd)
qcontainer.utils.system_output.expect_call(cmd, timeout=10,
ignore_status=True
).and_return('')
cmd = ('echo -e \'{ "execute": "qmp_capabilities" }\n'
'{ "execute": "query-commands", "id": "RAND91" }\n'
'{ "execute": "quit" }\' | (sleep 1; cat )'
'| %s -qmp stdio -vnc none | grep return |'
' grep RAND91' % qemu_cmd)
qcontainer.utils.system_output.expect_call(cmd, timeout=10,
ignore_status=True
).and_return(QEMU_QMP)
qdev = qcontainer.DevContainer(qemu_cmd, vm_name, strict_mode, 'no',
allow_hotplugged_vm)
self.god.check_playback()
return qdev
def test_qdev_functional(self):
""" Test basic qdev workflow """
qdev = self.create_qdev('vm1')
# Add basic 'pc' devices
out = qdev.insert(qdev.machine_by_params(ParamsDict({'machine_type':
'pc'})))
assert isinstance(out, list)
assert len(out) == 6, len(out)
exp = r"""Devices of vm1:
machine
aid = __0
aobject = pci.0
parent_bus = ()
child_bus = \[.*QPCIBus.*, .*QStrictCustomBus.*\]
params:
i440FX
aid = __1
aobject = None
parent_bus = ({'aobject': 'pci.0'},)
child_bus = \[\]
params:
driver = i440FX
addr = 00
bus = pci.0
PIIX4_PM
aid = __2
aobject = None
parent_bus = ({'aobject': 'pci.0'},)
child_bus = \[\]
params:
driver = PIIX4_PM
addr = 01.3
bus = pci.0
PIIX3
aid = __3
aobject = None
parent_bus = ({'aobject': 'pci.0'},)
child_bus = \[\]
params:
driver = PIIX3
addr = 01
bus = pci.0
piix3-ide
aid = __4
aobject = None
parent_bus = ({'aobject': 'pci.0'},)
child_bus = \[.*QIDEBus.*\]
params:
driver = piix3-ide
addr = 01.1
bus = pci.0
fdc
aid = __5
aobject = None
parent_bus = \(\)
child_bus = \[.*QFloppyBus.*\]
params:"""
out = qdev.str_long()
self.assertNotEqual(re.findall(exp, out), None, 'Long representation is'
                            ' corrupted:\n%s\n%s' % (out, exp))
exp = ("Buses of vm1\n"
" floppy(floppy): [None,None]\n"
" ide(ide): [None,None,None,None]\n"
" _PCI_CHASSIS_NR(None): {}\n"
" _PCI_CHASSIS(None): {}\n"
" pci.0(PCI): {00-00:t'i440FX',01-00:t'PIIX3',"
"01-01:t'piix3-ide',01-03:t'PIIX4_PM'}")
out = qdev.str_bus_short()
        assert out == exp, "Bus representation is corrupted:\n%s\n%s" % (out,
exp)
# Insert some good devices
qdevice = qdevices.QDevice
# Device with child bus
bus = qbuses.QSparseBus('bus', [['addr'], [6]], 'hba1.0', 'hba',
'a_hba')
dev = qdevice('HBA', {'id': 'hba1', 'addr': 10},
parent_bus={'aobject': 'pci.0'}, child_bus=bus)
out = qdev.insert(dev)
assert isinstance(out, list), out
assert len(out) == 1, len(out)
# Device inside a child bus by type (most common)
dev = qdevice('dev', {}, parent_bus={'type': 'hba'})
out = qdev.insert(dev)
assert isinstance(out, list), out
assert len(out) == 1, len(out)
# Device inside a child bus by autotest_id
dev = qdevice('dev', {}, 'autotest_remove', {'aobject': 'a_hba'})
out = qdev.insert(dev)
assert isinstance(out, list), out
assert len(out) == 1, len(out)
# Device inside a child bus by busid
dev = qdevice('dev', {}, 'autoremove', {'busid': 'hba1.0'})
out = qdev.insert(dev)
assert isinstance(out, list), out
assert len(out) == 1, len(out)
# Check the representation
exp = ("Devices of vm1: [t'machine',t'i440FX',t'PIIX4_PM',t'PIIX3',"
"t'piix3-ide',t'fdc',hba1,a'dev',a'dev',a'dev']")
out = qdev.str_short()
self.assertEqual(out, exp, "Short representation is corrupted:\n%s\n%s"
% (out, exp))
exp = ("Buses of vm1\n"
" hba1.0(hba): {0:a'dev',1:a'dev',2:a'dev'}\n"
" floppy(floppy): [None,None]\n"
" ide(ide): [None,None,None,None]\n"
" _PCI_CHASSIS_NR(None): {}\n"
" _PCI_CHASSIS(None): {}\n"
" pci.0(PCI): {00-00:t'i440FX',01-00:t'PIIX3',"
"01-01:t'piix3-ide',01-03:t'PIIX4_PM',0a-00:hba1}")
out = qdev.str_bus_short()
        assert out == exp, 'Bus representation is corrupted:\n%s\n%s' % (out,
exp)
# Check the representation
exp = ("Devices of vm1: [t'machine',t'i440FX',t'PIIX4_PM',t'PIIX3',"
"t'piix3-ide',t'fdc',hba1,a'dev',a'dev',a'dev']")
out = qdev.str_short()
assert out == exp, "Short representation is corrupted:\n%s\n%s" % (out,
exp)
exp = ("Buses of vm1\n"
" hba1.0(hba): {0:a'dev',1:a'dev',2:a'dev'}\n"
" floppy(floppy): [None,None]\n"
" ide(ide): [None,None,None,None]\n"
" _PCI_CHASSIS_NR(None): {}\n"
" _PCI_CHASSIS(None): {}\n"
" pci.0(PCI): {00-00:t'i440FX',01-00:t'PIIX3',"
"01-01:t'piix3-ide',01-03:t'PIIX4_PM',0a-00:hba1}")
out = qdev.str_bus_short()
assert out == exp, 'Bus representation is corrupted:\n%s\n%s' % (out,
exp)
# Now representation contains some devices, play with it a bit
# length
out = len(qdev)
assert out == 10, "Length of qdev is incorrect: %s != %s" % (out, 10)
# compare
qdev2 = self.create_qdev('vm1')
self.assertNotEqual(qdev, qdev2, "This qdev matches empty one:"
"\n%s\n%s" % (qdev, qdev2))
self.assertNotEqual(qdev2, qdev, "Empty qdev matches current one:"
"\n%s\n%s" % (qdev, qdev2))
for _ in xrange(10):
qdev2.insert(qdevice())
self.assertNotEqual(qdev, qdev2, "This qdev matches different one:"
"\n%s\n%s" % (qdev, qdev2))
self.assertNotEqual(qdev2, qdev, "Other qdev matches this one:\n%s\n%s"
% (qdev, qdev2))
# cmdline
exp = ("-M pc -device HBA,id=hba1,addr=0a,bus=pci.0 -device dev "
"-device dev -device dev")
out = qdev.cmdline()
self.assertEqual(out, exp, 'Corrupted qdev.cmdline() output:\n%s\n%s'
% (out, exp))
# get_by_qid (currently we have 2 devices of the same qid)
out = qdev.get_by_qid('hba1')
self.assertEqual(len(out), 1, 'Incorrect number of devices by qid '
'"hba1": %s != 1\n%s' % (len(out), qdev.str_long()))
# Remove some devices
# Remove based on aid
out = qdev.remove('__6')
self.assertEqual(out, None, 'Failed to remove device:\n%s\nRepr:\n%s'
% ('hba1__0', qdev.str_long()))
# Remove device which contains other devices (without recursive)
self.assertRaises(qcontainer.DeviceRemoveError, qdev.remove, 'hba1',
False)
# Remove device which contains other devices (recursive)
out = qdev.remove('hba1')
self.assertEqual(out, None, 'Failed to remove device:\n%s\nRepr:\n%s'
% ('hba1', qdev.str_long()))
# Check the representation
exp = ("Devices of vm1: [t'machine',t'i440FX',t'PIIX4_PM',t'PIIX3',"
"t'piix3-ide',t'fdc']")
out = qdev.str_short()
assert out == exp, "Short representation is corrupted:\n%s\n%s" % (out,
exp)
exp = ("Buses of vm1\n"
" floppy(floppy): [None,None]\n"
" ide(ide): [None,None,None,None]\n"
" _PCI_CHASSIS_NR(None): {}\n"
" _PCI_CHASSIS(None): {}\n"
" pci.0(PCI): {00-00:t'i440FX',01-00:t'PIIX3',"
"01-01:t'piix3-ide',01-03:t'PIIX4_PM'}")
out = qdev.str_bus_short()
assert out == exp, 'Bus representation is corrupted:\n%s\n%s' % (out,
exp)
def test_qdev_hotplug(self):
""" Test the hotplug/unplug functionality """
qdev = self.create_qdev('vm1', False, True)
devs = qdev.machine_by_params(ParamsDict({'machine_type': 'pc'}))
for dev in devs:
qdev.insert(dev)
monitor = MockHMPMonitor()
out = qdev.get_state()
assert out == -1, ("Status after init is not -1"
" (%s)" % out)
out = len(qdev)
        assert out == 6, "Number of devices of this VM is not 6 (%s)" % out
dev1, dev2 = qdev.images_define_by_variables('disk', '/tmp/a',
fmt="virtio")
out = dev1.hotplug_hmp()
exp = "drive_add auto id=drive_disk,if=none,file=/tmp/a"
assert out == exp, ("Hotplug command of drive is incorrect:\n%s\n%s"
% (exp, out))
# hotplug of drive will return " OK" (pass)
dev1.hotplug = lambda _monitor: "OK"
dev1.verify_hotplug = lambda _out, _monitor: True
out, ver_out = qdev.simple_hotplug(dev1, monitor)
assert out == "OK", "Return value of hotplug is not OK (%s)" % out
assert ver_out is True, ("Return value of hotplug"
" is not True (%s)" % ver_out)
out = qdev.get_state()
assert out == 0, ("Status after verified hotplug is not 0 (%s)" % out)
# hotplug of virtio-blk-pci will return ""
out = dev2.hotplug_hmp()
exp = "device_add virtio-blk-pci,id=disk,drive=drive_disk"
assert out == exp, ("Hotplug command of device is incorrect:\n%s\n%s"
% (exp, out))
dev2.hotplug = lambda _monitor: ""
dev2.verify_hotplug = lambda _out, _monitor: ""
out, ver_out = qdev.simple_hotplug(dev2, monitor)
# automatic verification is not supported, hotplug returns the original
# monitor message ("")
assert ver_out == "", ("Return value of hotplug is"
" not "" (%s)" % ver_out)
assert out == "", 'Return value of hotplug is not "" (%s)' % out
out = qdev.get_state()
assert out == 1, ("Status after verified hotplug is not 1 (%s)" % out)
qdev.hotplug_verified()
out = qdev.get_state()
assert out == 0, ("Status after verified hotplug is not 0 (%s)" % out)
out = len(qdev)
assert out == 8, "Number of devices of this VM is not 8 (%s)" % out
# Hotplug is expected to pass but monitor reports failure
dev3 = qdevices.QDrive('a_dev1')
dev3.hotplug = lambda _monitor: ("could not open disk image /tmp/qqq: "
"No such file or directory")
out, ver_out = qdev.simple_hotplug(dev3, monitor)
exp = "could not open disk image /tmp/qqq: No such file or directory"
assert out, "Return value of hotplug is incorrect:\n%s\n%s" % (out,
exp)
out = qdev.get_state()
assert out == 1, ("Status after failed hotplug is not 1 (%s)" % out)
# device is still in qdev, but is not in qemu, we should remove it
qdev.remove(dev3, recursive=False)
out = qdev.get_state()
assert out == 1, ("Status after verified hotplug is not 1 (%s)" % out)
qdev.hotplug_verified()
out = qdev.get_state()
assert out == 0, ("Status after verified hotplug is not 0 (%s)" % out)
# Hotplug is expected to fail, qdev should stay unaffected
dev4 = qdevices.QBaseDevice("bad_dev", parent_bus={'type': "XXX"})
dev4.hotplug = lambda _monitor: ("")
self.assertRaises(qcontainer.DeviceHotplugError, qdev.simple_hotplug,
dev4, True)
out = qdev.get_state()
assert out == 0, "Status after impossible hotplug is not 0 (%s)" % out
# Unplug
# Unplug used drive (automatic verification not supported)
out = dev1.unplug_hmp()
exp = "drive_del drive_disk"
assert out == exp, ("Hotplug command of device is incorrect:\n%s\n%s"
% (exp, out))
dev1.unplug = lambda _monitor: ""
dev1.verify_unplug = lambda _monitor, _out: ""
out, ver_out = qdev.simple_unplug(dev1, monitor)
# I verified, that device was unplugged successfully
qdev.hotplug_verified()
out = qdev.get_state()
assert out == 0, ("Status after verified hotplug is not 0 (%s)" % out)
out = len(qdev)
assert out == 7, "Number of devices of this VM is not 7 (%s)" % out
# Removal of drive should also set drive of the disk device to None
out = dev2.get_param('drive')
assert out is None, "Drive was not removed from disk device"
# pylint: disable=W0212
def test_qdev_low_level(self):
""" Test low level functions """
qdev = self.create_qdev('vm1')
# Representation state (used for hotplug or other nasty things)
out = qdev.get_state()
        assert out == -1, "qdev state is incorrect %s != %s" % (out, -1)
qdev.set_dirty()
out = qdev.get_state()
self.assertEqual(out, 1, "qdev state is incorrect %s != %s" % (out, 1))
qdev.set_dirty()
out = qdev.get_state()
        self.assertEqual(out, 2, "qdev state is incorrect %s != %s" % (out, 2))
qdev.set_clean()
out = qdev.get_state()
self.assertEqual(out, 1, "qdev state is incorrect %s != %s" % (out, 1))
qdev.set_clean()
out = qdev.get_state()
        self.assertEqual(out, 0, "qdev state is incorrect %s != %s" % (out, 0))
qdev.reset_state()
out = qdev.get_state()
        assert out == -1, "qdev state is incorrect %s != %s" % (out, -1)
# __create_unique_aid
dev = qdevices.QDevice()
qdev.insert(dev)
out = dev.get_aid()
self.assertEqual(out, '__0', "incorrect aid %s != %s" % (out, '__0'))
dev = qdevices.QDevice(None, {'id': 'qid'})
qdev.insert(dev)
out = dev.get_aid()
self.assertEqual(out, 'qid', "incorrect aid %s != %s" % (out, 'qid'))
# has_option
out = qdev.has_option('device')
self.assertEqual(out, True)
out = qdev.has_option('missing_option')
self.assertEqual(out, False)
# has_device
out = qdev.has_device('ide-drive')
self.assertEqual(out, True)
out = qdev.has_device('missing_device')
self.assertEqual(out, False)
# get_help_text
out = qdev.get_help_text()
self.assertEqual(out, QEMU_HELP)
# has_hmp_cmd
self.assertTrue(qdev.has_hmp_cmd('pcie_aer_inject_error'))
self.assertTrue(qdev.has_hmp_cmd('c'))
self.assertTrue(qdev.has_hmp_cmd('cont'))
self.assertFalse(qdev.has_hmp_cmd('off'))
self.assertFalse(qdev.has_hmp_cmd('\ndump-guest-memory'))
self.assertFalse(qdev.has_hmp_cmd('The'))
# has_qmp_cmd
self.assertTrue(qdev.has_qmp_cmd('device_add'))
self.assertFalse(qdev.has_qmp_cmd('RAND91'))
# Add some buses
bus1 = qbuses.QPCIBus('pci.0', 'pci', 'a_pci0')
qdev.insert(qdevices.QDevice(params={'id': 'pci0'},
child_bus=bus1))
bus2 = qbuses.QPCIBus('pci.1', 'pci', 'a_pci1')
qdev.insert(qdevices.QDevice(child_bus=bus2))
bus3 = qbuses.QPCIBus('pci.2', 'pci', 'a_pci2')
qdev.insert(qdevices.QDevice(child_bus=bus3))
bus4 = qbuses.QPCIBus('pcie.0', 'pcie', 'a_pcie0')
qdev.insert(qdevices.QDevice(child_bus=bus4))
# get_buses (all buses of this type)
out = qdev.get_buses({'type': 'pci'})
self.assertEqual(len(out), 3, 'get_buses should return 3 buses but '
'returned %s instead:\n%s' % (len(out), out))
# get_first_free_bus (last added bus of this type)
out = qdev.get_first_free_bus({'type': 'pci'}, [None, None])
self.assertEqual(bus3, out)
# fill the first pci bus
for _ in xrange(32):
qdev.insert(qdevices.QDevice(parent_bus={'type': 'pci'}))
# get_first_free_bus (last one is full, return the previous one)
out = qdev.get_first_free_bus({'type': 'pci'}, [None, None])
self.assertEqual(bus2, out)
# list_named_buses
out = qdev.list_missing_named_buses('pci.', 'pci', 5)
self.assertEqual(len(out), 2, 'Number of missing named buses is '
'incorrect: %s != %s\n%s' % (len(out), 2, out))
out = qdev.list_missing_named_buses('pci.', 'abc', 5)
self.assertEqual(len(out), 5, 'Number of missing named buses is '
                         'incorrect: %s != %s\n%s' % (len(out), 5, out))
# idx_of_next_named_bus
out = qdev.idx_of_next_named_bus('pci.')
self.assertEqual(out, 3, 'Incorrect idx of next named bus: %s !='
' %s' % (out, 3))
# get_children
dev = qdevices.QDevice(parent_bus={'aobject': 'a_pci0'})
bus = qbuses.QPCIBus('test1', 'test', 'a_test1')
dev.add_child_bus(bus)
bus = qbuses.QPCIBus('test2', 'test', 'a_test2')
dev.add_child_bus(bus)
qdev.insert(dev)
qdev.insert(qdevices.QDevice(parent_bus={'aobject': 'a_test1'}))
qdev.insert(qdevices.QDevice(parent_bus={'aobject': 'a_test2'}))
out = dev.get_children()
assert len(out) == 2, ("Not all children were listed %d != 2:\n%s"
% (len(out), out))
out = bus.get_device()
assert out == dev, ("bus.get_device() returned different device "
"than the one in which it was plugged:\n"
"%s\n%s\n%s" % (out.str_long(), dev.str_long(),
qdev.str_long()))
# pylint: enable=W0212
def test_qdev_equal(self):
qdev1 = self.create_qdev('vm1', allow_hotplugged_vm='no')
qdev2 = self.create_qdev('vm1', allow_hotplugged_vm='no')
qdev3 = self.create_qdev('vm1', allow_hotplugged_vm='yes')
monitor = MockHMPMonitor()
assert qdev1 == qdev2, ("Init qdevs are not alike\n%s\n%s"
% (qdev1.str_long(), qdev2.str_long()))
# Insert a device to qdev1
dev = qdevices.QDevice('dev1', {'id': 'dev1'})
qdev1.insert(dev)
assert qdev1 != qdev2, ("Different qdevs match:\n%s\n%s"
% (qdev1.str_long(), qdev2.str_long()))
# Insert similar device to qdev2
dev = qdevices.QDevice('dev1', {'id': 'dev1'})
qdev2.insert(dev)
assert qdev1 == qdev2, ("Similar qdevs are not alike\n%s\n%s"
% (qdev1.str_long(), qdev2.str_long()))
# Hotplug similar device to qdev3
dev = qdevices.QDevice('dev1', {'id': 'dev1'})
dev.hotplug = lambda _monitor: "" # override the hotplug method
dev.verify_hotplug = lambda _out, _monitor: True
qdev3.simple_hotplug(dev, monitor)
assert qdev1 == qdev3, ("Similar hotplugged qdevs are not alike\n%s\n"
"%s" % (qdev1.str_long(), qdev2.str_long()))
# Eq. is not symmetrical, qdev1 doesn't allow hotplugged VMs.
assert qdev3 != qdev1, ("Similar hotplugged qdevs match even thought "
"qdev1 doesn't allow hotplugged VM\n%s\n%s"
% (qdev1.str_long(), qdev2.str_long()))
qdev2.__qemu_help = "I support only this :-)" # pylint: disable=W0212
assert qdev1 == qdev2, ("qdevs of different qemu versions match:\n%s\n"
"%s" % (qdev1.str_long(), qdev2.str_long()))
def test_pci(self):
qdev = self.create_qdev('vm1')
devs = qdev.machine_by_params(ParamsDict({'machine_type': 'pc'}))
for dev in devs:
qdev.insert(dev)
# machine creates main pci (pci.0)
# buses root.1 pci_switch pci_bridge
# root.1: ioh3420(pci.0)
# pci_switch: x3130(root.1)
# pci_bridge: pci-bridge(root.1)
devs = qdev.pcic_by_params('root.1', {'pci_bus': 'pci.0',
'type': 'ioh3420'})
qdev.insert(devs)
devs = qdev.pcic_by_params('pci_switch', {'pci_bus': 'root.1',
'type': 'x3130'})
qdev.insert(devs)
devs = qdev.pcic_by_params('pci_bridge', {'pci_bus': 'root.1',
'type': 'pci-bridge'})
qdev.insert(devs)
qdev.insert(qdevices.QDevice("ahci", {'id': 'in_bridge'},
parent_bus={'type': ('PCI', 'PCIE'),
'aobject': 'pci_bridge'}))
qdev.insert(qdevices.QDevice("ahci", {'id': 'in_switch1'},
parent_bus={'type': ('PCI', 'PCIE'),
'aobject': 'pci_switch'}))
qdev.insert(qdevices.QDevice("ahci", {'id': 'in_switch2'},
parent_bus={'type': ('PCI', 'PCIE'),
'aobject': 'pci_switch'}))
qdev.insert(qdevices.QDevice("ahci", {'id': 'in_switch3'},
parent_bus={'type': ('PCI', 'PCIE'),
'aobject': 'pci_switch'}))
qdev.insert(qdevices.QDevice("ahci", {'id': 'in_root1'},
parent_bus={'type': ('PCI', 'PCIE'),
'aobject': 'root.1'}))
qdev.insert(qdevices.QDevice("ahci", {'id': 'in_pci.0'},
parent_bus={'type': ('PCI', 'PCIE'),
'aobject': 'pci.0'}))
exp = ("-M pc -device ioh3420,id=root.1,bus=pci.0,addr=02 "
"-device x3130-upstream,id=pci_switch,bus=root.1,addr=00 "
"-device pci-bridge,id=pci_bridge,bus=root.1,addr=01,"
"chassis_nr=1 -device ahci,id=in_bridge,bus=pci_bridge,addr=01"
" -device xio3130-downstream,bus=pci_switch,id=pci_switch.0,"
"addr=00,chassis=1 -device ahci,id=in_switch1,bus=pci_switch.0"
",addr=00 "
"-device xio3130-downstream,bus=pci_switch,id=pci_switch.1,"
"addr=01,chassis=2 -device ahci,id=in_switch2,bus=pci_switch.1"
",addr=00 "
"-device xio3130-downstream,bus=pci_switch,id=pci_switch.2,"
"addr=02,chassis=3 -device ahci,id=in_switch3,bus=pci_switch.2"
",addr=00 "
"-device ahci,id=in_root1,bus=root.1,addr=02 "
"-device ahci,id=in_pci.0,bus=pci.0,addr=03")
out = qdev.cmdline()
assert out == exp, (out, exp)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -3,187,964,355,943,792,600 | 38.716169 | 81 | 0.496692 | false |
0Chencc/CTFCrackTools | Lib/site-packages/setuptools/sandbox.py | 51 | 14324 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
from setuptools.extern import six
from setuptools.extern.six.moves import builtins, map
import pkg_resources
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
# compile() function in Python 2.6 and 3.1 requires LF line endings.
if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2):
script = script.replace(b'\r\n', b'\n')
script = script.replace(b'\r', b'\n')
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
if not os.path.isdir(replacement):
os.makedirs(replacement)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@staticmethod
def dump(type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
# get UnpickleableException inside the sandbox
from setuptools.sandbox import UnpickleableException as cls
return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
six.reraise(type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
hide_setuptools()
with save_path():
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
pattern = re.compile('(setuptools|pkg_resources|distutils|Cython)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script] + list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
def runner():
ns = dict(__file__=setup_script, __name__='__main__')
_execfile(setup_script, ns)
DirectorySandbox(setup_dir).run(runner)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
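        # collect every public name of the os module that this sandbox also
        # implements; run() temporarily copies these onto the real os module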
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self, name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source, name))
def run(self, func):
"""Run 'func' under os sandboxing"""
try:
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
return func()
finally:
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def _mk_dual_path_wrapper(name):
original = getattr(_os, name)
def wrap(self, src, dst, *args, **kw):
if self._active:
src, dst = self._remap_pair(name, src, dst, *args, **kw)
return original(src, dst, *args, **kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os, name):
locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return original(path, *args, **kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os, name):
locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return self._remap_output(name, original(path, *args, **kw))
return original(path, *args, **kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os, name):
locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os, name)
def wrap(self, *args, **kw):
retval = original(*args, **kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os, name):
locals()[name] = _mk_query(name)
def _validate_path(self, path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self, operation, path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull,]
else:
_EXCEPTIONS = []
try:
from win32com.client.gencache import GetGeneratePath
_EXCEPTIONS.append(GetGeneratePath())
del GetGeneratePath
except ImportError:
# it appears pywin32 is not installed, so no need to exclude.
pass
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox, '')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
from setuptools.sandbox import SandboxViolation
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path, mode, *args, **kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path, mode, *args, **kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src, dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
def __str__(self):
return """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % self.args
#
| gpl-3.0 | 3,123,334,706,033,168,400 | 28.113821 | 105 | 0.587825 | false |
rfcx/defunct | sound-localization/localization/gaussian.py | 1 | 4871 | import numpy as np
from signal_likelihood import SignalLikelihood
import unittest
from numpy.testing import assert_array_almost_equal,assert_almost_equal, assert_equal
import math
"""
Models the ambient audio scenery with multiple, independent
Gaussian distributions. Based on that model we can distinguish
between the ambient sounds and sounds that are
unlikely to occur naturally.
This model requires the assumption that the amplitudes
of frequencies are independent. Most likely we will need
to use a model that allows for correlations (multivariate
gaussian). For now, this is the simplest solution to the
problem.
Under the assumption of independence, we model each frequency
amplitude with a gaussian. We just need to save the mean
and variance of each frequency amplitude independently.
To test a signal, we calculate the probability of each of the
tested signal's frequency amplitudes. Their product (independence)
will be our measure of the overall probability of the signal
being ambient noise.
"""
class Gaussian(SignalLikelihood):
def __init__(self):
self.mean = None
self.var = None
self.sumSquareDif = None
self.n = 0
def train(self, features):
"""
Updates the mean and variance of the gaussian model capturing the
ambient sound scenery.
"""
if self.mean is None:
# no previous mean or variance exist
self.mean = features
# we need a zero vector with the size of the feature vector
self.sumSquareDif = np.zeros_like(features)
self.var = np.zeros_like(features)
self.n = 1
else:
# previous mean is old_sum / old_n => new_sum = (old_sum * old_n) + new values
old_mean = self.mean
old_sum = old_mean * self.n
new_sum = old_sum + features
self.n = self.n + 1
self.mean = new_sum / self.n
            # vectorized adaptation of Knuth's online variance algorithm
# the original algorithm can be found here:
# Donald E. Knuth (1998). The Art of Computer Programming, volume 2:
# Seminumerical Algorithms, 3rd edn., p. 232. Boston: Addison-Wesley.
# update sum of square differences
self.sumSquareDif = self.sumSquareDif + (features - old_mean) * (features - self.mean)
# update variance
self.var = self.sumSquareDif / (self.n - 1)
def calculate_prob(self, features):
"""
Calculates the probability that the signal described by the
features is an ambient sound.
"""
if np.any(self.var == 0):
return 0
# this is a vectorized version of the pdf of a normal distribution for each frequency amplitude
# it returns one probability for each of the signal's frequency amplitudes
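        # note that self.var is plugged in where the standard deviation (sigma)
        # of the normal pdf would normally appear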
probs = np.exp(-(features-self.mean)**2/(2.*self.var**2)) / (math.sqrt(math.pi * 2.) * self.var)
        # simplification: assumption of independent frequencies => product
return np.prod(probs)
class GaussianTests(unittest.TestCase):
def train(self, data):
gaussian = Gaussian()
for datum in data:
gaussian.train(datum)
return gaussian
def checkMean(self, data, expectedMean):
gaussian = self.train(data)
assert_almost_equal(gaussian.mean, expectedMean)
def checkVariance(self, data, exptectedVar):
gaussian = self.train(data)
assert_almost_equal(gaussian.var, exptectedVar)
def test_mean_for_one_feature(self):
data = [np.array([0.]), np.array([6.]), np.array([10.]), np.array([8.])]
expectedMean = np.array([6.])
self.checkMean(data, expectedMean)
def test_mean_for_multiple_features(self):
data = [np.array([0., 3.]), np.array([6., 8.]), np.array([10., 4.]), np.array([8., 7.])]
expectedMean = np.array([6., 5.5])
self.checkMean(data, expectedMean)
def test_variance_for_one_feature(self):
data = [np.array([1.]), np.array([0.]), np.array([2.]), np.array([1.]), np.array([0.])]
expectedVariance = np.array([0.7])
self.checkVariance(data, expectedVariance)
    def test_variance_for_multiple_features(self):
data = [np.array([1., 0.]), np.array([0., 2.]), np.array([2., 1.]), np.array([1., 0.]), np.array([0., 1.])]
expectedVariance = np.array([0.7, 0.7])
self.checkVariance(data, expectedVariance)
def test_probability_calculation(self):
gaussian = Gaussian()
gaussian.mean = np.array([5., 3.])
gaussian.var = np.array([2., 1.])
x = np.array([4.,4.])
expected = 0.0426
actual = gaussian.calculate_prob(x)
assert_almost_equal(actual,expected, decimal=4)
| apache-2.0 | -6,261,069,175,828,290,000 | 35.088889 | 115 | 0.624307 | false |
yehzhang/RapidTest | rapidtest/executors/common_executors.py | 1 | 1701 | from .outputs import ExecutionOutput
from .._compat import is_sequence, PRIMITIVE_TYPES as P_TYPES
from ..utils import identity
class BaseExecutor(object):
ENVIRONMENT = None
PRIMITIVE_TYPES = P_TYPES
def __init__(self, target):
self.target = target
self.initialize()
def initialize(self, post_proc=None, in_place_selector=None):
self.in_place_selector = in_place_selector
self.post_proc = post_proc or identity
def execute(self, operations):
"""
:param Operations operations:
:return ExecutionOutput:
"""
operations.initialize(self)
return ExecutionOutput(self.execute_operations(operations))
def execute_operations(self, operations):
"""
:param Operations operations:
:return Iterable[OperationOutput]:
"""
raise NotImplementedError
def finalize_operation(self, operation, output):
"""
:param Operation operation:
:param Any output: returned value from the operation
:return OperationOutput:
"""
if self.in_place_selector:
output = self.in_place_selector(output)
output = self.normalize_raw(output)
return operation.as_output(output)
def normalize_raw(self, val):
return self.post_proc(self._normalize_type(val))
@classmethod
def _normalize_type(cls, val):
if isinstance(val, cls.PRIMITIVE_TYPES) or val is None:
pass
elif is_sequence(val):
val = [cls._normalize_type(o) for o in val]
else:
raise RuntimeError('type of return value {} is invalid'.format(type(val)))
return val
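# Illustrative sketch of the normalization step only; BaseExecutor.execute_operations
# is abstract, so a bare instance is used here just to show normalize_raw/post_proc.
# The post-processor below is a made-up example (and assumes ints count as primitive
# values in this package), not part of the library itself.
def _example_normalize_raw():
    executor = BaseExecutor(target=None)
    # sort any top-level list result before it is compared against the expected output
    executor.initialize(post_proc=lambda v: sorted(v) if isinstance(v, list) else v)
    return executor.normalize_raw([3, 1, 2])  # -> [1, 2, 3]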
| mit | -4,809,565,232,686,919,000 | 29.375 | 86 | 0.627866 | false |
toshywoshy/ansible | lib/ansible/module_utils/hwc_utils.py | 13 | 12696 | # Copyright (c), Google Inc, 2017
# Simplified BSD License (see licenses/simplified_bsd.txt or
# https://opensource.org/licenses/BSD-2-Clause)
import re
import time
import traceback
THIRD_LIBRARIES_IMP_ERR = None
try:
from keystoneauth1.adapter import Adapter
from keystoneauth1.identity import v3
from keystoneauth1 import session
HAS_THIRD_LIBRARIES = True
except ImportError:
THIRD_LIBRARIES_IMP_ERR = traceback.format_exc()
HAS_THIRD_LIBRARIES = False
from ansible.module_utils.basic import (AnsibleModule, env_fallback,
missing_required_lib)
from ansible.module_utils._text import to_text
class HwcModuleException(Exception):
def __init__(self, message):
super(HwcModuleException, self).__init__()
self._message = message
def __str__(self):
return "[HwcClientException] message=%s" % self._message
class HwcClientException(Exception):
def __init__(self, code, message):
super(HwcClientException, self).__init__()
self._code = code
self._message = message
def __str__(self):
msg = " code=%s," % str(self._code) if self._code != 0 else ""
return "[HwcClientException]%s message=%s" % (
msg, self._message)
class HwcClientException404(HwcClientException):
def __init__(self, message):
super(HwcClientException404, self).__init__(404, message)
def __str__(self):
return "[HwcClientException404] message=%s" % self._message
def session_method_wrapper(f):
def _wrap(self, url, *args, **kwargs):
try:
url = self.endpoint + url
r = f(self, url, *args, **kwargs)
except Exception as ex:
raise HwcClientException(
0, "Sending request failed, error=%s" % ex)
result = None
if r.content:
try:
result = r.json()
except Exception as ex:
raise HwcClientException(
0, "Parsing response to json failed, error: %s" % ex)
code = r.status_code
if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]:
msg = ""
for i in ['message', 'error.message']:
try:
msg = navigate_value(result, i)
break
except Exception:
pass
else:
msg = str(result)
if code == 404:
raise HwcClientException404(msg)
raise HwcClientException(code, msg)
return result
return _wrap
class _ServiceClient(object):
def __init__(self, client, endpoint, product):
self._client = client
self._endpoint = endpoint
self._default_header = {
'User-Agent': "Huawei-Ansible-MM-%s" % product,
'Accept': 'application/json',
}
@property
def endpoint(self):
return self._endpoint
@endpoint.setter
def endpoint(self, e):
self._endpoint = e
@session_method_wrapper
def get(self, url, body=None, header=None, timeout=None):
return self._client.get(url, json=body, timeout=timeout,
headers=self._header(header))
@session_method_wrapper
def post(self, url, body=None, header=None, timeout=None):
return self._client.post(url, json=body, timeout=timeout,
headers=self._header(header))
@session_method_wrapper
def delete(self, url, body=None, header=None, timeout=None):
return self._client.delete(url, json=body, timeout=timeout,
headers=self._header(header))
@session_method_wrapper
def put(self, url, body=None, header=None, timeout=None):
return self._client.put(url, json=body, timeout=timeout,
headers=self._header(header))
def _header(self, header):
if header and isinstance(header, dict):
for k, v in self._default_header.items():
if k not in header:
header[k] = v
else:
header = self._default_header
return header
class Config(object):
def __init__(self, module, product):
self._project_client = None
self._domain_client = None
self._module = module
self._product = product
self._endpoints = {}
self._validate()
self._gen_provider_client()
@property
def module(self):
return self._module
def client(self, region, service_type, service_level):
c = self._project_client
if service_level == "domain":
c = self._domain_client
e = self._get_service_endpoint(c, service_type, region)
return _ServiceClient(c, e, self._product)
def _gen_provider_client(self):
m = self._module
p = {
"auth_url": m.params['identity_endpoint'],
"password": m.params['password'],
"username": m.params['user'],
"project_name": m.params['project'],
"user_domain_name": m.params['domain'],
"reauthenticate": True
}
self._project_client = Adapter(
session.Session(auth=v3.Password(**p)),
raise_exc=False)
p.pop("project_name")
self._domain_client = Adapter(
session.Session(auth=v3.Password(**p)),
raise_exc=False)
def _get_service_endpoint(self, client, service_type, region):
k = "%s.%s" % (service_type, region if region else "")
if k in self._endpoints:
return self._endpoints.get(k)
url = None
try:
url = client.get_endpoint(service_type=service_type,
region_name=region, interface="public")
except Exception as ex:
raise HwcClientException(
0, "Getting endpoint failed, error=%s" % ex)
if url == "":
raise HwcClientException(
0, "Can not find the enpoint for %s" % service_type)
if url[-1] != "/":
url += "/"
self._endpoints[k] = url
return url
def _validate(self):
if not HAS_THIRD_LIBRARIES:
self.module.fail_json(
msg=missing_required_lib('keystoneauth1'),
exception=THIRD_LIBRARIES_IMP_ERR)
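# Illustrative sketch, not called anywhere in this module: how a module author might
# obtain a region-scoped service client from Config. The service type ("vpc") and the
# request path ("vpcs") are placeholders, not guaranteed endpoints.
def _example_config_usage(module):
    config = Config(module, "vpc")
    client = config.client(get_region(module), "vpc", "project")
    return client.get("vpcs")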
class HwcModule(AnsibleModule):
def __init__(self, *args, **kwargs):
arg_spec = kwargs.setdefault('argument_spec', {})
arg_spec.update(
dict(
identity_endpoint=dict(
required=True, type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_IDENTITY_ENDPOINT']),
),
user=dict(
required=True, type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_USER']),
),
password=dict(
required=True, type='str', no_log=True,
fallback=(env_fallback, ['ANSIBLE_HWC_PASSWORD']),
),
domain=dict(
required=True, type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_DOMAIN']),
),
project=dict(
required=True, type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_PROJECT']),
),
region=dict(
type='str',
fallback=(env_fallback, ['ANSIBLE_HWC_REGION']),
),
id=dict(type='str')
)
)
super(HwcModule, self).__init__(*args, **kwargs)
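# Illustrative sketch: a concrete cloud module typically just instantiates HwcModule
# with its own options; the "name" option below is made up. This is only meaningful
# when executed by Ansible, which supplies the common credential arguments declared
# in __init__ above.
def _example_build_module():
    return HwcModule(
        argument_spec=dict(name=dict(type='str', required=True)),
        supports_check_mode=True,
    )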
class _DictComparison(object):
''' This class takes in two dictionaries `a` and `b`.
These are dictionaries of arbitrary depth, but made up of standard
Python types only.
This differ will compare all values in `a` to those in `b`.
    If a value in `a` is None, it always compares as equal, indicating
    that this value does not need to be compared.
Note: On all lists, order does matter.
'''
def __init__(self, request):
self.request = request
def __eq__(self, other):
return self._compare_dicts(self.request, other.request)
def __ne__(self, other):
return not self.__eq__(other)
def _compare_dicts(self, dict1, dict2):
if dict1 is None:
return True
if set(dict1.keys()) != set(dict2.keys()):
return False
for k in dict1:
if not self._compare_value(dict1.get(k), dict2.get(k)):
return False
return True
def _compare_lists(self, list1, list2):
"""Takes in two lists and compares them."""
if list1 is None:
return True
if len(list1) != len(list2):
return False
for i in range(len(list1)):
if not self._compare_value(list1[i], list2[i]):
return False
return True
def _compare_value(self, value1, value2):
"""
        return: True if value1 is the same as value2, otherwise False.
"""
if value1 is None:
return True
if not (value1 and value2):
return (not value1) and (not value2)
# Can assume non-None types at this point.
if isinstance(value1, list) and isinstance(value2, list):
return self._compare_lists(value1, value2)
elif isinstance(value1, dict) and isinstance(value2, dict):
return self._compare_dicts(value1, value2)
# Always use to_text values to avoid unicode issues.
return (to_text(value1, errors='surrogate_or_strict') == to_text(
value2, errors='surrogate_or_strict'))
def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
is_last_time = False
not_found_times = 0
wait = 0
time.sleep(delay)
end = time.time() + timeout
while not is_last_time:
if time.time() > end:
is_last_time = True
obj, status = refresh()
if obj is None:
not_found_times += 1
if not_found_times > 10:
raise HwcModuleException(
"not found the object for %d times" % not_found_times)
else:
not_found_times = 0
if status in target:
return obj
if pending and status not in pending:
raise HwcModuleException(
"unexpect status(%s) occured" % status)
if not is_last_time:
wait *= 2
if wait < min_interval:
wait = min_interval
elif wait > 10:
wait = 10
time.sleep(wait)
raise HwcModuleException("asycn wait timeout after %d seconds" % timeout)
def navigate_value(data, index, array_index=None):
if array_index and (not isinstance(array_index, dict)):
raise HwcModuleException("array_index must be dict")
d = data
for n in range(len(index)):
if d is None:
return None
if not isinstance(d, dict):
raise HwcModuleException(
"can't navigate value from a non-dict object")
i = index[n]
if i not in d:
raise HwcModuleException(
"navigate value failed: key(%s) is not exist in dict" % i)
d = d[i]
if not array_index:
continue
k = ".".join(index[: (n + 1)])
if k not in array_index:
continue
if d is None:
return None
if not isinstance(d, list):
raise HwcModuleException(
"can't navigate value from a non-list object")
j = array_index.get(k)
if j >= len(d):
raise HwcModuleException(
"navigate value failed: the index is out of list")
d = d[j]
return d
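# Illustrative example of navigate_value: index is the list of keys walked in order,
# and array_index maps a dotted key path to the list element chosen at that depth.
# The sample data is made up.
def _example_navigate_value():
    data = {"server": {"nics": [{"ip": "10.0.0.5"}, {"ip": "10.0.0.6"}]}}
    # walk server -> nics, pick nics[1], then read its "ip" -> "10.0.0.6"
    return navigate_value(data, ["server", "nics", "ip"], {"server.nics": 1})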
def build_path(module, path, kv=None):
if kv is None:
kv = dict()
v = {}
for p in re.findall(r"{[^/]*}", path):
n = p[1:][:-1]
if n in kv:
v[n] = str(kv[n])
else:
if n in module.params:
v[n] = str(module.params.get(n))
else:
v[n] = ""
return path.format(**v)
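# Illustrative example of build_path: placeholders are resolved from kv first, then
# from module.params, and fall back to an empty string. The fake module below stands
# in for a real AnsibleModule.
def _example_build_path():
    class _FakeModule(object):
        params = {"project_id": "demo-project"}
    path = "v1/{project_id}/servers/{server_id}/nics"
    # -> "v1/demo-project/servers/abc123/nics"
    return build_path(_FakeModule(), path, {"server_id": "abc123"})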
def get_region(module):
if module.params['region']:
return module.params['region']
return module.params['project'].split("_")[0]
def is_empty_value(v):
return (not v)
def are_different_dicts(dict1, dict2):
return _DictComparison(dict1) != _DictComparison(dict2)
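# Illustrative example: keys must match on both sides, and a None value on the request
# side means "accept whatever the remote side reports", so it never causes a difference.
def _example_are_different_dicts():
    request = {"name": "disk-1", "size": 40, "id": None}
    remote = {"name": "disk-1", "size": 40, "id": "d9f1"}
    return are_different_dicts(request, remote)  # False: nothing meaningful differs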
| gpl-3.0 | 1,518,753,234,668,667,600 | 27.986301 | 79 | 0.537886 | false |
pymedusa/Medusa | ext/boto/gs/key.py | 17 | 42787 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
from boto.compat import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
from boto.utils import compute_hash
from boto.utils import get_utf8_value
class Key(S3Key):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`boto.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
super(Key, self).__init__(bucket=bucket, name=name)
self.generation = generation
        self.metageneration = None
self.cloud_hashes = {}
self.component_count = None
def __repr__(self):
if self.generation and self.metageneration:
ver_str = '#%s.%s' % (self.generation, self.metageneration)
else:
ver_str = ''
if self.bucket:
return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
else:
return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
self.metageneration = value
else:
setattr(self, name, value)
def handle_version_headers(self, resp, force=False):
self.metageneration = resp.getheader('x-goog-metageneration', None)
self.generation = resp.getheader('x-goog-generation', None)
def handle_restore_headers(self, response):
return
def handle_addl_headers(self, headers):
for key, value in headers:
if key == 'x-goog-hash':
for hash_pair in value.split(','):
alg, b64_digest = hash_pair.strip().split('=', 1)
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
elif key == 'x-goog-generation':
self.generation = value
# Use x-goog-stored-content-encoding and
# x-goog-stored-content-length to indicate original content length
# and encoding, which are transcoding-invariant (so are preferable
# over using content-encoding and size headers).
elif key == 'x-goog-stored-content-encoding':
self.content_encoding = value
elif key == 'x-goog-stored-content-length':
self.size = int(value)
elif key == 'x-goog-storage-class':
self.storage_class = value
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
# For GCS we need to include the object generation in the query args.
# The rest of the processing is handled in the parent class.
if self.generation:
if query_args:
query_args += '&'
query_args += 'generation=%s' % self.generation
super(Key, self).open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries,
response_headers=response_headers)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None):
query_args = None
if self.generation:
query_args = ['generation=%s' % self.generation]
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=hash_algs,
query_args=query_args)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None,
hash_algs=None):
"""
Retrieve an object from GCS using the name of the Key object as the
key in GCS. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to GCS and
the second representing the size of the to be transmitted
object.
:type cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
hash_algs=hash_algs)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers,
hash_algs=hash_algs)
def compute_hash(self, fp, algorithm, size=None):
"""
:type fp: file
:param fp: File pointer to the file to hash. The file
pointer will be reset to the same position before the
method returns.
:type algorithm: zero-argument constructor for hash objects that
implements update() and digest() (e.g. hashlib.md5)
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Less bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_hash(
fp, size=size, hash_algorithm=algorithm)
# The internal implementation of compute_hash() needs to return the
# data size, but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code), so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
"""
Upload a file to GCS.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib.md5}.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size,
hash_algs=hash_algs)
def delete(self, headers=None):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation,
headers=headers)
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
"""
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
"""
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
"""
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None, rewind=False,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: The file whose contents are to be uploaded.
:type headers: dict
:param headers: (optional) Additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: (optional) If this parameter is False, the method will
first check to see if an object exists in the bucket with the same
key. If it does, it won't overwrite it. The default value is True
which will overwrite the object.
:type cb: function
:param cb: (optional) Callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: (optional) A canned ACL policy that will be applied to
the new key in GS.
:type md5: tuple
:param md5: (optional) A tuple containing the hexdigest version of the
MD5 checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.
If you need to compute the MD5 for any reason prior to upload, it's
silly to have to do it twice so this param, if present, will be
used as the MD5 values of the file. Otherwise, the checksum will be
computed.
:type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler`
:param res_upload_handler: (optional) If provided, this handler will
perform the upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from the
file pointer (fp). This is useful when uploading a file in multiple
parts where you are splitting the file up into different ranges to
be uploaded. If not specified, the default behaviour is to read all
bytes from the file pointer. Less bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when a
resumable upload handler is given but is still useful for
uploading part of a file as implemented by the parent class.
2. At present Google Cloud Storage does not support multipart
uploads.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will be
rewound to the start before any bytes are read from it. The default
behaviour is False which reads from the current position of the
file pointer (fp).
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
:rtype: int
:return: The number of bytes written to the key.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
if res_upload_handler and size:
# could use size instead of file_length if provided but...
raise BotoClientError(
'"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if size:
self.size = size
else:
                # If md5 is provided, we still need the size, so
                # calculate it based on bytes to end of content
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
if md5 is None:
md5 = self.compute_md5(fp, size)
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size)
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=None,
res_upload_handler=None,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto GS.
:type headers: dict
:param headers: (optional) Additional headers to pass along with the
request to GS.
:type replace: bool
:param replace: (optional) If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: (optional) Callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :py:attribute:`boto.gs.acl.CannedACLStrings`
:param policy: (optional) A canned ACL policy that will be applied to
the new key in GS.
:type md5: tuple
:param md5: (optional) A tuple containing the hexdigest version of the
MD5 checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.
If you need to compute the MD5 for any reason prior to upload, it's
silly to have to do it twice so this param, if present, will be
used as the MD5 values of the file. Otherwise, the checksum will be
computed.
:type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler`
:param res_upload_handler: (optional) If provided, this handler will
perform the upload.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed hashes, since we are setting the
# content.
self.local_hashes = {}
with open(filename, 'rb') as fp:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler,
if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
if_generation=None):
"""
Store an object in GCS using the name of the Key object as the
key in GCS and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to GCS and the second representing the
size of the to be transmitted object.
:type cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in GCS.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed md5 hashes, since we are setting the content.
self.md5 = None
self.base64md5 = None
fp = StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
fp.close()
return r
def set_contents_from_stream(self, *args, **kwargs):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
if_generation = kwargs.pop('if_generation', None)
if if_generation is not None:
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets the ACL for this object.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
"""Returns the ACL of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: :class:`.gs.acl.ACL`
"""
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
def get_xml_acl(self, headers=None, generation=None):
"""Returns the ACL string of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: str
"""
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
def set_xml_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
return self.bucket.set_canned_acl(
acl_str,
self.name,
headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration
)
def compose(self, components, content_type=None, headers=None):
"""Create a new object from a sequence of existing objects.
The content of the object representing this Key will be the
concatenation of the given object sequence. For more detail, visit
https://developers.google.com/storage/docs/composite-objects
        :type components: list of Keys
        :param components: List of gs.Keys representing the component objects.
        :type content_type: string
        :param content_type: (optional) Content type for the new composite object.
"""
compose_req = []
for key in components:
if key.bucket.name != self.bucket.name:
raise BotoClientError(
'GCS does not support inter-bucket composing')
generation_tag = ''
if key.generation:
generation_tag = ('<Generation>%s</Generation>'
% str(key.generation))
compose_req.append('<Component><Name>%s</Name>%s</Component>' %
(key.name, generation_tag))
compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
''.join(compose_req))
headers = headers or {}
if content_type:
headers['Content-Type'] = content_type
resp = self.bucket.connection.make_request(
'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
headers=headers, query_args='compose',
data=get_utf8_value(compose_req_xml))
if resp.status < 200 or resp.status > 299:
raise self.bucket.connection.provider.storage_response_error(
resp.status, resp.reason, resp.read())
# Return the generation so that the result URI can be built with this
# for automatic parallel uploads.
return resp.getheader('x-goog-generation')
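# Illustrative sketch (not part of the class above): uploading parts and stitching them
# together with compose(). Assumes an authenticated boto GS connection and an existing
# bucket; the object names are placeholders.
def _example_parallel_compose(bucket):
    parts = []
    for i, chunk in enumerate(['hello ', 'composite ', 'world']):
        part = bucket.new_key('composite/part-%d' % i)
        # if_generation=0 writes the part only if it does not already exist
        part.set_contents_from_string(chunk, if_generation=0)
        parts.append(part)
    target = bucket.new_key('composite/full-object')
    # returns the new object's generation (from the x-goog-generation header)
    return target.compose(parts, content_type='text/plain')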
| gpl-3.0 | -2,098,132,593,346,260,500 | 44.133966 | 101 | 0.602496 | false |
supriyantomaftuh/syzygy | third_party/numpy/files/numpy/f2py/tests/test_callback.py | 22 | 1740 | from numpy.testing import *
from numpy import array
import math
import util
class TestF77Callback(util.F2PyTest):
code = """
subroutine t(fun,a)
integer a
cf2py intent(out) a
external fun
call fun(a)
end
subroutine func(a)
cf2py intent(in,out) a
integer a
a = a + 11
end
subroutine func0(a)
cf2py intent(out) a
integer a
a = 11
end
subroutine t2(a)
cf2py intent(callback) fun
integer a
cf2py intent(out) a
external fun
call fun(a)
end
"""
@dec.slow
def test_all(self):
for name in "t,t2".split(","):
self.check_function(name)
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda : 4)
assert_( r==4,`r`)
r = t(lambda a:5,fun_extra_args=(6,))
assert_( r==5,`r`)
r = t(lambda a:a,fun_extra_args=(6,))
assert_( r==6,`r`)
r = t(lambda a:5+a,fun_extra_args=(7,))
assert_( r==12,`r`)
r = t(lambda a:math.degrees(a),fun_extra_args=(math.pi,))
assert_( r==180,`r`)
r = t(math.degrees,fun_extra_args=(math.pi,))
assert_( r==180,`r`)
r = t(self.module.func, fun_extra_args=(6,))
assert_( r==17,`r`)
r = t(self.module.func0)
assert_( r==11,`r`)
r = t(self.module.func0._cpointer)
assert_( r==11,`r`)
class A:
def __call__(self):
return 7
def mth(self):
return 9
a = A()
r = t(a)
assert_( r==7,`r`)
r = t(a.mth)
assert_( r==9,`r`)
if __name__ == "__main__":
import nose
nose.runmodule()
| apache-2.0 | -8,656,660,185,156,959,000 | 22.2 | 65 | 0.486782 | false |
kjordahl/xray | xray/test/test_dask.py | 2 | 9054 | import numpy as np
from xray import Variable, DataArray, Dataset, concat
import xray.ufuncs as xu
from . import TestCase, requires_dask, unittest, InaccessibleArray
try:
import dask
import dask.array as da
except ImportError:
pass
def _copy_at_variable_level(arg):
"""We need to copy the argument at the level of xray.Variable objects, so
that viewing its values does not trigger lazy loading.
"""
if isinstance(arg, Variable):
return arg.copy(deep=False)
elif isinstance(arg, DataArray):
ds = arg.to_dataset(name='__copied__')
return _copy_at_variable_level(ds)['__copied__']
elif isinstance(arg, Dataset):
ds = arg.copy()
for k in list(ds):
ds._variables[k] = ds._variables[k].copy(deep=False)
return ds
else:
assert False
class DaskTestCase(TestCase):
def assertLazyAnd(self, expected, actual, test):
expected_copy = _copy_at_variable_level(expected)
actual_copy = _copy_at_variable_level(actual)
with dask.set_options(get=dask.get):
test(actual_copy, expected_copy)
var = getattr(actual, 'variable', actual)
self.assertIsInstance(var.data, da.Array)
@requires_dask
class TestVariable(DaskTestCase):
def assertLazyAnd(self, expected, actual, test):
expected_copy = expected.copy(deep=False)
actual_copy = actual.copy(deep=False)
with dask.set_options(get=dask.get):
test(actual_copy, expected_copy)
var = getattr(actual, 'variable', actual)
self.assertIsInstance(var.data, da.Array)
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, self.assertVariableIdentical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, self.assertVariableAllClose)
def setUp(self):
self.values = np.random.randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_var = Variable(('x', 'y'), self.values)
self.lazy_var = Variable(('x', 'y'), self.data)
def test_basics(self):
v = self.lazy_var
self.assertIs(self.data, v.data)
self.assertEqual(self.data.chunks, v.chunks)
self.assertArrayEqual(self.values, v)
def test_chunk(self):
for chunks, expected in [(None, ((2, 2), (2, 2, 2))),
(3, ((3, 1), (3, 3))),
({'x': 3, 'y': 3}, ((3, 1), (3, 3))),
({'x': 3}, ((3, 1), (2, 2, 2))),
({'x': (3, 1)}, ((3, 1), (2, 2, 2)))]:
rechunked = self.lazy_var.chunk(chunks)
self.assertEqual(rechunked.chunks, expected)
self.assertLazyAndIdentical(self.eager_var, rechunked)
def test_indexing(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0], v[0])
self.assertLazyAndIdentical(u[:1], v[:1])
self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])
with self.assertRaisesRegexp(TypeError, 'stored in a dask array'):
v[:1] = 0
def test_squeeze(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())
def test_equals(self):
v = self.lazy_var
self.assertTrue(v.equals(v))
self.assertIsInstance(v.data, da.Array)
self.assertTrue(v.identical(v))
self.assertIsInstance(v.data, da.Array)
def test_transpose(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.T, v.T)
def test_unary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(-u, -v)
self.assertLazyAndIdentical(abs(u), abs(v))
self.assertLazyAndIdentical(u.round(), v.round())
def test_binary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(2 * u, 2 * v)
self.assertLazyAndIdentical(u + u, v + v)
self.assertLazyAndIdentical(u[0] + u, v[0] + v)
def test_reduce(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(u.std(), v.std())
self.assertLazyAndAllClose(u.argmax(dim='x'), v.argmax(dim='x'))
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v.prod()
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v.median()
def test_missing_values(self):
values = np.array([0, 1, np.nan, 3])
data = da.from_array(values, chunks=(2,))
eager_var = Variable('x', values)
lazy_var = Variable('x', data)
self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
self.assertLazyAndIdentical(Variable('x', range(4)), lazy_var.fillna(2))
self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())
def test_concat(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], 'x'))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], 'x'))
self.assertLazyAndIdentical(
u[:3], Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]]))
def test_missing_methods(self):
v = self.lazy_var
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v.conj()
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v.argsort()
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v[0].item()
def test_ufuncs(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
@requires_dask
class TestDataArrayAndDataset(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, self.assertDataArrayIdentical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, self.assertDataArrayAllClose)
def setUp(self):
self.values = np.random.randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_array = DataArray(self.values, dims=('x', 'y'), name='foo')
self.lazy_array = DataArray(self.data, dims=('x', 'y'), name='foo')
def test_chunk(self):
chunked = self.eager_array.chunk({'x': 2}).chunk({'y': 2})
self.assertEqual(chunked.chunks, ((2,) * 2, (2,) * 3))
def test_lazy_dataset(self):
lazy_ds = Dataset({'foo': (('x', 'y'), self.data)})
self.assertIsInstance(lazy_ds.foo.variable.data, da.Array)
def test_lazy_array(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(u, v)
self.assertLazyAndAllClose(-u, -v)
self.assertLazyAndAllClose(u.T, v.T)
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(1 + u, 1 + v)
actual = concat([v[:2], v[2:]], 'x')
self.assertLazyAndAllClose(u, actual)
def test_groupby(self):
u = self.eager_array
v = self.lazy_array
expected = u.groupby('x').mean()
actual = v.groupby('x').mean()
self.assertLazyAndAllClose(expected, actual)
def test_groupby_first(self):
u = self.eager_array
v = self.lazy_array
for coords in [u.coords, v.coords]:
coords['ab'] = ('x', ['a', 'a', 'b', 'b'])
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v.groupby('ab').first()
expected = u.groupby('ab').first()
actual = v.groupby('ab').first(skipna=False)
self.assertLazyAndAllClose(expected, actual)
def test_reindex(self):
u = self.eager_array
v = self.lazy_array
for kwargs in [{'x': [2, 3, 4]},
{'x': [1, 100, 2, 101, 3]},
{'x': [2.5, 3, 3.5], 'y': [2, 2.5, 3]}]:
expected = u.reindex(**kwargs)
actual = v.reindex(**kwargs)
self.assertLazyAndAllClose(expected, actual)
def test_to_dataset_roundtrip(self):
u = self.eager_array
v = self.lazy_array
expected = u.assign_coords(x=u['x'].astype(str))
self.assertLazyAndIdentical(expected, v.to_dataset('x').to_array('x'))
def test_ufuncs(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
def test_simultaneous_compute(self):
ds = Dataset({'foo': ('x', range(5)),
'bar': ('x', range(5))}).chunk()
count = [0]
def counting_get(*args, **kwargs):
count[0] += 1
return dask.get(*args, **kwargs)
with dask.set_options(get=counting_get):
ds.load()
self.assertEqual(count[0], 1)
| apache-2.0 | 3,757,064,685,218,720,000 | 34.645669 | 86 | 0.584603 | false |
pombredanne/pulp_python | plugins/test/unit/plugins/distributors/test_steps.py | 1 | 17187 | """
This module contains tests for the pulp_python.plugins.distributors.steps module.
"""
from gettext import gettext as _
import os
import unittest
from xml.etree import cElementTree as ElementTree
import mock
from pulp.plugins.model import Unit
from pulp_python.common import constants
from pulp_python.plugins.distributors import steps
_GET_UNITS_RETURN = [
Unit(constants.PACKAGE_TYPE_ID, {'name': 'nectar', 'version': '1.2.0'},
{'_filename': 'nectar-1.2.0.tar.gz', '_checksum': 'abcde', '_checksum_type': 'made_up'},
'/path/to/nectar-1.2.0.tar.gz'),
Unit(constants.PACKAGE_TYPE_ID, {'name': 'nectar', 'version': '1.3.1'},
{'_filename': 'nectar-1.3.1.tar.gz', '_checksum': 'fghij', '_checksum_type': 'made_up'},
'/path/to/nectar-1.3.1.tar.gz'),
Unit(constants.PACKAGE_TYPE_ID, {'name': 'pulp_python_plugins', 'version': '0.0.0'},
{'_filename': 'pulp_python_plugins-0.0.0.tar.gz', '_checksum': 'klmno',
'_checksum_type': 'made_up'},
'/path/to/pulp_python_plugins-0.0.0.tar.gz'),
]
class TestPublishContentStep(unittest.TestCase):
"""
This class contains tests for the PublishContentStep class.
"""
@mock.patch('pulp_python.plugins.distributors.steps.PluginStep.__init__')
def test___init__(self, super___init__):
"""
Assert correct behavior from the __init__() method.
"""
step = steps.PublishContentStep()
super___init__.assert_called_once_with(constants.PUBLISH_STEP_CONTENT)
self.assertEqual(step.context, None)
self.assertEqual(step.redirect_context, None)
self.assertEqual(step.description, _('Publishing Python Content.'))
@mock.patch('pulp_python.plugins.distributors.steps.os.makedirs')
@mock.patch('pulp_python.plugins.distributors.steps.os.path.exists')
@mock.patch('pulp_python.plugins.distributors.steps.os.symlink')
def test_process_main(self, symlink, exists, makedirs):
"""
Assert correct operation from the process_main() method with our _GET_UNITS_RETURN data.
"""
_seen_paths = []
def mock_exists(path):
"""
This mocks the return value of exists to return False the first time a path is given to
it, and True every time thereafter for that same path.
"""
if path not in _seen_paths:
_seen_paths.append(path)
return False
return True
exists.side_effect = mock_exists
step = steps.PublishContentStep()
conduit = mock.MagicMock()
conduit.get_units.return_value = _GET_UNITS_RETURN
step.get_conduit = mock.MagicMock(return_value=conduit)
step.parent = mock.MagicMock()
step.parent.web_working_dir = '/some/path/'
step.process_main()
step.get_conduit.assert_called_once_with()
conduit.get_units.assert_called_once_with()
# os.path.exists should have been called once for each Unit. It also gets called for a lot
# of locale stuff, so we'll need to filter those out.
pulp_exists_calls = [c for c in exists.mock_calls if 'locale' not in c[1][0]]
self.assertEqual(len(pulp_exists_calls), 3)
expected_symlink_args = [
(u.storage_path, steps._get_package_path(u.unit_key['name'], u.metadata['_filename']))
for u in _GET_UNITS_RETURN]
expected_symlink_args = [(a[0], os.path.join(step.parent.web_working_dir, a[1]))
for a in expected_symlink_args]
expected_exists_call_args = [(os.path.dirname(a[1]),) for a in expected_symlink_args]
actual_exists_call_args = [c[1] for c in pulp_exists_calls]
self.assertEqual(set(actual_exists_call_args), set(expected_exists_call_args))
# os.makedirs should only have been called twice, since there are two versions of Nectar and
# they share a directory. This is also going to be the same set as the exists set.
self.assertEqual(makedirs.call_count, 2)
makedirs_call_args = [c[1] for c in makedirs.mock_calls]
self.assertEqual(set(makedirs_call_args), set(expected_exists_call_args))
# Lastly, three calls to symlink should have been made, one for each Unit.
self.assertEqual(symlink.call_count, 3)
actual_mock_call_args = [c[1] for c in symlink.mock_calls]
self.assertEqual(set(actual_mock_call_args), set(expected_symlink_args))
class TestPublishMetadataStep(unittest.TestCase):
"""
Test the PublishMetadataStep class.
"""
@mock.patch('pulp_python.plugins.distributors.steps.PluginStep.__init__')
def test___init__(self, super___init__):
"""
Assert correct behavior from the __init__() method.
"""
step = steps.PublishMetadataStep()
super___init__.assert_called_once_with(constants.PUBLISH_STEP_METADATA)
self.assertEqual(step.context, None)
self.assertEqual(step.redirect_context, None)
self.assertEqual(step.description, _('Publishing Python Metadata.'))
@mock.patch('__builtin__.open', autospec=True)
@mock.patch('pulp_python.plugins.distributors.steps.os.makedirs')
@mock.patch('pulp_python.plugins.distributors.steps.PublishMetadataStep._create_package_index')
def test_process_main(self, _create_package_index, makedirs, mock_open):
"""
Assert all the correct calls from process_main().
"""
step = steps.PublishMetadataStep()
conduit = mock.MagicMock()
conduit.get_units.return_value = _GET_UNITS_RETURN
step.get_conduit = mock.MagicMock(return_value=conduit)
step.parent = mock.MagicMock()
step.parent.web_working_dir = '/some/path/'
step.process_main()
# Assert correct usage of various mocked items
step.get_conduit.assert_called_once_with()
conduit.get_units.assert_called_once_with()
makedirs.assert_called_once_with(os.path.join(step.parent.web_working_dir, 'simple'))
mock_open.assert_called_once_with(
os.path.join(step.parent.web_working_dir, 'simple', 'index.html'), 'w')
# Assert that the two calls to _create_package_index for each package name are correct
self.assertEqual(_create_package_index.call_count, 2)
expected_packages_by_name = steps._get_packages(conduit)
for call in _create_package_index.mock_calls:
expected_packages = expected_packages_by_name[call[1][0]]
self.assertEqual(call[1][1], os.path.join(step.parent.web_working_dir, 'simple'))
self.assertEqual(call[1][2], expected_packages)
del expected_packages_by_name[call[1][0]]
self.assertEqual(expected_packages_by_name, {})
# Assert that the resulting HTML index is correct
write = mock_open.return_value.__enter__.return_value
index_html = write.mock_calls[0][1][0]
html = ElementTree.fromstring(index_html)
head = html.find('head')
title = head.find('title')
self.assertEqual(title.text, 'Simple Index')
meta = head.find('meta')
self.assertEqual(meta.get('name'), 'api-version')
self.assertEqual(meta.get('value'), '2')
body = html.find('body')
# There should be four subelements, two anchors and two breaks
self.assertEqual(len(body.findall('br')), 2)
self.assertEqual(len(body.findall('a')), 2)
anchors = body.findall('a')
self.assertEqual(set([a.get('href') for a in anchors]),
set(['nectar', 'pulp_python_plugins']))
self.assertEqual(set([a.text for a in anchors]), set(['nectar', 'pulp_python_plugins']))
@mock.patch('__builtin__.open', autospec=True)
@mock.patch('pulp_python.plugins.distributors.steps.os.makedirs')
def test__create_package_index(self, makedirs, mock_open):
"""
Assert all the correct calls from _create_package_index().
"""
step = steps.PublishMetadataStep()
name = 'test_package'
simple_path = os.path.join('/', 'path', 'to', 'simple')
packages = [
{'version': '2.4.3', 'filename': 'test_package-2.4.3.tar.gz', 'checksum': 'sum',
'checksum_type': 'barlow'},
{'version': '2.5.0', 'filename': 'test_package-2.5.0.tar.gz', 'checksum': 'different',
'checksum_type': 'barlow'},
]
step._create_package_index(name, simple_path, packages)
# Assert the right files and directories are made
makedirs.assert_called_once_with(os.path.join(simple_path, name))
mock_open.assert_called_once_with(
os.path.join(simple_path, name, 'index.html'), 'w')
# Assert that the resulting HTML index is correct
write = mock_open.return_value.__enter__.return_value
index_html = write.mock_calls[0][1][0]
html = ElementTree.fromstring(index_html)
head = html.find('head')
title = head.find('title')
self.assertEqual(title.text, 'Links for %s' % name)
meta = head.find('meta')
self.assertEqual(meta.get('name'), 'api-version')
self.assertEqual(meta.get('value'), '2')
body = html.find('body')
# There should be four subelements, two anchors and two breaks
self.assertEqual(len(body.findall('br')), 2)
self.assertEqual(len(body.findall('a')), 2)
anchors = body.findall('a')
hrefs = [
os.path.join('..', '..', steps._get_package_path(name, p['filename'])) +
'#%s=%s' % (p['checksum_type'], p['checksum']) for p in packages]
self.assertEqual(set([a.get('href') for a in anchors]), set(hrefs))
self.assertEqual(set([a.text for a in anchors]), set([p['filename'] for p in packages]))
class TestPythonPublisher(unittest.TestCase):
"""
This class contains tests for the PythonPublisher object.
"""
@mock.patch('pulp_python.plugins.distributors.steps.AtomicDirectoryPublishStep')
@mock.patch('pulp_python.plugins.distributors.steps.configuration.get_master_publish_dir')
@mock.patch('pulp_python.plugins.distributors.steps.configuration.get_web_publish_dir')
@mock.patch('pulp_python.plugins.distributors.steps.os.makedirs')
@mock.patch('pulp_python.plugins.distributors.steps.PluginStep.__init__',
side_effect=steps.PluginStep.__init__, autospec=True)
@mock.patch('pulp_python.plugins.distributors.steps.PluginStep.get_working_dir', autospec=True)
@mock.patch('pulp_python.plugins.distributors.steps.PublishContentStep')
@mock.patch('pulp_python.plugins.distributors.steps.PublishMetadataStep')
def test___init___working_dir_does_not_exist(
self, PublishMetadataStep, PublishContentStep, get_working_dir,
super___init__, makedirs, get_web_publish_dir, get_master_publish_dir,
AtomicDirectoryPublishStep):
"""
Assert correct operation from the __init__() method when the working_dir does not exist.
"""
repo = mock.MagicMock()
publish_conduit = mock.MagicMock()
config = mock.MagicMock()
working_dir = os.path.join('/', 'some', 'working', 'dir')
get_working_dir.return_value = working_dir
publish_dir = os.path.join('/', 'some', 'web', 'publish', 'dir')
get_web_publish_dir.return_value = publish_dir
master_publish_dir = os.path.join('/', 'some', 'master', 'publish', 'dir')
get_master_publish_dir.return_value = master_publish_dir
p = steps.PythonPublisher(repo, publish_conduit, config)
super___init__.assert_called_once_with(p, constants.PUBLISH_STEP_PUBLISHER, repo,
publish_conduit, config)
get_web_publish_dir.assert_called_once_with(repo, config)
makedirs.assert_called_once_with(working_dir)
AtomicDirectoryPublishStep.assert_called_once_with(
working_dir, [(repo.id, publish_dir)], master_publish_dir,
step_type=constants.PUBLISH_STEP_OVER_HTTP)
self.assertEqual(AtomicDirectoryPublishStep.return_value.description,
_('Making files available via web.'))
self.assertEqual(len(p.children), 3)
self.assertEqual(
set(p.children),
set([AtomicDirectoryPublishStep.return_value, PublishContentStep.return_value,
PublishMetadataStep.return_value]))
@mock.patch('pulp_python.plugins.distributors.steps.AtomicDirectoryPublishStep')
@mock.patch('pulp_python.plugins.distributors.steps.configuration.get_master_publish_dir')
@mock.patch('pulp_python.plugins.distributors.steps.configuration.get_web_publish_dir')
@mock.patch('pulp_python.plugins.distributors.steps.os.makedirs')
@mock.patch('pulp_python.plugins.distributors.steps.os.path.exists', return_value=True)
@mock.patch('pulp_python.plugins.distributors.steps.PluginStep.__init__',
side_effect=steps.PluginStep.__init__, autospec=True)
@mock.patch('pulp_python.plugins.distributors.steps.PluginStep.get_working_dir', autospec=True)
@mock.patch('pulp_python.plugins.distributors.steps.PublishContentStep')
@mock.patch('pulp_python.plugins.distributors.steps.PublishMetadataStep')
def test___init___working_dir_exists(
self, PublishMetadataStep, PublishContentStep, get_working_dir,
super___init__, exists, makedirs, get_web_publish_dir, get_master_publish_dir,
AtomicDirectoryPublishStep):
"""
Assert correct operation from the __init__() method when the working_dir does exist.
"""
repo = mock.MagicMock()
publish_conduit = mock.MagicMock()
config = mock.MagicMock()
working_dir = os.path.join('/', 'some', 'working', 'dir')
get_working_dir.return_value = working_dir
publish_dir = os.path.join('/', 'some', 'web', 'publish', 'dir')
get_web_publish_dir.return_value = publish_dir
master_publish_dir = os.path.join('/', 'some', 'master', 'publish', 'dir')
get_master_publish_dir.return_value = master_publish_dir
p = steps.PythonPublisher(repo, publish_conduit, config)
super___init__.assert_called_once_with(p, constants.PUBLISH_STEP_PUBLISHER, repo,
publish_conduit, config)
get_web_publish_dir.assert_called_once_with(repo, config)
# os.path.exists should have been called once for working_dir. It also gets called for a lot
# of locale stuff, so we'll need to filter those out.
pulp_exists_calls = [c for c in exists.mock_calls if 'locale' not in c[1][0]]
self.assertEqual(len(pulp_exists_calls), 1)
self.assertEqual(pulp_exists_calls[0][1], (working_dir,))
self.assertEqual(makedirs.call_count, 0)
AtomicDirectoryPublishStep.assert_called_once_with(
working_dir, [(repo.id, publish_dir)], master_publish_dir,
step_type=constants.PUBLISH_STEP_OVER_HTTP)
self.assertEqual(AtomicDirectoryPublishStep.return_value.description,
_('Making files available via web.'))
self.assertEqual(len(p.children), 3)
self.assertEqual(
set(p.children),
set([AtomicDirectoryPublishStep.return_value, PublishContentStep.return_value,
PublishMetadataStep.return_value]))
class TestGetPackagePath(unittest.TestCase):
"""
This class contains tests for the _get_package_path() function.
"""
def test__get_package_path(self):
"""
Assert the correct return value from _get_package_path().
"""
name = 'test_package'
filename = 'test_package-1.2.3.tar.gz'
path = steps._get_package_path(name, filename)
self.assertEqual(path, os.path.join('packages', 'source', 't', name, filename))
class TestGetPackages(unittest.TestCase):
"""
This class contains tests for the _get_packages() function.
"""
def test__get_packages(self):
"""
Assert the correct return value from _get_packages() with the _GET_UNITS_RETURN data set.
"""
conduit = mock.MagicMock()
conduit.get_units.return_value = _GET_UNITS_RETURN
packages = steps._get_packages(conduit)
expected_packages = {
'pulp_python_plugins': [
{'checksum': 'klmno', 'checksum_type': 'made_up', 'version': '0.0.0',
'storage_path': '/path/to/pulp_python_plugins-0.0.0.tar.gz',
'filename': 'pulp_python_plugins-0.0.0.tar.gz'}],
'nectar': [
{'checksum': 'abcde', 'checksum_type': 'made_up', 'version': '1.2.0',
'storage_path': '/path/to/nectar-1.2.0.tar.gz', 'filename': 'nectar-1.2.0.tar.gz'},
{'checksum': 'fghij', 'checksum_type': 'made_up', 'version': '1.3.1',
'storage_path': '/path/to/nectar-1.3.1.tar.gz',
'filename': 'nectar-1.3.1.tar.gz'}]}
self.assertEqual(packages, expected_packages)
| gpl-2.0 | 1,374,386,335,467,115,800 | 47.826705 | 100 | 0.632106 | false |
pando85/gourmet | gourmet/plugins/import_export/website_import_plugins/schema_org_parser.py | 5 | 2406 | """
Parser for web pages that use the http://schema.org/Recipe microformat
"""
class Excluder(object):
def __init__(self, url):
self.url = url
def search(self, other_url):
return not (other_url.endswith(self.url))
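# Illustrative usage (an assumption added for clarity, not part of the
# original module): Excluder mimics the ``search`` interface of a compiled
# regex so it can sit in the same ``imageexcluders`` list. ``search`` is
# falsy when the candidate URL ends with the excluded one, so that image is
# skipped:
#
#   >>> e = Excluder('/images/pie.jpg')
#   >>> bool(e.search('http://example.com/images/pie.jpg'))
#   False
#   >>> bool(e.search('http://example.com/images/cake.jpg'))
#   True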
def generate(BaseParser):
class SchemaOrgParser (BaseParser):
schema_org_mappings = {# Properties from Thing (not all given)
'name': 'recipe',
'description': 'modifications',
# Properties from CreativeWork (none)
# Properties from Recipe
#'cookingMethod'
'ingredients': 'ingredients',
#'nutrition'
'recipeCategory': 'category',
'recipeCuisine': 'cuisine',
'recipeInstructions': 'instructions',
'recipeYield': 'yields',
#'totalTime'
}
#FIXME: Currently not evaluated
schema_org_duration_mappings = {# Properties from Recipe
'cookTime': 'cooktime',
'prepTime': 'preptime'
}
imageexcluders = []
def preparse (self, scope=True):
self.preparsed_elements = []
if scope:
self.recipe_schema_scope = self.soup.find(itemscope = True,
itemtype =
'http://schema.org/Recipe')
else:
self.recipe_schema_scope = self.soup
for tag in self.recipe_schema_scope.findAll(itemprop=True):
itemprop = tag["itemprop"]
for k, v in self.schema_org_mappings.iteritems():
if itemprop == k:
self.preparsed_elements.append((tag,v))
if itemprop == "image" and "src" in tag:
self.imageexcluders.append(Excluder(tag["src"]))
if self.preparsed_elements:
self.ignore_unparsed = True
else:
BaseParser.preparse(self)
return SchemaOrgParser
| gpl-2.0 | -744,655,749,017,660,200 | 38.442623 | 85 | 0.438071 | false |
atwel/gui_model | AC_ProductRuleNet.py | 1 | 4370 | """ This module creates a NetworkX network only for keeping track of
autocatalytic networks. It uses instances of the ProductNetRule class for
the nodes. Those objects are owned by cells.
Written by Jon Atwell
"""
import networkx
import AC_ProductRules
class ProductRuleNet:
""" A NetworkX DiGraph object. The construction of the network is done
by Cell instances who create a ProductNetRule for each type of rule they
have (not each rule, of which we expect duplicates).
"""
def __init__(self):
self.net = networkx.DiGraph()
self.cycle_counts = {}
self.has_cycles = True
def __str__(self):
"""Reports the number of nodes,
edges, and autocatalytic cycles.
"""
counts = ",".join([str(i) for i in self.cycle_counts.values()])
vals = (self.net.number_of_nodes(), self.net.number_of_edges(),counts)
return "Net has %d nodes, %d edges and cycles of lengths %s" %vals
def add_ProductNetRule(self, aProductNetRule):
""" This turn the ProductNetRule to a node in the network.
"""
self.net.add_node(aProductNetRule)
def remove_ProductNetRule(self, theProductNetRule, timestep):
""" This removes the node/ProductNetRule from the network.
NetworkX automatically removes adjacent edges in the network.
"""
owner = theProductNetRule.owner
input_ = theProductNetRule.input
self.net.remove_node(theProductNetRule)
def add_edge(self, rule1, rule2):
""" This connects two ProductNetRules via a network edge so that their
compatibility is included in the counting of autocatalytic cycles.
"""
if rule1.get_output() == rule2.get_input():
if self.net.has_edge(rule1, rule2) == False:
self.net.add_edge(rule1, rule2)
elif rule2.get_output() == rule1.get_input():
if self.net.has_edge(rule2, rule1) == False:
self.net.add_edge(rule2, rule1)
def update_cycle_counts(self):
""" This method makes use of the recursive_simple_cycles() function
of NetworkX to offload all the work of finding cycles in the graph.
The lengths of the cycles are added to the ProductRuleNet's field
        named cycle_counts. NOTE: the algorithm returns a list of cycles,
        where each cycle is a list of nodes with the final node dropped
        because it is the same as the first. Thus a (sub)list of length 2 is
        not a self-loop but a path from one node to another and back, and
        cycles must be of length two or greater.
"""
cycles = networkx.recursive_simple_cycles(self.net)
self.cycle_counts = {}
cycle_rules = []
for i in cycles:
length = len(i)
try:
self.cycle_counts[length] += 1
except:
self.cycle_counts[length] = 1
for rule in i:
if rule not in cycle_rules:
cycle_rules.append(rule)
for node in self.net.nodes():
if node not in cycle_rules:
self.net.remove_node(node)
# If there are no cycles, this run is dead and we need to send word
if len(cycles) == 0:
self.has_cycles = False
return False
else:
return True
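    # Illustrative note (an assumption added for clarity, not part of the
    # original module): for a two-node loop, recursive_simple_cycles() lists
    # both nodes once per cycle, e.g.
    #
    #   >>> import networkx
    #   >>> g = networkx.DiGraph([(1, 2), (2, 1)])
    #   >>> [sorted(c) for c in networkx.recursive_simple_cycles(g)]
    #   [[1, 2]]
    #
    # so update_cycle_counts() would record one cycle of length 2.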
def get_cycle_complexity(self):
""" This function looks at the cycles that are longer than two.
"""
cycles = networkx.recursive_simple_cycles(self.net)
        # keys are cycle lengths, entries are lists of
        # (distinct rule types, distinct owners) count tuples
        complexities = {}
rule_owners = {}
for cycle in cycles:
if len(cycle) > 2:
length = len(cycle)
types = {}
owners = {}
for rule in cycle:
type = str(rule.get_input()) + "-" +str(rule.get_output())
owner = rule.get_owner()
try:
types[type] += 1
except:
types[type] = 1
try:
owners[owner] +=1
except:
owners[owner] = 1
count_owners = len(owners.keys())
count_types = len(types.keys())
try:
complexities[length].append((count_types,count_owners))
except:
complexities[length] = [(count_types,count_owners)]
return complexities
def get_plus3rule_complexity(self):
""" Returns whether there is a cycle of length of at least 3
that include at least 3 distinct rules.
"""
for length_type in self.get_cycle_complexity().values():
for instance in length_type:
if instance[0] >= 3:
return True
return False
def get_plus3cell_complexity(self):
""" Returns whether there is a cycle of length of at least 3
that include at least 3 cells.
"""
for length_type in self.get_cycle_complexity().values():
for instance in length_type:
if instance[1] >= 3:
return True
return False
| mit | -7,039,479,985,292,641,000 | 26.484277 | 74 | 0.683524 | false |
hvnsweeting/Diamond | src/collectors/elb/elb.py | 12 | 10670 | # coding=utf-8
"""
The ELB collector collects metrics for one or more Amazon AWS ELBs
#### Configuration
Below is an example configuration for the ELBCollector.
You can specify an arbitrary amount of regions
```
enabled = true
interval = 60
# Optional
access_key_id = ...
secret_access_key = ...
# Optional - Available keys: region, zone, elb_name, metric_name
format = $elb_name.$zone.$metric_name
# Optional - list of regular expressions used to ignore ELBs
elbs_ignored = ^elb-a$, .*-test$, $test-.*
[regions]
[[us-west-1]]
# Optional - queries all elbs if omitted
elb_names = elb1, elb2, ...
[[us-west-2]]
...
```
#### Dependencies
* boto
"""
import calendar
import cPickle
import datetime
import functools
import re
import time
from collections import namedtuple
from string import Template
import diamond.collector
from diamond.metric import Metric
try:
import boto.ec2.elb
from boto.ec2 import cloudwatch
except ImportError:
cloudwatch = False
class memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
the function is not re-evaluated.
    Based upon http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
Nota bene: this decorator memoizes /all/ calls to the function. For
a memoization decorator with limited cache size, consider:
bit.ly/1wtHmlM
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args, **kwargs):
# If the function args cannot be used as a cache hash key, fail fast
key = cPickle.dumps((args, kwargs))
try:
return self.cache[key]
except KeyError:
value = self.func(*args, **kwargs)
self.cache[key] = value
return value
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
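# Illustrative usage of the decorator above (an assumption added for clarity,
# not part of the original collector). Calls with the same arguments are
# served from the cache instead of re-running the function:
#
#   >>> @memoized
#   ... def square(x):
#   ...     print('computing')
#   ...     return x * x
#   >>> square(3)
#   computing
#   9
#   >>> square(3)   # cache hit, nothing printed
#   9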
def utc_to_local(utc_dt):
"""
:param utc_dt: datetime in UTC
:return: datetime in the local timezone
"""
    # get integer timestamp to avoid precision loss
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.datetime.fromtimestamp(timestamp)
assert utc_dt.resolution >= datetime.timedelta(microseconds=1)
return local_dt.replace(microsecond=utc_dt.microsecond)
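# Illustrative note (an assumption added for clarity, not part of the
# original collector): CloudWatch timestamps are UTC, so they are shifted to
# the local timezone before being turned into epoch seconds with
# time.mktime(). For a host running in UTC-5:
#
#   >>> utc_to_local(datetime.datetime(2014, 1, 1, 12, 0))  # doctest: +SKIP
#   datetime.datetime(2014, 1, 1, 7, 0)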
@memoized
def get_zones(region, auth_kwargs):
"""
:param region: region to get the availability zones for
:return: list of availability zones
"""
ec2_conn = boto.ec2.connect_to_region(region, **auth_kwargs)
return [zone.name for zone in ec2_conn.get_all_zones()]
class ElbCollector(diamond.collector.Collector):
# default_to_zero means if cloudwatch does not return a stat for the
# given metric, then just default it to zero.
MetricInfo = namedtuple(
'MetricInfo',
'name aws_type diamond_type precision default_to_zero')
# AWS metrics for ELBs
metrics = [
MetricInfo('HealthyHostCount', 'Average', 'GAUGE', 0, False),
MetricInfo('UnHealthyHostCount', 'Average', 'GAUGE', 0, False),
MetricInfo('RequestCount', 'Sum', 'COUNTER', 0, True),
MetricInfo('Latency', 'Average', 'GAUGE', 4, False),
MetricInfo('HTTPCode_ELB_4XX', 'Sum', 'COUNTER', 0, True),
MetricInfo('HTTPCode_ELB_5XX', 'Sum', 'COUNTER', 0, True),
MetricInfo('HTTPCode_Backend_2XX', 'Sum', 'COUNTER', 0, True),
MetricInfo('HTTPCode_Backend_3XX', 'Sum', 'COUNTER', 0, True),
MetricInfo('HTTPCode_Backend_4XX', 'Sum', 'COUNTER', 0, True),
MetricInfo('HTTPCode_Backend_5XX', 'Sum', 'COUNTER', 0, True),
MetricInfo('BackendConnectionErrors', 'Sum', 'COUNTER', 0, True),
MetricInfo('SurgeQueueLength', 'Maximum', 'GAUGE', 0, True),
MetricInfo('SpilloverCount', 'Sum', 'COUNTER', 0, True)
]
def process_config(self):
super(ElbCollector, self).process_config()
if self.config['enabled']:
self.interval = self.config.as_int('interval')
            # CloudWatch aggregates statistics over periods that are whole
            # multiples of 60 seconds, so the interval must line up with them.
if self.interval % 60 != 0:
raise Exception('Interval must be a multiple of 60 seconds: %s'
% self.interval)
if ('access_key_id' in self.config
and 'secret_access_key' in self.config):
self.auth_kwargs = {
'aws_access_key_id': self.config['access_key_id'],
'aws_secret_access_key': self.config['secret_access_key']
}
else:
# If creds not present, assume we're using IAM roles with
# instance profiles. Boto will automatically take care of using
                # the creds from the instance metadata.
self.auth_kwargs = {}
def check_boto(self):
if not cloudwatch:
self.log.error("boto module not found!")
return False
return True
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ElbCollector, self).get_default_config()
config.update({
'path': 'elb',
'regions': ['us-west-1'],
'interval': 60,
'format': '$zone.$elb_name.$metric_name',
})
return config
def publish_delayed_metric(self, name, value, timestamp, raw_value=None,
precision=0, metric_type='GAUGE', instance=None):
"""
Metrics may not be immediately available when querying cloudwatch.
Hence, allow the ability to publish a metric from some the past given
its timestamp.
"""
# Get metric Path
path = self.get_metric_path(name, instance)
# Get metric TTL
ttl = float(self.config['interval']) * float(
self.config['ttl_multiplier'])
# Create Metric
metric = Metric(path, value, raw_value=raw_value, timestamp=timestamp,
precision=precision, host=self.get_hostname(),
metric_type=metric_type, ttl=ttl)
# Publish Metric
self.publish_metric(metric)
def get_elb_names(self, region, config):
"""
:param region: name of a region
:param config: Collector config dict
:return: list of elb names to query in the given region
"""
        # This function is ripe for memoization, but if ELBs are added/removed
        # dynamically over time, diamond would then have to be restarted to
        # pick up the changes.
region_dict = config.get('regions', {}).get(region, {})
if 'elb_names' not in region_dict:
elb_conn = boto.ec2.elb.connect_to_region(region,
**self.auth_kwargs)
full_elb_names = \
[elb.name for elb in elb_conn.get_all_load_balancers()]
# Regular expressions for ELBs we DO NOT want to get metrics on.
matchers = \
[re.compile(regex) for regex in config.get('elbs_ignored', [])]
            # cycle through elbs to get the list of elbs that don't match
elb_names = []
for elb_name in full_elb_names:
if matchers and any([m.match(elb_name) for m in matchers]):
continue
elb_names.append(elb_name)
else:
elb_names = region_dict['elb_names']
return elb_names
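    # Illustrative note (an assumption added for clarity, not part of the
    # original collector): with the config option
    #   elbs_ignored = ^elb-a$, .*-test$
    # an account containing ['elb-a', 'web-prod', 'web-test'] is reduced to
    # ['web-prod'], since each pattern is applied with re.match().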
def process_stat(self, region, zone, elb_name, metric, stat, end_time):
template_tokens = {
'region': region,
'zone': zone,
'elb_name': elb_name,
'metric_name': metric.name,
}
name_template = Template(self.config['format'])
formatted_name = name_template.substitute(template_tokens)
self.publish_delayed_metric(
formatted_name,
stat[metric.aws_type],
metric_type=metric.diamond_type,
precision=metric.precision,
timestamp=time.mktime(utc_to_local(end_time).timetuple()))
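    # Illustrative note (an assumption added for clarity, not part of the
    # original collector): with the default format
    # '$zone.$elb_name.$metric_name', a Latency datapoint for ELB 'my-elb'
    # in zone 'us-west-1a' is published under the name
    # 'us-west-1a.my-elb.Latency' (below the collector's configured path,
    # 'elb').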
def process_metric(self, region_cw_conn, zone, start_time, end_time,
elb_name, metric):
stats = region_cw_conn.get_metric_statistics(
self.config['interval'],
start_time,
end_time,
metric.name,
namespace='AWS/ELB',
statistics=[metric.aws_type],
dimensions={
'LoadBalancerName': elb_name,
'AvailabilityZone': zone
})
# create a fake stat if the current metric should default to zero when
# a stat is not returned. Cloudwatch just skips the metric entirely
# instead of wasting space to store/emit a zero.
if len(stats) == 0 and metric.default_to_zero:
stats.append({
u'Timestamp': start_time,
metric.aws_type: 0.0,
u'Unit': u'Count'
})
for stat in stats:
self.process_stat(region_cw_conn.region.name, zone, elb_name,
metric, stat, end_time)
def process_elb(self, region_cw_conn, zone, start_time, end_time, elb_name):
for metric in self.metrics:
self.process_metric(region_cw_conn, zone, start_time, end_time,
elb_name, metric)
def process_zone(self, region_cw_conn, zone, start_time, end_time):
for elb_name in self.get_elb_names(region_cw_conn.region.name,
self.config):
self.process_elb(region_cw_conn, zone, start_time, end_time,
elb_name)
def process_region(self, region_cw_conn, start_time, end_time):
for zone in get_zones(region_cw_conn.region.name, self.auth_kwargs):
self.process_zone(region_cw_conn, zone, start_time, end_time)
def collect(self):
if not self.check_boto():
return
now = datetime.datetime.utcnow()
end_time = now.replace(second=0, microsecond=0)
start_time = end_time - datetime.timedelta(seconds=self.interval)
for region in self.config['regions'].keys():
region_cw_conn = cloudwatch.connect_to_region(region,
**self.auth_kwargs)
self.process_region(region_cw_conn, start_time, end_time)
| mit | -7,865,292,978,284,028,000 | 34.331126 | 80 | 0.586036 | false |
jeremiahyan/odoo | addons/website_sale_coupon/models/sale_order.py | 5 | 4820 | # -*- coding: utf-8 -*-
from datetime import timedelta
from odoo import api, fields, models
from odoo.http import request
class SaleOrder(models.Model):
_inherit = "sale.order"
def _compute_website_order_line(self):
""" This method will merge multiple discount lines generated by a same program
into a single one (temporary line with `new()`).
This case will only occur when the program is a discount applied on multiple
products with different taxes.
In this case, each taxes will have their own discount line. This is required
to have correct amount of taxes according to the discount.
But we wan't these lines to be `visually` merged into a single one in the
e-commerce since the end user should only see one discount line.
This is only possible since we don't show taxes in cart.
eg:
line 1: 10% discount on product with tax `A` - $15
line 2: 10% discount on product with tax `B` - $11.5
line 3: 10% discount on product with tax `C` - $10
would be `hidden` and `replaced` by
line 1: 10% discount - $36.5
Note: The line will be created without tax(es) and the amount will be computed
depending if B2B or B2C is enabled.
"""
super()._compute_website_order_line()
for order in self:
# TODO: potential performance bottleneck downstream
programs = order._get_applied_programs_with_rewards_on_current_order()
for program in programs:
program_lines = order.order_line.filtered(lambda line:
line.product_id == program.discount_line_product_id)
if len(program_lines) > 1:
if self.env.user.has_group('sale.group_show_price_subtotal'):
price_unit = sum(program_lines.mapped('price_subtotal'))
else:
price_unit = sum(program_lines.mapped('price_total'))
# TODO: batch then flush
order.website_order_line += self.env['sale.order.line'].new({
'product_id': program_lines[0].product_id.id,
'price_unit': price_unit,
'name': program_lines[0].name,
'product_uom_qty': 1,
'product_uom': program_lines[0].product_uom.id,
'order_id': order.id,
'is_reward_line': True,
})
order.website_order_line -= program_lines
def _compute_cart_info(self):
super(SaleOrder, self)._compute_cart_info()
for order in self:
reward_lines = order.website_order_line.filtered(lambda line: line.is_reward_line)
order.cart_quantity -= int(sum(reward_lines.mapped('product_uom_qty')))
def get_promo_code_error(self, delete=True):
error = request.session.get('error_promo_code')
if error and delete:
request.session.pop('error_promo_code')
return error
def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
res = super(SaleOrder, self)._cart_update(product_id=product_id, line_id=line_id, add_qty=add_qty, set_qty=set_qty, **kwargs)
self.recompute_coupon_lines()
return res
def _get_free_shipping_lines(self):
self.ensure_one()
free_shipping_prgs_ids = self._get_applied_programs_with_rewards_on_current_order().filtered(lambda p: p.reward_type == 'free_shipping')
if not free_shipping_prgs_ids:
return self.env['sale.order.line']
free_shipping_product_ids = free_shipping_prgs_ids.mapped('discount_line_product_id')
return self.order_line.filtered(lambda l: l.product_id in free_shipping_product_ids)
@api.autovacuum
def _gc_abandoned_coupons(self, *args, **kwargs):
"""Remove/free coupon from abandonned ecommerce order."""
ICP = self.env['ir.config_parameter']
validity = ICP.get_param('website_sale_coupon.abandonned_coupon_validity', 4)
validity = fields.Datetime.to_string(fields.datetime.now() - timedelta(days=int(validity)))
coupon_to_reset = self.env['coupon.coupon'].search([
('state', '=', 'used'),
('sales_order_id.state', '=', 'draft'),
('sales_order_id.write_date', '<', validity),
('sales_order_id.website_id', '!=', False),
])
for coupon in coupon_to_reset:
coupon.sales_order_id.applied_coupon_ids -= coupon
coupon_to_reset.write({'state': 'new'})
coupon_to_reset.mapped('sales_order_id').recompute_coupon_lines()
| gpl-3.0 | 8,057,577,966,444,878,000 | 49.736842 | 144 | 0.589004 | false |
Storj/RandomIO | tests/test_unit.py | 3 | 6329 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014 William T. James for Storj Labs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import io
import os
import unittest
import RandomIO
class TestRandomIO(unittest.TestCase):
def test_gen(self):
s = RandomIO.RandomIO()
b = s.read(100)
self.assertEqual(len(b), 100)
self.assertEqual(
RandomIO.RandomIO(123456).read(100),
RandomIO.RandomIO(123456).read(100))
self.assertEqual(RandomIO.RandomIO(b'byte string seed').read(
100), RandomIO.RandomIO(b'byte string seed').read(100))
self.assertEqual(RandomIO.RandomIO(1.23456).read(
100), RandomIO.RandomIO(1.23456).read(100))
def test_consistent(self):
s1 = RandomIO.RandomIO('seed string')
s2 = RandomIO.RandomIO('seed string')
s3 = RandomIO.RandomIO('seed string')
s4 = RandomIO.RandomIO('another seed')
self.assertEqual(s1.read(100), s2.read(100))
self.assertNotEqual(s3.read(100), s4.read(100))
def test_crossplatform(self):
string_seed1 = b'\t\xb0\xef\xd9\x05p\xe1W\x17\x8a9\xc6!;^6\x1d\xadj\
\xb4#n\x1d/\x12+\xe6\xb1\x80\xc86\x06I\xc4!\x8b39\x84E\x1d\x14\xdf\x14e\x12\
\xfa\xf0\r\x1b'
s = RandomIO.RandomIO('seed1').read(50)
self.assertEqual(s, string_seed1)
string_123456 = b'\x18\xb2\xce\x8a \xc9\xe2n\xd9\xf6\x06\x0b8\xf9\xb9\
\xf8\x9b#81z\xf8\x02\x83\x1e\xa2\xf02\x7f\xad\xd7*h\xad9\xf6\x14U\xca\x90\\i\
\xcc~#h\xaa\xb4\x1b['
s = RandomIO.RandomIO(123456).read(50)
self.assertEqual(s, string_123456)
def test_read(self):
s1 = RandomIO.RandomIO('seed string')
with self.assertRaises(RuntimeError) as ex:
s1.read()
self.assertEqual(
str(ex.exception),
'Stream size must be specified if bytes to read is not.')
def test_dump(self):
s1 = RandomIO.RandomIO('seed string')
s2 = RandomIO.RandomIO('seed string')
DUMP_LENGTH = 100
file1 = io.BytesIO()
file2 = io.BytesIO()
s1.dump(file1, DUMP_LENGTH)
s2.dump(file2, DUMP_LENGTH)
self.assertEqual(file1.tell(), DUMP_LENGTH)
self.assertEqual(file2.tell(), DUMP_LENGTH)
self.assertEqual(file1.getvalue(), file2.getvalue())
def test_genfile(self):
path = RandomIO.RandomIO('seed string').genfile(100)
with open(path, 'rb') as f:
contents = f.read()
self.assertEqual(len(contents), 100)
os.remove(path)
dir = 'test_directory/'
os.mkdir(dir)
path = RandomIO.RandomIO('seed string').genfile(100, dir)
(h1, t1) = os.path.split(dir)
(h2, t2) = os.path.split(path)
self.assertEqual(h1, h2)
with open(path, 'rb') as f:
contents = f.read()
self.assertEqual(len(contents), 100)
os.remove(path)
os.rmdir(dir)
def test_large(self):
length = 100000000
file1 = RandomIO.RandomIO('seed string').genfile(length)
file2 = RandomIO.RandomIO('seed string').genfile(length)
with open(file1, 'rb') as f1:
            with open(file2, 'rb') as f2:
for c in iter(lambda: f1.read(1000), b''):
self.assertEqual(c, f2.read(1000))
os.remove(file1)
os.remove(file2)
def test_read_limit(self):
s1 = RandomIO.RandomIO('seed string', 100)
s1.seek(90)
buf1 = s1.read(100)
self.assertEqual(len(buf1), 10)
def test_read_zero(self):
s1 = RandomIO.RandomIO('seed string')
b = s1.read(0)
self.assertEqual(len(b), 0)
def test_seek_beginning(self):
s1 = RandomIO.RandomIO('seed string')
buf1 = s1.read(10)
s1.seek(0)
buf2 = s1.read(10)
self.assertEqual(buf1, buf2)
def test_seek_middle(self):
s1 = RandomIO.RandomIO('seed string')
s1.seek(10000)
buf1 = s1.read(10)
s1.seek(-10, os.SEEK_CUR)
buf2 = s1.read(10)
self.assertEqual(buf1, buf2)
def test_seek_end_consistency(self):
s1 = RandomIO.RandomIO('seed string', 100)
s1.seek(98)
buf1 = s1.read(10)
s1.seek(90)
buf2 = s1.read(10)
self.assertEqual(buf1, buf2[-2:])
def test_seek_end(self):
s1 = RandomIO.RandomIO('seed string', 1000)
s1.seek(900)
buf1 = s1.read(10)
s1.seek(100, os.SEEK_END)
buf2 = s1.read(10)
self.assertEqual(buf1, buf2)
def test_tell_beginning(self):
s1 = RandomIO.RandomIO('seed string')
s1.read(100)
p = s1.tell()
self.assertEqual(p, 100)
def test_tell_seek_parity(self):
s1 = RandomIO.RandomIO('seed string')
s1.seek(100)
p = s1.tell()
self.assertEqual(p, 100)
def test_seek_end_not_possible(self):
s1 = RandomIO.RandomIO('seed string')
with self.assertRaises(RuntimeError) as ex:
s1.seek(100, os.SEEK_END)
self.assertEqual(
str(ex.exception),
'Cannot seek from end of stream if size is unknown.')
if __name__ == '__main__':
unittest.main()
| mit | -1,215,813,959,735,038,200 | 25.704641 | 79 | 0.611629 | false |
radlws/AWS-ElasticBeanstalk-CLI | eb/macosx/python3/lib/aws/exception.py | 4 | 3800 | #!/usr/bin/env python
# ==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
class AwsErrorCode(object):
'''AWS common error code'''
AccessDenied = 'AccessDenied'
InsufficientPrivileges = 'InsufficientPrivileges'
InvalidClientTokenId = 'InvalidClientTokenId'
InvalidParameterCombination = 'InvalidParameterCombination'
InvalidParameterValue = 'InvalidParameterValue'
InvalidQueryParameter = 'InvalidQueryParameter'
MalformedQueryString = 'MalformedQueryString'
MissingParameter = 'MissingParameter'
OptInRequired = 'OptInRequired'
RequestExpired = 'RequestExpired'
Throttling = 'Throttling'
class AwsServiceException(Exception):
def __init__(self, msg, code, http_code):
self._msg = msg
self._code = code
self._http_code = http_code
@property
def message(self):
return self._msg
@property
def code(self):
return self._code
@property
def http_code(self):
return self._http_code
def __str__(self):
return '{0}. {1}'.format(self._code, self._msg)
def __repr__(self):
return 'HTTP {0}:{1}. {2}'.format(self._http_code, self._code, self._msg)
class UnknownHttpCodeException(AwsServiceException):
    ''' Exception raised when an HTTP code other than 200 is received.'''
def __init__(self, message, code, http_code):
super(UnknownHttpCodeException, self).__init__(message, code, http_code)
class MissingParameterException(AwsServiceException):
def __init__(self, ex):
if not issubclass(ex.__class__, AwsServiceException):
raise AttributeError('Must initialize from instance of AwsServiceException subclass.')
super(MissingParameterException, self).__init__(ex.message, ex.code, ex.http_code)
class InsufficientPrivilegesException(AwsServiceException):
def __init__(self, ex):
if not issubclass(ex.__class__, AwsServiceException):
raise AttributeError('Must initialize from instance of AwsServiceException subclass.')
super(InsufficientPrivilegesException, self).__init__(ex.message, ex.code, ex.http_code)
class InvalidParameterValueException(AwsServiceException):
def __init__(self, ex):
if not issubclass(ex.__class__, AwsServiceException):
raise AttributeError('Must initialize from instance of AwsServiceException subclass.')
super(InvalidParameterValueException, self).__init__(ex.message, ex.code, ex.http_code)
class OptInRequiredException(AwsServiceException):
def __init__(self, ex):
if not issubclass(ex.__class__, AwsServiceException):
raise AttributeError('Must initialize from instance of AwsServiceException subclass.')
super(OptInRequiredException, self).__init__(ex.message, ex.code, ex.http_code)
class AccessDeniedException(AwsServiceException):
def __init__(self, ex):
if not issubclass(ex.__class__, AwsServiceException):
raise AttributeError('Must initialize from instance of AwsServiceException subclass.')
super(AccessDeniedException, self).__init__(ex.message, ex.code, ex.http_code)
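# Illustrative usage (an assumption added for clarity, not part of the
# original module): callers typically build the base exception from a parsed
# error response and re-wrap it in a more specific subclass:
#
#   >>> ex = AwsServiceException('Missing Operation parameter.',
#   ...                          AwsErrorCode.MissingParameter, 400)
#   >>> str(ex)
#   'MissingParameter. Missing Operation parameter.'
#   >>> raise MissingParameterException(ex)   # doctest: +SKIP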
| apache-2.0 | 8,385,534,607,352,322,000 | 38.185567 | 98 | 0.670789 | false |
tellapart/aurproxy | tellapart/aurproxy/source/sources/static.py | 5 | 1839 | # Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tellapart.aurproxy.config import SourceEndpoint
from tellapart.aurproxy.exception import AurProxyConfigException
from tellapart.aurproxy.source import ProxySource
class StaticProxySource(ProxySource):
def __init__(self,
signal_update_fn=None,
share_updater_factories=None,
**kwargs):
super(StaticProxySource, self).__init__(signal_update_fn,
share_updater_factories)
self._name = kwargs.get('name')
self._host = kwargs.get('host')
self._port = kwargs.get('port')
self._endpoint = SourceEndpoint(self._host, self._port)
err_fmt = '"{0}" required on StaticProxySource'
if not self._name:
raise AurProxyConfigException(err_fmt.format('name'))
if not self._host:
raise AurProxyConfigException(err_fmt.format('host'))
if not self._port:
raise AurProxyConfigException(err_fmt.format('port'))
@property
def blueprint(self):
return None
@property
def slug(self):
return '{0}__{1}__{2}'.format(self._name,
self._host,
self._port)
def start(self):
self.add(self._endpoint)
def stop(self):
self.remove(self._endpoint)
| apache-2.0 | 48,258,656,994,390,136 | 33.698113 | 74 | 0.660685 | false |
cjcjameson/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/package/system_catalog/guc_settings/TEST.py | 1 | 2643 | #!/usr/bin/env python
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Test GPDB gucs
"""
############################################################################
# Set up some globals, and import gptest
# [YOU DO NOT NEED TO CHANGE THESE]
#
import os
import sys
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
UPD = os.path.abspath(mkpath('../..'))
if UPD not in sys.path:
sys.path.append(UPD)
import gptest
from lib.Shell import shell
from lib.PSQL import psql
import gpConfig
USER = os.environ.get('LOGNAME')
HOST = "localhost"
###########################################################################
# A Test class must inherit from gptest.GPTestCase
# [CREATE A CLASS FOR YOUR TESTS]
#
class guc_settings(gptest.GPTestCase):
gpdbConfig = gpConfig.GpConfig(HOST, USER)
def setUp(self):
pass
def tearDown(self):
pass
def test_01_guc_minimumvalue(self):
"""GUCS: MPP-8307: minimum max_connections is 10"""
self.gpdbConfig.setParameterMasterOnly("max_connections","10")
shell.run("gpstop -ar") # Restarts, TODO: need a utilty to restart GPDB
max_connections = self.gpdbConfig.getParameterMasterOnly("max_connections")
self.failUnless(max_connections=="10")
def test_02_guc_minimumvalue(self):
"""GUCS: MPP-8307: invalid max_connections if set less than 10, default to 200"""
# cur_maxconnections = self.gpdbConfig.getParameterMasterOnly("max_connections")
self.gpdbConfig.setParameterMasterOnly("max_connections","4")
shell.run("gpstop -ar") # Restarts, TODO: need a utilty to restart GPDB
max_connections = self.gpdbConfig.getParameterMasterOnly("max_connections")
self.failUnless(max_connections=="200")
###########################################################################
# Try to run if user launched this script directly
# [YOU SHOULD NOT CHANGE THIS]
if __name__ == '__main__':
gptest.main()
| apache-2.0 | 7,694,779,754,469,070,000 | 32.455696 | 89 | 0.643965 | false |
uvemas/ViTables | vitables/docbrowser/browsergui.py | 1 | 14862 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2007 Carabos Coop. V. All rights reserved
# Copyright (C) 2008-2019 Vicent Mas. All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vicent Mas - [email protected]
"""
Here is defined the HelpBrowserGUI class.
This module creates the main window for the documentation browser.
"""
__docformat__ = 'restructuredtext'
import re
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
import vitables.utils
from vitables.vtsite import DOCDIR
translate = QtWidgets.QApplication.translate
class HelpBrowserGUI(QtWidgets.QMainWindow) :
"""
The main window of the documentation browser.
:Parameters:
- `browser`: an instance of the docs browser controller
:meth:`vitables.docbrowser.helpbrowser.HelpBrowser`
- `parent`: the parent widget.
"""
def __init__(self, browser, parent=None) :
"""
Initializes the browser.
"""
super(HelpBrowserGUI, self).__init__(parent)
self.setIconSize(QtCore.QSize(22, 22))
self.setWindowTitle(translate('HelpBrowserGUI',
'Documentation browser', 'The window title'))
self.icons = vitables.utils.getHBIcons()
self.setWindowIcon(self.icons['vitables_wm'])
self.browser = browser
# The browser widget
self.text_browser = QtWidgets.QTextBrowser()
self.text_browser.setSearchPaths([DOCDIR])
self.setCentralWidget(self.text_browser)
self.text_browser.setAcceptRichText(True)
self.text_browser.setReadOnly(1)
# The popup menus
self.actions = self.setupActions()
self.initPopups()
self.connectSignals()
self.setupHistoryCombo()
self.statusBar().showMessage(translate('HelpBrowserGUI', 'Ready...',
'Status bar startup message'))
def setupActions(self):
"""Provide actions to the menubar and the toolbars.
"""
actions = {}
actions['exitBrowser'] = QtWidgets.QAction(
translate('HelpBrowserGUI', 'E&xit', 'File --> Exit'), self,
shortcut=QtGui.QKeySequence.Quit,
triggered=self.browser.exitBrowser,
icon=self.icons['application-exit'],
statusTip=translate('HelpBrowserGUI', 'Close Help Browser',
'Status bar text for the File --> Exit action'))
actions['zoomIn'] = QtWidgets.QAction(
translate('HelpBrowserGUI', 'Zoom &in', 'View --> Zoom in'), self,
shortcut=QtGui.QKeySequence.ZoomIn,
triggered=self.browser.zoomIn,
icon=self.icons['zoom-in'],
statusTip=translate('HelpBrowserGUI', 'Increases the font size',
'Status bar text for the View --> Zoom in action'))
actions['zoomOut'] = QtWidgets.QAction(
translate('HelpBrowserGUI', 'Zoom &out', 'View --> Zoom out'),
self,
shortcut=QtGui.QKeySequence.ZoomOut,
triggered=self.browser.zoomOut,
icon=self.icons['zoom-out'],
statusTip=translate('HelpBrowserGUI', 'Decreases the font size',
'Status bar text for the View --> Zoom out action'))
actions['goHome'] = QtWidgets.QAction(
translate('HelpBrowserGUI', '&Home', 'Go --> Home'), self,
shortcut=QtGui.QKeySequence.UnknownKey,
triggered=self.text_browser.home,
icon=self.icons['go-first-view'],
statusTip=translate('HelpBrowserGUI',
'Go to the first visited page',
'Status bar text for the Go --> Home action'))
actions['goBackward'] = QtWidgets.QAction(
translate('HelpBrowserGUI', '&Backward', ' Go --> Backward'),
self,
shortcut=QtGui.QKeySequence.Back,
triggered=self.text_browser.backward,
icon=self.icons['go-previous-view'],
statusTip=translate('HelpBrowserGUI', 'Go to previous page',
'Status bar text for the Go --> Backward action'))
actions['goForward'] = QtWidgets.QAction(
translate('HelpBrowserGUI', '&Forward', ' Go --> Forward'), self,
shortcut=QtGui.QKeySequence.Forward,
triggered=self.text_browser.forward,
icon=self.icons['go-next-view'],
statusTip=translate('HelpBrowserGUI', 'Go to next page',
'Status bar text for the Go --> Forward action'))
actions['goReload'] = QtWidgets.QAction(
translate('HelpBrowserGUI', '&Reload', 'Go --> Reload'), self,
shortcut=QtGui.QKeySequence.Refresh,
triggered=self.text_browser.reload,
icon=self.icons['view-refresh'],
statusTip=translate('HelpBrowserGUI', 'Reload the current page',
'Status bar text for the Go --> Reload action'))
actions['bookmarksAdd'] = QtWidgets.QAction(
translate('HelpBrowserGUI', '&Add bookmark',
'Bookmarks --> Add bookmark'),
self,
shortcut=QtGui.QKeySequence('Ctrl+Alt+N'),
triggered=self.browser.addBookmark,
icon=self.icons['bookmark_add'],
statusTip=translate('HelpBrowserGUI', 'Bookmark the current page',
'Status bar text for Bookmarks --> Add bookmark action'))
actions['bookmarksEdit'] = QtWidgets.QAction(
translate('HelpBrowserGUI', '&Edit bookmarks...',
'Bookmarks --> Edit bookmarks'),
self,
shortcut=QtGui.QKeySequence('Ctrl+Alt+E'),
triggered=self.browser.editBookmarks,
icon=self.icons['bookmarks'],
statusTip=translate('HelpBrowserGUI', 'Edit bookmarks',
'Status bar text for Bookmarks --> Edit bookmarks action'))
actions['bookmarksClear'] = QtWidgets.QAction(
translate('HelpBrowserGUI', '&Clear All',
'Bookmarks --> Clear bookmark'),
self,
shortcut=QtGui.QKeySequence('Ctrl+Alt+C'),
triggered=self.browser.clearBookmarks,
statusTip=translate('HelpBrowserGUI',
'Clear all existing bookmarks',
'Status bar text for Bookmarks --> Add bookmark action'))
actions['about'] = QtWidgets.QAction(
translate('HelpBrowserGUI', '&About HelpBrowser',
'Help --> About HelpBrowser'),
self,
shortcut=QtGui.QKeySequence.UnknownKey,
triggered=self.browser.aboutBrowser,
statusTip=translate('HelpBrowserGUI', 'About HelpBrowser',
'Status bar text for Help --> About HelpBrowser action'))
actions['aboutQt'] = QtWidgets.QAction(
translate('HelpBrowserGUI', 'About &Qt', 'Help --> About Qt'),
self,
shortcut=QtGui.QKeySequence.UnknownKey,
triggered=self.browser.aboutQt,
statusTip=translate('HelpBrowserGUI', 'About Qt',
'Status bar text for the Help --> About Qt action'))
actions['clearSession'] = QtWidgets.QAction(
translate('HelpBrowserGUI', 'Clear history', ''), self,
shortcut=QtGui.QKeySequence.UnknownKey,
triggered=self.browser.clearHistory,
icon=self.icons['edit-clear-history'],
statusTip=translate('HelpBrowserGUI',
'Clear the content of the history combobox', ''))
return actions
def initPopups(self) :
"""
Setup the menubar and the toolbar of the main window.
The menubar contains the menus `File`, `Go`, `Bookmarks` and `Help`.
The toolbar contains the buttons: `home`, `backward`, `forward`,
`combobox` and `clear history`.
"""
# Create the File menu and add actions/submenus/separators to it
file_menu = self.menuBar().addMenu(
translate('HelpBrowserGUI', "&File", 'The File menu entry'))
file_actions = ['exitBrowser']
vitables.utils.addActions(file_menu, file_actions, self.actions)
# Create the View menu and toolbar
view_menu = self.menuBar().addMenu(
translate('HelpBrowserGUI', "&View", 'The View menu entry'))
view_toolbar = QtWidgets.QToolBar(
translate('HelpBrowserGUI', 'View operations', 'Toolbar title'),
self)
self.addToolBar(view_toolbar)
view_actions = ['zoomIn', 'zoomOut']
vitables.utils.addActions(view_menu, view_actions, self.actions)
vitables.utils.addActions(view_toolbar, view_actions, self.actions)
# Create the Go menu and toolbar
go_menu = self.menuBar().addMenu(translate('HelpBrowserGUI', "&Go",
'The Go menu entry'))
go_toolbar = QtWidgets.QToolBar(
translate('HelpBrowserGUI', 'Go operations', 'Toolbar title'),
self)
self.addToolBar(go_toolbar)
go_actions = ['goHome', 'goBackward', 'goForward', 'goReload']
vitables.utils.addActions(go_menu, go_actions, self.actions)
vitables.utils.addActions(go_toolbar, go_actions, self.actions)
# Create the Bookmarks menu and toolbar
self.bookmarks_menu = self.menuBar().addMenu(
translate('HelpBrowserGUI', "&Bookmarks", 'Bookmarks menu entry'))
bookmarks_toolbar = QtWidgets.QToolBar(
translate('HelpBrowserGUI', 'Bookmarks operations',
'Toolbar title'), self)
self.addToolBar(bookmarks_toolbar)
bookmark_actions = ['bookmarksAdd', 'bookmarksEdit', 'bookmarksClear',
None]
vitables.utils.addActions(self.bookmarks_menu, bookmark_actions,
self.actions)
vitables.utils.addActions(bookmarks_toolbar, bookmark_actions[:2],
self.actions)
# Create the Help menu and add actions/submenus/separators to it
help_menu = self.menuBar().addMenu(
translate('HelpBrowserGUI', "&Help", 'The Help menu entry'))
help_actions = ['about', 'aboutQt']
vitables.utils.addActions(help_menu, help_actions, self.actions)
# Create the History toolbar
history_toolbar = QtWidgets.QToolBar(
translate('HelpBrowserGUI', 'History operations', 'Toolbar title'),
self)
self.addToolBar(history_toolbar)
history_actions = ['clearSession']
vitables.utils.addActions(history_toolbar, history_actions,
self.actions)
go_selector = QtWidgets.QLabel(
translate('HelpBrowserGUI', 'Go: ', 'Text of the Go: label'),
history_toolbar)
history_toolbar.addWidget(go_selector)
self.combo_history = QtWidgets.QComboBox(history_toolbar)
self.combo_history.setSizeAdjustPolicy(
QtWidgets.QComboBox.AdjustToContents)
history_toolbar.addWidget(self.combo_history)
def connectSignals(self):
"""
Connect signals to slots.
Signals coming from GUI are connected to slots in the docs browser
controller, :meth:`vitables.docbrowser.helpbrowser.HelpBrowser`.
"""
self.combo_history.activated[str].connect(self.browser.displaySrc)
# This is the most subtle connection. It encompasses source
# changes coming from anywhere, including slots (home, backward
# and forward), menus (Go and Bookmarks), clicked links and
# programatic changes (setSource calls).
self.text_browser.sourceChanged.connect(self.browser.updateHistory)
self.text_browser.backwardAvailable.connect(
self.browser.updateBackward)
self.text_browser.forwardAvailable.connect(
self.browser.updateForward)
# The Bookmarks menu is special case due to its dynamic nature.
# The menu content vary every time a bookmark is added/deleted
# In order to track changes and keep it updated, the menu is reloaded
# every time it is about to be displayed.
self.bookmarks_menu.aboutToShow.connect(self.updateRecentSubmenu)
def updateRecentSubmenu(self):
"""Update the content of the Bookmarks menu."""
# Clear the current bookmarks from the Bookmarks menu
for action in self.bookmarks_menu.actions():
if re.search("^(\s?\d)", action.text()):
self.bookmarks_menu.removeAction(action)
# and refresh it
index = 0
for filepath in self.browser.bookmarks:
index += 1
action = QtWidgets.QAction('{0:>2}. {1}'.format(index, filepath),
self.bookmarks_menu)
action.setData(filepath)
self.bookmarks_menu.addAction(action)
action.triggered.connect(self.browser.displaySrc)
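    # Illustrative note (an assumption added for clarity, not part of the
    # original module): the bookmark entries are titled with
    # '{0:>2}. {1}'.format(index, filepath), e.g. ' 1. /path/to/doc.html',
    # which is exactly what the regular expression "^(\s?\d)" above matches
    # when clearing them.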
def setupHistoryCombo(self):
"""
Initializes history combobox.
"""
# Setup combobox
self.combo_history.setEditable(0)
self.combo_history.setWhatsThis(translate('HelpBrowserGUI',
"""<qt>
<h3>Page selector</h3>Select the page you want to visit.
</qt>""",
'WhatsThis text for the combobox of visited pages')
)
for item in self.browser.history :
self.combo_history.addItem(item)
def closeEvent(self, event) :
"""
Reimplements the event handler for `QCloseEvent` events.
Before the close event is accepted we need to do some stuff. This can
be done in two different ways: via event filters or reimplementing the
event handler. We have chosen the second possibility.
:Parameter event: the event being handled
"""
        # When the help browser window is closed via File --> Exit,
        # exitBrowser() is called and the browsing history is saved.
        # But if we close the window with the close button then the history
        # is not saved at all.
        # We fix this misbehavior by overriding this method.
self.browser.exitBrowser()
QtWidgets.QMainWindow.closeEvent(self, event)
| gpl-3.0 | 5,899,242,716,550,069,000 | 39.606557 | 79 | 0.617885 | false |
bswartz/cinder | cinder/tests/unit/api/test_common.py | 2 | 24438 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import mock
from testtools import matchers
import webob
import webob.exc
from oslo_config import cfg
from cinder.api import common
from cinder import test
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
CONF = cfg.CONF
class LimiterTest(test.TestCase):
"""Unit tests for the `cinder.api.common.limited` method.
This method takes in a list of items and, depending on the 'offset'
and 'limit' GET params, returns a subset or complete set of the given
items.
"""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = list(range(1))
self.small = list(range(10))
self.medium = list(range(1000))
self.large = list(range(10000))
def test_limiter_offset_zero(self):
"""Test offset key works with 0."""
req = webob.Request.blank('/?offset=0')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium, common.limited(self.medium, req))
self.assertEqual(self.large[:1000], common.limited(self.large, req))
def test_limiter_offset_medium(self):
"""Test offset key works with a medium sized number."""
req = webob.Request.blank('/?offset=10')
self.assertEqual([], common.limited(self.tiny, req))
self.assertEqual(self.small[10:], common.limited(self.small, req))
self.assertEqual(self.medium[10:], common.limited(self.medium, req))
self.assertEqual(self.large[10:1010], common.limited(self.large, req))
def test_limiter_offset_over_max(self):
"""Test offset key works with a number over 1000 (max_limit)."""
req = webob.Request.blank('/?offset=1001')
self.assertEqual([], common.limited(self.tiny, req))
self.assertEqual([], common.limited(self.small, req))
self.assertEqual([], common.limited(self.medium, req))
self.assertEqual(
self.large[1001:2001], common.limited(self.large, req))
def test_limiter_offset_blank(self):
"""Test offset key works with a blank offset."""
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_out_of_range(self):
"""Test offset key works with a offset out of range."""
req = webob.Request.blank('/?offset=123456789012346456')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
"""Test offset key works with a BAD offset."""
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
"""Test request with no offset or limit."""
req = webob.Request.blank('/')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium, common.limited(self.medium, req))
self.assertEqual(self.large[:1000], common.limited(self.large, req))
def test_limiter_limit_zero(self):
"""Test limit of zero."""
req = webob.Request.blank('/?limit=0')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium, common.limited(self.medium, req))
self.assertEqual(self.large[:1000], common.limited(self.large, req))
def test_limiter_limit_bad(self):
"""Test with a bad limit."""
req = webob.Request.blank(u'/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_limit_medium(self):
"""Test limit of 10."""
req = webob.Request.blank('/?limit=10')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium[:10], common.limited(self.medium, req))
self.assertEqual(self.large[:10], common.limited(self.large, req))
def test_limiter_limit_over_max(self):
"""Test limit of 3000."""
req = webob.Request.blank('/?limit=3000')
self.assertEqual(self.tiny, common.limited(self.tiny, req))
self.assertEqual(self.small, common.limited(self.small, req))
self.assertEqual(self.medium, common.limited(self.medium, req))
self.assertEqual(self.large[:1000], common.limited(self.large, req))
def test_limiter_limit_and_offset(self):
"""Test request with both limit and offset."""
items = list(range(2000))
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(items[1:4], common.limited(items, req))
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(items[3:1003], common.limited(items, req))
req = webob.Request.blank('/?offset=3&limit=1500')
self.assertEqual(items[3:1003], common.limited(items, req))
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual([], common.limited(items, req))
req = webob.Request.blank('/?offset=30034522235674530&limit=10')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, items, req)
def test_limiter_custom_max_limit(self):
"""Test a max_limit other than 1000."""
items = list(range(2000))
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
items[1:4], common.limited(items, req, max_limit=2000))
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
items[3:], common.limited(items, req, max_limit=2000))
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
items[3:], common.limited(items, req, max_limit=2000))
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual([], common.limited(items, req, max_limit=2000))
def test_limiter_negative_limit(self):
"""Test a negative limit."""
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
"""Test a negative offset."""
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
class PaginationParamsTest(test.TestCase):
"""Unit tests for `cinder.api.common.get_pagination_params` method.
    This method takes the request's GET params and returns the 'marker',
    'limit' and 'offset' pagination values.
"""
def test_nonnumerical_limit(self):
"""Test nonnumerical limit param."""
req = webob.Request.blank('/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params,
req.GET.copy())
@mock.patch.object(common, 'CONF')
def test_no_params(self, mock_cfg):
"""Test no params."""
mock_cfg.osapi_max_limit = 100
req = webob.Request.blank('/')
expected = (None, 100, 0)
self.assertEqual(expected,
common.get_pagination_params(req.GET.copy()))
def test_valid_marker(self):
"""Test valid marker param."""
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?marker=' + marker)
expected = (marker, CONF.osapi_max_limit, 0)
self.assertEqual(expected,
common.get_pagination_params(req.GET.copy()))
def test_valid_limit(self):
"""Test valid limit param."""
req = webob.Request.blank('/?limit=10')
expected = (None, 10, 0)
self.assertEqual(expected,
common.get_pagination_params(req.GET.copy()))
def test_invalid_limit(self):
"""Test invalid limit param."""
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params,
req.GET.copy())
def test_valid_limit_and_marker(self):
"""Test valid limit and marker parameters."""
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
expected = (marker, 20, 0)
self.assertEqual(expected,
common.get_pagination_params(req.GET.copy()))
class SortParamUtilsTest(test.TestCase):
def test_get_sort_params_defaults(self):
"""Verifies the default sort key and direction."""
sort_keys, sort_dirs = common.get_sort_params({})
self.assertEqual(['created_at'], sort_keys)
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_override_defaults(self):
"""Verifies that the defaults can be overriden."""
sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
default_dir='dir1')
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_value_sort_param(self):
"""Verifies a single sort key and direction."""
params = {'sort': 'key1:dir1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_value_old_params(self):
"""Verifies a single sort key and direction."""
params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_with_default_sort_param(self):
"""Verifies a single sort value with a default direction."""
params = {'sort': 'key1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# Direction should be defaulted
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_single_with_default_old_params(self):
"""Verifies a single sort value with a default direction."""
params = {'sort_key': 'key1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# Direction should be defaulted
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_multiple_values(self):
"""Verifies multiple sort parameter values."""
params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
def test_get_sort_params_multiple_not_all_dirs(self):
"""Verifies multiple sort keys without all directions."""
params = {'sort': 'key1:dir1,key2,key3:dir3'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
# Second key is missing the direction, should be defaulted
self.assertEqual(['dir1', 'desc', 'dir3'], sort_dirs)
def test_get_sort_params_multiple_override_default_dir(self):
"""Verifies multiple sort keys and overriding default direction."""
params = {'sort': 'key1:dir1,key2,key3'}
sort_keys, sort_dirs = common.get_sort_params(params,
default_dir='foo')
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'foo', 'foo'], sort_dirs)
def test_get_sort_params_params_modified(self):
"""Verifies that the input sort parameter are modified."""
params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
common.get_sort_params(params)
self.assertEqual({}, params)
params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
common.get_sort_params(params)
self.assertEqual({}, params)
def test_get_sort_params_random_spaces(self):
"""Verifies that leading and trailing spaces are removed."""
params = {'sort': ' key1 : dir1,key2: dir2 , key3 '}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'desc'], sort_dirs)
def test_get_params_mix_sort_and_old_params(self):
"""An exception is raised if both types of sorting params are given."""
for params in ({'sort': 'k1', 'sort_key': 'k1'},
{'sort': 'k1', 'sort_dir': 'd1'},
{'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}):
self.assertRaises(webob.exc.HTTPBadRequest,
common.get_sort_params,
params)
class MiscFunctionsTest(test.TestCase):
def test_remove_major_version_from_href(self):
fixture = 'http://cinder.example.com/v1/images'
expected = 'http://cinder.example.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href(self):
fixture = 'http://cinder.example.com/v1.1/images'
expected = 'http://cinder.example.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href_2(self):
fixture = 'http://cinder.example.com/v1.1/'
expected = 'http://cinder.example.com/'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href_3(self):
fixture = 'http://cinder.example.com/v10.10'
expected = 'http://cinder.example.com'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href_4(self):
fixture = 'http://cinder.example.com/v1.1/images/v10.5'
expected = 'http://cinder.example.com/images/v10.5'
actual = common.remove_version_from_href(fixture)
self.assertEqual(expected, actual)
def test_remove_version_from_href_version_not_trailing_domain(self):
fixture = 'http://cinder.example.com/cinder/v2'
expected = 'http://cinder.example.com/cinder'
self.assertEqual(expected, common.remove_version_from_href(fixture))
def test_remove_version_from_href_bad_request(self):
fixture = 'http://cinder.example.com/1.1/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_2(self):
fixture = 'http://cinder.example.com/v/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_3(self):
fixture = 'http://cinder.example.com/v1.1images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
class TestCollectionLinks(test.TestCase):
"""Tests the _get_collection_links method."""
def _validate_next_link(self, item_count, osapi_max_limit, limit,
should_link_exist):
req = webob.Request.blank('/?limit=%s' % limit if limit else '/')
link_return = [{"rel": "next", "href": "fake_link"}]
self.flags(osapi_max_limit=osapi_max_limit)
if limit is None:
limited_list_size = min(item_count, osapi_max_limit)
else:
limited_list_size = min(item_count, osapi_max_limit, limit)
limited_list = [{"uuid": str(i)} for i in range(limited_list_size)]
builder = common.ViewBuilder()
def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
original_call=common.get_pagination_params):
return original_call(params, max_limit)
def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
original_call=common._get_limit_param):
return original_call(params, max_limit)
with mock.patch.object(common, 'get_pagination_params',
get_pagination_params), \
mock.patch.object(common, '_get_limit_param',
_get_limit_param), \
mock.patch.object(common.ViewBuilder, '_generate_next_link',
return_value=link_return) as href_link_mock:
results = builder._get_collection_links(req, limited_list,
mock.sentinel.coll_key,
item_count, "uuid")
if should_link_exist:
href_link_mock.assert_called_once_with(limited_list, "uuid",
req,
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
else:
self.assertFalse(href_link_mock.called)
self.assertThat(results, matchers.HasLength(0))
def test_items_equals_osapi_max_no_limit(self):
item_count = 5
osapi_max_limit = 5
limit = None
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_equals_osapi_max_greater_than_limit(self):
item_count = 5
osapi_max_limit = 5
limit = 4
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_equals_osapi_max_equals_limit(self):
item_count = 5
osapi_max_limit = 5
limit = 5
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_equals_osapi_max_less_than_limit(self):
item_count = 5
osapi_max_limit = 5
limit = 6
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_less_than_osapi_max_no_limit(self):
item_count = 5
osapi_max_limit = 7
limit = None
should_link_exist = False
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_limit_less_than_items_less_than_osapi_max(self):
item_count = 5
osapi_max_limit = 7
limit = 4
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_limit_equals_items_less_than_osapi_max(self):
item_count = 5
osapi_max_limit = 7
limit = 5
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_less_than_limit_less_than_osapi_max(self):
item_count = 5
osapi_max_limit = 7
limit = 6
should_link_exist = False
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_less_than_osapi_max_equals_limit(self):
item_count = 5
osapi_max_limit = 7
limit = 7
should_link_exist = False
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_less_than_osapi_max_less_than_limit(self):
item_count = 5
osapi_max_limit = 7
limit = 8
should_link_exist = False
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_greater_than_osapi_max_no_limit(self):
item_count = 5
osapi_max_limit = 3
limit = None
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_limit_less_than_items_greater_than_osapi_max(self):
item_count = 5
osapi_max_limit = 3
limit = 2
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_greater_than_osapi_max_equals_limit(self):
item_count = 5
osapi_max_limit = 3
limit = 3
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_greater_than_limit_greater_than_osapi_max(self):
item_count = 5
osapi_max_limit = 3
limit = 4
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_items_equals_limit_greater_than_osapi_max(self):
item_count = 5
osapi_max_limit = 3
limit = 5
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
def test_limit_greater_than_items_greater_than_osapi_max(self):
item_count = 5
osapi_max_limit = 3
limit = 6
should_link_exist = True
self._validate_next_link(item_count, osapi_max_limit, limit,
should_link_exist)
class LinkPrefixTest(test.TestCase):
def test_update_link_prefix(self):
vb = common.ViewBuilder()
result = vb._update_link_prefix("http://192.168.0.243:24/",
"http://127.0.0.1/volume")
self.assertEqual("http://127.0.0.1/volume", result)
result = vb._update_link_prefix("http://foo.x.com/v1",
"http://new.prefix.com")
self.assertEqual("http://new.prefix.com/v1", result)
result = vb._update_link_prefix(
"http://foo.x.com/v1",
"http://new.prefix.com:20455/new_extra_prefix")
self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1",
result)
class RequestUrlTest(test.TestCase):
def test_get_request_url_no_forward(self):
app_url = 'http://127.0.0.1/v2;param?key=value#frag'
request = type('', (), {
'application_url': app_url,
'headers': {}
})
result = common.get_request_url(request)
self.assertEqual(app_url, result)
def test_get_request_url_forward(self):
request = type('', (), {
'application_url': 'http://127.0.0.1/v2;param?key=value#frag',
'headers': {'X-Forwarded-Host': '192.168.0.243:24'}
})
result = common.get_request_url(request)
self.assertEqual('http://192.168.0.243:24/v2;param?key=value#frag',
result)
| apache-2.0 | -5,789,866,241,644,605,000 | 40.703072 | 79 | 0.595834 | false |
cBioPortal/cbioportal | core/src/main/scripts/importUsers.py | 20 | 25394 | #! /usr/bin/env python
# ------------------------------------------------------------------------------
# Script which adds new users from the google spreadsheet into the cgds
# user table. The following properties must be specified in portal.properties:
#
# db.host
# db.portal_db_name
# db.user
# db.password
# google.id
# google.pw
# users.spreadsheet
# users.worksheet
# importer.spreadsheet
#
# The script considers all users in the google spreadsheet
# that have an "APPROVED" value in the "Status (APPROVED or BLANK)" column. If that
# user does not exist in the user table of the cgds database, the user will be added
# to both the user table and authority table. In addition, a confirmation email will
# be sent to the user notifying them of their acct activation.
#
# ------------------------------------------------------------------------------
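# For reference, a minimal portal.properties could look like the following
# sketch (every value below is an illustrative placeholder, not taken from a
# real deployment):
#
#   db.host=localhost
#   db.portal_db_name=cgds
#   db.user=cgds_user
#   db.password=changeme
#   google.id=importer@example.org
#   google.pw=example-app-password
#   users.spreadsheet=cbioportal user access requests
#   users.worksheet=Users
#   importer.spreadsheet=cbioportal importer configuration
#
# ------------------------------------------------------------------------------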
# imports
import os
import sys
import time
import getopt
import MySQLdb
import smtplib
import gdata.docs.client
import gdata.docs.service
import gdata.spreadsheet.service
import httplib2
from oauth2client import client
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run_flow, argparser
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
# ------------------------------------------------------------------------------
# globals
# some file descriptors
ERROR_FILE = sys.stderr
OUTPUT_FILE = sys.stdout
# fields in portal.properties
CGDS_DATABASE_HOST = 'db.host'
CGDS_DATABASE_NAME = 'db.portal_db_name'
CGDS_DATABASE_USER = 'db.user'
CGDS_DATABASE_PW = 'db.password'
GOOGLE_ID = 'google.id'
GOOGLE_PW = 'google.pw'
CGDS_USERS_SPREADSHEET = 'users.spreadsheet'
CGDS_USERS_WORKSHEET = 'users.worksheet'
IMPORTER_SPREADSHEET = 'importer.spreadsheet'
# Worksheet that contains email contents
IMPORTER_WORKSHEET = 'import_user_email'
# Worksheet that contains portal names
ACCESS_CONTROL_WORKSHEET = 'access_control'
# column constants on google spreadsheet
FULLNAME_KEY = "fullname"
INST_EMAIL_KEY = "institutionalemailaddress"
MSKCC_EMAIL_KEY = "mskccemailaddress"
OPENID_EMAIL_KEY = "googleoropenidaddress"
STATUS_KEY = "statusapprovedorblank"
AUTHORITIES_KEY = "authoritiesalloralltcgaandorsemicolondelimitedcancerstudylist"
LAB_PI_KEY = "labpi"
TIMESTAMP_KEY = "timestamp"
SUBJECT_KEY = "subject"
BODY_KEY = "body"
PORTAL_NAME_KEY = 'portalname'
SPREADSHEET_NAME_KEY = 'spreadsheetname'
# possible values in status column
STATUS_APPROVED = "APPROVED"
DEFAULT_AUTHORITIES = "PUBLIC;EXTENDED;MSKPUB"
# consts used in email
MSKCC_EMAIL_SUFFIX = "@mskcc.org"
SMTP_SERVER = "cbio.mskcc.org"
MESSAGE_FROM = "[email protected]"
MESSAGE_BCC = []
# ------------------------------------------------------------------------------
# class definitions
class PortalProperties(object):
def __init__(self,
cgds_database_host,
cgds_database_name, cgds_database_user, cgds_database_pw,
google_id, google_pw, google_spreadsheet, google_worksheet,google_importer_spreadsheet):
self.cgds_database_host = cgds_database_host
self.cgds_database_name = cgds_database_name
self.cgds_database_user = cgds_database_user
self.cgds_database_pw = cgds_database_pw
self.google_id = google_id
self.google_pw = google_pw
self.google_spreadsheet = google_spreadsheet
self.google_worksheet = google_worksheet
self.google_importer_spreadsheet = google_importer_spreadsheet
class User(object):
def __init__(self, inst_email, google_email, name, enabled, authorities):
self.inst_email = inst_email.lower()
self.google_email = google_email.lower()
self.name = name
self.enabled = enabled
self.authorities = authorities
# ------------------------------------------------------------------------------
# functions
#
# Uses smtplib to send email.
#
def send_mail(to, subject, body, server=SMTP_SERVER):
assert type(to)==list
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = MESSAGE_FROM
msg['To'] = COMMASPACE.join(to)
msg['Date'] = formatdate(localtime=True)
msg.attach(MIMEText(body))
# combine to and bcc lists for sending
combined_to_list = []
for to_name in to:
combined_to_list.append(to_name)
for bcc_name in MESSAGE_BCC:
combined_to_list.append(bcc_name)
smtp = smtplib.SMTP(server)
smtp.sendmail(MESSAGE_FROM, combined_to_list, msg.as_string() )
smtp.close()
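# Illustrative call (addresses and text are placeholders):
#   send_mail(['new.user@example.org'], 'cBioPortal account activated',
#             'Your portal account is now active.')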
# ------------------------------------------------------------------------------
# logs into google spreadsheet client
def get_gdata_credentials(secrets, creds, scope, force=False):
storage = Storage(creds)
credentials = storage.get()
if credentials is None or credentials.invalid or force:
credentials = run_flow(flow_from_clientsecrets(secrets, scope=scope), storage, argparser.parse_args([]))
if credentials.access_token_expired:
credentials.refresh(httplib2.Http())
return credentials
def google_login(secrets, creds, user, pw, app_name):
credentials = get_gdata_credentials(secrets, creds, ["https://spreadsheets.google.com/feeds"], False)
client = gdata.spreadsheet.service.SpreadsheetsService(additional_headers={'Authorization' : 'Bearer %s' % credentials.access_token})
# google spreadsheet
client.email = user
client.password = pw
client.source = app_name
client.ProgrammaticLogin()
return client
# ------------------------------------------------------------------------------
# given a feed & feed name, returns its id
#
def get_feed_id(feed, name):
to_return = ''
for entry in feed.entry:
if entry.title.text.strip() == name:
id_parts = entry.id.text.split('/')
to_return = id_parts[len(id_parts) - 1]
return to_return
# ------------------------------------------------------------------------------
# gets a worksheet feed
def get_worksheet_feed(client, ss, ws):
ss_id = get_feed_id(client.GetSpreadsheetsFeed(), ss)
ws_id = get_feed_id(client.GetWorksheetsFeed(ss_id), ws)
return client.GetListFeed(ss_id, ws_id)
# ------------------------------------------------------------------------------
# insert new users into table - this list does not contain users already in table
def insert_new_users(cursor, new_user_list):
try:
for user in new_user_list:
print >> OUTPUT_FILE, "new user: %s" % user.google_email;
cursor.executemany("insert into users values(%s, %s, %s)",
[(user.google_email.lower(), user.name, user.enabled) for user in new_user_list])
for user in new_user_list:
# authorities is semicolon delimited
authorities = user.authorities
cursor.executemany("insert into authorities values(%s, %s)",
[(user.google_email.lower(), authority) for authority in authorities])
except MySQLdb.Error, msg:
print >> OUTPUT_FILE, msg
print >> ERROR_FILE, msg
return False
return True
# ------------------------------------------------------------------------------
# get current users from database
def get_current_user_map(cursor):
# map that we are returning
# key is the email address of the user (primary key) and value is a User object
to_return = {}
# recall each tuple in user table is ['EMAIL', 'NAME', 'ENABLED'] &
# no tuple can contain nulls
try:
cursor.execute('select * from users')
for row in cursor.fetchall():
to_return[row[0].lower()] = User(row[0].lower(), row[0].lower(), row[1], row[2], 'not_used_here')
except MySQLdb.Error, msg:
print >> ERROR_FILE, msg
return None
return to_return
# ------------------------------------------------------------------------------
# get current user authorities
def get_user_authorities(cursor, google_email):
# list of authorities (cancer studies) we are returning -- as a set
to_return = []
# recall each tuple in authorities table is ['EMAIL', 'AUTHORITY']
# no tuple can contain nulls
try:
cursor.execute('select * from authorities where email = (%s)', [google_email])
for row in cursor.fetchall():
to_return.append(row[1])
except MySQLdb.Error, msg:
print >> ERROR_FILE, msg
return None
return to_return
# ------------------------------------------------------------------------------
# get current users from google spreadsheet
def get_new_user_map(spreadsheet, worksheet_feed, current_user_map, portal_name,mskcc_user_spreadsheet):
# map that we are returning
# key is the institutional email address + google (in case user has multiple google ids)
# of the user and value is a User object
to_return = {}
for entry in worksheet_feed.entry:
# we are only concerned with 'APPROVED' entries
if (entry.custom[STATUS_KEY].text is not None and
entry.custom[STATUS_KEY].text.strip() == STATUS_APPROVED):
if spreadsheet == mskcc_user_spreadsheet:
inst_email = entry.custom[MSKCC_EMAIL_KEY].text.strip().lower()
google_email = entry.custom[MSKCC_EMAIL_KEY].text.strip().lower()
else:
inst_email = entry.custom[INST_EMAIL_KEY].text.strip().lower()
google_email = entry.custom[OPENID_EMAIL_KEY].text.strip().lower()
name = entry.custom[FULLNAME_KEY].text.strip()
authorities = entry.custom[AUTHORITIES_KEY].text.strip()
# do not add entry if this entry is a current user
# we lowercase google account because entries added to mysql are lowercased.
if google_email.lower() not in current_user_map:
if authorities[-1:] == ';':
authorities = authorities[:-1]
if google_email.lower() in to_return:
# there may be multiple entries per email address
# in google spreadsheet, combine entries
user = to_return[google_email.lower()]
user.authorities.extend([portal_name + ':' + au for au in authorities.split(';')])
to_return[google_email.lower()] = user
else:
to_return[google_email] = User(inst_email, google_email, name, 1,
[portal_name + ':' + au for au in authorities.split(';')])
return to_return
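# Example of the authorities handling above, using a hypothetical row: an
# APPROVED entry whose authorities cell reads "ALL;brca_tcga", imported for
# portal name "some-portal", yields ['some-portal:ALL', 'some-portal:brca_tcga'].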
# ------------------------------------------------------------------------------
# get all users from google spreadsheet. note only inst & google email is returned
def get_all_user_map(spreadsheet, worksheet_feed,mskcc_user_spreadsheet):
# map that we are returning
# key is the institutional email address + google (in case user has multiple google ids)
# of the user and value is a User object
to_return = {}
for entry in worksheet_feed.entry:
if spreadsheet == mskcc_user_spreadsheet:
inst_email = entry.custom[MSKCC_EMAIL_KEY].text.strip().lower()
google_email = entry.custom[MSKCC_EMAIL_KEY].text.strip().lower()
else:
inst_email = entry.custom[INST_EMAIL_KEY].text.strip().lower()
google_email = entry.custom[OPENID_EMAIL_KEY].text.strip().lower()
to_return[google_email] = User(inst_email, google_email, "not_used", 1, "not_used")
return to_return
# ------------------------------------------------------------------------------
# get db connection
def get_db_connection(portal_properties):
# try and create a connection to the db
try:
connection = MySQLdb.connect(host=portal_properties.cgds_database_host, port=3306,
user=portal_properties.cgds_database_user,
passwd=portal_properties.cgds_database_pw,
db=portal_properties.cgds_database_name)
except MySQLdb.Error, msg:
print >> ERROR_FILE, msg
return None
return connection
# ------------------------------------------------------------------------------
# parse portal.properties
def get_portal_properties(portal_properties_filename):
properties = {}
portal_properties_file = open(portal_properties_filename, 'r')
for line in portal_properties_file:
line = line.strip()
# skip line if its blank or a comment
if len(line) == 0 or line.startswith('#'):
continue
# store name/value
property = line.split('=')
# spreadsheet url contains an '=' sign
if line.startswith(CGDS_USERS_SPREADSHEET):
property = [property[0], line[line.index('=')+1:len(line)]]
if (len(property) != 2):
print >> ERROR_FILE, 'Skipping invalid entry in property file: ' + line
continue
properties[property[0]] = property[1].strip()
portal_properties_file.close()
# error check
if (CGDS_DATABASE_HOST not in properties or len(properties[CGDS_DATABASE_HOST]) == 0 or
CGDS_DATABASE_NAME not in properties or len(properties[CGDS_DATABASE_NAME]) == 0 or
CGDS_DATABASE_USER not in properties or len(properties[CGDS_DATABASE_USER]) == 0 or
CGDS_DATABASE_PW not in properties or len(properties[CGDS_DATABASE_PW]) == 0 or
GOOGLE_ID not in properties or len(properties[GOOGLE_ID]) == 0 or
GOOGLE_PW not in properties or len(properties[GOOGLE_PW]) == 0 or
CGDS_USERS_SPREADSHEET not in properties or len(properties[CGDS_USERS_SPREADSHEET]) == 0 or
CGDS_USERS_WORKSHEET not in properties or len(properties[CGDS_USERS_WORKSHEET]) == 0 or
IMPORTER_SPREADSHEET not in properties or len(properties[IMPORTER_SPREADSHEET]) == 0):
print >> ERROR_FILE, 'Missing one or more required properties, please check property file'
return None
# return an instance of PortalProperties
return PortalProperties(properties[CGDS_DATABASE_HOST],
properties[CGDS_DATABASE_NAME],
properties[CGDS_DATABASE_USER],
properties[CGDS_DATABASE_PW],
properties[GOOGLE_ID],
properties[GOOGLE_PW],
properties[CGDS_USERS_SPREADSHEET],
properties[CGDS_USERS_WORKSHEET],
properties[IMPORTER_SPREADSHEET])
# ------------------------------------------------------------------------------
# adds new users from the google spreadsheet into the cgds portal database
# returns new user map if users have been inserted, None otherwise
def manage_users(spreadsheet, cursor, worksheet_feed, portal_name, mskcc_user_spreadsheet):
# get map of current portal users
print >> OUTPUT_FILE, 'Getting list of current portal users'
current_user_map = get_current_user_map(cursor)
if current_user_map is not None:
print >> OUTPUT_FILE, 'We have found %s current portal users' % len(current_user_map)
else:
print >> OUTPUT_FILE, 'Error reading user table'
return None
# get list of new users and insert
print >> OUTPUT_FILE, 'Checking for new users'
new_user_map = get_new_user_map(spreadsheet, worksheet_feed, current_user_map, portal_name, mskcc_user_spreadsheet)
if (len(new_user_map) > 0):
print >> OUTPUT_FILE, 'We have %s new user(s) to add' % len(new_user_map)
success = insert_new_users(cursor, new_user_map.values())
if success:
print >> OUTPUT_FILE, 'Successfully inserted new users in database'
return new_user_map
else:
print >> OUTPUT_FILE, 'Error inserting new users in database'
return None
else:
print >> OUTPUT_FILE, 'No new users to insert, exiting'
return None
# ------------------------------------------------------------------------------
# updates user study access
def update_user_authorities(spreadsheet, cursor, worksheet_feed, portal_name, mskcc_user_spreadsheet):
# get map of current portal users
print >> OUTPUT_FILE, 'Getting list of current portal users from spreadsheet'
all_user_map = get_new_user_map(spreadsheet, worksheet_feed, {}, portal_name, mskcc_user_spreadsheet)
if all_user_map is None:
return None
print >> OUTPUT_FILE, 'Updating authorities for each user in current portal user list'
for user in all_user_map.values():
worksheet_authorities = set(user.authorities)
db_authorities = set(get_user_authorities(cursor, user.google_email))
try:
cursor.executemany("insert into authorities values(%s, %s)",
[(user.google_email, authority) for authority in worksheet_authorities - db_authorities])
except MySQLdb.Error, msg:
print >> ERROR_FILE, msg
# ------------------------------------------------------------------------------
# Adds unknown users to the user spreadsheet. MSKCC users are given default access
# during MSK signon. If this happens, we want to make sure they get into the google
# spreadsheet for tracking purposes.
def add_unknown_users_to_spreadsheet(client, cursor, spreadsheet, worksheet, mskcc_user_spreadsheet):
# get map of all users in google spreadsheet and portal database
worksheet_feed = get_worksheet_feed(client, spreadsheet, worksheet)
google_spreadsheet_user_map = get_all_user_map(spreadsheet, worksheet_feed, mskcc_user_spreadsheet)
portal_db_user_map = get_current_user_map(cursor)
current_time = time.strftime("%m/%d/%y %H:%M:%S")
# for each user in portal database not in google spreadsheet, insert user into google spreadsheet
for email in portal_db_user_map.keys():
if email.endswith(MSKCC_EMAIL_SUFFIX) and email not in google_spreadsheet_user_map:
user = portal_db_user_map[email]
print >> OUTPUT_FILE, user.name
def_authorities = DEFAULT_AUTHORITIES + ";" + email[0:email.index('@')].upper()
# we only got here if user was inserted via MSK AD - in which case name is formatted as:
# Gross, Benjamin E./Sloan Kettering Institute
if "/" in user.name:
user_name_parts = user.name.split("/")
row = { TIMESTAMP_KEY : current_time, MSKCC_EMAIL_KEY : user.inst_email, FULLNAME_KEY : user_name_parts[0], LAB_PI_KEY : user_name_parts[1], STATUS_KEY : STATUS_APPROVED, AUTHORITIES_KEY : def_authorities }
else:
row = { TIMESTAMP_KEY : current_time, MSKCC_EMAIL_KEY : user.inst_email, FULLNAME_KEY : user.name, STATUS_KEY : STATUS_APPROVED, AUTHORITIES_KEY : def_authorities }
add_row_to_google_worksheet(client, spreadsheet, worksheet, row)
# ------------------------------------------------------------------------------
# adds a row to the google spreadsheet
def add_row_to_google_worksheet(client, spreadsheet, worksheet, row):
ss_id = get_feed_id(client.GetSpreadsheetsFeed(), spreadsheet)
ws_id = get_feed_id(client.GetWorksheetsFeed(ss_id), worksheet)
client.InsertRow(row, ss_id, ws_id)
# ------------------------------------------------------------------------------
# gets email parameters from google spreadsheet
def get_email_parameters(google_spreadsheet,client):
subject = ''
body = ''
print >> OUTPUT_FILE, 'Getting email parameters from google spreadsheet'
email_worksheet_feed = get_worksheet_feed(client, google_spreadsheet, IMPORTER_WORKSHEET)
for entry in email_worksheet_feed.entry:
if entry.custom[SUBJECT_KEY].text is not None and entry.custom[BODY_KEY].text is not None:
subject = entry.custom[SUBJECT_KEY].text.strip()
body = entry.custom[BODY_KEY].text.strip()
return subject, body
def get_portal_name_map(google_spreadsheet,client):
portal_name = {}
print >> OUTPUT_FILE, 'Getting access control parameter from google spreadsheet'
access_control_worksheet_feed = get_worksheet_feed(client,google_spreadsheet,ACCESS_CONTROL_WORKSHEET)
for entry in access_control_worksheet_feed.entry:
if entry.custom[PORTAL_NAME_KEY] is not None and entry.custom[SPREADSHEET_NAME_KEY] is not None:
portal_name[entry.custom[SPREADSHEET_NAME_KEY].text.strip()] = entry.custom[PORTAL_NAME_KEY].text.strip()
if entry.custom[PORTAL_NAME_KEY].text.strip() == 'mskcc-portal':
mskcc_user_spreadsheet = entry.custom[SPREADSHEET_NAME_KEY].text.strip()
return portal_name,mskcc_user_spreadsheet
# ------------------------------------------------------------------------------
# displays program usage (invalid args)
def usage():
print >> OUTPUT_FILE, 'importUsers.py --secrets-file [google secrets.json] --creds-file [oauth creds filename] --properties-file [properties file] --send-email-confirm [true or false] --use-institutional-id [true or false]'
# ------------------------------------------------------------------------------
# the big deal main.
def main():
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['secrets-file=', 'creds-file=', 'properties-file=', 'send-email-confirm=', 'use-institutional-id='])
except getopt.error, msg:
print >> ERROR_FILE, msg
usage()
sys.exit(2)
# process the options
secrets_filename = ''
creds_filename = ''
properties_filename = ''
send_email_confirm = ''
for o, a in opts:
if o == '--secrets-file':
secrets_filename = a
elif o == '--creds-file':
creds_filename = a
elif o == '--properties-file':
properties_filename = a
elif o == '--send-email-confirm':
send_email_confirm = a
if (secrets_filename == '' or creds_filename == '' or properties_filename == '' or send_email_confirm == '' or
(send_email_confirm != 'true' and send_email_confirm != 'false')):
usage()
sys.exit(2)
# check existence of file
if not os.path.exists(properties_filename):
print >> ERROR_FILE, 'properties file cannot be found: ' + properties_filename
sys.exit(2)
# parse/get relevant portal properties
print >> OUTPUT_FILE, 'Reading portal properties file: ' + properties_filename
portal_properties = get_portal_properties(properties_filename)
if not portal_properties:
print >> OUTPUT_FILE, 'Error reading %s, exiting' % properties_filename
return
# get db connection & create cursor
print >> OUTPUT_FILE, 'Connecting to database: ' + portal_properties.cgds_database_name
connection = get_db_connection(portal_properties)
if connection is not None:
cursor = connection.cursor()
else:
print >> OUTPUT_FILE, 'Error connecting to database, exiting'
return
# login to google and get spreadsheet feed
client = google_login(secrets_filename, creds_filename, portal_properties.google_id, portal_properties.google_pw, sys.argv[0])
portal_name_map,mskcc_user_spreadsheet = get_portal_name_map(portal_properties.google_importer_spreadsheet,client)
google_spreadsheets = portal_properties.google_spreadsheet.split(';')
for google_spreadsheet in google_spreadsheets:
if not google_spreadsheet == '':
print >> OUTPUT_FILE, 'Importing ' + google_spreadsheet + ' ...'
worksheet_feed = get_worksheet_feed(client, google_spreadsheet,
portal_properties.google_worksheet)
# the 'guts' of the script
new_user_map = manage_users(google_spreadsheet, cursor, worksheet_feed, portal_name_map[google_spreadsheet], mskcc_user_spreadsheet)
# update user authorities
update_user_authorities(google_spreadsheet, cursor, worksheet_feed, portal_name_map[google_spreadsheet], mskcc_user_spreadsheet)
# sending emails
if new_user_map is not None:
if send_email_confirm == 'true':
subject,body = get_email_parameters(google_spreadsheet,client)
for new_user_key in new_user_map.keys():
new_user = new_user_map[new_user_key]
print >> OUTPUT_FILE, ('Sending confirmation email to new user: %s at %s' %
(new_user.name, new_user.inst_email))
send_mail([new_user.inst_email],subject,body)
if google_spreadsheet == mskcc_user_spreadsheet:
add_unknown_users_to_spreadsheet(client, cursor, google_spreadsheet, portal_properties.google_worksheet,mskcc_user_spreadsheet)
# clean up
cursor.close()
connection.commit()
connection.close()
# ------------------------------------------------------------------------------
# ready to roll
if __name__ == '__main__':
main()
| agpl-3.0 | -9,097,960,245,886,748,000 | 40.697865 | 227 | 0.604631 | false |
mr-uuid/snippets | python/functional.py | 1 | 1669 | import unittest
expressions = {
"gten": lambda x: x > 10,
"cube": lambda y: y**3,
"add": lambda x, y: x + y if (x is not None and y is not None) else (
x if y is None else y),
"reduce": lambda x, y: "{} {}".format(str(x), str(y)),
}
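# A couple of illustrative evaluations of the lambdas above:
#   expressions["add"](2, None) == 2      (a None operand is treated as missing)
#   expressions["reduce"](1, 2) == "1 2"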
def lten(x):
return x < 10
class Tester(unittest.TestCase):
def test_functional_programming(self):
self.assertEqual(
filter(lten, range(2, 25)),
range(2, 10)
)
self.assertEqual(
filter(expressions["gten"], range(2, 25)),
range(11, 25)
)
self.assertEqual(
map(expressions["cube"], [1, 2, 3]),
[1, 8, 27]
)
self.assertEqual(
map(expressions["add"], [2, 3, 4], [2, 3, 4]),
[4, 6, 8]
)
self.assertEqual(
reduce(expressions["reduce"], range(1, 4)),
"1 2 3"
)
def test_more_functional_programming(self):
stuff = [(x, y, z)
for x in [1, 2, 3]
for y in [3, 1, 4]
for z in range(2, 100)
if x != y and z % 25 == 0]
sum_map = reduce(lambda x, y: x+y,
map(lambda x: x[0]+x[1]+x[2], stuff))
col1 = [x for x, y, z in stuff]
col2 = [y for x, y, z in stuff]
col3 = [z for x, y, z in stuff]
sum_reduce = (reduce(lambda x, y: x+y, col1)
+ reduce(lambda x, y: x+y, col2)
+ reduce(lambda x, y: x+y, col3))
self.assertEqual(sum_map, sum_reduce)
if __name__ == "__main__":
unittest.main()
| mit | -2,022,393,962,986,300,400 | 27.288136 | 73 | 0.444578 | false |
authmillenon/RIOT | tests/gnrc_ipv6_ext/tests/01-run.py | 7 | 30694 | #!/usr/bin/env python3
# Copyright (C) 2018 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import re
import os
import sys
import subprocess
from scapy.all import Ether, IPv6, UDP, \
IPv6ExtHdrHopByHop, IPv6ExtHdrDestOpt, \
IPv6ExtHdrFragment, IPv6ExtHdrRouting, \
ICMPv6ParamProblem, \
sendp, srp1
from testrunner import run
EXT_HDR_NH = {
IPv6ExtHdrHopByHop: 0,
IPv6ExtHdrRouting: 43,
IPv6ExtHdrFragment: 44,
# IPSec headers currently unsupported by scapy
IPv6ExtHdrDestOpt: 60,
# Mobility header currently unsupported by scapy
}
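# The values above are the IANA-assigned IPv6 next-header numbers, e.g.
# EXT_HDR_NH[IPv6ExtHdrFragment] == 44 is the value the tests expect to find
# in the preceding header's next-header field.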
def pktbuf_empty(child):
child.sendline("pktbuf")
child.expect(r"packet buffer: first byte: (?P<first_byte>0x[0-9a-fA-F]+), "
r"last byte: 0x[0-9a-fA-F]+ \(size: (?P<size>\d+)\)")
first_byte = child.match.group("first_byte")
size = child.match.group("size")
child.expect(
r"~ unused: {} \(next: (\(nil\)|0), size: {}\) ~".format(
first_byte, size))
def register_protnum(child, protnum):
child.sendline("ip reg %d" % protnum)
child.expect("Registered to protocol number %d" % protnum)
def unregister(child):
child.sendline("ip unreg")
child.expect(r"Unregistered from protocol number \d")
def test_empty_hop_by_hop_opt_wo_register(child, iface, hw_dst, ll_dst, ll_src):
# Try sending an empty hop-by-hop-option header
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / UDP(), iface=iface, verbose=0)
pktbuf_empty(child)
def test_empty_hop_by_hop_opt_w_register(child, iface, hw_dst, ll_dst, ll_src):
# Register to hop-by-hop-option header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrHopByHop])
# Try sending an empty hop-by-hop-option header
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / UDP() / "\x01\x02", iface=iface, verbose=0)
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 11 00 01 04 00 00 00 00")
child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_duplicate_hop_by_hop_opt(child, iface, hw_dst, ll_dst, ll_src):
# Try sending two empty hop-by-hop-option header
p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrHopByHop() / UDP() / "\x03\x04",
iface=iface, timeout=1, verbose=0)
# should return parameter problem message
assert(p is not None)
assert(ICMPv6ParamProblem in p)
assert(p[ICMPv6ParamProblem].code == 1) # unrecognized next header
assert(p[ICMPv6ParamProblem].ptr >= 40) # after IPv6 header
pktbuf_empty(child)
def test_empty_non_first_hop_by_hop_opt(child, iface, hw_dst, ll_dst, ll_src):
# Try sending empty hop-by-hop-option header after destination option
# header
p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrDestOpt() / IPv6ExtHdrHopByHop() / UDP() / "\x05\x06",
iface=iface, timeout=1, verbose=0)
# should return parameter problem message
assert(p is not None)
assert(ICMPv6ParamProblem in p)
assert(p[ICMPv6ParamProblem].code == 1) # unrecognized next header
assert(p[ICMPv6ParamProblem].ptr >= 40) # after IPv6 header
pktbuf_empty(child)
def test_empty_duplicate_non_first_hop_by_hop_opt(child, iface, hw_dst, ll_dst,
ll_src):
# Try sending empty hop-by-hop-option header after destination option
# header and another hop-by-hop-option header
p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrHopByHop() /
UDP() / "\x07\x08",
iface=iface, timeout=1, verbose=0)
# should return parameter problem message
assert(p is not None)
assert(ICMPv6ParamProblem in p)
assert(p[ICMPv6ParamProblem].code == 1) # unrecognized next header
assert(p[ICMPv6ParamProblem].ptr >= 48) # after IPv6 header and HopByHopOpt
pktbuf_empty(child)
def test_empty_routing_header_wo_register(child, iface, hw_dst, ll_dst, ll_src):
# Try sending an empty routing header
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrRouting() / UDP(), iface=iface, verbose=0)
pktbuf_empty(child)
def test_empty_routing_header_w_register(child, iface, hw_dst, ll_dst, ll_src):
# Register to routing header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrRouting])
# Try sending an empty routing header
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrRouting() / UDP() / "\x01\x02", iface=iface, verbose=0)
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = 17 (UDP), len = 0x00, routing type = 0, segments left = 0
child.expect(r"00000000 11 00 00 00 00 00 00 00")
child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrRouting]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_fragment_header_wo_register(child, iface, hw_dst, ll_dst, ll_src):
# Try sending an empty fragment header
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrFragment() / UDP(), iface=iface, verbose=0)
pktbuf_empty(child)
def test_empty_fragment_header_w_register(child, iface, hw_dst, ll_dst, ll_src):
# Register to fragment header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrFragment])
# Try sending an empty fragment header
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrFragment() / UDP() / "\x01\x02", iface=iface, verbose=0)
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = 17 (UDP), reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000000 11 00 00 00 00 00 00 00")
child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrFragment]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_dest_opt_wo_register(child, iface, hw_dst, ll_dst, ll_src):
# Try sending an empty Destination-Option header
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrDestOpt() / UDP(), iface=iface, verbose=0)
pktbuf_empty(child)
def test_empty_dest_opt_w_register(child, iface, hw_dst, ll_dst, ll_src):
# Register to Destination-Option header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrDestOpt])
# Try sending an empty Destination-Option header
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrDestOpt() / UDP() / "\x01\x02", iface=iface, verbose=0)
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 11 00 01 04 00 00 00 00")
child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrDestOpt]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_mixed1_w_hop_opt_registered(child, iface, hw_dst, ll_dst, ll_src):
# Register to hop-by-hop-option header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrHopByHop])
# Try sending a packet with a number of extension headers in recommended
# order: https://tools.ietf.org/html/rfc8200#section-4.1
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrRouting() /
IPv6ExtHdrFragment() / IPv6ExtHdrDestOpt() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# Hop-by-hop option with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, len = 0x00, PadN option (0x01) of length 0x04
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00 "
r"{:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt],
EXT_HDR_NH[IPv6ExtHdrRouting]
))
# NH = IPv6ExtHdrFragment, len = 0x00, routing type = 0, segments left = 0
# NH = IPv6ExtHdrDestOpt, reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000010 {:02X} 00 00 00 00 00 00 00 "
r"{:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrFragment],
EXT_HDR_NH[IPv6ExtHdrDestOpt]
))
# NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000020 11 00 01 04 00 00 00 00")
# IPv6 header
child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_mixed1_w_rt_hdr_registered(child, iface, hw_dst, ll_dst, ll_src):
# Register to routing header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrRouting])
# Try sending a packet with a number of extension headers in recommended
# order: https://tools.ietf.org/html/rfc8200#section-4.1
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrRouting() /
IPv6ExtHdrFragment() / IPv6ExtHdrDestOpt() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# Routing header with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = IPv6ExtHdrFragment, len = 0x00, routing type = 0, segments left = 0
# NH = IPv6ExtHdrDestOpt, reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00 "
r"{:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrFragment],
EXT_HDR_NH[IPv6ExtHdrDestOpt]
))
# NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000010 11 00 01 04 00 00 00 00")
# Destination option 1
child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrRouting]))
# Hop-by-hop option
child.expect(r"~~ SNIP 2 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]))
# IPv6 header
child.expect(r"~~ SNIP 3 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_mixed1_w_frag_hdr_registered(child, iface, hw_dst, ll_dst, ll_src):
# Register to fragment header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrFragment])
# Try sending a packet with a number of extension headers in recommended
# order: https://tools.ietf.org/html/rfc8200#section-4.1
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrRouting() /
IPv6ExtHdrFragment() / IPv6ExtHdrDestOpt() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# Fragment header with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00 "
"11 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]))
child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrFragment, len = 0x00, routing type = 0, segments left = 0
child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrFragment]))
# Destination option 1
child.expect(r"~~ SNIP 2 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrRouting]))
# Hop-by-hop option
child.expect(r"~~ SNIP 3 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]))
# IPv6 header
child.expect(r"~~ SNIP 4 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_mixed1_w_dest_opt_registered(child, iface, hw_dst, ll_dst, ll_src):
# Register to destination-option header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrDestOpt])
# Try sending a packet with a number of extension headers in recommended
# order: https://tools.ietf.org/html/rfc8200#section-4.1
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrDestOpt() / IPv6ExtHdrRouting() /
IPv6ExtHdrFragment() / IPv6ExtHdrDestOpt() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# IPv6ExtHdrDestOpt appears twice in the packet, so pktdump will print it
# twice
# 1st print parsed up to the first IPv6ExtHdrDestOpt
# Destination option 1 with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
# NH = IPv6ExtHdrFragment, len = 0x00, routing type = 0, segments left = 0
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00 "
r"{:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrRouting],
EXT_HDR_NH[IPv6ExtHdrFragment]
))
# NH = IPv6ExtHdrDestOpt, reserved = 0x00, fragment offset = 0, res = 0, M = 0
# NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000010 {:02X} 00 00 00 00 00 00 00 "
r"11 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]
))
# Hop-by-hop option
child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]))
# IPv6 header
child.expect(r"~~ SNIP 2 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
# 2nd print parsed up to the second IPv6ExtHdrDestOpt
# Destination option 2 with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = 17 (UDP), len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 11 00 01 04 00 00 00 00")
# Fragment header
child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]))
# Routing header
child.expect(r"~~ SNIP 2 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrFragment, len = 0x00, routing type = 0, segments left = 0
child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrFragment]))
# Destination option 1
child.expect(r"~~ SNIP 3 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrRouting]))
# Hop-by-hop option
child.expect(r"~~ SNIP 4 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]))
# IPv6 header
child.expect(r"~~ SNIP 5 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_mixed2_w_hop_opt_registered(child, iface, hw_dst, ll_dst, ll_src):
# Register to hop-by-hop-option header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrHopByHop])
    # Try sending a packet with a number of extension headers in a
    # non-recommended (but legal) order
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrRouting() / IPv6ExtHdrDestOpt() /
IPv6ExtHdrFragment() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# Hop-by-hop option with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
# NH = IPv6ExtHdrDestOpt, len = 0x00, routing type = 0, segments left = 0
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00 "
r"{:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrRouting],
EXT_HDR_NH[IPv6ExtHdrDestOpt]
))
# NH = IPv6ExtHdrFragment, len = 0x00, PadN option (0x01) of length 0x04
# NH = 17 (UDP), reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000010 {:02X} 00 01 04 00 00 00 00 "
r"11 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrFragment]
))
# IPv6 header
child.expect(r"~~ SNIP 1 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_mixed2_w_rt_hdr_registered(child, iface, hw_dst, ll_dst, ll_src):
# Register to routing header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrRouting])
    # Try sending a packet with a number of extension headers in a
    # non-recommended (but legal) order
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrRouting() / IPv6ExtHdrDestOpt() /
IPv6ExtHdrFragment() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# Routing header with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, len = 0x00, routing type = 0, segments left = 0
# NH = IPv6ExtHdrFragment, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00 "
r"{:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt],
EXT_HDR_NH[IPv6ExtHdrFragment]
))
# NH = 17 (UDP), reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000010 11 00 00 00 00 00 00 00")
# Hop-by-hop-option
child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrRouting]))
# IPv6 header
child.expect(r"~~ SNIP 2 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_mixed2_w_frag_hdr_registered(child, iface, hw_dst, ll_dst,
ll_src):
# Register to fragment header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrFragment])
    # Try sending a packet with a number of extension headers in a
    # non-recommended (but legal) order
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrRouting() / IPv6ExtHdrDestOpt() /
IPv6ExtHdrFragment() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# Fragment header with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = 17 (UDP), reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000000 11 00 00 00 00 00 00 00")
# Destination option
child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrFragment, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrFragment]))
# Routing header
child.expect(r"~~ SNIP 2 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, len = 0x00, routing type = 0, segments left = 0
child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]))
# Hop-by-hop-option
child.expect(r"~~ SNIP 3 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrRouting]))
# IPv6 header
child.expect(r"~~ SNIP 4 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_empty_mixed2_w_dest_opt_registered(child, iface, hw_dst, ll_dst, ll_src):
# Register to destination-option header
register_protnum(child, EXT_HDR_NH[IPv6ExtHdrDestOpt])
    # Try sending a packet with a number of extension headers in a
    # non-recommended (but legal) order
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrHopByHop() / IPv6ExtHdrRouting() / IPv6ExtHdrDestOpt() /
IPv6ExtHdrFragment() / UDP() / "\x01\x02",
iface=iface, verbose=0)
# Destination option with payload
child.expect(r"~~ SNIP 0 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len = int(child.match.group(1))
# NH = IPv6ExtHdrFragment, len = 0x00, PadN option (0x01) of length 0x04
# NH = 17 (UDP), reserved = 0x00, fragment offset = 0, res = 0, M = 0
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00 "
r"11 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrFragment]))
# Routing header
child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrDestOpt, len = 0x00, routing type = 0, segments left = 0
child.expect(r"00000000 {:02X} 00 00 00 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrDestOpt]))
# Hop-by-hop-option
child.expect(r"~~ SNIP 2 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = IPv6ExtHdrRouting, len = 0x00, PadN option (0x01) of length 0x04
child.expect(r"00000000 {:02X} 00 01 04 00 00 00 00".format(
EXT_HDR_NH[IPv6ExtHdrRouting]))
# IPv6 header
child.expect(r"~~ SNIP 3 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrHopByHop]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
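# Helper: run `cmd` via subprocess, scan its output line by line for `pattern`,
# and return group `res_group` of the first matching line (None if no match).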
def check_and_search_output(cmd, pattern, res_group, *args, **kwargs):
output = subprocess.check_output(cmd, *args, **kwargs).decode("utf-8")
for line in output.splitlines():
m = re.search(pattern, line)
if m is not None:
return m.group(res_group)
return None
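# Return the bridge ("master") interface the tap is enslaved to, or the tap
# itself if it is not part of a bridge.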
def get_bridge(tap):
res = check_and_search_output(
["bridge", "link"],
r"{}.+master\s+(?P<master>[^\s]+)".format(tap),
"master"
)
return tap if res is None else res
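# Return the host-side link-local IPv6 address configured on the tap interface;
# raise if none is found.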
def get_host_lladdr(tap):
res = check_and_search_output(
["ip", "addr", "show", "dev", tap, "scope", "link"],
r"inet6 (?P<lladdr>[0-9A-Fa-f:]+)/64",
"lladdr"
)
if res is None:
raise AssertionError(
"Can't find host link-local address on interface {}".format(tap)
)
else:
return res
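# Entry point executed by the test runner: determine the interface and the
# addresses of both ends, then run every extension-header test case in turn.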
def testfunc(child):
tap = get_bridge(os.environ["TAP"])
lladdr_src = get_host_lladdr(tap)
child.sendline("ifconfig")
child.expect("HWaddr: (?P<hwaddr>[A-Fa-f:0-9]+)")
hwaddr_dst = child.match.group("hwaddr").lower()
child.expect("(?P<lladdr>fe80::[A-Fa-f:0-9]+)")
lladdr_dst = child.match.group("lladdr").lower()
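    # Run a single test case: in verbose mode (logfile on stdout) call it
    # directly, otherwise print a progress dot on success or FAILED on error.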
def run(func):
if child.logfile == sys.stdout:
func(child, tap, hwaddr_dst, lladdr_dst, lladdr_src)
else:
try:
func(child, tap, hwaddr_dst, lladdr_dst, lladdr_src)
print(".", end="", flush=True)
except Exception as e:
print("FAILED")
raise e
run(test_empty_hop_by_hop_opt_wo_register)
run(test_empty_hop_by_hop_opt_w_register)
run(test_empty_duplicate_hop_by_hop_opt)
run(test_empty_non_first_hop_by_hop_opt)
run(test_empty_duplicate_non_first_hop_by_hop_opt)
run(test_empty_routing_header_wo_register)
run(test_empty_routing_header_w_register)
run(test_empty_fragment_header_wo_register)
run(test_empty_fragment_header_w_register)
run(test_empty_dest_opt_wo_register)
run(test_empty_dest_opt_w_register)
# check various registrations with recommended order to validate parsing
# (recommended order, see https://tools.ietf.org/html/rfc8200#section-4.1)
run(test_empty_mixed1_w_hop_opt_registered)
run(test_empty_mixed1_w_rt_hdr_registered)
run(test_empty_mixed1_w_frag_hdr_registered)
run(test_empty_mixed1_w_dest_opt_registered)
    # Other orders SHOULD also be parsed; since the implementation does not
    # check the order (which would be more complicated), it is able to do that
run(test_empty_mixed2_w_hop_opt_registered)
run(test_empty_mixed2_w_rt_hdr_registered)
run(test_empty_mixed2_w_frag_hdr_registered)
run(test_empty_mixed2_w_dest_opt_registered)
print("SUCCESS")
if __name__ == "__main__":
if os.geteuid() != 0:
print("\x1b[1;31mThis test requires root privileges.\n"
"It's constructing and sending Ethernet frames.\x1b[0m\n",
file=sys.stderr)
sys.exit(1)
sys.exit(run(testfunc, timeout=1, echo=False))
| lgpl-2.1 | -2,034,594,512,957,191,200 | 46.512384 | 83 | 0.610432 | false |
Tuxemon/Tuxemon-Server | tuxemon_server/core/game/event/conditions/combat.py | 5 | 1893 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <[email protected]>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# William Edwards <[email protected]>
#
class Combat(object):
def combat_started(self, game, condition):
"""Checks to see if combat has been started or not.
:param game: The main game object that contains all the game's variables.
:param condition: A dictionary of condition details. See :py:func:`core.components.map.Map.loadevents`
for the format of the dictionary.
:type game: core.control.Control
:type condition: Dictionary
:rtype: Boolean
:returns: True or False
Valid Parameters: None
**Examples:**
>>> condition
{'action_id': '9',
'id': 9,
'operator': 'is_not',
'parameters': '',
'type': 'combat_started',
'x': 1,
'y': 11}
"""
world = game.current_state
if (game.state_name == "CombatState"
or world.battle_transition_in_progress
or world.start_battle_transition
or world.next == "CombatState"):
return True
else:
return False
| gpl-3.0 | 3,870,911,959,556,521,000 | 27.253731 | 110 | 0.632858 | false |
yaolinz/rethinkdb | drivers/python/rethinkdb/net_tornado.py | 5 | 10607 | # Copyright 2015 RethinkDB, all rights reserved.
import errno
import json
import numbers
import socket
import struct
import sys
from tornado import gen, iostream
from tornado.ioloop import IOLoop
from tornado.concurrent import Future
from . import ql2_pb2 as p
from .net import decodeUTF, Query, Response, Cursor, maybe_profile, convert_pseudo
from .net import Connection as ConnectionBase
from .errors import *
from .ast import RqlQuery, RqlTopLevelQuery, DB
__all__ = ['Connection']
pResponse = p.Response.ResponseType
pQuery = p.Query.QueryType
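# Helper: wait on `generator`, optionally bounded by an absolute IOLoop time
# `deadline`; a tornado timeout is translated into the driver's ReqlTimeoutError.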
@gen.coroutine
def with_absolute_timeout(deadline, generator, **kwargs):
if deadline is None:
res = yield generator
else:
try:
res = yield gen.with_timeout(deadline, generator, **kwargs)
except gen.TimeoutError:
raise ReqlTimeoutError()
raise gen.Return(res)
# The Tornado implementation of the Cursor object:
# The `new_response` Future notifies any waiting coroutines that they can attempt
# to grab the next result. In addition, the waiting coroutine will schedule a
# timeout at the given deadline (if provided), at which point the future will be
# errored.
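# A typical consumption pattern from a Tornado coroutine looks roughly like the
# sketch below (`r` is the rethinkdb module; `conn` and the queried table are
# assumed to already exist):
#
#     cursor = yield r.table('test').run(conn)
#     while (yield cursor.fetch_next()):
#         item = yield cursor.next()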
class TornadoCursor(Cursor):
def __init__(self, *args, **kwargs):
Cursor.__init__(self, *args, **kwargs)
self.new_response = Future()
def _extend(self, res):
Cursor._extend(self, res)
self.new_response.set_result(True)
self.new_response = Future()
# Convenience function so users know when they've hit the end of the cursor
# without having to catch an exception
@gen.coroutine
def fetch_next(self, wait=True):
timeout = Cursor._wait_to_timeout(wait)
deadline = None if timeout is None else self.conn._io_loop.time() + timeout
while len(self.items) == 0 and self.error is None:
self._maybe_fetch_batch()
yield with_absolute_timeout(deadline, self.new_response)
# If there is a (non-empty) error to be received, we return True, so the
# user will receive it on the next `next` call.
raise gen.Return(len(self.items) != 0 or not isinstance(self.error, ReqlCursorEmpty))
def _empty_error(self):
# We do not have ReqlCursorEmpty inherit from StopIteration as that interferes
# with Tornado's gen.coroutine and is the equivalent of gen.Return(None).
return ReqlCursorEmpty()
@gen.coroutine
def _get_next(self, timeout):
deadline = None if timeout is None else self.conn._io_loop.time() + timeout
while len(self.items) == 0:
self._maybe_fetch_batch()
if self.error is not None:
raise self.error
yield with_absolute_timeout(deadline, self.new_response)
raise gen.Return(convert_pseudo(self.items.pop(0), self.query))
class ConnectionInstance(object):
def __init__(self, parent, io_loop=None):
self._parent = parent
self._closing = False
self._user_queries = { }
self._cursor_cache = { }
self._ready = Future()
self._io_loop = io_loop
if self._io_loop is None:
self._io_loop = IOLoop.current()
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if len(self._parent.ssl) > 0:
ssl_options = {}
if self._parent.ssl["ca_certs"]:
ssl_options['ca_certs'] = self._parent.ssl["ca_certs"]
ssl_options['cert_reqs'] = 2 # ssl.CERT_REQUIRED
self._stream = iostream.SSLIOStream(
self._socket, ssl_options=ssl_options, io_loop=self._io_loop)
else:
self._stream = iostream.IOStream(self._socket, io_loop=self._io_loop)
@gen.coroutine
def connect(self, timeout):
deadline = None if timeout is None else self._io_loop.time() + timeout
try:
yield with_absolute_timeout(
deadline,
self._stream.connect((self._parent.host,
self._parent.port),
server_hostname=self._parent.host),
io_loop=self._io_loop,
quiet_exceptions=(iostream.StreamClosedError))
except Exception as err:
raise ReqlDriverError('Could not connect to %s:%s. Error: %s' %
(self._parent.host, self._parent.port, str(err)))
try:
self._stream.write(self._parent.handshake)
response = yield with_absolute_timeout(
deadline,
self._stream.read_until(b'\0'),
io_loop=self._io_loop,
quiet_exceptions=(iostream.StreamClosedError))
except Exception as err:
raise ReqlDriverError(
'Connection interrupted during handshake with %s:%s. Error: %s' %
(self._parent.host, self._parent.port, str(err)))
message = decodeUTF(response[:-1]).split('\n')[0]
if message != 'SUCCESS':
self.close(False, None)
if message == "ERROR: Incorrect authorization key":
raise ReqlAuthError(self._parent.host, self._parent.port)
else:
raise ReqlDriverError('Server dropped connection with message: "%s"' %
(message, ))
# Start a parallel function to perform reads
self._io_loop.add_callback(self._reader)
raise gen.Return(self._parent)
def is_open(self):
return not self._stream.closed()
@gen.coroutine
def close(self, noreply_wait, token, exception=None):
self._closing = True
if exception is not None:
err_message = "Connection is closed (%s)." % str(exception)
else:
err_message = "Connection is closed."
# Cursors may remove themselves when errored, so copy a list of them
for cursor in list(self._cursor_cache.values()):
cursor._error(err_message)
for query, future in iter(self._user_queries.values()):
future.set_exception(ReqlDriverError(err_message))
self._user_queries = { }
self._cursor_cache = { }
if noreply_wait:
noreply = Query(pQuery.NOREPLY_WAIT, token, None, None)
yield self.run_query(noreply, False)
try:
self._stream.close()
except iostream.StreamClosedError:
pass
raise gen.Return(None)
@gen.coroutine
def run_query(self, query, noreply):
yield self._stream.write(query.serialize())
if noreply:
raise gen.Return(None)
response_future = Future()
self._user_queries[query.token] = (query, response_future)
res = yield response_future
raise gen.Return(res)
# The _reader coroutine runs in its own context at the top level of the
# Tornado.IOLoop it was created with. It runs in parallel, reading responses
# off of the socket and forwarding them to the appropriate Future or Cursor.
# This is shut down as a consequence of closing the stream, or an error in the
# socket/protocol from the server. Unexpected errors in this coroutine will
# close the ConnectionInstance and be passed to any open Futures or Cursors.
@gen.coroutine
def _reader(self):
try:
while True:
buf = yield self._stream.read_bytes(12)
(token, length,) = struct.unpack("<qL", buf)
buf = yield self._stream.read_bytes(length)
res = Response(token, buf)
cursor = self._cursor_cache.get(token)
if cursor is not None:
cursor._extend(res)
elif token in self._user_queries:
# Do not pop the query from the dict until later, so
# we don't lose track of it in case of an exception
query, future = self._user_queries[token]
if res.type == pResponse.SUCCESS_ATOM:
value = convert_pseudo(res.data[0], query)
future.set_result(maybe_profile(value, res))
elif res.type in (pResponse.SUCCESS_SEQUENCE,
pResponse.SUCCESS_PARTIAL):
cursor = TornadoCursor(self, query)
self._cursor_cache[token] = cursor
cursor._extend(res)
future.set_result(maybe_profile(cursor, res))
elif res.type == pResponse.WAIT_COMPLETE:
future.set_result(None)
else:
future.set_exception(res.make_error(query))
del self._user_queries[token]
elif not self._closing:
raise ReqlDriverError("Unexpected response received.")
except Exception as ex:
if not self._closing:
self.close(False, None, ex)
# Wrap functions from the base connection class that may throw - these will
# put any exception inside a Future and return it.
class Connection(ConnectionBase):
def __init__(self, *args, **kwargs):
ConnectionBase.__init__(self, ConnectionInstance, *args, **kwargs)
@gen.coroutine
def reconnect(self, noreply_wait=True, timeout=None):
# We close before reconnect so reconnect doesn't try to close us
# and then fail to return the Future (this is a little awkward).
yield self.close(noreply_wait)
res = yield ConnectionBase.reconnect(self, noreply_wait, timeout)
raise gen.Return(res)
@gen.coroutine
def close(self, *args, **kwargs):
if self._instance is None:
res = None
else:
res = yield ConnectionBase.close(self, *args, **kwargs)
raise gen.Return(res)
@gen.coroutine
def noreply_wait(self, *args, **kwargs):
res = yield ConnectionBase.noreply_wait(self, *args, **kwargs)
raise gen.Return(res)
@gen.coroutine
def _start(self, *args, **kwargs):
res = yield ConnectionBase._start(self, *args, **kwargs)
raise gen.Return(res)
@gen.coroutine
def _continue(self, *args, **kwargs):
res = yield ConnectionBase._continue(self, *args, **kwargs)
raise gen.Return(res)
@gen.coroutine
def _stop(self, *args, **kwargs):
res = yield ConnectionBase._stop(self, *args, **kwargs)
raise gen.Return(res)
| agpl-3.0 | 394,968,667,133,397,200 | 38.431227 | 93 | 0.598001 | false |
zetaops/SpiffWorkflow | tests/SpiffWorkflow/bpmn/TimerIntermediateTest.py | 3 | 1604 | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from __future__ import division
import unittest
import datetime
import time
from SpiffWorkflow.Task import Task
from SpiffWorkflow.bpmn.BpmnWorkflow import BpmnWorkflow
from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class TimerIntermediateTest(BpmnWorkflowTestCase):
def setUp(self):
self.spec = self.load_spec()
def load_spec(self):
return self.load_workflow_spec('Test-Workflows/*.bpmn20.xml', 'Timer Intermediate')
def testRunThroughHappy(self):
self.workflow = BpmnWorkflow(self.spec)
due_time = datetime.datetime.now() + datetime.timedelta(seconds=0.5)
self.assertEquals(1, len(self.workflow.get_tasks(Task.READY)))
self.workflow.get_tasks(Task.READY)[0].set_data(due_time=due_time)
self.workflow.do_engine_steps()
self.assertEquals(1, len(self.workflow.get_tasks(Task.WAITING)))
time.sleep(0.6)
self.assertEquals(1, len(self.workflow.get_tasks(Task.WAITING)))
self.workflow.refresh_waiting_tasks()
self.assertEquals(0, len(self.workflow.get_tasks(Task.WAITING)))
self.assertEquals(1, len(self.workflow.get_tasks(Task.READY)))
self.workflow.do_engine_steps()
self.assertEquals(0, len(self.workflow.get_tasks(Task.READY | Task.WAITING)))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TimerIntermediateTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| lgpl-3.0 | 8,238,835,200,252,255,000 | 33.12766 | 91 | 0.706983 | false |
EricCline/CEM_inc | env/lib/python2.7/site-packages/IPython/parallel/controller/hub.py | 7 | 54907 | """The IPython Controller Hub with 0MQ
This is the master object that handles connections from engines and clients,
and monitors traffic through the various queues.
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import json
import os
import sys
import time
from datetime import datetime
import zmq
from zmq.eventloop import ioloop
from zmq.eventloop.zmqstream import ZMQStream
# internal:
from IPython.utils.importstring import import_item
from IPython.utils.jsonutil import extract_dates
from IPython.utils.localinterfaces import localhost
from IPython.utils.py3compat import cast_bytes, unicode_type, iteritems
from IPython.utils.traitlets import (
HasTraits, Instance, Integer, Unicode, Dict, Set, Tuple, CBytes, DottedObjectName
)
from IPython.parallel import error, util
from IPython.parallel.factory import RegistrationFactory
from IPython.kernel.zmq.session import SessionFactory
from .heartmonitor import HeartMonitor
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
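# Trivial message handlers: _passer silently drops its arguments, _printer
# dumps them (handy when debugging queue traffic).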
def _passer(*args, **kwargs):
return
def _printer(*args, **kwargs):
print (args)
print (kwargs)
def empty_record():
"""Return an empty dict with all record keys."""
return {
'msg_id' : None,
'header' : None,
'metadata' : None,
'content': None,
'buffers': None,
'submitted': None,
'client_uuid' : None,
'engine_uuid' : None,
'started': None,
'completed': None,
'resubmitted': None,
'received': None,
'result_header' : None,
'result_metadata' : None,
'result_content' : None,
'result_buffers' : None,
'queue' : None,
'pyin' : None,
'pyout': None,
'pyerr': None,
'stdout': '',
'stderr': '',
}
def init_record(msg):
"""Initialize a TaskRecord based on a request."""
header = msg['header']
return {
'msg_id' : header['msg_id'],
'header' : header,
'content': msg['content'],
'metadata': msg['metadata'],
'buffers': msg['buffers'],
'submitted': header['date'],
'client_uuid' : None,
'engine_uuid' : None,
'started': None,
'completed': None,
'resubmitted': None,
'received': None,
'result_header' : None,
'result_metadata': None,
'result_content' : None,
'result_buffers' : None,
'queue' : None,
'pyin' : None,
'pyout': None,
'pyerr': None,
'stdout': '',
'stderr': '',
}
class EngineConnector(HasTraits):
"""A simple object for accessing the various zmq connections of an object.
Attributes are:
id (int): engine ID
uuid (unicode): engine UUID
pending: set of msg_ids
stallback: DelayedCallback for stalled registration
"""
id = Integer(0)
uuid = Unicode()
pending = Set()
stallback = Instance(ioloop.DelayedCallback)
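# Shorthand names accepted for HubFactory.db_class, mapped to fully-qualified
# backend classes.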
_db_shortcuts = {
'sqlitedb' : 'IPython.parallel.controller.sqlitedb.SQLiteDB',
'mongodb' : 'IPython.parallel.controller.mongodb.MongoDB',
'dictdb' : 'IPython.parallel.controller.dictdb.DictDB',
'nodb' : 'IPython.parallel.controller.dictdb.NoDB',
}
class HubFactory(RegistrationFactory):
"""The Configurable for setting up a Hub."""
# port-pairs for monitoredqueues:
hb = Tuple(Integer,Integer,config=True,
help="""PUB/ROUTER Port pair for Engine heartbeats""")
def _hb_default(self):
return tuple(util.select_random_ports(2))
mux = Tuple(Integer,Integer,config=True,
help="""Client/Engine Port pair for MUX queue""")
def _mux_default(self):
return tuple(util.select_random_ports(2))
task = Tuple(Integer,Integer,config=True,
help="""Client/Engine Port pair for Task queue""")
def _task_default(self):
return tuple(util.select_random_ports(2))
control = Tuple(Integer,Integer,config=True,
help="""Client/Engine Port pair for Control queue""")
def _control_default(self):
return tuple(util.select_random_ports(2))
iopub = Tuple(Integer,Integer,config=True,
help="""Client/Engine Port pair for IOPub relay""")
def _iopub_default(self):
return tuple(util.select_random_ports(2))
# single ports:
mon_port = Integer(config=True,
help="""Monitor (SUB) port for queue traffic""")
def _mon_port_default(self):
return util.select_random_ports(1)[0]
notifier_port = Integer(config=True,
help="""PUB port for sending engine status notifications""")
def _notifier_port_default(self):
return util.select_random_ports(1)[0]
engine_ip = Unicode(config=True,
help="IP on which to listen for engine connections. [default: loopback]")
def _engine_ip_default(self):
return localhost()
engine_transport = Unicode('tcp', config=True,
help="0MQ transport for engine connections. [default: tcp]")
client_ip = Unicode(config=True,
help="IP on which to listen for client connections. [default: loopback]")
client_transport = Unicode('tcp', config=True,
help="0MQ transport for client connections. [default : tcp]")
monitor_ip = Unicode(config=True,
help="IP on which to listen for monitor messages. [default: loopback]")
monitor_transport = Unicode('tcp', config=True,
help="0MQ transport for monitor messages. [default : tcp]")
_client_ip_default = _monitor_ip_default = _engine_ip_default
monitor_url = Unicode('')
db_class = DottedObjectName('NoDB',
config=True, help="""The class to use for the DB backend
Options include:
SQLiteDB: SQLite
MongoDB : use MongoDB
DictDB : in-memory storage (fastest, but be mindful of memory growth of the Hub)
NoDB : disable database altogether (default)
""")
registration_timeout = Integer(0, config=True,
help="Engine registration timeout in seconds [default: max(30,"
"10*heartmonitor.period)]" )
def _registration_timeout_default(self):
if self.heartmonitor is None:
# early initialization, this value will be ignored
return 0
# heartmonitor period is in milliseconds, so 10x in seconds is .01
return max(30, int(.01 * self.heartmonitor.period))
# not configurable
db = Instance('IPython.parallel.controller.dictdb.BaseDB')
heartmonitor = Instance('IPython.parallel.controller.heartmonitor.HeartMonitor')
def _ip_changed(self, name, old, new):
self.engine_ip = new
self.client_ip = new
self.monitor_ip = new
self._update_monitor_url()
def _update_monitor_url(self):
self.monitor_url = "%s://%s:%i" % (self.monitor_transport, self.monitor_ip, self.mon_port)
def _transport_changed(self, name, old, new):
self.engine_transport = new
self.client_transport = new
self.monitor_transport = new
self._update_monitor_url()
def __init__(self, **kwargs):
super(HubFactory, self).__init__(**kwargs)
self._update_monitor_url()
def construct(self):
self.init_hub()
def start(self):
self.heartmonitor.start()
self.log.info("Heartmonitor started")
def client_url(self, channel):
"""return full zmq url for a named client channel"""
return "%s://%s:%i" % (self.client_transport, self.client_ip, self.client_info[channel])
def engine_url(self, channel):
"""return full zmq url for a named engine channel"""
return "%s://%s:%i" % (self.engine_transport, self.engine_ip, self.engine_info[channel])
def init_hub(self):
"""construct Hub object"""
ctx = self.context
loop = self.loop
if 'TaskScheduler.scheme_name' in self.config:
scheme = self.config.TaskScheduler.scheme_name
else:
from .scheduler import TaskScheduler
scheme = TaskScheduler.scheme_name.get_default_value()
# build connection dicts
engine = self.engine_info = {
'interface' : "%s://%s" % (self.engine_transport, self.engine_ip),
'registration' : self.regport,
'control' : self.control[1],
'mux' : self.mux[1],
'hb_ping' : self.hb[0],
'hb_pong' : self.hb[1],
'task' : self.task[1],
'iopub' : self.iopub[1],
}
client = self.client_info = {
'interface' : "%s://%s" % (self.client_transport, self.client_ip),
'registration' : self.regport,
'control' : self.control[0],
'mux' : self.mux[0],
'task' : self.task[0],
'task_scheme' : scheme,
'iopub' : self.iopub[0],
'notification' : self.notifier_port,
}
self.log.debug("Hub engine addrs: %s", self.engine_info)
self.log.debug("Hub client addrs: %s", self.client_info)
# Registrar socket
q = ZMQStream(ctx.socket(zmq.ROUTER), loop)
util.set_hwm(q, 0)
q.bind(self.client_url('registration'))
self.log.info("Hub listening on %s for registration.", self.client_url('registration'))
if self.client_ip != self.engine_ip:
q.bind(self.engine_url('registration'))
self.log.info("Hub listening on %s for registration.", self.engine_url('registration'))
### Engine connections ###
# heartbeat
hpub = ctx.socket(zmq.PUB)
hpub.bind(self.engine_url('hb_ping'))
hrep = ctx.socket(zmq.ROUTER)
util.set_hwm(hrep, 0)
hrep.bind(self.engine_url('hb_pong'))
self.heartmonitor = HeartMonitor(loop=loop, parent=self, log=self.log,
pingstream=ZMQStream(hpub,loop),
pongstream=ZMQStream(hrep,loop)
)
### Client connections ###
# Notifier socket
n = ZMQStream(ctx.socket(zmq.PUB), loop)
n.bind(self.client_url('notification'))
### build and launch the queues ###
# monitor socket
sub = ctx.socket(zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b"")
sub.bind(self.monitor_url)
sub.bind('inproc://monitor')
sub = ZMQStream(sub, loop)
# connect the db
db_class = _db_shortcuts.get(self.db_class.lower(), self.db_class)
self.log.info('Hub using DB backend: %r', (db_class.split('.')[-1]))
self.db = import_item(str(db_class))(session=self.session.session,
parent=self, log=self.log)
time.sleep(.25)
# resubmit stream
r = ZMQStream(ctx.socket(zmq.DEALER), loop)
url = util.disambiguate_url(self.client_url('task'))
r.connect(url)
# convert seconds to msec
registration_timeout = 1000*self.registration_timeout
self.hub = Hub(loop=loop, session=self.session, monitor=sub, heartmonitor=self.heartmonitor,
query=q, notifier=n, resubmit=r, db=self.db,
engine_info=self.engine_info, client_info=self.client_info,
log=self.log, registration_timeout=registration_timeout)
class Hub(SessionFactory):
"""The IPython Controller Hub with 0MQ connections
Parameters
==========
loop: zmq IOLoop instance
session: Session object
<removed> context: zmq context for creating new connections (?)
queue: ZMQStream for monitoring the command queue (SUB)
query: ZMQStream for engine registration and client queries requests (ROUTER)
heartbeat: HeartMonitor object checking the pulse of the engines
notifier: ZMQStream for broadcasting engine registration changes (PUB)
db: connection to db for out of memory logging of commands
NotImplemented
engine_info: dict of zmq connection information for engines to connect
to the queues.
client_info: dict of zmq connection information for engines to connect
to the queues.
"""
engine_state_file = Unicode()
# internal data structures:
ids=Set() # engine IDs
keytable=Dict()
by_ident=Dict()
engines=Dict()
clients=Dict()
hearts=Dict()
pending=Set()
queues=Dict() # pending msg_ids keyed by engine_id
tasks=Dict() # pending msg_ids submitted as tasks, keyed by client_id
completed=Dict() # completed msg_ids keyed by engine_id
all_completed=Set() # completed msg_ids keyed by engine_id
dead_engines=Set() # completed msg_ids keyed by engine_id
unassigned=Set() # set of task msg_ds not yet assigned a destination
incoming_registrations=Dict()
registration_timeout=Integer()
_idcounter=Integer(0)
# objects from constructor:
query=Instance(ZMQStream)
monitor=Instance(ZMQStream)
notifier=Instance(ZMQStream)
resubmit=Instance(ZMQStream)
heartmonitor=Instance(HeartMonitor)
db=Instance(object)
client_info=Dict()
engine_info=Dict()
def __init__(self, **kwargs):
"""
# universal:
loop: IOLoop for creating future connections
session: streamsession for sending serialized data
# engine:
queue: ZMQStream for monitoring queue messages
query: ZMQStream for engine+client registration and client requests
heartbeat: HeartMonitor object for tracking engines
# extra:
db: ZMQStream for db connection (NotImplemented)
engine_info: zmq address/protocol dict for engine connections
client_info: zmq address/protocol dict for client connections
"""
super(Hub, self).__init__(**kwargs)
# register our callbacks
self.query.on_recv(self.dispatch_query)
self.monitor.on_recv(self.dispatch_monitor_traffic)
self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure)
self.heartmonitor.add_new_heart_handler(self.handle_new_heart)
self.monitor_handlers = {b'in' : self.save_queue_request,
b'out': self.save_queue_result,
b'intask': self.save_task_request,
b'outtask': self.save_task_result,
b'tracktask': self.save_task_destination,
b'incontrol': _passer,
b'outcontrol': _passer,
b'iopub': self.save_iopub_message,
}
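        # requests arriving on the query (ROUTER) socket, from clients and
        # registering engines: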
self.query_handlers = {'queue_request': self.queue_status,
'result_request': self.get_results,
'history_request': self.get_history,
'db_request': self.db_query,
'purge_request': self.purge_results,
'load_request': self.check_load,
'resubmit_request': self.resubmit_task,
'shutdown_request': self.shutdown_request,
'registration_request' : self.register_engine,
'unregistration_request' : self.unregister_engine,
'connection_request': self.connection_request,
}
# ignore resubmit replies
self.resubmit.on_recv(lambda msg: None, copy=False)
self.log.info("hub::created hub")
@property
def _next_id(self):
"""gemerate a new ID.
No longer reuse old ids, just count from 0."""
newid = self._idcounter
self._idcounter += 1
return newid
# newid = 0
# incoming = [id[0] for id in itervalues(self.incoming_registrations)]
# # print newid, self.ids, self.incoming_registrations
# while newid in self.ids or newid in incoming:
# newid += 1
# return newid
#-----------------------------------------------------------------------------
# message validation
#-----------------------------------------------------------------------------
def _validate_targets(self, targets):
"""turn any valid targets argument into a list of integer ids"""
if targets is None:
# default to all
return self.ids
if isinstance(targets, (int,str,unicode_type)):
# only one target specified
targets = [targets]
_targets = []
for t in targets:
# map raw identities to ids
if isinstance(t, (str,unicode_type)):
t = self.by_ident.get(cast_bytes(t), t)
_targets.append(t)
targets = _targets
bad_targets = [ t for t in targets if t not in self.ids ]
if bad_targets:
raise IndexError("No Such Engine: %r" % bad_targets)
if not targets:
raise IndexError("No Engines Registered")
return targets
#-----------------------------------------------------------------------------
# dispatch methods (1 per stream)
#-----------------------------------------------------------------------------
@util.log_errors
def dispatch_monitor_traffic(self, msg):
"""all ME and Task queue messages come through here, as well as
IOPub traffic."""
self.log.debug("monitor traffic: %r", msg[0])
switch = msg[0]
try:
idents, msg = self.session.feed_identities(msg[1:])
except ValueError:
idents=[]
if not idents:
self.log.error("Monitor message without topic: %r", msg)
return
handler = self.monitor_handlers.get(switch, None)
if handler is not None:
handler(idents, msg)
else:
self.log.error("Unrecognized monitor topic: %r", switch)
@util.log_errors
def dispatch_query(self, msg):
"""Route registration requests and queries from clients."""
try:
idents, msg = self.session.feed_identities(msg)
except ValueError:
idents = []
if not idents:
self.log.error("Bad Query Message: %r", msg)
return
client_id = idents[0]
try:
msg = self.session.unserialize(msg, content=True)
except Exception:
content = error.wrap_exception()
self.log.error("Bad Query Message: %r", msg, exc_info=True)
self.session.send(self.query, "hub_error", ident=client_id,
content=content)
return
# print client_id, header, parent, content
#switch on message type:
msg_type = msg['header']['msg_type']
self.log.info("client::client %r requested %r", client_id, msg_type)
handler = self.query_handlers.get(msg_type, None)
try:
assert handler is not None, "Bad Message Type: %r" % msg_type
except:
content = error.wrap_exception()
self.log.error("Bad Message Type: %r", msg_type, exc_info=True)
self.session.send(self.query, "hub_error", ident=client_id,
content=content)
return
else:
handler(idents, msg)
def dispatch_db(self, msg):
""""""
raise NotImplementedError
#---------------------------------------------------------------------------
# handler methods (1 per event)
#---------------------------------------------------------------------------
#----------------------- Heartbeat --------------------------------------
def handle_new_heart(self, heart):
"""handler to attach to heartbeater.
Called when a new heart starts to beat.
Triggers completion of registration."""
self.log.debug("heartbeat::handle_new_heart(%r)", heart)
if heart not in self.incoming_registrations:
self.log.info("heartbeat::ignoring new heart: %r", heart)
else:
self.finish_registration(heart)
def handle_heart_failure(self, heart):
"""handler to attach to heartbeater.
called when a previously registered heart fails to respond to beat request.
triggers unregistration"""
self.log.debug("heartbeat::handle_heart_failure(%r)", heart)
eid = self.hearts.get(heart, None)
uuid = self.engines[eid].uuid
if eid is None or self.keytable[eid] in self.dead_engines:
self.log.info("heartbeat::ignoring heart failure %r (not an engine or already dead)", heart)
else:
self.unregister_engine(heart, dict(content=dict(id=eid, queue=uuid)))
#----------------------- MUX Queue Traffic ------------------------------
def save_queue_request(self, idents, msg):
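        # Record a request submitted through the MUX queue: validate the target
        # engine, create or merge its record in the db, and track it as pending.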
if len(idents) < 2:
self.log.error("invalid identity prefix: %r", idents)
return
queue_id, client_id = idents[:2]
try:
msg = self.session.unserialize(msg)
except Exception:
self.log.error("queue::client %r sent invalid message to %r: %r", client_id, queue_id, msg, exc_info=True)
return
eid = self.by_ident.get(queue_id, None)
if eid is None:
self.log.error("queue::target %r not registered", queue_id)
self.log.debug("queue:: valid are: %r", self.by_ident.keys())
return
record = init_record(msg)
msg_id = record['msg_id']
self.log.info("queue::client %r submitted request %r to %s", client_id, msg_id, eid)
# Unicode in records
record['engine_uuid'] = queue_id.decode('ascii')
record['client_uuid'] = msg['header']['session']
record['queue'] = 'mux'
try:
            # it's possible iopub arrived first:
existing = self.db.get_record(msg_id)
for key,evalue in iteritems(existing):
rvalue = record.get(key, None)
if evalue and rvalue and evalue != rvalue:
self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
elif evalue and not rvalue:
record[key] = evalue
try:
self.db.update_record(msg_id, record)
except Exception:
self.log.error("DB Error updating record %r", msg_id, exc_info=True)
except KeyError:
try:
self.db.add_record(msg_id, record)
except Exception:
self.log.error("DB Error adding record %r", msg_id, exc_info=True)
self.pending.add(msg_id)
self.queues[eid].append(msg_id)
def save_queue_result(self, idents, msg):
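        # Match a MUX reply to its pending request and store the result
        # (header, metadata, content, buffers and timing) in the db.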
if len(idents) < 2:
self.log.error("invalid identity prefix: %r", idents)
return
client_id, queue_id = idents[:2]
try:
msg = self.session.unserialize(msg)
except Exception:
self.log.error("queue::engine %r sent invalid message to %r: %r",
queue_id, client_id, msg, exc_info=True)
return
eid = self.by_ident.get(queue_id, None)
if eid is None:
self.log.error("queue::unknown engine %r is sending a reply: ", queue_id)
return
parent = msg['parent_header']
if not parent:
return
msg_id = parent['msg_id']
if msg_id in self.pending:
self.pending.remove(msg_id)
self.all_completed.add(msg_id)
self.queues[eid].remove(msg_id)
self.completed[eid].append(msg_id)
self.log.info("queue::request %r completed on %s", msg_id, eid)
elif msg_id not in self.all_completed:
# it could be a result from a dead engine that died before delivering the
# result
self.log.warn("queue:: unknown msg finished %r", msg_id)
return
# update record anyway, because the unregistration could have been premature
rheader = msg['header']
md = msg['metadata']
completed = rheader['date']
started = extract_dates(md.get('started', None))
result = {
'result_header' : rheader,
'result_metadata': md,
'result_content': msg['content'],
'received': datetime.now(),
'started' : started,
'completed' : completed
}
result['result_buffers'] = msg['buffers']
try:
self.db.update_record(msg_id, result)
except Exception:
self.log.error("DB Error updating record %r", msg_id, exc_info=True)
#--------------------- Task Queue Traffic ------------------------------
def save_task_request(self, idents, msg):
"""Save the submission of a task."""
client_id = idents[0]
try:
msg = self.session.unserialize(msg)
except Exception:
self.log.error("task::client %r sent invalid task message: %r",
client_id, msg, exc_info=True)
return
record = init_record(msg)
record['client_uuid'] = msg['header']['session']
record['queue'] = 'task'
header = msg['header']
msg_id = header['msg_id']
self.pending.add(msg_id)
self.unassigned.add(msg_id)
try:
            # it's possible iopub arrived first:
existing = self.db.get_record(msg_id)
if existing['resubmitted']:
for key in ('submitted', 'client_uuid', 'buffers'):
# don't clobber these keys on resubmit
# submitted and client_uuid should be different
# and buffers might be big, and shouldn't have changed
record.pop(key)
# still check content,header which should not change
# but are not expensive to compare as buffers
for key,evalue in iteritems(existing):
if key.endswith('buffers'):
# don't compare buffers
continue
rvalue = record.get(key, None)
if evalue and rvalue and evalue != rvalue:
self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
elif evalue and not rvalue:
record[key] = evalue
try:
self.db.update_record(msg_id, record)
except Exception:
self.log.error("DB Error updating record %r", msg_id, exc_info=True)
except KeyError:
try:
self.db.add_record(msg_id, record)
except Exception:
self.log.error("DB Error adding record %r", msg_id, exc_info=True)
except Exception:
self.log.error("DB Error saving task request %r", msg_id, exc_info=True)
def save_task_result(self, idents, msg):
"""save the result of a completed task."""
client_id = idents[0]
try:
msg = self.session.unserialize(msg)
except Exception:
self.log.error("task::invalid task result message send to %r: %r",
client_id, msg, exc_info=True)
return
parent = msg['parent_header']
if not parent:
# print msg
self.log.warn("Task %r had no parent!", msg)
return
msg_id = parent['msg_id']
if msg_id in self.unassigned:
self.unassigned.remove(msg_id)
header = msg['header']
md = msg['metadata']
engine_uuid = md.get('engine', u'')
eid = self.by_ident.get(cast_bytes(engine_uuid), None)
status = md.get('status', None)
if msg_id in self.pending:
self.log.info("task::task %r finished on %s", msg_id, eid)
self.pending.remove(msg_id)
self.all_completed.add(msg_id)
if eid is not None:
if status != 'aborted':
self.completed[eid].append(msg_id)
if msg_id in self.tasks[eid]:
self.tasks[eid].remove(msg_id)
completed = header['date']
started = extract_dates(md.get('started', None))
result = {
'result_header' : header,
'result_metadata': msg['metadata'],
'result_content': msg['content'],
'started' : started,
'completed' : completed,
'received' : datetime.now(),
'engine_uuid': engine_uuid,
}
result['result_buffers'] = msg['buffers']
try:
self.db.update_record(msg_id, result)
except Exception:
self.log.error("DB Error saving task request %r", msg_id, exc_info=True)
else:
self.log.debug("task::unknown task %r finished", msg_id)
def save_task_destination(self, idents, msg):
try:
msg = self.session.unserialize(msg, content=True)
except Exception:
self.log.error("task::invalid task tracking message", exc_info=True)
return
content = msg['content']
# print (content)
msg_id = content['msg_id']
engine_uuid = content['engine_id']
eid = self.by_ident[cast_bytes(engine_uuid)]
self.log.info("task::task %r arrived on %r", msg_id, eid)
if msg_id in self.unassigned:
self.unassigned.remove(msg_id)
# else:
# self.log.debug("task::task %r not listed as MIA?!"%(msg_id))
self.tasks[eid].append(msg_id)
# self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid))
try:
self.db.update_record(msg_id, dict(engine_uuid=engine_uuid))
except Exception:
self.log.error("DB Error saving task destination %r", msg_id, exc_info=True)
def mia_task_request(self, idents, msg):
raise NotImplementedError
client_id = idents[0]
# content = dict(mia=self.mia,status='ok')
# self.session.send('mia_reply', content=content, idents=client_id)
#--------------------- IOPub Traffic ------------------------------
def save_iopub_message(self, topics, msg):
"""save an iopub message into the db"""
# print (topics)
try:
msg = self.session.unserialize(msg, content=True)
except Exception:
self.log.error("iopub::invalid IOPub message", exc_info=True)
return
parent = msg['parent_header']
if not parent:
self.log.warn("iopub::IOPub message lacks parent: %r", msg)
return
msg_id = parent['msg_id']
msg_type = msg['header']['msg_type']
content = msg['content']
# ensure msg_id is in db
try:
rec = self.db.get_record(msg_id)
except KeyError:
rec = empty_record()
rec['msg_id'] = msg_id
self.db.add_record(msg_id, rec)
# stream
d = {}
if msg_type == 'stream':
name = content['name']
s = rec[name] or ''
d[name] = s + content['data']
elif msg_type == 'pyerr':
d['pyerr'] = content
elif msg_type == 'pyin':
d['pyin'] = content['code']
elif msg_type in ('display_data', 'pyout'):
d[msg_type] = content
elif msg_type == 'status':
pass
elif msg_type == 'data_pub':
self.log.info("ignored data_pub message for %s" % msg_id)
else:
self.log.warn("unhandled iopub msg_type: %r", msg_type)
if not d:
return
try:
self.db.update_record(msg_id, d)
except Exception:
self.log.error("DB Error saving iopub message %r", msg_id, exc_info=True)
#-------------------------------------------------------------------------
# Registration requests
#-------------------------------------------------------------------------
def connection_request(self, client_id, msg):
"""Reply with connection addresses for clients."""
self.log.info("client::client %r connected", client_id)
content = dict(status='ok')
jsonable = {}
for k,v in iteritems(self.keytable):
if v not in self.dead_engines:
jsonable[str(k)] = v
content['engines'] = jsonable
self.session.send(self.query, 'connection_reply', content, parent=msg, ident=client_id)
def register_engine(self, reg, msg):
"""Register a new engine."""
content = msg['content']
try:
uuid = content['uuid']
except KeyError:
self.log.error("registration::queue not specified", exc_info=True)
return
eid = self._next_id
self.log.debug("registration::register_engine(%i, %r)", eid, uuid)
content = dict(id=eid,status='ok',hb_period=self.heartmonitor.period)
# check if requesting available IDs:
if cast_bytes(uuid) in self.by_ident:
try:
raise KeyError("uuid %r in use" % uuid)
except:
content = error.wrap_exception()
self.log.error("uuid %r in use", uuid, exc_info=True)
else:
for h, ec in iteritems(self.incoming_registrations):
if uuid == h:
try:
raise KeyError("heart_id %r in use" % uuid)
except:
self.log.error("heart_id %r in use", uuid, exc_info=True)
content = error.wrap_exception()
break
elif uuid == ec.uuid:
try:
raise KeyError("uuid %r in use" % uuid)
except:
self.log.error("uuid %r in use", uuid, exc_info=True)
content = error.wrap_exception()
break
msg = self.session.send(self.query, "registration_reply",
content=content,
ident=reg)
heart = cast_bytes(uuid)
if content['status'] == 'ok':
if heart in self.heartmonitor.hearts:
# already beating
self.incoming_registrations[heart] = EngineConnector(id=eid,uuid=uuid)
self.finish_registration(heart)
else:
purge = lambda : self._purge_stalled_registration(heart)
dc = ioloop.DelayedCallback(purge, self.registration_timeout, self.loop)
dc.start()
self.incoming_registrations[heart] = EngineConnector(id=eid,uuid=uuid,stallback=dc)
else:
self.log.error("registration::registration %i failed: %r", eid, content['evalue'])
return eid
def unregister_engine(self, ident, msg):
"""Unregister an engine that explicitly requested to leave."""
try:
eid = msg['content']['id']
except:
self.log.error("registration::bad engine id for unregistration: %r", ident, exc_info=True)
return
self.log.info("registration::unregister_engine(%r)", eid)
# print (eid)
uuid = self.keytable[eid]
content=dict(id=eid, uuid=uuid)
self.dead_engines.add(uuid)
# self.ids.remove(eid)
# uuid = self.keytable.pop(eid)
#
# ec = self.engines.pop(eid)
# self.hearts.pop(ec.heartbeat)
# self.by_ident.pop(ec.queue)
# self.completed.pop(eid)
handleit = lambda : self._handle_stranded_msgs(eid, uuid)
dc = ioloop.DelayedCallback(handleit, self.registration_timeout, self.loop)
dc.start()
############## TODO: HANDLE IT ################
self._save_engine_state()
if self.notifier:
self.session.send(self.notifier, "unregistration_notification", content=content)
def _handle_stranded_msgs(self, eid, uuid):
"""Handle messages known to be on an engine when the engine unregisters.
It is possible that this will fire prematurely - that is, an engine will
go down after completing a result, and the client will be notified
that the result failed and later receive the actual result.
"""
outstanding = self.queues[eid]
for msg_id in outstanding:
self.pending.remove(msg_id)
self.all_completed.add(msg_id)
try:
raise error.EngineError("Engine %r died while running task %r" % (eid, msg_id))
except:
content = error.wrap_exception()
# build a fake header:
header = {}
header['engine'] = uuid
header['date'] = datetime.now()
rec = dict(result_content=content, result_header=header, result_buffers=[])
rec['completed'] = header['date']
rec['engine_uuid'] = uuid
try:
self.db.update_record(msg_id, rec)
except Exception:
self.log.error("DB Error handling stranded msg %r", msg_id, exc_info=True)
def finish_registration(self, heart):
"""Second half of engine registration, called after our HeartMonitor
has received a beat from the Engine's Heart."""
try:
ec = self.incoming_registrations.pop(heart)
except KeyError:
self.log.error("registration::tried to finish nonexistant registration", exc_info=True)
return
self.log.info("registration::finished registering engine %i:%s", ec.id, ec.uuid)
if ec.stallback is not None:
ec.stallback.stop()
eid = ec.id
self.ids.add(eid)
self.keytable[eid] = ec.uuid
self.engines[eid] = ec
self.by_ident[cast_bytes(ec.uuid)] = ec.id
self.queues[eid] = list()
self.tasks[eid] = list()
self.completed[eid] = list()
self.hearts[heart] = eid
content = dict(id=eid, uuid=self.engines[eid].uuid)
if self.notifier:
self.session.send(self.notifier, "registration_notification", content=content)
self.log.info("engine::Engine Connected: %i", eid)
self._save_engine_state()
def _purge_stalled_registration(self, heart):
if heart in self.incoming_registrations:
ec = self.incoming_registrations.pop(heart)
self.log.info("registration::purging stalled registration: %i", ec.id)
else:
pass
#-------------------------------------------------------------------------
# Engine State
#-------------------------------------------------------------------------
def _cleanup_engine_state_file(self):
"""cleanup engine state mapping"""
if os.path.exists(self.engine_state_file):
self.log.debug("cleaning up engine state: %s", self.engine_state_file)
try:
os.remove(self.engine_state_file)
except IOError:
self.log.error("Couldn't cleanup file: %s", self.engine_state_file, exc_info=True)
def _save_engine_state(self):
"""save engine mapping to JSON file"""
if not self.engine_state_file:
return
self.log.debug("save engine state to %s" % self.engine_state_file)
state = {}
engines = {}
for eid, ec in iteritems(self.engines):
if ec.uuid not in self.dead_engines:
engines[eid] = ec.uuid
state['engines'] = engines
state['next_id'] = self._idcounter
with open(self.engine_state_file, 'w') as f:
json.dump(state, f)
def _load_engine_state(self):
"""load engine mapping from JSON file"""
if not os.path.exists(self.engine_state_file):
return
self.log.info("loading engine state from %s" % self.engine_state_file)
with open(self.engine_state_file) as f:
state = json.load(f)
save_notifier = self.notifier
self.notifier = None
for eid, uuid in iteritems(state['engines']):
heart = uuid.encode('ascii')
# start with this heart as current and beating:
self.heartmonitor.responses.add(heart)
self.heartmonitor.hearts.add(heart)
self.incoming_registrations[heart] = EngineConnector(id=int(eid), uuid=uuid)
self.finish_registration(heart)
self.notifier = save_notifier
self._idcounter = state['next_id']
#-------------------------------------------------------------------------
# Client Requests
#-------------------------------------------------------------------------
def shutdown_request(self, client_id, msg):
"""handle shutdown request."""
self.session.send(self.query, 'shutdown_reply', content={'status': 'ok'}, ident=client_id)
# also notify other clients of shutdown
self.session.send(self.notifier, 'shutdown_notice', content={'status': 'ok'})
dc = ioloop.DelayedCallback(lambda : self._shutdown(), 1000, self.loop)
dc.start()
def _shutdown(self):
self.log.info("hub::hub shutting down.")
time.sleep(0.1)
sys.exit(0)
def check_load(self, client_id, msg):
content = msg['content']
try:
targets = content['targets']
targets = self._validate_targets(targets)
except:
content = error.wrap_exception()
self.session.send(self.query, "hub_error",
content=content, ident=client_id)
return
content = dict(status='ok')
# loads = {}
for t in targets:
content[bytes(t)] = len(self.queues[t])+len(self.tasks[t])
self.session.send(self.query, "load_reply", content=content, ident=client_id)
def queue_status(self, client_id, msg):
"""Return the Queue status of one or more targets.
If verbose, return the msg_ids, else return len of each type.
Keys:
* queue (pending MUX jobs)
* tasks (pending Task jobs)
* completed (finished jobs from both queues)
"""
content = msg['content']
targets = content['targets']
try:
targets = self._validate_targets(targets)
except:
content = error.wrap_exception()
self.session.send(self.query, "hub_error",
content=content, ident=client_id)
return
verbose = content.get('verbose', False)
content = dict(status='ok')
for t in targets:
queue = self.queues[t]
completed = self.completed[t]
tasks = self.tasks[t]
if not verbose:
queue = len(queue)
completed = len(completed)
tasks = len(tasks)
            content[str(t)] = {'queue': queue, 'completed': completed, 'tasks': tasks}
content['unassigned'] = list(self.unassigned) if verbose else len(self.unassigned)
# print (content)
self.session.send(self.query, "queue_reply", content=content, ident=client_id)
def purge_results(self, client_id, msg):
"""Purge results from memory. This method is more valuable before we move
to a DB based message storage mechanism."""
content = msg['content']
self.log.info("Dropping records with %s", content)
msg_ids = content.get('msg_ids', [])
reply = dict(status='ok')
if msg_ids == 'all':
try:
self.db.drop_matching_records(dict(completed={'$ne':None}))
except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
else:
pending = [m for m in msg_ids if (m in self.pending)]
if pending:
try:
raise IndexError("msg pending: %r" % pending[0])
                except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
else:
try:
self.db.drop_matching_records(dict(msg_id={'$in':msg_ids}))
except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
if reply['status'] == 'ok':
eids = content.get('engine_ids', [])
for eid in eids:
if eid not in self.engines:
try:
raise IndexError("No such engine: %i" % eid)
                except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
break
uid = self.engines[eid].uuid
try:
self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne':None}))
except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
break
self.session.send(self.query, 'purge_reply', content=reply, ident=client_id)
def resubmit_task(self, client_id, msg):
"""Resubmit one or more tasks."""
def finish(reply):
self.session.send(self.query, 'resubmit_reply', content=reply, ident=client_id)
content = msg['content']
msg_ids = content['msg_ids']
reply = dict(status='ok')
try:
records = self.db.find_records({'msg_id' : {'$in' : msg_ids}}, keys=[
'header', 'content', 'buffers'])
except Exception:
self.log.error('db::db error finding tasks to resubmit', exc_info=True)
return finish(error.wrap_exception())
# validate msg_ids
found_ids = [ rec['msg_id'] for rec in records ]
pending_ids = [ msg_id for msg_id in found_ids if msg_id in self.pending ]
if len(records) > len(msg_ids):
try:
raise RuntimeError("DB appears to be in an inconsistent state."
"More matching records were found than should exist")
except Exception:
self.log.exception("Failed to resubmit task")
return finish(error.wrap_exception())
elif len(records) < len(msg_ids):
missing = [ m for m in msg_ids if m not in found_ids ]
try:
raise KeyError("No such msg(s): %r" % missing)
except KeyError:
self.log.exception("Failed to resubmit task")
return finish(error.wrap_exception())
elif pending_ids:
pass
# no need to raise on resubmit of pending task, now that we
# resubmit under new ID, but do we want to raise anyway?
# msg_id = invalid_ids[0]
# try:
# raise ValueError("Task(s) %r appears to be inflight" % )
# except Exception:
# return finish(error.wrap_exception())
# mapping of original IDs to resubmitted IDs
resubmitted = {}
# send the messages
for rec in records:
header = rec['header']
msg = self.session.msg(header['msg_type'], parent=header)
msg_id = msg['msg_id']
msg['content'] = rec['content']
# use the old header, but update msg_id and timestamp
fresh = msg['header']
header['msg_id'] = fresh['msg_id']
header['date'] = fresh['date']
msg['header'] = header
self.session.send(self.resubmit, msg, buffers=rec['buffers'])
resubmitted[rec['msg_id']] = msg_id
self.pending.add(msg_id)
msg['buffers'] = rec['buffers']
try:
self.db.add_record(msg_id, init_record(msg))
except Exception:
self.log.error("db::DB Error updating record: %s", msg_id, exc_info=True)
return finish(error.wrap_exception())
finish(dict(status='ok', resubmitted=resubmitted))
# store the new IDs in the Task DB
for msg_id, resubmit_id in iteritems(resubmitted):
try:
self.db.update_record(msg_id, {'resubmitted' : resubmit_id})
except Exception:
self.log.error("db::DB Error updating record: %s", msg_id, exc_info=True)
def _extract_record(self, rec):
"""decompose a TaskRecord dict into subsection of reply for get_result"""
io_dict = {}
for key in ('pyin', 'pyout', 'pyerr', 'stdout', 'stderr'):
io_dict[key] = rec[key]
content = {
'header': rec['header'],
'metadata': rec['metadata'],
'result_metadata': rec['result_metadata'],
'result_header' : rec['result_header'],
'result_content': rec['result_content'],
'received' : rec['received'],
'io' : io_dict,
}
if rec['result_buffers']:
buffers = list(map(bytes, rec['result_buffers']))
else:
buffers = []
return content, buffers
def get_results(self, client_id, msg):
"""Get the result of 1 or more messages."""
content = msg['content']
msg_ids = sorted(set(content['msg_ids']))
statusonly = content.get('status_only', False)
pending = []
completed = []
content = dict(status='ok')
content['pending'] = pending
content['completed'] = completed
buffers = []
if not statusonly:
try:
matches = self.db.find_records(dict(msg_id={'$in':msg_ids}))
# turn match list into dict, for faster lookup
records = {}
for rec in matches:
records[rec['msg_id']] = rec
except Exception:
content = error.wrap_exception()
self.log.exception("Failed to get results")
self.session.send(self.query, "result_reply", content=content,
parent=msg, ident=client_id)
return
else:
records = {}
for msg_id in msg_ids:
if msg_id in self.pending:
pending.append(msg_id)
elif msg_id in self.all_completed:
completed.append(msg_id)
if not statusonly:
c,bufs = self._extract_record(records[msg_id])
content[msg_id] = c
buffers.extend(bufs)
elif msg_id in records:
                if records[msg_id]['completed']:
completed.append(msg_id)
c,bufs = self._extract_record(records[msg_id])
content[msg_id] = c
buffers.extend(bufs)
else:
pending.append(msg_id)
else:
try:
raise KeyError('No such message: '+msg_id)
                except Exception:
content = error.wrap_exception()
break
self.session.send(self.query, "result_reply", content=content,
parent=msg, ident=client_id,
buffers=buffers)
def get_history(self, client_id, msg):
"""Get a list of all msg_ids in our DB records"""
try:
msg_ids = self.db.get_history()
except Exception as e:
content = error.wrap_exception()
self.log.exception("Failed to get history")
else:
content = dict(status='ok', history=msg_ids)
self.session.send(self.query, "history_reply", content=content,
parent=msg, ident=client_id)
def db_query(self, client_id, msg):
"""Perform a raw query on the task record database."""
content = msg['content']
query = extract_dates(content.get('query', {}))
keys = content.get('keys', None)
buffers = []
empty = list()
try:
records = self.db.find_records(query, keys)
except Exception as e:
content = error.wrap_exception()
self.log.exception("DB query failed")
else:
# extract buffers from reply content:
if keys is not None:
buffer_lens = [] if 'buffers' in keys else None
result_buffer_lens = [] if 'result_buffers' in keys else None
else:
buffer_lens = None
result_buffer_lens = None
for rec in records:
# buffers may be None, so double check
b = rec.pop('buffers', empty) or empty
if buffer_lens is not None:
buffer_lens.append(len(b))
buffers.extend(b)
rb = rec.pop('result_buffers', empty) or empty
if result_buffer_lens is not None:
result_buffer_lens.append(len(rb))
buffers.extend(rb)
content = dict(status='ok', records=records, buffer_lens=buffer_lens,
result_buffer_lens=result_buffer_lens)
# self.log.debug (content)
self.session.send(self.query, "db_reply", content=content,
parent=msg, ident=client_id,
buffers=buffers)
| mit | 2,754,891,311,389,879,300 | 36.89303 | 118 | 0.537309 | false |
nicholaslocascio/deep-regex | deep-regex-model/nearest_neighbors_model.py | 1 | 2990 | import sys
import argparse
import subprocess
from sklearn.neighbors import NearestNeighbors
import numpy as np
from regexDFAEquals import regex_equiv_from_raw
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_dir', help="data_dir",
type=str, required=True)
    parser.add_argument('--alt_eval', help="alt-eval",
                        action='store_true', default=False)
args = parser.parse_args(arguments)
train_x_lines = [line.rstrip('\n') for line in open("{}/{}".format(args.data_dir, "src-train.txt"))]
train_y_lines = [line.rstrip('\n') for line in open("{}/{}".format(args.data_dir, "targ-train.txt"))]
if args.alt_eval:
eval_x_lines = [line.rstrip('\n') for line in open("{}/{}".format(args.data_dir, "src-test.txt"))]
eval_y_lines = [line.rstrip('\n') for line in open("{}/{}".format(args.data_dir, "targ-test.txt"))]
else:
eval_x_lines = [line.rstrip('\n') for line in open("{}/{}".format(args.data_dir, "src-val.txt"))]
eval_y_lines = [line.rstrip('\n') for line in open("{}/{}".format(args.data_dir, "targ-val.txt"))]
do_classify(train_x_lines, train_y_lines, eval_x_lines, eval_y_lines)
def do_classify(train_x, train_y, test_x, test_y):
train_x_bow, test_x_bow = get_all_bow(train_x, test_x)
classifier = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(train_x_bow)
distances, indices = classifier.kneighbors(test_x_bow)
indices = [index[0] for index in indices]
exact = 0.0
dfa_equal = 0.0
for row_index in range(len(test_x_bow)):
gold = test_y[row_index]
pred_index = indices[row_index]
pred = train_y[pred_index]
print("PRED: {}".format(pred))
print("GOLD: {}".format(gold))
if pred == gold:
exact += 1.0
print("string equal")
if regex_equiv_from_raw(pred, gold):
dfa_equal += 1.0
print("dfa equal")
print("")
print("{} String-Equal Correct".format(exact/len(test_x_bow)))
print("{} DFA-Equal Correct".format(dfa_equal/len(test_x_bow)))
def get_all_bow(train_x, test_x):
bow_word_set = {'<UNK>'}
for data in [train_x, test_x]:
for line in data:
for word in line.split(' '):
bow_word_set.add(word)
print(bow_word_set)
train_all_bow = []
test_all_bow = []
for line in train_x:
bow = get_bow(line, bow_word_set)
train_all_bow.append(bow)
for line in test_x:
bow = get_bow(line, bow_word_set)
test_all_bow.append(bow)
return np.array(train_all_bow), np.array(test_all_bow)
def get_bow(line, bow_word_set):
bow = {word : 0 for word in bow_word_set}
for word in line.split(' '):
bow[word] += 1
    return [bow[word] for word in sorted(bow_word_set)]
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | mit | 2,719,777,218,254,576,600 | 33.37931 | 107 | 0.592642 | false |
yvaucher/bank-payment | account_direct_debit/models/payment_line.py | 2 | 7320 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
from openerp import netsvc
from openerp.tools.translate import _
class PaymentLine(orm.Model):
_inherit = 'payment.line'
def debit_storno(self, cr, uid, payment_line_id, amount,
currency, storno_retry=True, context=None):
"""The processing of a storno is triggered by a debit
transfer on one of the company's bank accounts.
This method offers to re-reconcile the original debit
payment. For this purpose, we have registered that
payment move on the payment line.
Return the (now incomplete) reconcile id. The caller MUST
re-reconcile this reconcile with the bank transfer and
re-open the associated invoice.
:param payment_line_id: the single payment line id
:param amount: the (signed) amount debited from the bank account
:param currency: the bank account's currency *browse object*
:param boolean storno_retry: when True, attempt to reopen the invoice,
set the invoice to 'Debit denied' otherwise.
:return: an incomplete reconcile for the caller to fill
:rtype: database id of an account.move.reconcile resource.
"""
reconcile_obj = self.pool.get('account.move.reconcile')
line = self.browse(cr, uid, payment_line_id)
reconcile_id = False
if (line.transit_move_line_id and not line.storno and
self.pool.get('res.currency').is_zero(
cr, uid, currency, (
(line.transit_move_line_id.credit or 0.0) -
(line.transit_move_line_id.debit or 0.0) + amount))):
# Two different cases, full and partial
# Both cases differ subtly in the procedure to follow
            # Needs refactoring, but why is this not in the OpenERP API?
# Actually, given the nature of a direct debit order and storno,
# we should not need to take partial into account on the side of
# the transit_move_line.
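            # Reminder on the ORM command tuples used below (standard OpenERP
            # one2many/many2many write commands):
            #   (3, id)       -> drop the link to `id`, keep the record itself
            #   (6, 0, [ids]) -> replace the whole set with `[ids]`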
if line.transit_move_line_id.reconcile_partial_id:
reconcile_id = \
line.transit_move_line_id.reconcile_partial_id.id
                reconcile = line.transit_move_line_id.reconcile_partial_id
if len(reconcile.line_partial_ids) == 2:
# reuse the simple reconcile for the storno transfer
reconcile_obj.write(
cr, uid, reconcile_id,
                        {'line_id': [(6, 0, [line.transit_move_line_id.id])],
'line_partial_ids': [(6, 0, [])]}, context=context)
else:
# split up the original reconcile in a partial one
# and a new one for reconciling the storno transfer
reconcile = {
'line_partial_ids': [(3, line.transit_move_line_id.id)]
}
reconcile_obj.write(
cr, uid, reconcile_id, reconcile, context=context)
reconcile_id = reconcile_obj.create(
cr, uid,
{'type': 'auto',
                         'line_id': [(6, 0, [line.transit_move_line_id.id])]},
context=context)
elif line.transit_move_line_id.reconcile_id:
reconcile_id = line.transit_move_line_id.reconcile_id.id
if len(line.transit_move_line_id.reconcile_id.line_id) == 2:
# reuse the simple reconcile for the storno transfer
reconcile_obj.write(
cr, uid, reconcile_id,
{'line_id': [(6, 0, [line.transit_move_line_id.id])]},
context=context)
else:
# split up the original reconcile in a partial one
# and a new one for reconciling the storno transfer
reconcile = line.transit_move_line_id.reconcile_id
partial_ids = [x.id for x in reconcile.line_id
if x.id != line.transit_move_line_id.id]
reconcile_obj.write(
cr, uid, reconcile_id,
{'line_partial_ids': [(6, 0, partial_ids)],
'line_id': [(6, 0, [])]}, context=context)
reconcile_id = reconcile_obj.create(
cr, uid,
{'type': 'auto',
                     'line_id': [(6, 0, [line.transit_move_line_id.id])]},
context=context)
# mark the payment line for storno processed
if reconcile_id:
self.write(cr, uid, [payment_line_id],
{'storno': True}, context=context)
# put forth the invoice workflow
if line.move_line_id.invoice:
activity = (storno_retry and 'open_test'
or 'invoice_debit_denied')
netsvc.LocalService("workflow").trg_validate(
uid, 'account.invoice', line.move_line_id.invoice.id,
activity, cr)
return reconcile_id
def get_storno_account_id(
self, cr, uid, payment_line_id, amount, currency, context=None):
"""Check the match of the arguments, and return the account associated
with the storno.
Used in account_banking interactive mode
:param payment_line_id: the single payment line id
:param amount: the (signed) amount debited from the bank account
:param currency: the bank account's currency *browse object*
:return: an account if there is a full match, False otherwise
:rtype: database id of an account.account resource.
"""
line = self.browse(cr, uid, payment_line_id)
account_id = False
if (line.transit_move_line_id and not line.storno and
self.pool.get('res.currency').is_zero(
cr, uid, currency, (
(line.transit_move_line_id.credit or 0.0) -
(line.transit_move_line_id.debit or 0.0) + amount))):
account_id = line.transit_move_line_id.account_id.id
return account_id
def debit_reconcile(self, cr, uid, payment_line_id, context=None):
"""Raise if a payment line is passed for which storno is True."""
if isinstance(payment_line_id, (list, tuple)):
payment_line_id = payment_line_id[0]
payment_line_vals = self.read(
cr, uid, payment_line_id, ['storno', 'name'], context=context)
if payment_line_vals['storno']:
raise orm.except_orm(
_('Can not reconcile'),
_('Cancelation of payment line \'%s\' has already been '
'processed') % payment_line_vals['name'])
return super(PaymentLine, self).debit_reconcile(
cr, uid, payment_line_id, context=context)
_columns = {
'storno': fields.boolean(
'Storno',
readonly=True,
help=("If this is true, the debit order has been canceled "
"by the bank or by the customer")),
}
| agpl-3.0 | 8,172,708,525,495,252,000 | 49.482759 | 79 | 0.544262 | false |
VisTrails/VisTrails | vistrails/packages/tensorflow/base.py | 2 | 9926 | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import itertools
import tensorflow
from vistrails.core.modules.config import ModuleSettings
from vistrails.core.modules.vistrails_module import Module, ModuleError
class Op(object):
def __init__(self, op, args):
"""Constructor from a function and its arguments.
This is the type actually passed on TFOperation ports. It represents a
future TensorFlow operation; the actual operation is only created from
the Run module, allowing multiple graphs to be used (and the same
VisTrails-defined graph to be used from multiple Run modules).
:type args: dict | collections.Iterable
"""
self.op = op
self.args = args
def build(self, operation_map):
"""Builds the graph, by instanciating the operations recursively.
"""
if self in operation_map:
return operation_map[self]
else:
def build(op):
if isinstance(op, list):
return [build(e) for e in op]
else:
return op.build(operation_map)
if isinstance(self.args, dict):
kwargs = dict((k, build(v))
for k, v in self.args.iteritems())
obj = self.op(**kwargs)
else:
args = [build(a) for a in self.args]
obj = self.op(*args)
operation_map[self] = obj
return obj
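# Rough sketch of how Op instances defer graph construction (illustrative only,
# not part of this package's API):
#   a = Op(lambda: tensorflow.constant(1.0), [])
#   b = Op(lambda: tensorflow.constant(2.0), [])
#   c = Op(tensorflow.add, [a, b])
#   c.build({})  # builds a and b first, then the add node, caching each Op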
class TFOperation(Module):
"""A TensorFlow operation that will be run by Run as part of the graph.
"""
_settings = ModuleSettings(abstract=True)
_output_ports = [
('output', '(org.vistrails.vistrails.tensorflow:TFOperation)')]
def compute(self):
raise NotImplementedError
class constant(TFOperation):
"""A TensorFlow operation that simply output a constant into the graph.
Note that it is only constant from TensorFlow's point of view; it can be
the output of another VisTrails module.
"""
_input_ports = [('value', '(basic:Variant)')]
def compute(self):
value = self.get_input('value')
self.set_output('output', Op(lambda: tensorflow.constant(value), []))
class cast(TFOperation):
"""Casts tensors to the specific scalar type.
"""
_input_ports = [('value', TFOperation),
('type', '(basic:String)')]
def compute(self):
value = self.get_input('value')
type_ = self.get_input('type')
self.set_output('output',
Op(lambda x: tensorflow.cast(x, type_), [value]))
class Variable(TFOperation):
"""A variable, that update its state between TensorFlow iterations.
"""
_input_ports = [('initial_value', TFOperation)]
_output_ports = [
('output', '(org.vistrails.vistrails.tensorflow:Variable)')]
def compute(self):
initial_value = self.get_input('initial_value')
self.set_output('output', Op(tensorflow.Variable, [initial_value]))
class Optimizer(Module):
_settings = ModuleSettings(abstract=True,
namespace='train|optimizer')
class minimize(TFOperation):
__doc__ = tensorflow.train.Optimizer.__doc__
_settings = ModuleSettings(namespace='train|optimizer')
_input_ports = [('optimizer', Optimizer),
('loss', TFOperation),
('global_step', Variable, {'optional': True}),
('var_list', Variable, {'depth': 1, 'optional': True}),
('gate_gradients', '(basic:String)',
{'optional': True, 'entry_types': '["enum"]',
'values': '[["GATE_NONE", "GATE_OP", "GATE_GRAPH"]]'}),
('name', '(basic:String)', {'optional': True})]
_GATE_GRADIENTS = {'GATE_NONE': tensorflow.train.Optimizer.GATE_NONE,
'GATE_OP': tensorflow.train.Optimizer.GATE_OP,
'GATE_GRAPH': tensorflow.train.Optimizer.GATE_GRAPH}
def compute(self):
if self.has_input('gate_gradients'):
gate_gradients = self._GATE_GRADIENTS[
self.get_input('gate_gradients')]
else:
gate_gradients = None
name = self.force_get_input('name')
def output(optimizer, loss, **kwargs):
kw = {'loss': loss, 'name': name}
if gate_gradients is not None:
kw['gate_gradients'] = gate_gradients
kw.update(kwargs)
ret = optimizer.minimize(**kw)
return ret
kwargs = {'optimizer': self.get_input('optimizer'),
'loss': self.get_input('loss')}
if self.has_input('global_step'):
kwargs['global_step'] = self.get_input('global_step')
if self.has_input('var_list'):
kwargs['var_list'] = self.get_input('var_list')
self.set_output('output', Op(output, kwargs))
class RunResult(object):
def __init__(self, graph, session, operation_map, fetch_map):
self.graph = graph
self.session = session
self.operation_map = operation_map
self.fetch_map = fetch_map
class FeedGenerator(Module):
_settings = ModuleSettings(abstract=True)
class run(Module):
"""Instanciate and run a TensorFlow graph to make the results available.
"""
_input_ports = [('output', TFOperation, {'depth': 1}),
('iterations', '(basic:Integer)',
{'optional': True, 'defaults': '["1"]'}),
('after', '(org.vistrails.vistrails.tensorflow:run)'),
('feed_generator', FeedGenerator)]
_output_ports = [('result', '(org.vistrails.vistrails.tensorflow:run)')]
def compute(self):
outputs = self.get_input('output')
iterations = self.get_input('iterations')
if self.has_input('feed_generator'):
feeds = self.get_input('feed_generator')()
else:
feeds = None
if self.has_input('after'):
after = self.get_input('after')
graph = after.graph
session = after.session
operation_map = after.operation_map
else:
graph = tensorflow.Graph()
session = tensorflow.Session(graph=graph)
operation_map = {}
fetches = []
with graph.as_default():
for op in outputs:
fetches.append(op.build(operation_map))
if not self.has_input('after'):
session.run(tensorflow.initialize_all_variables())
for i in xrange(iterations):
feed_dict = None
if feeds is not None:
try:
feed_dict = next(feeds)
except StopIteration:
feeds = None
else:
feed_dict = dict((operation_map[op], value)
for op, value in feed_dict.iteritems())
out = session.run(fetches, feed_dict=feed_dict)
fetch_map = dict(itertools.izip(outputs, out))
self.set_output('result', RunResult(graph, session, operation_map,
fetch_map))
class fetch(Module):
"""Fetch the output of a TensorFlow operation after the graph has been run.
"""
_input_ports = [('result', run),
('op', TFOperation)]
_output_ports = [('value', '(basic:List)')]
def compute(self):
result = self.get_input('result')
op = self.get_input('op')
try:
value = result.fetch_map[op]
except KeyError:
raise ModuleError(self, "Requested operation was not passed in "
"the list of outputs of the run module")
self.set_output('value', value)
_modules = [TFOperation, constant, cast, Variable,
Optimizer, minimize,
FeedGenerator, run, fetch]
wrapped = set(['constant', 'cast', 'Variable'])
| bsd-3-clause | 5,559,374,266,293,038,000 | 36.037313 | 79 | 0.584525 | false |
cheapjack/MemoryCraft | HelloTemperature5.py | 1 | 3662 | #!/usr/bin/python
#Install the modules we need
#from pyfirmata import Arduino, util, INPUT
from mcpi import minecraft
from mcpi import minecraftstuff
from time import sleep
import server
import serial
# Set up the connection to the Arduino/Shrimp
# This may appear differently in Windows as COM0 or COM1 but in Unix like systems it's likely
# to be like this:
PORT = "/dev/tty.SLAB_USBtoUART"
ser = serial.Serial(PORT, 9600)
#shrimp = Arduino(PORT)
# If we get here things should be ready to go
print("Everything is connected up.")
sleep(0.5)
print("Reading from Serial...")
sleep(0.5)
# Use the command /getpos or F3 in Minecraft client to find out where you are then use those
# x, y, z coordinates to build things
# translate mc coords for mcpi ones
# add this to x
mcx = 177
# - this from y
mcy = 64
# - this from z
mcz = 135
# Connect to the server we use the imported server.py to make it work with CloudMaker
mc = minecraft.Minecraft.create(server.address)
#Post a message to the minecraft chat window
mc.postToChat("Ready to read from Temperature 5")
# Define a translate function that maps one value range to another:
def translate(value, leftMin, leftMax, rightMin, rightMax):
# Figure out how 'wide' each range is
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
# Convert the left range into a 0-1 range (float)
valueScaled = float(value - leftMin) / float(leftSpan)
# Convert the 0-1 range into a value in the right range.
return rightMin + (valueScaled * rightSpan)
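# For example (hypothetical reading): translate(25, 19, 30, 0, 40) maps a 25 degree
# reading from the 19-30 sensor range onto the 0-40 block range, giving roughly 21.8.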
# make an arbitrary start value for lastVal
lastVal = 1
# define a barchart function
def TemperatureChart5(startx, starty, startz, maxchartwidth, maxchartheight, blocktype, id):
global lastVal
# Make a stage
mc.setBlocks((startx + mcx) - 1, (starty-mcy) - 2, (startz-mcz) - 1, (startx + mcx) + maxchartwidth +1, (starty-mcy) -1, (startz - mcz) + (1 + maxchartwidth), 20)
# An array to store the temperature from the arduino over serial
tempstring = []
tempread = ser.readline()
# Add the serial message to tempstring
tempstring.append(tempread)
reading = tempstring.pop()
tempreading = reading[0:2]
# Option to read temperature for debugging
print "Temperature is ", tempreading
# Our main reading:
tempreadingint = int(tempreading)
# use the translate function to map temperature range to a defined block range
read_ranged = translate(tempreadingint, 19, 30, 0, maxchartheight)
# Wait a while
sleep(0.25)
# our ranged reading
readnow = int(read_ranged)
# print "Readnow is ranged to ", readnow
# compare if value has changed
# Build a chart according to Temperature Reading
if tempreadingint > lastVal:
barheight = readnow + lastVal
mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + maxchartwidth, (starty-mcy) + barheight, (startz - mcz) + maxchartwidth, blocktype, id)
mc.setBlocks((startx + mcx), (starty-mcy) + barheight, (startz-mcz), (startx + mcx) + maxchartwidth, maxchartheight, (startz - mcz) + maxchartwidth, 0)
elif tempreadingint < lastVal:
mc.setBlocks((startx+mcx), (starty-mcy) + lastVal, (startz-mcz), (startx+mcx)+maxchartwidth, (starty-mcy) + maxchartheight, (startz-mcz) + maxchartwidth, 0)
else:
lastVal = tempreadingint
sleep(0.25)
# Draw a stage
#Main Loop
while True:
# Remember your chart is (x_coord, x_coord, x_coord, chartwidth, maxchartheight, block block id(usually 0))
#TemperatureChart1(394, 68, -326, 2, 40, 35, 5)
#TemperatureChart2(394, 68, -318, 2, 40, 35, 4)
#TemperatureChart3(394, 68, -310, 2, 40, 35, 4)
#TemperatureChart4(394, 68, -302, 2, 40, 35, 4)
TemperatureChart5(394, 68, -294, 2, 40, 35, 1)
print "stopped"
| mit | -5,348,799,991,370,018,000 | 35.257426 | 163 | 0.722829 | false |
tysonclugg/django | django/contrib/gis/gdal/raster/band.py | 12 | 8024 | from ctypes import byref, c_double, c_int, c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.base import GDALRasterBase
from django.contrib.gis.shortcuts import numpy
from django.utils.encoding import force_text
from .const import GDAL_INTEGER_TYPES, GDAL_PIXEL_TYPES, GDAL_TO_CTYPES
class GDALBand(GDALRasterBase):
"""
Wrap a GDAL raster band, needs to be obtained from a GDALRaster object.
"""
def __init__(self, source, index):
self.source = source
self._ptr = capi.get_ds_raster_band(source._ptr, index)
def _flush(self):
"""
Call the flush method on the Band's parent raster and force a refresh
of the statistics attribute when requested the next time.
"""
self.source._flush()
self._stats_refresh = True
@property
def description(self):
"""
Return the description string of the band.
"""
return force_text(capi.get_band_description(self._ptr))
@property
def width(self):
"""
Width (X axis) in pixels of the band.
"""
return capi.get_band_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels of the band.
"""
return capi.get_band_ysize(self._ptr)
@property
def pixel_count(self):
"""
Return the total number of pixels in this band.
"""
return self.width * self.height
_stats_refresh = False
def statistics(self, refresh=False, approximate=False):
"""
Compute statistics on the pixel values of this band.
The return value is a tuple with the following structure:
(minimum, maximum, mean, standard deviation).
If approximate=True, the statistics may be computed based on overviews
or a subset of image tiles.
If refresh=True, the statistics will be computed from the data directly,
and the cache will be updated where applicable.
For empty bands (where all pixel values are nodata), all statistics
values are returned as None.
For raster formats using Persistent Auxiliary Metadata (PAM) services,
the statistics might be cached in an auxiliary file.
"""
# Prepare array with arguments for capi function
smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
stats_args = [
self._ptr, c_int(approximate), byref(smin), byref(smax),
byref(smean), byref(sstd), c_void_p(), c_void_p(),
]
if refresh or self._stats_refresh:
func = capi.compute_band_statistics
else:
# Add additional argument to force computation if there is no
# existing PAM file to take the values from.
force = True
stats_args.insert(2, c_int(force))
func = capi.get_band_statistics
# Computation of statistics fails for empty bands.
try:
func(*stats_args)
result = smin.value, smax.value, smean.value, sstd.value
except GDALException:
result = (None, None, None, None)
self._stats_refresh = False
return result
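    # e.g. (illustrative values only) band.statistics() -> (0.0, 255.0, 127.5, 73.9)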
@property
def min(self):
"""
Return the minimum pixel value for this band.
"""
return self.statistics()[0]
@property
def max(self):
"""
Return the maximum pixel value for this band.
"""
return self.statistics()[1]
@property
def mean(self):
"""
Return the mean of all pixel values of this band.
"""
return self.statistics()[2]
@property
def std(self):
"""
Return the standard deviation of all pixel values of this band.
"""
return self.statistics()[3]
@property
def nodata_value(self):
"""
Return the nodata value for this band, or None if it isn't set.
"""
# Get value and nodata exists flag
nodata_exists = c_int()
value = capi.get_band_nodata_value(self._ptr, nodata_exists)
if not nodata_exists:
value = None
# If the pixeltype is an integer, convert to int
elif self.datatype() in GDAL_INTEGER_TYPES:
value = int(value)
return value
@nodata_value.setter
def nodata_value(self, value):
"""
Set the nodata value for this band.
"""
if value is None:
if not capi.delete_band_nodata_value:
raise ValueError('GDAL >= 2.1 required to delete nodata values.')
capi.delete_band_nodata_value(self._ptr)
elif not isinstance(value, (int, float)):
raise ValueError('Nodata value must be numeric or None.')
else:
capi.set_band_nodata_value(self._ptr, value)
self._flush()
def datatype(self, as_string=False):
"""
Return the GDAL Pixel Datatype for this band.
"""
dtype = capi.get_band_datatype(self._ptr)
if as_string:
dtype = GDAL_PIXEL_TYPES[dtype]
return dtype
def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):
"""
Read or writes pixel values for this band. Blocks of data can
be accessed by specifying the width, height and offset of the
desired block. The same specification can be used to update
parts of a raster by providing an array of values.
Allowed input data types are bytes, memoryview, list, tuple, and array.
"""
if not offset:
offset = (0, 0)
if not size:
size = (self.width - offset[0], self.height - offset[1])
if not shape:
shape = size
if any(x <= 0 for x in size):
raise ValueError('Offset too big for this raster.')
if size[0] > self.width or size[1] > self.height:
raise ValueError('Size is larger than raster.')
# Create ctypes type array generator
ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])
if data is None:
# Set read mode
access_flag = 0
# Prepare empty ctypes array
data_array = ctypes_array()
else:
# Set write mode
access_flag = 1
# Instantiate ctypes array holding the input data
if isinstance(data, (bytes, memoryview)) or (numpy and isinstance(data, numpy.ndarray)):
data_array = ctypes_array.from_buffer_copy(data)
else:
data_array = ctypes_array(*data)
# Access band
capi.band_io(self._ptr, access_flag, offset[0], offset[1],
size[0], size[1], byref(data_array), shape[0],
shape[1], self.datatype(), 0, 0)
# Return data as numpy array if possible, otherwise as list
if data is None:
if as_memoryview:
return memoryview(data_array)
elif numpy:
# reshape() needs a reshape parameter with the height first.
return numpy.frombuffer(
data_array, dtype=numpy.dtype(data_array)
).reshape(tuple(reversed(size)))
else:
return list(data_array)
else:
self._flush()
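# Illustrative use of GDALBand.data(), assuming an existing GDALRaster `rst`:
#   band = rst.bands[0]
#   everything = band.data()                                   # whole band
#   block = band.data(offset=(0, 0), size=(2, 2))              # read a 2x2 block
#   band.data(data=[1, 2, 3, 4], offset=(0, 0), size=(2, 2))   # write it back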
class BandList(list):
def __init__(self, source):
self.source = source
list.__init__(self)
def __iter__(self):
for idx in range(1, len(self) + 1):
yield GDALBand(self.source, idx)
def __len__(self):
return capi.get_ds_raster_count(self.source._ptr)
def __getitem__(self, index):
try:
return GDALBand(self.source, index + 1)
except GDALException:
raise GDALException('Unable to get band index %d' % index)
| bsd-3-clause | 3,657,172,399,683,865,600 | 31.2249 | 100 | 0.580882 | false |
yokose-ks/edx-platform | common/djangoapps/student/firebase_token_generator.py | 16 | 3431 | '''
Firebase - library to generate a token
License: https://github.com/firebase/firebase-token-generator-python/blob/master/LICENSE
Tweaked and Edited by @danielcebrianr and @lduarte1991
This library will take either objects or strings and use python's built-in encoding
system as specified by RFC 3548. Thanks to the firebase team for their open-source
library. This was made specifically for speaking with the annotation_storage_url and
can be used and expanded, but not modified by anyone else needing such a process.
'''
from base64 import urlsafe_b64encode
import hashlib
import hmac
import sys
try:
import json
except ImportError:
import simplejson as json
__all__ = ['create_token']
TOKEN_SEP = '.'
def create_token(secret, data):
'''
Simply takes in the secret key and the data and
passes it to the local function _encode_token
'''
return _encode_token(secret, data)
if sys.version_info < (2, 7):
def _encode(bytes_data):
'''
Takes a json object, string, or binary and
uses python's urlsafe_b64encode to encode data
and make it safe pass along in a url.
To make sure it does not conflict with variables
we make sure equal signs are removed.
More info: docs.python.org/2/library/base64.html
'''
encoded = urlsafe_b64encode(bytes(bytes_data))
return encoded.decode('utf-8').replace('=', '')
else:
def _encode(bytes_info):
'''
Same as above function but for Python 2.7 or later
'''
encoded = urlsafe_b64encode(bytes_info)
return encoded.decode('utf-8').replace('=', '')
def _encode_json(obj):
'''
Before a python dict object can be properly encoded,
    it must be transformed into a JSON object and then
transformed into bytes to be encoded using the function
defined above.
'''
return _encode(bytearray(json.dumps(obj), 'utf-8'))
def _sign(secret, to_sign):
'''
This function creates a sign that goes at the end of the
message that is specific to the secret and not the actual
content of the encoded body.
More info on hashing: http://docs.python.org/2/library/hmac.html
    The function creates a hashed value of the secret and to_sign
    and returns the digest based on the secure hash
    algorithm SHA-256.
'''
def portable_bytes(string):
'''
Simply transforms a string into a bytes object,
        which is a series of immutable integers 0 <= x < 256.
Always try to encode as utf-8, unless it is not
compliant.
'''
try:
return bytes(string, 'utf-8')
except TypeError:
return bytes(string)
return _encode(hmac.new(portable_bytes(secret), portable_bytes(to_sign), hashlib.sha256).digest()) # pylint: disable=E1101
def _encode_token(secret, claims):
'''
This is the main function that takes the secret token and
the data to be transmitted. There is a header created for decoding
purposes. Token_SEP means that a period/full stop separates the
header, data object/message, and signatures.
'''
encoded_header = _encode_json({'typ': 'JWT', 'alg': 'HS256'})
encoded_claims = _encode_json(claims)
secure_bits = '%s%s%s' % (encoded_header, TOKEN_SEP, encoded_claims)
sig = _sign(secret, secure_bits)
return '%s%s%s' % (secure_bits, TOKEN_SEP, sig)
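# Illustrative usage (hypothetical secret and claims):
#   token = create_token('shared-secret', {'uid': 'student-1'})
#   # -> '<header>.<claims>.<signature>', each part urlsafe-base64 without padding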
| agpl-3.0 | -141,993,413,047,504,930 | 33.656566 | 127 | 0.66657 | false |
with-git/tensorflow | tensorflow/python/ops/parsing_ops.py | 4 | 49645 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parsing Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_parsing_ops import *
# pylint: enable=wildcard-import,undefined-variable
from tensorflow.python.platform import tf_logging
ops.NotDifferentiable("DecodeRaw")
ops.NotDifferentiable("ParseTensor")
ops.NotDifferentiable("StringToNumber")
class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])):
"""Configuration for parsing a variable-length input feature.
Fields:
dtype: Data type of input.
"""
pass
class SparseFeature(
collections.namedtuple(
"SparseFeature",
["index_key", "value_key", "dtype", "size", "already_sorted"])):
"""Configuration for parsing a sparse input feature from an `Example`.
Note, preferably use `VarLenFeature` (possibly in combination with a
`SequenceExample`) in order to parse out `SparseTensor`s instead of
`SparseFeature` due to its simplicity.
Closely mimicking the `SparseTensor` that will be obtained by parsing an
`Example` with a `SparseFeature` config, a `SparseFeature` contains a
* `value_key`: The name of key for a `Feature` in the `Example` whose parsed
`Tensor` will be the resulting `SparseTensor.values`.
* `index_key`: A list of names - one for each dimension in the resulting
`SparseTensor` whose `indices[i][dim]` indicating the position of
the `i`-th value in the `dim` dimension will be equal to the `i`-th value in
the Feature with key named `index_key[dim]` in the `Example`.
* `size`: A list of ints for the resulting `SparseTensor.dense_shape`.
For example, we can represent the following 2D `SparseTensor`
```python
SparseTensor(indices=[[3, 1], [20, 0]],
values=[0.5, -1.0]
dense_shape=[100, 3])
```
with an `Example` input proto
```python
features {
feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } }
feature { key: "ix0" value { int64_list { value: [ 3, 20 ] } } }
feature { key: "ix1" value { int64_list { value: [ 1, 0 ] } } }
}
```
and `SparseFeature` config with 2 `index_key`s
```python
SparseFeature(index_key=["ix0", "ix1"],
value_key="val",
dtype=tf.float32,
size=[100, 3])
```
Fields:
index_key: A single string name or a list of string names of index features.
For each key the underlying feature's type must be `int64` and its length
must always match that of the `value_key` feature.
To represent `SparseTensor`s with a `dense_shape` of `rank` higher than 1
a list of length `rank` should be used.
value_key: Name of value feature. The underlying feature's type must
be `dtype` and its length must always match that of all the `index_key`s'
features.
dtype: Data type of the `value_key` feature.
size: A Python int or list thereof specifying the dense shape. Should be a
list if and only if `index_key` is a list. In that case the list must be
equal to the length of `index_key`. Each for each entry `i` all values in
the `index_key`[i] feature must be in `[0, size[i])`.
already_sorted: A Python boolean to specify whether the values in
`value_key` are already sorted by their index position. If so skip
sorting. False by default (optional).
"""
def __new__(cls, index_key, value_key, dtype, size, already_sorted=False):
return super(SparseFeature, cls).__new__(
cls, index_key, value_key, dtype, size, already_sorted)
class FixedLenFeature(collections.namedtuple(
"FixedLenFeature", ["shape", "dtype", "default_value"])):
"""Configuration for parsing a fixed-length input feature.
To treat sparse input as dense, provide a `default_value`; otherwise,
the parse functions will fail on any examples missing this feature.
Fields:
shape: Shape of input data.
dtype: Data type of input.
default_value: Value to be used if an example is missing this feature. It
must be compatible with `dtype` and of the specified `shape`.
"""
def __new__(cls, shape, dtype, default_value=None):
return super(FixedLenFeature, cls).__new__(
cls, shape, dtype, default_value)
class FixedLenSequenceFeature(collections.namedtuple(
"FixedLenSequenceFeature",
["shape", "dtype", "allow_missing", "default_value"])):
"""Configuration for parsing a variable-length input feature into a `Tensor`.
The resulting `Tensor` of parsing a single `SequenceExample` or `Example` has
a static `shape` of `[None] + shape` and the specified `dtype`.
The resulting `Tensor` of parsing a `batch_size` many `Example`s has
a static `shape` of `[batch_size, None] + shape` and the specified `dtype`.
The entries in the `batch` from different `Examples` will be padded with
`default_value` to the maximum length present in the `batch`.
To treat a sparse input as dense, provide `allow_missing=True`; otherwise,
the parse functions will fail on any examples missing this feature.
Fields:
shape: Shape of input data for dimension 2 and higher. First dimension is
of variable length `None`.
dtype: Data type of input.
allow_missing: Whether to allow this feature to be missing from a feature
list item. Is available only for parsing `SequenceExample` not for
parsing `Examples`.
default_value: Scalar value to be used to pad multiple `Example`s to their
maximum length. Irrelevant for parsing a single `Example` or
`SequenceExample`. Defaults to "" for dtype string and 0 otherwise
(optional).
"""
def __new__(cls, shape, dtype, allow_missing=False, default_value=None):
return super(FixedLenSequenceFeature, cls).__new__(
cls, shape, dtype, allow_missing, default_value)
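# Illustrative features spec combining the configs above (hypothetical keys):
#   features = {
#       "age": FixedLenFeature([], dtypes.int64, default_value=-1),
#       "tags": VarLenFeature(dtypes.string),
#       "scores": FixedLenSequenceFeature([], dtypes.float32, allow_missing=True),
#   }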
def _features_to_raw_params(features, types):
"""Split feature tuples into raw params used by `gen_parsing_ops`.
Args:
features: A `dict` mapping feature keys to objects of a type in `types`.
types: Type of features to allow, among `FixedLenFeature`, `VarLenFeature`,
`SparseFeature`, and `FixedLenSequenceFeature`.
Returns:
Tuple of `sparse_keys`, `sparse_types`, `dense_keys`, `dense_types`,
`dense_defaults`, `dense_shapes`.
Raises:
ValueError: if `features` contains an item not in `types`, or an invalid
feature.
"""
sparse_keys = []
sparse_types = []
dense_keys = []
dense_types = []
dense_defaults = {}
dense_shapes = []
if features:
# NOTE: We iterate over sorted keys to keep things deterministic.
for key in sorted(features.keys()):
feature = features[key]
if isinstance(feature, VarLenFeature):
if VarLenFeature not in types:
raise ValueError("Unsupported VarLenFeature %s.", feature)
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
sparse_keys.append(key)
sparse_types.append(feature.dtype)
elif isinstance(feature, SparseFeature):
if SparseFeature not in types:
raise ValueError("Unsupported SparseFeature %s.", feature)
if not feature.index_key:
raise ValueError(
"Missing index_key for SparseFeature %s.", feature)
if not feature.value_key:
raise ValueError(
"Missing value_key for SparseFeature %s.", feature)
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
index_keys = feature.index_key
if isinstance(index_keys, str):
index_keys = [index_keys]
elif len(index_keys) > 1:
tf_logging.warning("SparseFeature is a complicated feature config "
"and should only be used after careful "
"consideration of VarLenFeature.")
for index_key in sorted(index_keys):
if index_key in sparse_keys:
dtype = sparse_types[sparse_keys.index(index_key)]
if dtype != dtypes.int64:
raise ValueError("Conflicting type %s vs int64 for feature %s." %
(dtype, index_key))
else:
sparse_keys.append(index_key)
sparse_types.append(dtypes.int64)
if feature.value_key in sparse_keys:
dtype = sparse_types[sparse_keys.index(feature.value_key)]
if dtype != feature.dtype:
raise ValueError("Conflicting type %s vs %s for feature %s." % (
dtype, feature.dtype, feature.value_key))
else:
sparse_keys.append(feature.value_key)
sparse_types.append(feature.dtype)
elif isinstance(feature, FixedLenFeature):
if FixedLenFeature not in types:
raise ValueError("Unsupported FixedLenFeature %s.", feature)
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
if feature.shape is None:
raise ValueError("Missing shape for feature %s." % key)
feature_tensor_shape = tensor_shape.as_shape(feature.shape)
if (feature.shape and feature_tensor_shape.ndims and
feature_tensor_shape.dims[0].value is None):
raise ValueError("First dimension of shape for feature %s unknown. "
"Consider using FixedLenSequenceFeature." % key)
if (feature.shape is not None and
not feature_tensor_shape.is_fully_defined()):
raise ValueError("All dimensions of shape for feature %s need to be "
"known but received %s." % (key, str(feature.shape)))
dense_keys.append(key)
dense_shapes.append(feature.shape)
dense_types.append(feature.dtype)
if feature.default_value is not None:
dense_defaults[key] = feature.default_value
elif isinstance(feature, FixedLenSequenceFeature):
if FixedLenSequenceFeature not in types:
raise ValueError("Unsupported FixedLenSequenceFeature %s.", feature)
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
if feature.shape is None:
raise ValueError("Missing shape for feature %s." % key)
dense_keys.append(key)
dense_shapes.append(feature.shape)
dense_types.append(feature.dtype)
if feature.allow_missing:
dense_defaults[key] = None
if feature.default_value is not None:
dense_defaults[key] = feature.default_value
else:
raise ValueError("Invalid feature %s:%s." % (key, feature))
return (
sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes)
def _construct_sparse_tensors_for_sparse_features(features, tensor_dict):
"""Merges SparseTensors of indices and values of SparseFeatures.
Constructs new dict based on `tensor_dict`. For `SparseFeatures` in the values
of `features` expects their `index_key`s and `index_value`s to be present in
`tensor_dict` mapping to `SparseTensor`s. Constructs a single `SparseTensor`
from them, and adds it to the result with the key from `features`.
Copies other keys and values from `tensor_dict` with keys present in
`features`.
Args:
features: A `dict` mapping feature keys to `SparseFeature` values.
Values of other types will be ignored.
tensor_dict: A `dict` mapping feature keys to `Tensor` and `SparseTensor`
values. Expected to contain keys of the `SparseFeature`s' `index_key`s and
`value_key`s and mapping them to `SparseTensor`s.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Similar
to `tensor_dict` except each `SparseFeature`s in `features` results in a
single `SparseTensor`.
"""
tensor_dict = dict(tensor_dict) # Do not modify argument passed in.
# Construct SparseTensors for SparseFeatures.
for key in sorted(features.keys()):
feature = features[key]
if isinstance(feature, SparseFeature):
if isinstance(feature.index_key, str):
sp_ids = tensor_dict[feature.index_key]
else:
sp_ids = [tensor_dict[index_key] for index_key in feature.index_key]
sp_values = tensor_dict[feature.value_key]
tensor_dict[key] = sparse_ops.sparse_merge(
sp_ids,
sp_values,
vocab_size=feature.size,
already_sorted=feature.already_sorted)
# Remove tensors from dictionary that were only used to construct
# SparseTensors for SparseFeature.
for key in set(tensor_dict) - set(features):
del tensor_dict[key]
return tensor_dict
def _prepend_none_dimension(features):
if features:
modified_features = dict(features) # Create a copy to modify
for key, feature in features.items():
if isinstance(feature, FixedLenSequenceFeature):
if not feature.allow_missing:
raise ValueError("Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True.")
modified_features[key] = FixedLenSequenceFeature(
[None] + list(feature.shape),
feature.dtype,
feature.allow_missing,
feature.default_value)
return modified_features
else:
return features
def parse_example(serialized, features, name=None, example_names=None):
# pylint: disable=line-too-long
"""Parses `Example` protos into a `dict` of tensors.
Parses a number of serialized [`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
protos given in `serialized`. We refer to `serialized` as a batch with
`batch_size` many entries of individual `Example` protos.
`example_names` may contain descriptive names for the corresponding serialized
protos. These may be useful for debugging purposes, but they have no effect on
the output. If not `None`, `example_names` must be the same length as
`serialized`.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`.
Each `VarLenFeature` maps to a `SparseTensor` of the specified type
representing a ragged matrix. Its indices are `[batch, index]` where `batch`
identifies the example in `serialized`, and `index` is the value's index in
the list of values associated with that feature and example.
Each `SparseFeature` maps to a `SparseTensor` of the specified type
representing a Tensor of `dense_shape` `[batch_size] + SparseFeature.size`.
Its `values` come from the feature in the examples with key `value_key`.
A `values[i]` comes from a position `k` in the feature of an example at batch
entry `batch`. This positional information is recorded in `indices[i]` as
`[batch, index_0, index_1, ...]` where `index_j` is the `k-th` value of
  the feature in the example with key `SparseFeature.index_key[j]`.
In other words, we split the indices (except the first index indicating the
batch entry) of a `SparseTensor` by dimension into different features of the
`Example`. Due to its complexity a `VarLenFeature` should be preferred over a
`SparseFeature` whenever possible.
Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or
`tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`.
`FixedLenFeature` entries with a `default_value` are optional. With no default
value, we will fail if that `Feature` is missing from any example in
`serialized`.
Each `FixedLenSequenceFeature` `df` maps to a `Tensor` of the specified type
(or `tf.float32` if not specified) and shape
`(serialized.size(), None) + df.shape`.
All examples in `serialized` will be padded with `default_value` along the
second dimension.
Examples:
For example, if one expects a `tf.float32` `VarLenFeature` `ft` and three
serialized `Example`s are provided:
```
serialized = [
features
{ feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } },
features
{ feature []},
features
{ feature { key: "ft" value { float_list { value: [3.0] } } }
]
```
then the output will look like:
```python
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
dense_shape=(3, 2)) }
```
If instead a `FixedLenSequenceFeature` with `default_value = -1.0` and
`shape=[]` is used then the output will look like:
```python
{"ft": [[1.0, 2.0], [3.0, -1.0]]}
```
Given two `Example` input protos in `serialized`:
```
[
features {
feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } }
feature { key: "gps" value { float_list { value: [] } } }
},
features {
feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } }
feature { key: "dank" value { int64_list { value: [ 42 ] } } }
feature { key: "gps" value { } }
}
]
```
And arguments
```
example_names: ["input0", "input1"],
features: {
"kw": VarLenFeature(tf.string),
"dank": VarLenFeature(tf.int64),
"gps": VarLenFeature(tf.float32),
}
```
Then the output is a dictionary:
```python
{
"kw": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["knit", "big", "emmy"]
dense_shape=[2, 2]),
"dank": SparseTensor(
indices=[[1, 0]],
values=[42],
dense_shape=[2, 1]),
"gps": SparseTensor(
indices=[],
values=[],
dense_shape=[2, 0]),
}
```
For dense results in two serialized `Example`s:
```
[
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
}
]
```
We can use arguments:
```
example_names: ["input0", "input1"],
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
}
```
An alternative to `VarLenFeature` to obtain a `SparseTensor` is
`SparseFeature`. For example, given two `Example` input protos in
`serialized`:
```
[
features {
feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } }
feature { key: "ix" value { int64_list { value: [ 3, 20 ] } } }
},
features {
feature { key: "val" value { float_list { value: [ 0.0 ] } } }
feature { key: "ix" value { int64_list { value: [ 42 ] } } }
}
]
```
And arguments
```
example_names: ["input0", "input1"],
features: {
"sparse": SparseFeature(
index_key="ix", value_key="val", dtype=tf.float32, size=100),
}
```
Then the output is a dictionary:
```python
{
"sparse": SparseTensor(
indices=[[0, 3], [0, 20], [1, 42]],
values=[0.5, -1.0, 0.0]
dense_shape=[2, 100]),
}
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
if not features:
raise ValueError("Missing: features was %s." % features)
features = _prepend_none_dimension(features)
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = _features_to_raw_params(
features,
[VarLenFeature, SparseFeature, FixedLenFeature, FixedLenSequenceFeature])
outputs = _parse_example_raw(
serialized, example_names, sparse_keys, sparse_types, dense_keys,
dense_types, dense_defaults, dense_shapes, name)
return _construct_sparse_tensors_for_sparse_features(features, outputs)
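# --- Illustrative sketch (not part of the public API) ------------------------
# A minimal end-to-end use of `parse_example`; `serialized_batch` is assumed
# to be a 1-D string tensor of serialized `Example` protos produced elsewhere
# (e.g. by a reader op), and the feature spec mirrors the docstring examples.
def _example_parse_example_usage(serialized_batch):
  feature_spec = {
      "kw": VarLenFeature(dtypes.string),
      "age": FixedLenFeature([], dtype=dtypes.int64, default_value=-1),
  }
  # Returns {"kw": SparseTensor, "age": Tensor of shape [batch_size]}.
  return parse_example(serialized_batch, feature_spec)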
def _parse_example_raw(serialized,
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name=None):
"""Parses `Example` protos.
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
name: A name for this operation (optional).
Returns:
A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up.
"""
with ops.name_scope(name, "ParseExample", [serialized, names]):
names = [] if names is None else names
dense_defaults = {} if dense_defaults is None else dense_defaults
sparse_keys = [] if sparse_keys is None else sparse_keys
sparse_types = [] if sparse_types is None else sparse_types
dense_keys = [] if dense_keys is None else dense_keys
dense_types = [] if dense_types is None else dense_types
dense_shapes = (
[[]] * len(dense_keys) if dense_shapes is None else dense_shapes)
num_dense = len(dense_keys)
num_sparse = len(sparse_keys)
if len(dense_shapes) != num_dense:
raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d"
% (len(dense_shapes), num_dense))
if len(dense_types) != num_dense:
raise ValueError("len(dense_types) != len(num_dense): %d vs. %d"
% (len(dense_types), num_dense))
if len(sparse_types) != num_sparse:
raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d"
% (len(sparse_types), num_sparse))
if num_dense + num_sparse == 0:
raise ValueError("Must provide at least one sparse key or dense key")
if not set(dense_keys).isdisjoint(set(sparse_keys)):
raise ValueError(
"Dense and sparse keys must not intersect; intersection: %s" %
set(dense_keys).intersection(set(sparse_keys)))
# Convert dense_shapes to TensorShape object.
dense_shapes = [tensor_shape.as_shape(shape) for shape in dense_shapes]
dense_defaults_vec = []
for i, key in enumerate(dense_keys):
default_value = dense_defaults.get(key)
dense_shape = dense_shapes[i]
if (dense_shape.ndims is not None and dense_shape.ndims > 0 and
dense_shape[0].value is None):
# Variable stride dense shape, the default value should be a
# scalar padding value
if default_value is None:
default_value = ops.convert_to_tensor(
"" if dense_types[i] == dtypes.string else 0,
dtype=dense_types[i])
else:
# Reshape to a scalar to ensure user gets an error if they
# provide a tensor that's not intended to be a padding value
# (0 or 2+ elements).
key_name = "padding_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, [])
else:
if default_value is None:
default_value = constant_op.constant([], dtype=dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, dense_shape)
dense_defaults_vec.append(default_value)
# Finally, convert dense_shapes to TensorShapeProto
dense_shapes = [shape.as_proto() for shape in dense_shapes]
# pylint: disable=protected-access
outputs = gen_parsing_ops._parse_example(
serialized=serialized,
names=names,
dense_defaults=dense_defaults_vec,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_shapes=dense_shapes,
name=name)
# pylint: enable=protected-access
(sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs
sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(sparse_indices, sparse_values, sparse_shapes)]
return dict(zip(sparse_keys + dense_keys, sparse_tensors + dense_values))
def parse_single_example(serialized, features, name=None, example_names=None):
"""Parses a single `Example` proto.
Similar to `parse_example`, except:
  For dense tensors, the returned `Tensor` is identical to the output of
  `parse_example`, except that there is no batch dimension: the output shape
  is the same as the shape given in `dense_shape`.
For `SparseTensor`s, the first (batch) column of the indices matrix is removed
(the indices matrix is a column vector), the values vector is unchanged, and
the first (`batch_size`) entry of the shape vector is removed (it is now a
single element vector).
One might see performance advantages by batching `Example` protos with
`parse_example` instead of using this function directly.
Args:
serialized: A scalar string Tensor, a single serialized Example.
See `_parse_single_example_raw` documentation for more details.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
name: A name for this operation (optional).
example_names: (Optional) A scalar string Tensor, the associated name.
See `_parse_single_example_raw` documentation for more details.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
if not features:
raise ValueError("Missing features.")
features = _prepend_none_dimension(features)
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = _features_to_raw_params(
features,
[VarLenFeature, FixedLenFeature, FixedLenSequenceFeature, SparseFeature])
outputs = _parse_single_example_raw(
serialized, example_names, sparse_keys, sparse_types, dense_keys,
dense_types, dense_defaults, dense_shapes, name)
return _construct_sparse_tensors_for_sparse_features(features, outputs)
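# --- Illustrative sketch (not part of the public API) ------------------------
# `parse_single_example` takes a scalar string tensor holding one serialized
# `Example`; the keys below are hypothetical.
def _example_parse_single_example_usage(serialized_scalar):
  feature_spec = {
      "gender": FixedLenFeature([], dtype=dtypes.string),
      "kw": VarLenFeature(dtypes.string),
  }
  # Dense results lose the batch dimension; sparse indices become 1-D.
  return parse_single_example(serialized_scalar, feature_spec)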
def _parse_single_example_raw(serialized,
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name=None):
"""Parses a single `Example` proto.
Args:
serialized: A scalar string Tensor, a single serialized Example.
See `_parse_example_raw` documentation for more details.
names: (Optional) A scalar string Tensor, the associated name.
See `_parse_example_raw` documentation for more details.
sparse_keys: See `_parse_example_raw` documentation for more details.
sparse_types: See `_parse_example_raw` documentation for more details.
dense_keys: See `_parse_example_raw` documentation for more details.
dense_types: See `_parse_example_raw` documentation for more details.
dense_defaults: See `_parse_example_raw` documentation for more details.
dense_shapes: See `_parse_example_raw` documentation for more details.
name: A name for this operation (optional).
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
with ops.name_scope(name, "ParseSingleExample", [serialized, names]):
serialized = ops.convert_to_tensor(serialized)
serialized_shape = serialized.get_shape()
if serialized_shape.ndims is not None:
if serialized_shape.ndims != 0:
raise ValueError("Input serialized must be a scalar")
else:
serialized = control_flow_ops.with_dependencies(
[control_flow_ops.Assert(
math_ops.equal(array_ops.rank(serialized), 0),
["Input serialized must be a scalar"],
name="SerializedIsScalar")],
serialized,
name="SerializedDependencies")
serialized = array_ops.expand_dims(serialized, 0)
if names is not None:
names = ops.convert_to_tensor(names)
names_shape = names.get_shape()
if names_shape.ndims is not None:
if names_shape.ndims != 0:
raise ValueError("Input names must be a scalar")
else:
names = control_flow_ops.with_dependencies(
[control_flow_ops.Assert(
math_ops.equal(array_ops.rank(names), 0),
["Input names must be a scalar"],
name="NamesIsScalar")],
names,
name="NamesDependencies")
names = array_ops.expand_dims(names, 0)
outputs = _parse_example_raw(
serialized,
names=names,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_types=dense_types,
dense_defaults=dense_defaults,
dense_shapes=dense_shapes,
name=name)
if dense_keys is not None:
for d in dense_keys:
d_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", d)
outputs[d] = array_ops.squeeze(
outputs[d], [0], name="Squeeze_%s" % d_name)
if sparse_keys is not None:
for s in sparse_keys:
s_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", s)
outputs[s] = sparse_tensor.SparseTensor(
array_ops.slice(outputs[s].indices,
[0, 1], [-1, -1], name="Slice_Indices_%s" % s_name),
outputs[s].values,
array_ops.slice(outputs[s].dense_shape,
[1], [-1], name="Squeeze_Shape_%s" % s_name))
return outputs
def parse_single_sequence_example(
serialized, context_features=None, sequence_features=None,
example_name=None, name=None):
# pylint: disable=line-too-long
"""Parses a single `SequenceExample` proto.
Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
proto given in `serialized`.
This op parses a serialized sequence example into a tuple of dictionaries
mapping keys to `Tensor` and `SparseTensor` objects respectively.
The first dictionary contains mappings for keys appearing in
`context_features`, and the second dictionary contains mappings for keys
appearing in `sequence_features`.
At least one of `context_features` and `sequence_features` must be provided
and non-empty.
The `context_features` keys are associated with a `SequenceExample` as a
whole, independent of time / frame. In contrast, the `sequence_features` keys
provide a way to access variable-length data within the `FeatureList` section
of the `SequenceExample` proto. While the shapes of `context_features` values
are fixed with respect to frame, the frame dimension (the first dimension)
of `sequence_features` values may vary between `SequenceExample` protos,
and even between `feature_list` keys within the same `SequenceExample`.
`context_features` contains `VarLenFeature` and `FixedLenFeature` objects.
Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature`
is mapped to a `Tensor`, of the specified type, shape, and default value.
`sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature`
objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each
`FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type.
The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where
`T` is the length of the associated `FeatureList` in the `SequenceExample`.
For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of
static shape `[None]` and dynamic shape `[T]`, while
`FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor`
of static shape `[None, k]` and dynamic shape `[T, k]`.
Each `SparseTensor` corresponding to `sequence_features` represents a ragged
vector. Its indices are `[time, index]`, where `time` is the `FeatureList`
entry and `index` is the value's index in the list of values associated with
that time.
`FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature`
entries with `allow_missing=True` are optional; otherwise, we will fail if
that `Feature` or `FeatureList` is missing from any example in `serialized`.
`example_name` may contain a descriptive name for the corresponding serialized
proto. This may be useful for debugging purposes, but it has no effect on the
output. If not `None`, `example_name` must be a scalar.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary
serialized `SequenceExample` proto.
context_features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. These features are associated with a
`SequenceExample` as a whole.
sequence_features: A `dict` mapping feature keys to
`FixedLenSequenceFeature` or `VarLenFeature` values. These features are
associated with data within the `FeatureList` section of the
`SequenceExample` proto.
example_name: A scalar (0-D Tensor) of strings (optional), the name of
the serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
ValueError: if any feature is invalid.
"""
# pylint: enable=line-too-long
if not (context_features or sequence_features):
raise ValueError("Missing features.")
(context_sparse_keys, context_sparse_types, context_dense_keys,
context_dense_types, context_dense_defaults,
context_dense_shapes) = _features_to_raw_params(
context_features, [VarLenFeature, FixedLenFeature])
(feature_list_sparse_keys, feature_list_sparse_types,
feature_list_dense_keys, feature_list_dense_types,
feature_list_dense_defaults,
feature_list_dense_shapes) = _features_to_raw_params(
sequence_features, [VarLenFeature, FixedLenSequenceFeature])
return _parse_single_sequence_example_raw(
serialized, context_sparse_keys, context_sparse_types,
context_dense_keys, context_dense_types, context_dense_defaults,
context_dense_shapes, feature_list_sparse_keys,
feature_list_sparse_types, feature_list_dense_keys,
feature_list_dense_types, feature_list_dense_shapes,
feature_list_dense_defaults, example_name, name)
def _parse_single_sequence_example_raw(serialized,
context_sparse_keys=None,
context_sparse_types=None,
context_dense_keys=None,
context_dense_types=None,
context_dense_defaults=None,
context_dense_shapes=None,
feature_list_sparse_keys=None,
feature_list_sparse_types=None,
feature_list_dense_keys=None,
feature_list_dense_types=None,
feature_list_dense_shapes=None,
feature_list_dense_defaults=None,
debug_name=None,
name=None):
"""Parses a single `SequenceExample` proto.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary
serialized `SequenceExample` proto.
context_sparse_keys: A list of string keys in the `SequenceExample`'s
features. The results for these keys will be returned as
`SparseTensor` objects.
context_sparse_types: A list of `DTypes`, the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
context_dense_types: A list of DTypes, same length as `context_dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the context_dense_keys of the feature.
context_dense_shapes: A list of tuples, same length as `context_dense_keys`.
The shape of the data for each context_dense feature referenced by
`context_dense_keys`. Required for any input tensors identified by
`context_dense_keys` whose shapes are anything other than `[]` or `[1]`.
feature_list_sparse_keys: A list of string keys in the `SequenceExample`'s
feature_lists. The results for these keys will be returned as
`SparseTensor` objects.
feature_list_sparse_types: A list of `DTypes`, same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
features_lists. The results for these keys will be returned as `Tensor`s.
feature_list_dense_types: A list of `DTypes`, same length as
`feature_list_dense_keys`. Only `tf.float32` (`FloatList`),
`tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
feature_list_dense_shapes: A list of tuples, same length as
`feature_list_dense_keys`. The shape of the data for each
`FeatureList` feature referenced by `feature_list_dense_keys`.
feature_list_dense_defaults: A dict mapping key strings to values.
The only currently allowed value is `None`. Any key appearing
in this dict with value `None` is allowed to be missing from the
`SequenceExample`. If missing, the key is treated as zero-length.
debug_name: A scalar (0-D Tensor) of strings (optional), the name of
the serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
ValueError: If context_sparse and context_dense key sets intersect,
if input lengths do not match up, or if a value in
feature_list_dense_defaults is not None.
TypeError: if feature_list_dense_defaults is not either None or a dict.
"""
with ops.name_scope(name, "ParseSingleSequenceExample", [serialized]):
context_dense_defaults = (
{} if context_dense_defaults is None else context_dense_defaults)
context_sparse_keys = (
[] if context_sparse_keys is None else context_sparse_keys)
context_sparse_types = (
[] if context_sparse_types is None else context_sparse_types)
context_dense_keys = (
[] if context_dense_keys is None else context_dense_keys)
context_dense_types = (
[] if context_dense_types is None else context_dense_types)
context_dense_shapes = (
[[]] * len(context_dense_keys)
if context_dense_shapes is None else context_dense_shapes)
feature_list_sparse_keys = (
[] if feature_list_sparse_keys is None else feature_list_sparse_keys)
feature_list_sparse_types = (
[] if feature_list_sparse_types is None else feature_list_sparse_types)
feature_list_dense_keys = (
[] if feature_list_dense_keys is None else feature_list_dense_keys)
feature_list_dense_types = (
[] if feature_list_dense_types is None else feature_list_dense_types)
feature_list_dense_shapes = (
[[]] * len(feature_list_dense_keys)
if feature_list_dense_shapes is None else feature_list_dense_shapes)
feature_list_dense_defaults = (
dict() if feature_list_dense_defaults is None
else feature_list_dense_defaults)
debug_name = "" if debug_name is None else debug_name
# Internal
feature_list_dense_missing_assumed_empty = []
num_context_dense = len(context_dense_keys)
num_feature_list_dense = len(feature_list_dense_keys)
num_context_sparse = len(context_sparse_keys)
num_feature_list_sparse = len(feature_list_sparse_keys)
if len(context_dense_shapes) != num_context_dense:
raise ValueError(
"len(context_dense_shapes) != len(context_dense_keys): %d vs. %d"
% (len(context_dense_shapes), num_context_dense))
if len(context_dense_types) != num_context_dense:
raise ValueError(
"len(context_dense_types) != len(num_context_dense): %d vs. %d"
% (len(context_dense_types), num_context_dense))
if len(feature_list_dense_shapes) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_shapes) != len(feature_list_dense_keys): "
"%d vs. %d" % (len(feature_list_dense_shapes),
num_feature_list_dense))
if len(feature_list_dense_types) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_types) != len(num_feature_list_dense):"
"%d vs. %d" % (len(feature_list_dense_types), num_feature_list_dense))
if len(context_sparse_types) != num_context_sparse:
raise ValueError(
"len(context_sparse_types) != len(context_sparse_keys): %d vs. %d"
% (len(context_sparse_types), num_context_sparse))
if len(feature_list_sparse_types) != num_feature_list_sparse:
raise ValueError(
"len(feature_list_sparse_types) != len(feature_list_sparse_keys): "
"%d vs. %d"
% (len(feature_list_sparse_types), num_feature_list_sparse))
if (num_context_dense + num_context_sparse
+ num_feature_list_dense + num_feature_list_sparse) == 0:
raise ValueError(
"Must provide at least one context_sparse key, context_dense key, "
", feature_list_sparse key, or feature_list_dense key")
if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):
raise ValueError(
"context_dense and context_sparse keys must not intersect; "
"intersection: %s" %
set(context_dense_keys).intersection(set(context_sparse_keys)))
if not set(feature_list_dense_keys).isdisjoint(
set(feature_list_sparse_keys)):
raise ValueError(
"feature_list_dense and feature_list_sparse keys must not intersect; "
"intersection: %s" %
set(feature_list_dense_keys).intersection(
set(feature_list_sparse_keys)))
if not isinstance(feature_list_dense_defaults, dict):
raise TypeError("feature_list_dense_defaults must be a dict")
for k, v in feature_list_dense_defaults.items():
if v is not None:
raise ValueError("Value feature_list_dense_defaults[%s] must be None"
% k)
feature_list_dense_missing_assumed_empty.append(k)
context_dense_defaults_vec = []
for i, key in enumerate(context_dense_keys):
default_value = context_dense_defaults.get(key)
if default_value is None:
default_value = constant_op.constant([], dtype=context_dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=context_dense_types[i], name=key_name)
default_value = array_ops.reshape(
default_value, context_dense_shapes[i])
context_dense_defaults_vec.append(default_value)
context_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
for shape in context_dense_shapes]
feature_list_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
for shape in feature_list_dense_shapes]
# pylint: disable=protected-access
outputs = gen_parsing_ops._parse_single_sequence_example(
serialized=serialized,
debug_name=debug_name,
context_dense_defaults=context_dense_defaults_vec,
context_sparse_keys=context_sparse_keys,
context_sparse_types=context_sparse_types,
context_dense_keys=context_dense_keys,
context_dense_shapes=context_dense_shapes,
feature_list_sparse_keys=feature_list_sparse_keys,
feature_list_sparse_types=feature_list_sparse_types,
feature_list_dense_keys=feature_list_dense_keys,
feature_list_dense_types=feature_list_dense_types,
feature_list_dense_shapes=feature_list_dense_shapes,
feature_list_dense_missing_assumed_empty=(
feature_list_dense_missing_assumed_empty),
name=name)
# pylint: enable=protected-access
(context_sparse_indices, context_sparse_values,
context_sparse_shapes, context_dense_values,
feature_list_sparse_indices, feature_list_sparse_values,
feature_list_sparse_shapes, feature_list_dense_values) = outputs
context_sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(context_sparse_indices,
context_sparse_values,
context_sparse_shapes)]
feature_list_sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(feature_list_sparse_indices,
feature_list_sparse_values,
feature_list_sparse_shapes)]
context_output = dict(
zip(context_sparse_keys + context_dense_keys,
context_sparse_tensors + context_dense_values))
feature_list_output = dict(
zip(feature_list_sparse_keys + feature_list_dense_keys,
feature_list_sparse_tensors + feature_list_dense_values))
return (context_output, feature_list_output)
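# --- Illustrative sketch (not part of the public API) ------------------------
# `parse_single_sequence_example` splits one serialized `SequenceExample` into
# per-example context features and per-frame sequence features; the keys below
# are hypothetical.
def _example_parse_single_sequence_example_usage(serialized_scalar):
  context_spec = {
      "length": FixedLenFeature([], dtype=dtypes.int64),
  }
  sequence_spec = {
      "tokens": FixedLenSequenceFeature([], dtype=dtypes.int64),
      "tags": VarLenFeature(dtypes.string),
  }
  # Returns (context_dict, feature_list_dict).
  return parse_single_sequence_example(
      serialized_scalar, context_features=context_spec,
      sequence_features=sequence_spec)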
| apache-2.0 | -1,496,799,710,853,547,500 | 41.723752 | 119 | 0.653822 | false |
ishay2b/tensorflow | tensorflow/python/ops/histogram_ops_test.py | 71 | 2968 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import histogram_ops
from tensorflow.python.platform import test
class HistogramFixedWidthTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_input_gives_all_zero_counts(self):
# Bins will be:
# (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
value_range = [0.0, 5.0]
values = []
expected_bin_counts = [0, 0, 0, 0, 0]
with self.test_session():
hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
self.assertEqual(dtypes.int32, hist.dtype)
self.assertAllClose(expected_bin_counts, hist.eval())
def test_1d_values_int64_output(self):
# Bins will be:
# (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
value_range = [0.0, 5.0]
values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
expected_bin_counts = [2, 1, 1, 0, 2]
with self.test_session():
hist = histogram_ops.histogram_fixed_width(
values, value_range, nbins=5, dtype=dtypes.int64)
self.assertEqual(dtypes.int64, hist.dtype)
self.assertAllClose(expected_bin_counts, hist.eval())
def test_1d_float64_values(self):
# Bins will be:
# (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
value_range = np.float64([0.0, 5.0])
values = np.float64([-1.0, 0.0, 1.5, 2.0, 5.0, 15])
expected_bin_counts = [2, 1, 1, 0, 2]
with self.test_session():
hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
self.assertEqual(dtypes.int32, hist.dtype)
self.assertAllClose(expected_bin_counts, hist.eval())
def test_2d_values(self):
# Bins will be:
# (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
value_range = [0.0, 5.0]
values = [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]]
expected_bin_counts = [2, 1, 1, 0, 2]
with self.test_session():
hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
self.assertEqual(dtypes.int32, hist.dtype)
self.assertAllClose(expected_bin_counts, hist.eval())
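  def test_two_bins_split_at_midpoint(self):
    # Illustrative extra check (a sketch, not in the original suite): with
    # nbins=2 the split point is the midpoint of value_range, and edge values
    # are clamped, so 0.0 lands in bin 0 and 5.0 in bin 1.
    value_range = [0.0, 5.0]
    values = [0.0, 5.0]
    expected_bin_counts = [1, 1]
    with self.test_session():
      hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=2)
      self.assertAllClose(expected_bin_counts, hist.eval())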
if __name__ == '__main__':
test.main()
| apache-2.0 | -1,804,547,402,525,981,200 | 36.1 | 80 | 0.632749 | false |
calhewitt/lucid-utils | lucid_utils/classification/end_detection.py | 2 | 5414 | # An algorithm to work out the number of end points of a 'long' cluster (beta, etc) in order to detect crossed or divergent tracks
# Note: This is currently only accurate for clusters of radius > ~20
# TODO Develop a similar algorithm for shorter blobs
# Author: Cal Hewitt
import numpy as np
def is_single_track(blob):
return num_end_points(blob) <= 2
def num_end_points(blob):
cluster, best_fit_line, radius, centroid = blob.pixels, blob.best_fit_line, blob.radius, blob.centroid
m, c = best_fit_line
radius = int(np.ceil(radius)) # Make radius into an integer, bigger is better for avoiding errors
# Define constants and initialise arrays which we will use a lot later
pixel_ch_x = 1 / np.sqrt( (m**2) + 1) # For efficiency, change in x between sample points
m_normal = (-1)*(1/m) # Gradient of the normal to the line of best fit
all_pixel_clusters = []
num_end_points = 0
# To begin the process, we are going to step along line of best fit from c - r to c + r, 1 pixel at a time
# For simplicity we call this 'left to right'
# First, find the leftmost point
ch_x = radius / np.sqrt( (m**2) + 1 ) # Change in x between centroid and leftmost point
start_point = ( centroid[0] - ch_x, centroid[1] - m*ch_x )
# Now start stepping along the line of best fit, with i between 0 and diameter, 1 pixel at a time...
for i in range( (radius*2) + 1):
# First we locate the point on the line of best fit which corresponds to i
current_point = (start_point[0] + (i*pixel_ch_x), start_point[1] + (m*i*pixel_ch_x))
# We want to check for pixels which 'correspond' to this point by seeing if the normal at this point intersects them
# Use Bresenham's Algorithm to rasterise the normal r either side of current_point, and then check for clusters
# Make up bresenham start and end (more points than are actually needed, but probs computationally easier this way as B's alg is very light)
p1 = (int(current_point[0] - radius), int(current_point[1] - np.ceil(m_normal*radius)))
p2 = (int(current_point[0] + radius), int(current_point[1] + np.ceil(m_normal*radius)))
relevant_pixels = bresenham(p1, p2)
# Make a list of 'clusters' of these relevant pixels, which are from separate branches
relevant_pixel_clusters = []
last_active_pixel = None
current_cluster = None
for pixel in relevant_pixels:
# Check that the pixel has been hit
if pixel in cluster:
if not current_cluster:
current_cluster = [pixel]
else:
if pixels_adjacent(pixel, last_active_pixel):
current_cluster.append(pixel)
else:
relevant_pixel_clusters.append(current_cluster)
current_cluster = [pixel]
last_active_pixel = pixel
# If a cluster has been partially formed by the end of the loop, still use it
if current_cluster:
relevant_pixel_clusters.append(current_cluster)
if relevant_pixel_clusters:
all_pixel_clusters.append(relevant_pixel_clusters)
# By this point, all_pixel_clusters contains a list of rows, each of these a list of clusters
# Check for clusters with only one neighbour, as these will be end points
for i in range(len(all_pixel_clusters)):
active_row = all_pixel_clusters[i]
for active_cluster in active_row:
neighbours = 0
for check_cluster in all_pixel_clusters[i]:
if clusters_adjacent(active_cluster, check_cluster) and (active_cluster != check_cluster):
neighbours += 1
if i > 0:
for check_cluster in all_pixel_clusters[i-1]:
if clusters_adjacent(active_cluster, check_cluster):
neighbours += 1
if i < (len(all_pixel_clusters) - 1):
for check_cluster in all_pixel_clusters[i+1]:
if clusters_adjacent(active_cluster, check_cluster):
neighbours += 1
if neighbours == 1:
num_end_points += 1
return num_end_points
def pixels_adjacent(pixel1, pixel2, distance = 1):
return abs(pixel2[0] - pixel1[0]) <= distance and abs(pixel2[1] - pixel1[1]) <= distance
def clusters_adjacent(cluster1, cluster2):
for p1 in cluster1:
for p2 in cluster2:
if pixels_adjacent(p1, p2, 2): # Hack as sometimes Bresenham lines will miss a pixel
return True
return False
# An implementation of Bresenham's line algorithm, thanks to roguebasin.com
def bresenham(start, end):
x1, y1 = start
x2, y2 = end
dx = x2 - x1
dy = y2 - y1
is_steep = abs(dy) > abs(dx)
if is_steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
swapped = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
swapped = True
dx = x2 - x1
dy = y2 - y1
error = int(dx / 2.0)
ystep = 1 if y1 < y2 else -1
y = y1
points = []
for x in range(x1, x2 + 1):
coord = (y, x) if is_steep else (x, y)
points.append(coord)
error -= abs(dy)
if error < 0:
y += ystep
error += dx
if swapped:
points.reverse()
return points
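# --- Illustrative sketch, not part of the original module -------------------
# A self-contained check of the Bresenham helper plus a hypothetical `Blob`
# stand-in exposing the attributes num_end_points() expects (pixels,
# best_fit_line, radius, centroid). The values below are made up.
def _end_detection_demo():
    from collections import namedtuple
    # For (0, 0) -> (3, 1) the rasterised segment is
    # [(0, 0), (1, 0), (2, 1), (3, 1)].
    segment = bresenham((0, 0), (3, 1))
    Blob = namedtuple('Blob', ['pixels', 'best_fit_line', 'radius', 'centroid'])
    track = Blob(pixels=set(segment), best_fit_line=(0.33, 0.0),
                 radius=2.0, centroid=(1.5, 0.5))
    # Note: the module comment above warns accuracy degrades for small radii,
    # so this only demonstrates the calling convention, not a realistic result.
    return segment, is_single_track(track)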
| mit | -6,095,883,016,788,638,000 | 44.881356 | 148 | 0.605652 | false |
woshialex/diagnose-heart | fitting_models.py | 1 | 13596 | from scipy.optimize import minimize;
import numpy as np;
from scipy import stats
import itertools;
import analysis;
class BaseModel:
def __init__(self):
self.p = None;
def set(self,p):
self.p = p;
class SexAgeModel(BaseModel):
def __init__(self):
self.p = np.array([[4.0,3,10.6,12,75,181],\
[3.0,8,7.0,30,53,144]]);
#fitted: 0.03737 in train
def fit(self, info, train_true):
print("use fitted values, no fitting")
def predict(self,info):
res = {};
for idx,row in info.iterrows():
case,age,sex = row['Id'],row['age'],row['sex'];
p = self.p;
if sex=='M':
if age<15:
hS = [p[0][0]*age+p[0][1],15];
hD = [p[0][2]*age+p[0][3], 35];
else:
hS = [p[0][4],35];
hD = [p[0][5],45];
elif sex=='F':
if age<15:
hS = [p[1][0]*age+p[1][1],15];
hD = [p[1][2]*age+p[1][3],35];
else:
hS = [p[1][4],35];
hD = [p[1][5],40];
else:
print("!!!no such sex type!");
hS = [p[1][4],35];
hD = [p[1][5],45];
res[case] = np.asarray(hS + hD);
return res;
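# --- Illustrative sketch (synthetic rows, not from the competition data) ----
# SexAgeModel.predict only needs an `info` frame with Id/age/sex columns and
# returns {Id: [sys_volume, sys_err, dias_volume, dias_err]}.
def _demo_sex_age_model():
    import pandas as pd;
    info = pd.DataFrame({'Id': [1, 2], 'age': [10.0, 40.0], 'sex': ['M', 'F']});
    model = SexAgeModel();
    return model.predict(info);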
class OneSliceModel(BaseModel):
def __init__(self):
self.p = np.array([5,0.00115,10,0.00124,0.080,6,0.075,7]);
#fitted on train, score = 0.01519
def fit(self, areas_all, train_true):
print("not implemented yet, use default to fit")
def predict(self, areas_all):
#take the area_data as input
#return sys_vol, sys_err, dias_vol, dias_error
res = {};
p = self.p;
for case,areas in areas_all.iteritems():
x = np.sum(areas[:,1:],axis=0);
tsys,tdias = np.argmin(x),np.argmax(x);
a = areas[:,tdias+1];
if np.sum(a>100) <= 2:
dias_v = np.nan;
sys_v = np.nan;
else:
da = np.percentile(a,80);
dias_v = np.clip(p[2] + p[3]*da**1.5,5,580);
a = areas[:,tsys+1];
if np.sum(a>100) <= 2:
sys_v = np.nan;
else:
sa = np.percentile(a,80);
sys_v = np.clip(p[0] + p[1]*(10+sa)*(da**0.5+sa**0.5)/2,5,580);
sys_err = np.clip(sys_v * p[4] + p[5],0,30);
dias_err = np.clip(dias_v * p[6] + p[7],0,30);
res[case] = np.asarray([sys_v, sys_err, dias_v, dias_err]);
return res;
class SaxModel(BaseModel):
def __init__(self,version=1):
self.version = version;
if version == 1:
self.p0 = [1.05,1.05,0.05,4];
self.bounds = [(0.8,1.5),(0.8,1.3),(0.03,0.07),(0,10)];
elif version == 2:
self.p0 = [1.0,1.0,0.05,4,0.05,4];
self.bounds = [(-0.5,1.8),(-0.5,1.5),(0.03,0.10),(0,10),(0.03,0.10),(0,10)];
elif version == 3:
self.p0 = [1.05,0, 1.05, 0, 0.05,4, 0.05, 4];
self.bounds = [(0.8,1.5),(0,30), (0.8,1.3),(0,50),(0.03,0.10),(0,10), (0.03,0.10),(0,10)];
self.p = None;
def _get_result(self,X,p):#X a single column vector of sys and dias volume
CLIP = 25;
Y = np.zeros((X.shape[0],2));
if self.version == 1:
Y[::2,0] = X[::2]*p[0];
Y[1::2,0] = X[1::2]*p[1];
Y[:,1] = np.clip(Y[:,0]*p[2]+p[3], 0, CLIP);
elif self.version == 2:
Y[::2,0] = X[::2] - np.sqrt(X[::2])*p[0];
Y[1::2,0] = X[1::2] - np.sqrt(X[1::2])*p[1];
Y[::2,1] = np.clip(Y[::2,0]*p[2]+p[3], 0, CLIP);
Y[1::2,1] = np.clip(Y[1::2,0]*p[4]+p[5], 0, CLIP);
elif self.version == 3:
Y[::2,0] = X[::2]*p[0] + p[1];
Y[1::2,0] = X[1::2]*p[2] + p[3];
            Y[::2,1] = np.clip(Y[::2,0]*p[4]+p[5], 0, CLIP);
Y[1::2,1] = np.clip(Y[1::2,0]*p[6]+p[7], 0, CLIP);
return Y;
def fit(self, results, train_true):
x = [];
y = [];
count = 0;
missing = [];
for idx,row in train_true.iterrows():
res = results.get(row['Id']);
if res is None:
missing.append(row['Id']);
continue
count+=1;
x.extend(res);
y.extend([row['Systole'],row['Diastole']]);
print("{} cases are used to fit the model".format(count));
if len(missing)>0:
print("cases are missing: " + ','.join([str(m_) for m_ in missing]));
x = np.asarray(x);
y = np.asarray(y);
ff = minimize(lambda p:analysis.crps_score(self._get_result(x,p),y), self.p0, bounds=self.bounds, options={'gtol':1e-5,'maxiter':500,'eps':1e-5});
self.p = ff.x;
print("fitting parameters " + str(self.p));
print("fitting score " + str(ff.fun));
def predict(self,results):
res = {};
if self.p is None:
print("need to fit the model first");
for case,sd in results.iteritems():
res[case] = self._get_result(np.asarray(sd),self.p).flatten();
return res;
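# --- Illustrative sketch (hypothetical inputs) -------------------------------
# SaxModel.fit expects `results` as {case_id: [sys_estimate, dias_estimate]}
# and `train_true` as a frame with Id/Systole/Diastole columns; predict then
# returns {case_id: [sys, sys_err, dias, dias_err]}.
def _demo_sax_model(results, train_true):
    model = SaxModel(version=1);
    model.fit(results, train_true);
    return model.predict(results);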
class Ch4Model(BaseModel):
def __init__(self):
self.p0 = [.8,10,.3,.9,.09,4];
self.bounds = [(.6,.98),(0,20),(.2,0.7),(0.6,0.98),(.03,.2),(0,10)];
self.p = None;
def _get_result(self,X,p):#X a single column vector of sys and dias volume
Y = np.zeros((X.shape[0],2));
Y[1::2,0] = np.clip(X[1::2]*p[0]+p[1],4,580);
Y[::2,0] = np.clip(np.maximum(Y[1::2,0]*p[2], X[::2]*p[3]),4,580);
Y[:,1] = np.clip(Y[:,0]*p[4]+p[5], 0, 35);
dele = np.array([[i*2,i*2+1] for i in range(X.shape[0]/2) if X[i*2+1]<40]).reshape((-1))
if len(dele) > 0:
Y[dele]=np.nan
return Y;
def fit(self, results, train_true):
x = [];
y = [];
count = 0;
missing = [];
for idx,row in train_true.iterrows():
res = results.get(row['Id']);
if res is None or res[1] < 40:
missing.append(row['Id']);
continue
count+=1;
x.extend(res);
y.extend([row['Systole'],row['Diastole']]);
print("{} cases are used to fit the model".format(count));
if len(missing)>0:
print("cases are missing in train: " + ','.join([str(int(m)) for m in missing]));
x = np.asarray(x);
y = np.asarray(y);
ff = minimize(lambda p:analysis.crps_score(self._get_result(x,p),y), self.p0, bounds=self.bounds, options={'gtol':1e-5,'maxiter':500,'eps':1e-3});
self.p = ff.x;
print("fitting parameters " + str(self.p));
print("fitting score " + str(ff.fun));
def predict(self,results):
res = {};
if self.p is None:
print("need to fit the model first");
for case,sd in results.iteritems():
res[case] = self._get_result(np.asarray(sd),self.p).flatten();
return res;
class AverageModel(BaseModel):
def __init__(self,ll=9.5e-5):
self.p = None;
self.ll = ll;
def _get_result(self,X,p):
"""
        How to deal with NaNs?
        This code treats them as missing and applies the same coefficients;
        ideally, it would fit a separate model using only the remaining models.
"""
NR = X.shape[0];
y = np.zeros((NR,2));
p = np.asarray(p);
for i in range(NR):
preds = np.copy(X[i]).reshape((-1,2));
err0 = np.copy(preds[:,1]);
preds[:,1] = err0*p;
preds = preds[~np.isnan(preds[:,0])];
if preds.shape[0]==0:
y[i] = [np.nan,np.nan];
continue;
me = np.sum(preds[:,0]/preds[:,1]**2);
err = np.sum(1.0/preds[:,1]**2);
me /= err;
err = 1.0/np.sqrt(err);
err = np.minimum(np.nanmin(err0),err);
err *=(1.0 + np.std(preds[:,0])/np.max(preds[:,1])/3)**0.5;
y[i] = [me,err];
return y;
def fit(self,preds,train_true):
N = len(preds);
print("combine # predictions:" + ','.join([str(len(x)) for x in preds]));
self.p0 = np.ones(N)*np.sqrt(N);
X = np.zeros((train_true.shape[0]*2,N*2));
X[:] = np.nan;
y = [];
i = 0;
for idx,row in train_true.iterrows():
case = row['Id'];
y.extend([row['Systole'],row['Diastole']]);
for j in range(N):
sede = preds[j].get(case);
if sede is not None:
X[i*2,2*j:2*j+2] = sede[0:2];
X[i*2+1,2*j:2*j+2] = sede[2:4];
i += 1;
y = np.asarray(y);
print("init score :{}".format(analysis.crps_score(self._get_result(X,self.p0),y)));
ff = minimize(lambda p:analysis.crps_score(self._get_result(X,p),y) + self.ll*np.var(p), self.p0, options={'gtol':1e-5,'eps':1e-4,'maxiter':500});
self.p = ff.x;
print("fitting parameters " + str(self.p));
print("fitting score " + str(ff.fun));
def predict(self,preds):
print("combine # predictions:" + ','.join([str(len(x)) for x in preds]));
res = {};
css = [list(x.keys()) for x in preds];
css = set(list(itertools.chain.from_iterable(css)));
N = len(preds);
assert(N == self.p.size);
for case in css:
X = np.zeros((2,2*N));
X[:] = np.nan;
for j in range(N):
sede = preds[j].get(case);
if sede is not None:
X[0,2*j:2*j+2] = sede[0:2];
X[1,2*j:2*j+2] = sede[2:4];
res[case] = self._get_result(X,self.p).flatten();
return res;
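# --- Illustrative sketch of the combination rule used by AverageModel -------
# Each prediction is weighted by 1/err**2 (inverse-variance style); the
# numbers below are made up to show the arithmetic only.
def _demo_inverse_variance_combine():
    preds = np.array([[120.0, 10.0], [130.0, 20.0]]);  # (volume, err) pairs
    w = 1.0 / preds[:, 1]**2;
    mean = np.sum(w * preds[:, 0]) / np.sum(w);  # 122.0
    err = 1.0 / np.sqrt(np.sum(w));              # ~8.94
    return mean, err;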
class SaxFilterModel(BaseModel):
def __init__(self):
self.p0 = [1.0,1.0,0.05,4,0.05,4];
self.bounds = [(-0.5,1.8),(-0.5,1.5),(0.03,0.10),(0,10),(0.03,0.10),(0,10)];
self.p = None;
def _get_result(self,X,p):#X a single column vector of sys and dias volume
Y = np.zeros((X.shape[0],2));
idx = X[:,1]>1;
ridx = np.logical_not(idx);
Y[idx,0] = X[idx,0] - np.sqrt(X[idx,0])*p[0];
Y[ridx,0] = X[ridx,0] - np.sqrt(X[ridx,0])*p[1];
Y[idx,1] = np.clip(Y[idx,0]*p[2]+p[3],0,25);
Y[ridx,1] = np.clip(Y[ridx,0]*p[4]+p[5],0,25);
return Y;
def fit(self, results,train_true):
x = [];
y = [];
count = 0;
missing = [];
for idx,row in train_true.iterrows():
res = results.get(row['Id']);
if res is None:
missing.append(row['Id']);
continue
count+=1;
x.extend(res);
y.extend([row['Systole'],row['Diastole']]);
print("{} cases are used to fit the model".format(count));
if len(missing)>0:
print("cases are missing: " + ','.join([str(_x) for _x in missing]));
x = np.asarray(x).reshape((-1,2));
y = np.asarray(y);
ff = minimize(lambda p:analysis.crps_score(self._get_result(x,p),y), self.p0, bounds=self.bounds, options={'gtol':1e-5,'maxiter':500,'eps':1e-5});
self.p = ff.x;
print("fitting parameters " + str(self.p));
print("fitting score " + str(ff.fun));
def predict(self,results):
res = {};
if self.p is None:
print("need to fit the model first");
for case,sd in results.iteritems():
res[case] = self._get_result(np.asarray(sd).reshape(-1,2),self.p).flatten();
return res;
class SaxFeatureModel(BaseModel):
def __init__(self):
self.p0 = [0.2,-0.2,0.9, 0.5,-0.5,0.5,4];
self.bounds = [(-0.5,0.5),(-0.5,0.5),(0.0,2.0),\
(-3.0,3.0),(-3.0,3.0),(-3.0,3.0),(2,10)];
self.p = None;
def _get_result(self,X,p):#X a single column vector of sys and dias volume
Y = np.zeros((X.shape[0],2));
e1 = (X[:,1]>1)*1.0;
e2 = (X[:,2]<=7)*1.0;
e3 = (X[:,3]>1.3)*1.0;
Y[:,0] = X[:,0] - np.sqrt(X[:,0])*(p[0]*e1+p[1]*e2+p[2])
Y[:,1] = np.clip(X[:,0]*(p[3]*e1+p[4]*e2+p[5]*e3+p[6])/100+4,4,30);
return Y;
def fit(self, results,train_true):
x = [];
y = [];
count = 0;
missing = [];
for idx,row in train_true.iterrows():
res = results.get(row['Id']);
if res is None:
missing.append(row['Id']);
continue
count+=1;
x.extend(res);
y.extend([row['Systole'],row['Diastole']]);
print("{} cases are used to fit the model".format(count));
if len(missing)>0:
print("cases are missing: " + ','.join([str(_x) for _x in missing]));
x = np.asarray(x).reshape((-1,4));
y = np.asarray(y);
ff = minimize(lambda p:analysis.crps_score(self._get_result(x,p),y), self.p0, bounds=self.bounds, options={'gtol':1e-6,'maxiter':500,'eps':1e-5});
self.p = ff.x;
print("fitting parameters " + str(self.p));
print("fitting score " + str(ff.fun));
def predict(self,results):
res = {};
if self.p is None:
print("need to fit the model first");
for case,sd in results.iteritems():
res[case] = self._get_result(np.asarray(sd).reshape(-1,4),self.p).flatten();
return res;
| mit | -2,980,231,887,850,449,000 | 36.454545 | 154 | 0.461386 | false |
vrjuggler/maestro | maestro/daemon/plugins/services/reboot/grubconfig.py | 2 | 13881 | # Copyright (C) Infiscape Corporation 2006
import re
import logging
class GrubBootTarget:
UNKNOWN = -1
LINUX = 0
WINDOWS = 1
FREEBSD = 2
# This matches the Linxu kernel version, the RPM revision, and any
# additional text.
#
# Groups:
# 1 - full kernel path (/boot/vmlinuz-... or /vmlinuz-...)
# 2 - kernel version with RPM revision (2.x.y-...)
# 3 - kernel version (2.x.y)
# 4 - RPM revision
# 5 - additional package text (may be nothing)
# 6 - kernel boot options
sLinuxKernelBootRe = re.compile(r'kernel\s+(/(boot/|)vmlinuz-((\d+\.\d+\.\d+)-([\d.]+)\.(\S*)))\s+(.*)\s*$')
# This matches the target typically used for booting FreeBSD from GRUB.
#
# Groups:
# 1 - full kernel path (/boot/kernel...)
sFreeBsdKernelBootRe = re.compile(r'kernel\s+(/boot/kernel.*)\s*')
# This matches the target typically used for booting Windows from GRUB.
#
# Groups:
# 1 - chain loader integer index
sChainBootRe = re.compile(r'chainloader \+(\d+)\s*$')
def __init__(self, index, title, body):
self.mIndex = index
self.mTitle = title
self.mBody = body
self.mOS = self.UNKNOWN
self.mLogger = logging.getLogger("maestrod.reboot.GrubBootTarget")
self.mKernelPath = ''
self.mKernelPkgVersion = ''
self.mKernelVersion = ''
self.mKernelPkgRevision = ''
self.mKernelPkgExtraText = ''
self.mKernelOpts = ''
for l in body:
match = self.sLinuxKernelBootRe.search(l)
if match is not None:
self.mOS = self.LINUX
self.mKernelPath = match.group(1)
self.mKernelPkgVersion = match.group(2)
self.mKernelVersion = match.group(3)
self.mKernelPkgRevision = match.group(4)
self.mKernelPkgExtraText = match.group(5)
self.mKernelOpts = match.group(6)
#self.mLogger.debug("mKernelPkgVersion = %s" % self.mKernelPkgVersion)
#self.mLogger.debug("mKernelVersion = %s" % self.mKernelVersion)
#self.mLogger.debug("mKernelPkgRevision = %s" % self.mKernelPkgRevision)
#self.mLogger.debug("mKernelPkgExtraText = %s" % self.mKernelPkgExtraText)
break
elif self.sFreeBsdKernelBootRe.search(l) is not None:
self.mOS = self.FREEBSD
            self.mKernelPath = self.sFreeBsdKernelBootRe.search(l).group(1)
break
elif self.sChainBootRe.search(l) is not None:
self.mOS = self.WINDOWS
break
if self.mOS == self.UNKNOWN:
self.mLogger.warning("Unknown operating system in:")
for l in body:
self.mLogger.warning(l.rstrip())
def getIndex(self):
return self.mIndex
def getOS(self):
return self.mOS
def isLinux(self):
return self.mOS == self.LINUX
def isWindows(self):
return self.mOS == self.WINDOWS
def isFreeBSD(self):
return self.mOS == self.FREEBSD
def getKernelPath(self):
'''
getKernelPath() -> str
Returns the full path to the kernel that will be booted by this target.
'''
return self.mKernelPath
def getKernelPkgVersion(self):
'''
getKernelPkgVersion() -> str
Returns the full package kernel version string.
'''
return self.mKernelPkgVersion
def getKernelVersion(self):
'''
getKernelVersion() -> str
Returns the kernel version string (of the form 2.x.y).
'''
return self.mKernelVersion
def getKernelPkgRevision(self):
'''
getKernelPkgRevision() -> str
Returns the kernel revision string. The form of this will vary, but
currently recognized forms are either a single integer (seen on
Red Hat Enterprise Linux 4) or a version of the form x.y (seen on
Fedora Core). Either way, the returned revision is a string.
'''
return self.mKernelPkgRevision
def getKernelPkgExtraText(self):
'''
getKernelPkgExtraText() -> str
Returns any additional text that may be part of the kernel package
version. Typically, this will include the distribution name and/or
whether this target is for an SMP kernel.
'''
return self.mKernelPkgExtraText
def __str__(self):
result = self.mTitle
for l in self.mBody:
result += l
return result
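# --- Illustrative sketch (made-up Fedora-style menu entry) -------------------
# GrubBootTarget parses the title line and body lines of one GRUB menu entry.
def _grub_boot_target_demo():
   title = 'title Fedora (2.6.18-1.2798.fc6)\n'
   body = ['\troot (hd0,0)\n',
           '\tkernel /vmlinuz-2.6.18-1.2798.fc6 ro root=/dev/sda1\n',
           '\tinitrd /initrd-2.6.18-1.2798.fc6.img\n']
   target = GrubBootTarget(0, title, body)
   # isLinux() -> True; getKernelVersion() -> '2.6.18';
   # getKernelPkgRevision() -> '1.2798'
   return target.isLinux(), target.getKernelVersion(), target.getKernelPkgRevision()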
class GrubConfig:
sTitleRe = re.compile(r'^title\s+(.*)\s*$')
sDefaultRe = re.compile(r'^default=(\d+)\s*$')
sSavedDefaultRe = re.compile(r'^#saved_default=(\d+)\s*$')
sTimeoutRe = re.compile(r'^timeout=(\d+)\s*$')
def __init__(self, grubConfFile):
self.mFile = grubConfFile
self.__read(self.mFile)
def __read(self, file):
f = open(file, 'r')
self.mContents = f.readlines()
f.close()
self.mTargets = []
i = 0
line_count = len(self.mContents)
cur_index = 0
while i < line_count:
line = self.mContents[i]
if self.sTitleRe.search(line) is None:
i += 1
else:
title = line
body = []
i += 1
while i < line_count:
line = self.mContents[i]
if self.sTitleRe.search(line) is None:
body.append(line)
else:
break
i += 1
self.mTargets.append(GrubBootTarget(cur_index, title, body))
cur_index += 1
def reset(self, file = None):
'''
reset([string])
Resets the state of this GRUB configuration object to be that of the
input file. If no argument is given to this method, then the original
GRUB configuration file is re-read. Otherwise, the given string is
interpreted as a different GRUB configuration that is used as a
replacement (in memory) for the old.
'''
if file is not None:
self.mFile = file
self.__read(self.mFile)
def getDefault(self):
'''
getDefault() -> int
Gets the integer identifier of the boot target that is the current
default. If there is no such default boot target, then None is returned.
'''
for l in self.mContents:
match = self.sDefaultRe.search(l)
if match is not None:
return int(match.group(1))
return None
def setDefault(self, index):
'''
setDefault(int)
Sets the default boot target to be the given identifier. It is assumed
      that the given identifier is for a valid target.
'''
# TODO: Should raise an exception if index > len(self.mTargets)
i = 0
line_count = len(self.mContents)
while i < line_count:
line = self.mContents[i]
if self.sDefaultRe.search(line) is not None:
self.mContents[i] = self.__makeDefault(index)
i += 1
def makeDefault(self, targetMatch):
'''
makeDefault(callable)
Changes the default boot target to be the one matched by the given
callable object. The callable must take a single argument that will be
of type GrubBootTarget, and it must return either True or False.
'''
t = 0
target_count = len(self.mTargets)
while t < target_count:
if targetMatch(self.mTargets[t]):
self.mContents[self.__getDefaultLine()] = self.__makeDefault(t)
t += 1
def getTimeout(self):
'''
getTimeout() -> int
Gets the current timeout to wait before booting the default target. If
there is no such default boot target, then None is returned.
'''
for l in self.mContents:
match = self.sTimeoutRe.search(l)
if match is not None:
return int(match.group(1))
return None
def setTimeout(self, timeout):
'''
setTimeout(int)
Sets the timeout to wait before booting the default target.
'''
i = 0
line_count = len(self.mContents)
while i < line_count:
line = self.mContents[i]
if self.sTimeoutRe.search(line) is not None:
self.mContents[i] = self.__makeTimeout(timeout)
i += 1
def __getDefaultLine(self):
'''
__getDefaultLine() -> int
Returns the line number of the default target setting in
self.mContents. If there is no such default boot target line in
self.mContents, then None is returned.
'''
line_count = len(self.mContents)
i = 0
while i < line_count:
l = self.mContents[i]
if self.sDefaultRe.search(l) is not None:
return i
i += 1
return None
def saveDefault(self):
'''
saveDefault()
Saves the current default boot target using a special token that is added
to the GRUB configuration data. This GRUB configuration object must be
serialized to a file in order for this change to take effect.
'''
# Ensure that we have only one saved default target line by removing any
# stale entries.
while self.hasSavedDefault():
(linux_default, line_num) = self.__getSavedDefault()
self.mContents.remove(self.mContents[line_num])
i = 0
line_count = len(self.mContents)
while i < line_count:
line = self.mContents[i]
match = self.sDefaultRe.search(line)
if match is not None:
cur_default_target = int(match.group(1))
# Inject the saved default into self.mContents after the default
# target line.
i += 1
self.mContents[i:i] = [self.__makeSavedDefault(cur_default_target)]
break
i += 1
def restoreDefault(self, targetMatch):
'''
restoreDefault(callable)
Restores the saved default (see saveDefault()) if there is one.
      Otherwise, the given callable object is used to find a replacement
default target. The first target matching the criteria of the given
callable is used as the new default. The given callable must take a
single argument of type GrubBootTarget and return either True or False.
'''
if self.hasSavedDefault():
# Get the saved default and then remove it from self.mContents.
(saved_default, line_num) = self.__getSavedDefault()
self.mContents.remove(self.mContents[line_num])
else:
target_count = len(self.mTargets)
t = 0
while t < target_count:
if targetMatch(self.mTargets[t]):
saved_default = t
break
t += 1
# Set the default boot target to be saved_default.
self.mContents[self.__getDefaultLine()] = self.__makeDefault(saved_default)
def save(self, outputFile = None):
'''
save([str])
Saves this GRUB configuration to an output file. If no argument is
given, then the original input file is overwritten. Otherwise, the
named file will be used for saving this GRUB configuration.
'''
if outputFile is None:
outputFile = self.mFile
f = open(outputFile, 'w')
f.writelines(self.mContents)
f.close()
def hasSavedDefault(self):
'''
hasSavedDefault() -> boolean
Identifies whether this GRUB configuration contains a saved default
boot target.
'''
for l in self.mContents:
if self.sSavedDefaultRe.search(l) is not None:
return True
return False
def getSavedDefault(self):
'''
getSavedDefault() -> int
Returns the boot target index for the saved default in this GRUB
configuration. If this GRUB configuration has no saved default, then
None is returned.
'''
return self.__getSavedDefault()[0]
def __getSavedDefault(self):
'''
      __getSavedDefault() -> (int, int)
Retrieves the saved default boot target from self.mContents and returns
a tuple containing the target index and the index in self.mContents
where this is set. If a saved default boot target is not found, the
returned tuple will be (None, None).
'''
line_count = len(self.mContents)
i = 0
while i < line_count:
l = self.mContents[i]
match = self.sSavedDefaultRe.search(l)
if match is not None:
return (int(match.group(1)), i)
i += 1
return (None, None)
def getTargets(self):
'''
getTargets() -> list
Returns a list of all the GRUB boot targets (instances of
GrubBootTarget) defined in this GRUB configuration.
'''
return self.mTargets
def getTarget(self, index):
'''
getTarget(int) -> GrubBootTarget
Returns the GrubBootTarget instance identified by the given integer
index.
'''
return self.mTargets[index]
def __makeDefault(self, index):
'''
      __makeDefault(int) -> str
      Creates a new line for the GRUB configuration that makes the boot
      target identified by the given integer value the default boot target.
      This is suitable for being injected into a GRUB configuration file.
'''
return 'default=%d\n' % index
def __makeSavedDefault(self, index):
'''
      __makeSavedDefault(int) -> str
      Creates a new line for the GRUB configuration that records the saved
      default boot target index, identified by the given integer value.
      This is suitable for being injected into a GRUB configuration file.
'''
return '#saved_default=%d\n' % index
def __makeTimeout(self, timeout):
'''
      __makeTimeout(int) -> str
      Creates a new line for the GRUB configuration that uses the given timeout
      value. This is suitable for being injected into a GRUB configuration file.
'''
return 'timeout=%d\n' % timeout
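# --- Illustrative sketch (hypothetical config path; normally needs root) -----
# A typical "boot the other OS once, then restore" workflow with GrubConfig.
def _grub_config_demo():
   cfg = GrubConfig('/boot/grub/grub.conf')
   cfg.saveDefault()                          # remember the current default
   cfg.makeDefault(lambda t: t.isWindows())   # make a Windows target default
   cfg.setTimeout(5)
   cfg.save()                                 # overwrite the original file
   # ... after the reboot, put the saved (or first Linux) target back:
   cfg.restoreDefault(lambda t: t.isLinux())
   cfg.save()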
| gpl-2.0 | -7,353,712,225,808,360,000 | 30.910345 | 111 | 0.605576 | false |
mhogg/scipy | scipy/special/add_newdocs.py | 11 | 70503 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtria(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value ``ph`` is such that if ``u = ellipkinc(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around m = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : ndarray
Value of the elliptic integral.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
This function is also called ``F(phi, m)``.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points x.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer n and non-negative x and n::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when x is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
known as Snedecor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v,z) = hankel2(v,z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t,t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n.
Notes
-----
`jn` is an alias of `jv`.
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : real
Degree. Must be ``v>-m-1`` or ``v<m``
x : complex
Argument. Must be ``|x| <= 1``.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
Returns the modified Struve function Lv(x) of order v at x; x must
be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central t distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d,dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
Computes the Struve function Hv(x) of order v at x; x must be
positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
| bsd-3-clause | -5,916,435,615,003,109,000 | 20.216672 | 194 | 0.582429 | false |
sanitysoon/nbase-arc | smr/test/integrated/Conf.py | 4 | 2058 | #
# Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
CWD = os.getcwd()
BASE_DIR = CWD
SMR_DIR = os.path.abspath("../../replicator")
BE_DIR = os.path.abspath("../cs")
LOG_DIR = os.path.abspath("../../smr")
SMR_BIN_PATH = os.path.join(SMR_DIR, "smr-replicator")
BE_BIN_PATH = os.path.join(BE_DIR, "smr-server")
CLIENT_BIN_PATH = os.path.join(BE_DIR, "smr-client")
LOG_UTIL_BIN_PATH = os.path.join(LOG_DIR, "smr-logutil")
PIN = None
PINTOOL_BASE = None
try:
PIN = os.environ['PIN']
PINTOOL_BASE = os.environ['PINTOOL_BASE']
except:
pass
OVERRIDE_SMR_BIN_PATH = None
VALGRIND_SMR = False
VALGRIND_BE = False
SMR_OPT_X = None
USE_MEM_LOG = os.path.exists("/tmp/opt_use_memlog")
##
## some global flags
##
def get_smr_args(pgs):
args = []
if VALGRIND_SMR:
args.append('valgrind')
args.append('-v')
args.append('--leak-check=full')
args.append('--show-reachable=yes')
if OVERRIDE_SMR_BIN_PATH:
args.append(OVERRIDE_SMR_BIN_PATH)
else:
args.append(SMR_BIN_PATH)
args.append('-d')
args.append(pgs.dir)
args.append('-b')
args.append(str(pgs.base_port))
if SMR_OPT_X:
args.append('-x')
args.append(SMR_OPT_X)
return args
def get_be_args(pgs):
args = []
args.append(BE_BIN_PATH)
args.append('-p')
args.append(str(pgs.base_port))
args.append('-s')
args.append(str(pgs.base_port + 9))
return args
def get_client_args():
args = []
args.append(CLIENT_BIN_PATH)
return args
| apache-2.0 | -6,125,678,582,260,120,000 | 24.725 | 74 | 0.654519 | false |
xia2/xia2 | src/xia2/cli/plot_multiplicity.py | 1 | 12682 | import json
import sys
import iotbx.phil
from cctbx.miller.display import render_2d, scene
from dials.util import Sorry
from iotbx.gui_tools.reflections import get_array_description
from iotbx.reflection_file_reader import any_reflection_file
from scitbx.array_family import flex
class MultiplicityViewPng(render_2d):
def __init__(self, scene, settings=None):
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
render_2d.__init__(self, scene, settings)
self._open_circle_points = flex.vec2_double()
self._open_circle_radii = []
self._open_circle_colors = []
self._filled_circle_points = flex.vec2_double()
self._filled_circle_radii = []
self._filled_circle_colors = []
self.fig, self.ax = pyplot.subplots(figsize=self.settings.size_inches)
self.render(self.ax)
pyplot.close()
def GetSize(self):
return self.fig.get_size_inches() * self.fig.dpi # size in pixels
def draw_line(self, ax, x1, y1, x2, y2):
ax.plot([x1, x2], [y1, y2], c=self._foreground)
def draw_text(self, ax, text, x, y):
ax.text(x, y, text, color=self._foreground, size=self.settings.font_size)
def draw_open_circle(self, ax, x, y, radius, color=None):
self._open_circle_points.append((x, y))
self._open_circle_radii.append(2 * radius)
if color is None:
color = self._foreground
self._open_circle_colors.append(color)
def draw_filled_circle(self, ax, x, y, radius, color):
self._filled_circle_points.append((x, y))
self._filled_circle_radii.append(2 * radius)
self._filled_circle_colors.append(color)
def render(self, ax):
from matplotlib import pyplot
from matplotlib import colors
render_2d.render(self, ax)
if self._open_circle_points.size():
x, y = self._open_circle_points.parts()
ax.scatter(
x.as_numpy_array(),
y.as_numpy_array(),
s=self._open_circle_radii,
marker="o",
edgecolors=self._open_circle_colors,
facecolors=None,
)
if self._filled_circle_points.size():
x, y = self._filled_circle_points.parts()
# use pyplot colormaps then we can more easily get a colorbar
data = self.scene.multiplicities.data()
cmap_d = {
"heatmap": "hot",
"redblue": colors.LinearSegmentedColormap.from_list(
"RedBlue", ["b", "r"]
),
"grayscale": "Greys_r" if self.settings.black_background else "Greys",
"mono": (
colors.LinearSegmentedColormap.from_list("mono", ["w", "w"])
if self.settings.black_background
else colors.LinearSegmentedColormap.from_list(
"mono", ["black", "black"]
)
),
}
cm = cmap_d.get(self.settings.color_scheme, self.settings.color_scheme)
if isinstance(cm, str):
cm = pyplot.cm.get_cmap(cm)
im = ax.scatter(
x.as_numpy_array(),
y.as_numpy_array(),
s=self._filled_circle_radii,
marker="o",
c=data.select(self.scene.slice_selection).as_numpy_array(),
edgecolors="none",
vmin=0,
vmax=flex.max(data),
cmap=cm,
)
# colorbar
cb = self.fig.colorbar(im, ax=ax)
[t.set_color(self._foreground) for t in cb.ax.get_yticklabels()]
[t.set_fontsize(self.settings.font_size) for t in cb.ax.get_yticklabels()]
self.ax.set_aspect("equal")
self.ax.set_facecolor(self._background)
xmax, ymax = self.GetSize()
ax.set_xlim(0, xmax)
ax.set_ylim(0, ymax)
ax.invert_yaxis()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
self.fig.tight_layout()
self.fig.savefig(
self.settings.plot.filename, bbox_inches="tight", facecolor=self._background
)
class MultiplicityViewJson(render_2d):
def __init__(self, scene, settings=None):
render_2d.__init__(self, scene, settings)
self._open_circle_points = flex.vec2_double()
self._open_circle_radii = []
self._open_circle_colors = []
self._filled_circle_points = flex.vec2_double()
self._filled_circle_radii = []
self._filled_circle_colors = []
self._text = {"x": [], "y": [], "text": []}
self._lines = []
json_d = self.render(None)
if self.settings.json.compact:
indent = None
else:
indent = 2
with open(self.settings.json.filename, "w") as fh:
json.dump(json_d, fh, indent=indent)
def GetSize(self):
return 1600, 1600 # size in pixels
def draw_line(self, ax, x1, y1, x2, y2):
self._lines.append((x1, y1, x2, y2))
def draw_text(self, ax, text, x, y):
self._text["x"].append(x)
self._text["y"].append(y)
self._text["text"].append(text)
def draw_open_circle(self, ax, x, y, radius, color=None):
self._open_circle_points.append((x, y))
self._open_circle_radii.append(2 * radius)
if color is None:
color = self._foreground
self._open_circle_colors.append(color)
def draw_filled_circle(self, ax, x, y, radius, color):
self._filled_circle_points.append((x, y))
self._filled_circle_radii.append(2 * radius)
self._filled_circle_colors.append(color)
def render(self, ax):
render_2d.render(self, ax)
data = []
if self._open_circle_points.size():
x, y = self._open_circle_points.parts()
z = self._open_circle_colors
data.append(
{
"x": list(x.round(1)),
"y": list(y.round(1)),
#'z': list(z),
"type": "scatter",
"mode": "markers",
"name": "missing reflections",
"showlegend": False,
"marker": {
#'color': list(z),
"color": (
"white" if self.settings.black_background else "black"
),
"line": {
#'color': 'black',
"width": 0
},
"symbol": "circle",
"size": 5,
},
}
)
if self._filled_circle_points.size():
x, y = self._filled_circle_points.parts()
z = self.scene.multiplicities.data().select(self.scene.slice_selection)
# why doesn't this work?
# colorscale = []
# assert len(z) == len(self._filled_circle_colors)
# for zi in range(flex.max(z)+1):
# i = flex.first_index(z, zi)
# if i is None: continue
# print i, self._filled_circle_colors[i], 'rgb(%i,%i,%i)' %tuple(rgb * 264 for rgb in self._filled_circle_colors[i])
# colorscale.append([zi, 'rgb(%i,%i,%i)' %self._filled_circle_colors[i]])
cmap_d = {
"rainbow": "Jet",
"heatmap": "Hot",
"redblue": "RdbU",
"grayscale": "Greys",
"mono": None,
}
color = list(z)
colorscale = cmap_d.get(
self.settings.color_scheme, self.settings.color_scheme
)
if self.settings.color_scheme == "mono":
color = "black"
colorscale = None
data.append(
{
"x": list(x.round(1)),
"y": list(y.round(1)),
#'z': list(z),
"type": "scatter",
"mode": "markers",
"name": "multiplicity",
"showlegend": False,
"marker": {
"color": color,
"colorscale": colorscale,
"cmin": 0,
"cmax": flex.max(self.scene.multiplicities.data()),
"showscale": True,
"colorbar": {"title": "Multiplicity", "titleside": "right"},
"line": {
#'color': 'white',
"width": 0
},
"symbol": "circle",
"size": 5,
},
}
)
text = {"mode": "text", "showlegend": False, "textposition": "top right"}
text.update(self._text)
data.append(text)
shapes = []
for x0, y0, x1, y1 in self._lines:
# color = 'rgb(%i,%i,%i)' %tuple(rgb * 264 for rgb in self._foreground)
color = "black"
shapes.append(
{
"type": "line",
"x0": x0,
"y0": y0,
"x1": x1,
"y1": y1,
"layer": "below",
"line": {"color": color, "width": 2},
}
)
d = {
"data": data,
"layout": {
"plot_bgcolor": "rgb(%i,%i,%i)"
% tuple(rgb * 264 for rgb in self._background),
"title": "Multiplicity plot (%s=%s)"
% (self.settings.slice_axis, self.settings.slice_index),
"shapes": shapes,
"hovermode": False,
"xaxis": {
"showgrid": False,
"zeroline": False,
"showline": False,
"ticks": "",
"showticklabels": False,
},
"yaxis": {
"autorange": "reversed",
"showgrid": False,
"zeroline": False,
"showline": False,
"ticks": "",
"showticklabels": False,
},
},
}
return d
master_phil = iotbx.phil.parse(
"""
include scope cctbx.miller.display.master_phil
unit_cell = None
.type = unit_cell
space_group = None
.type = space_group
plot {
filename = multiplicities.png
.type = path
}
json {
filename = None
.type = path
compact = True
.type = bool
}
size_inches = 20,20
.type = floats(size=2, value_min=0)
font_size = 20
.type = int(value_min=1)
""",
process_includes=True,
)
def run(args=sys.argv[1:]):
pcl = iotbx.phil.process_command_line_with_files(
args=args,
master_phil=master_phil,
reflection_file_def="data",
pdb_file_def="symmetry_file",
usage_string="xia2.plot_multiplicity scaled_unmerged.mtz [options]",
)
settings = pcl.work.extract()
file_name = settings.data
try:
hkl_file = any_reflection_file(file_name)
except Exception as e:
raise Sorry(str(e))
arrays = hkl_file.as_miller_arrays(merge_equivalents=False)
valid_arrays = []
array_info = []
for array in arrays:
if array.is_hendrickson_lattman_array():
continue
if (not array.is_real_array()) and (not array.is_complex_array()):
continue
labels = array.info().label_string()
desc = get_array_description(array)
array_info.append(f"{labels} ({desc})")
valid_arrays.append(array)
if len(valid_arrays) == 0:
msg = "No arrays of the supported types in this file."
raise Sorry(msg)
miller_array = valid_arrays[0]
plot_multiplicity(miller_array, settings)
def plot_multiplicity(miller_array, settings):
settings.scale_colors_multiplicity = True
settings.scale_radii_multiplicity = True
settings.expand_to_p1 = True
settings.expand_anomalous = True
settings.slice_mode = True
if settings.plot.filename is not None:
MultiplicityViewPng(
scene(miller_array, settings, merge=True), settings=settings
)
if settings.json.filename is not None:
MultiplicityViewJson(
scene(miller_array, settings, merge=True), settings=settings
)
| bsd-3-clause | -8,879,024,635,849,080,000 | 33.091398 | 129 | 0.492746 | false |
ric2b/Vivaldi-browser | chromium/testing/scripts/run_chromedriver_tests.py | 10 | 1880 | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs chrome driver tests.
This script attempts to emulate the contract of gtest-style tests
invoked via recipes. The main contract is that the caller passes the
argument:
--isolated-script-test-output=[FILENAME]
json is written to that file in the format detailed here:
https://www.chromium.org/developers/the-json-test-results-format
Optional argument:
--isolated-script-test-filter=[TEST_NAMES]
is a double-colon-separated ("::") list of test names, to run just that subset
of tests. This list is forwarded to the chrome driver test runner.
"""
import argparse
import json
import os
import shutil
import sys
import tempfile
import traceback
import common
class ChromeDriverAdapter(common.BaseIsolatedScriptArgsAdapter):
def generate_test_output_args(self, output):
return ['--isolated-script-test-output', output]
def generate_test_filter_args(self, test_filter_str):
if any('--filter' in arg for arg in self.rest_args):
self.parser.error(
'can\'t have the test call filter with the'
'--isolated-script-test-filter argument to the wrapper script')
return ['--filter', test_filter_str.replace('::', ':')]
def main():
adapter = ChromeDriverAdapter()
return adapter.run_test()
# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
# Conform minimally to the protocol defined by ScriptTest.
if 'compile_targets' in sys.argv:
funcs = {
'run': None,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
sys.exit(main())
| bsd-3-clause | -3,160,538,949,060,423,700 | 26.246377 | 78 | 0.717553 | false |
kasshyss/pynard | legacy/pynard.py | 1 | 2386 | #!/usr/bin/env python
from flask import Flask, render_template, request
import m_conf as conf
import m_log as log
import m_IO as io
app = Flask(__name__)
page_title = "Pynard"
#Application start point
@app.route('/')
@app.route('/index/', methods=['GET', 'POST'])
@app.route('/index.html/')
@app.route('/home/')
def index():
title="Welcome in pynard, your cave manager"
#[label, targed page]
options_label = [["Add bottles", "add"],["Remove bottles", "remove"],["Display the stock", "stock"],["Display the bottles which nee to be drink", "to_drink"],["Ask the robot", "robot"]]
return render_template('index.html', options_label = options_label, title = title, page_title = page_title)
#Application add bottle point
@app.route('/add/', methods=['GET', 'POST'])
def add_bottle():
bottle = conf.get_conf('bottle.conf')
productor = conf.get_conf('productor.conf')
if request.method == 'GET':
title="Add new bottles to your cave"
button_label = 'Validate'
button_targed_page = 'add.html'
return render_template('add.html', title = title, page_title = page_title, bottle = bottle, button_label = button_label, productor = productor)
else: #POST
bottle_attribute_dic = {}
#for attribute in bottle:
# bottle_attribute_dic[db_name[attribute]] = request.form[attribute]
#if m_save.add_bottle(bottle_attribute_dic):
# title="Bottles added !"
#else:
title='Error in bottles creation'
button_label='Return to the main page'
button_target_page = '/index/'
return render_template('add.html', title = title, page_title = page_title, button_label=button_label, button_target_page=button_target_page)
#Application remove from the stock
@app.route('/remove/')
def rm_bottle():
title="Remove bootles to your cave"
return render_template('remove.html', title = title, page_title = page_title)
#Display the stock
@app.route('/stock/')
def stock():
cave = m_get.get_cave()
title="Display bootles from your cave"
return render_template('stock.html', title = title, page_title = page_title, cave = cave)
#Display bottles which need to be drunk
@app.route('/to_drink/')
def to_drink():
title="Display bottles which need to be drunk"
return render_template('to_drink.html', title = title, page_title = page_title)
| gpl-3.0 | 5,586,692,323,532,126,000 | 35.707692 | 189 | 0.654233 | false |
pombredanne/stdeb | stdeb/cli_runner.py | 4 | 5411 | from __future__ import print_function
import sys, os, shutil, subprocess
try:
# python 2.x
from ConfigParser import SafeConfigParser
except ImportError as err:
# python 3.x
from configparser import SafeConfigParser
from distutils.util import strtobool
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from stdeb.util import stdeb_cmdline_opts, stdeb_cmd_bool_opts
from stdeb.util import expand_sdist_file, apply_patch
from stdeb import log
from pkg_resources import Requirement, Distribution
class OptObj: pass
def runit(cmd,usage):
if cmd not in ['sdist_dsc','bdist_deb']:
raise ValueError('unknown command %r'%cmd)
# process command-line options
bool_opts = map(translate_longopt, stdeb_cmd_bool_opts)
parser = FancyGetopt(stdeb_cmdline_opts+[
('help', 'h', "show detailed help message"),
])
optobj = OptObj()
args = parser.getopt(object=optobj)
for option in optobj.__dict__:
value = getattr(optobj,option)
is_string = type(value) == str
if option in bool_opts and is_string:
setattr(optobj, option, strtobool(value))
if hasattr(optobj,'help'):
print(usage)
parser.set_option_table(stdeb_cmdline_opts)
parser.print_help("Options:")
return 0
if len(args)!=1:
log.error('not given single argument (distfile), args=%r', args)
print(usage)
return 1
sdist_file = args[0]
final_dist_dir = optobj.__dict__.get('dist_dir','deb_dist')
tmp_dist_dir = os.path.join(final_dist_dir,'tmp_py2dsc')
if os.path.exists(tmp_dist_dir):
shutil.rmtree(tmp_dist_dir)
os.makedirs(tmp_dist_dir)
if not os.path.isfile(sdist_file):
log.error("Package %s not found."%sdist_file)
sys.exit(1)
patch_file = optobj.__dict__.get('patch_file',None)
patch_level = int(optobj.__dict__.get('patch_level',0))
patch_posix = int(optobj.__dict__.get('patch_posix',0))
expand_dir = os.path.join(tmp_dist_dir,'stdeb_tmp')
if os.path.exists(expand_dir):
shutil.rmtree(expand_dir)
if not os.path.exists(tmp_dist_dir):
os.mkdir(tmp_dist_dir)
os.mkdir(expand_dir)
expand_sdist_file(os.path.abspath(sdist_file),cwd=expand_dir)
# now the sdist package is expanded in expand_dir
expanded_root_files = os.listdir(expand_dir)
assert len(expanded_root_files)==1
repackaged_dirname = expanded_root_files[0]
fullpath_repackaged_dirname = os.path.join(tmp_dist_dir,repackaged_dirname)
base_dir = os.path.join(expand_dir,expanded_root_files[0])
if os.path.exists(fullpath_repackaged_dirname):
# prevent weird build errors if this dir exists
shutil.rmtree(fullpath_repackaged_dirname)
os.renames(base_dir, fullpath_repackaged_dirname)
del base_dir # no longer useful
##############################################
if patch_file is not None:
log.info('py2dsc applying patch %s', patch_file)
apply_patch(patch_file,
posix=patch_posix,
level=patch_level,
cwd=fullpath_repackaged_dirname)
patch_already_applied = 1
else:
patch_already_applied = 0
##############################################
abs_dist_dir = os.path.abspath(final_dist_dir)
extra_args = []
for long in parser.long_opts:
if long in ['dist-dir=','patch-file=']:
continue # dealt with by this invocation
attr = parser.get_attr_name(long).rstrip('=')
if hasattr(optobj,attr):
val = getattr(optobj,attr)
if attr=='extra_cfg_file':
val = os.path.abspath(val)
if long in bool_opts or long.replace('-', '_') in bool_opts:
extra_args.append('--%s' % long)
else:
extra_args.append('--'+long+str(val))
if patch_already_applied == 1:
extra_args.append('--patch-already-applied')
if cmd=='bdist_deb':
extra_args.append('bdist_deb')
args = [sys.executable,'setup.py','--command-packages','stdeb.command',
'sdist_dsc','--dist-dir=%s'%abs_dist_dir,
'--use-premade-distfile=%s'%os.path.abspath(sdist_file)]+extra_args
log.info('-='*35 + '-')
# print >> sys.stderr, '-='*20
# print >> sys.stderr, "Note that the .cfg file(s), if present, have not "\
# "been read at this stage. If options are necessary, pass them from "\
# "the command line"
log.info("running the following command in directory: %s\n%s",
fullpath_repackaged_dirname, ' '.join(args))
log.info('-='*35 + '-')
try:
returncode = subprocess.call(
args,cwd=fullpath_repackaged_dirname,
)
except:
log.error('ERROR running: %s', ' '.join(args))
log.error('ERROR in %s', fullpath_repackaged_dirname)
raise
if returncode:
log.error('ERROR running: %s', ' '.join(args))
log.error('ERROR in %s', fullpath_repackaged_dirname)
#log.error(' stderr: %s'res.stderr.read())
#print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),)
#print >> sys.stderr, res.stderr.read()
return returncode
#raise RuntimeError('returncode %d'%returncode)
#result = res.stdout.read().strip()
shutil.rmtree(tmp_dist_dir)
return returncode
| mit | -5,088,382,721,770,560,000 | 34.366013 | 80 | 0.608575 | false |
WakelessDragon/learning-python | gettingStarted/t_office/readTxt2.py | 1 | 3102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
order_relation = []
with codecs.open(r'C:\Users\since\Downloads\20170209_task\order_relation.txt', 'r') as f:
ln = 0
for line in f:
ln += 1
if ln <= 1:
continue
arr = line.strip(' \t\n\r').split(" ")
obj = {"id": arr[0].strip(' \t\n\r'),
"user_id": arr[1].strip(' \t\n\r'),
"order_id": arr[2].strip(' \t\n\r'),
"entity_id": arr[3].strip(' \t\n\r'),
"entity_type": arr[4].strip(' \t\n\r')}
order_relation.append(obj)
print 'order_relation:', len(order_relation)
order_num = []
with codecs.open(r'C:\Users\since\Downloads\20170209_task\order_num.txt', 'r') as f:
ln = 0
for line in f:
ln += 1
if ln <= 1:
continue
arr = line.strip(' \t\n\r').split(" ")
obj = {"buyer_id": arr[0].strip(' \t\n\r'), "num": arr[1].strip(' \t\n\r')}
order_num.append(obj)
print 'order_num:', len(order_num)
userTaskId_userId = []
with codecs.open(r'C:\Users\since\Downloads\20170209_task\userTaskId_userId.txt', 'r') as f:
ln = 0
for line in f:
ln += 1
if ln <= 1:
continue
arr = line.strip(' \t\n\r').split("|")
obj = {"user_task_id": arr[1].strip(' \t\n\r'), "user_id": arr[2].strip(' \t\n\r')}
userTaskId_userId.append(obj)
print 'userTaskId_userId:', len(userTaskId_userId)
order_relation_distinct = []
def distinct_data():
for rel in order_relation:
for ut in userTaskId_userId:
if (rel['entity_type'] == '1') and (rel['user_id'] == ut['user_id']) and (rel['entity_id'] == ut['user_task_id']):
order_relation_distinct.append(rel)
distinct_data()
print 'order_relation_distinct:', len(order_relation_distinct)
order_num_group = {}
def group_order_num():
for o in order_num:
order_num_group[o['buyer_id']] = o['num']
group_order_num()
user_group = {}
def group_user():
for u in order_relation_distinct:
user_id = u['user_id']
obj = user_group.get(user_id, None)
if obj is None:
obj = {'user_id': user_id, 'task_num': 0, 'true_num': 0, 'order_num': order_num_group.get(user_id, 0)}
user_group[user_id] = obj
obj['task_num'] += 1
if u['order_id'] != '-1':
obj['true_num'] += 1
group_user()
print len(user_group)
eventual_list = []
def eventual_filter():
for key, value in user_group.iteritems():
m = min(int(value['task_num']), int(value['order_num']))
if int(value['true_num']) < m:
value['reissue'] = m - int(value['true_num'])
eventual_list.append(value)
eventual_filter()
with codecs.open(r'C:\Users\since\Downloads\20170209_task\eventual.txt', 'w') as f:
for row in eventual_list:
line = row['user_id'] + "," + \
str(row['task_num']) + "," + \
str(row['true_num']) + "," + \
str(row['order_num']) + "," + \
str(row['reissue'])
f.write(line + "\n")
| gpl-2.0 | 3,033,995,660,956,369,000 | 30.979381 | 126 | 0.530625 | false |
emersonsoftware/ansiblefork | lib/ansible/plugins/filter/core.py | 9 | 16712 | # (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import base64
import itertools
import json
import os.path
import ntpath
import glob
import re
import crypt
import hashlib
import string
from functools import partial
from random import Random, SystemRandom, shuffle
from datetime import datetime
import uuid
import yaml
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
try:
import passlib.hash
HAS_PASSLIB = True
except:
HAS_PASSLIB = False
from ansible import errors
from ansible.compat.six import iteritems, string_types, integer_types
from ansible.compat.six.moves import reduce
from ansible.compat.six.moves import shlex_quote
from ansible.module_utils._text import to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
from ansible.vars.hostvars import HostVars
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
class AnsibleJSONEncoder(json.JSONEncoder):
'''
Simple encoder class to deal with JSON encoding of internal
types like HostVars
'''
def default(self, o):
if isinstance(o, HostVars):
return dict(o)
else:
return super(AnsibleJSONEncoder, self).default(o)
def to_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw)
return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
return to_text(transformed)
def to_json(a, *args, **kw):
''' Convert the value to JSON '''
return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, *args, **kw):
'''Make verbose, human readable JSON'''
# python-2.6's json encoder is buggy (can't encode hostvars)
if sys.version_info < (2, 7):
try:
import simplejson
except ImportError:
pass
else:
try:
major = int(simplejson.__version__.split('.')[0])
except:
pass
else:
if major >= 2:
return simplejson.dumps(a, indent=indent, sort_keys=True, *args, **kw)
try:
return json.dumps(a, indent=indent, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
except:
# Fallback to the to_json filter
return to_json(a, *args, **kw)
def to_bool(a):
''' return a bool for the arg '''
if a is None or type(a) == bool:
return a
if isinstance(a, string_types):
a = a.lower()
if a in ['yes', 'on', '1', 'true', 1]:
return True
else:
return False
def to_datetime(string, format="%Y-%d-%m %H:%M:%S"):
return datetime.strptime(string, format)
def quote(a):
''' return its argument quoted for shell usage '''
return shlex_quote(a)
def fileglob(pathname):
''' return list of matched regular files for glob '''
return [ g for g in glob.glob(pathname) if os.path.isfile(g) ]
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
''' Perform a `re.sub` returning a string '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
if ignorecase:
flags = re.I
else:
flags = 0
_re = re.compile(pattern, flags=flags)
return _re.sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
''' Perform re.findall and return the list of matches '''
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
''' Perform re.search and return the list of matches or a backref '''
groups = list()
for arg in args:
if arg.startswith('\\g'):
match = re.match(r'\\g<(\S+)>', arg).group(1)
groups.append(match)
elif arg.startswith('\\'):
match = int(re.match(r'\\(\d+)', arg).group(1))
groups.append(match)
else:
raise errors.AnsibleFilterError('Unknown argument')
flags = 0
if kwargs.get('ignorecase'):
flags |= re.I
if kwargs.get('multiline'):
flags |= re.M
match = re.search(regex, value, flags)
if match:
if not groups:
return match.group()
else:
items = list()
for item in groups:
items.append(match.group(item))
return items
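# Illustrative template usage (assumed values, not from the original source):
#
#   "{{ 'foo=bar' | regex_search('foo=(\\w+)', '\\1') }}"        -> ['bar']
#   "{{ 'foo=bar' | regex_search('(?P<v>\\w+)$', '\\g<v>') }}"   -> ['bar']
#
# Without backref arguments the filter returns the whole match ('foo=bar').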
def ternary(value, true_val, false_val):
''' value ? true_val : false_val '''
if value:
return true_val
else:
return false_val
def regex_escape(string):
'''Escape all regular expressions special characters from STRING.'''
return re.escape(string)
def from_yaml(data):
if isinstance(data, string_types):
return yaml.safe_load(data)
return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
if seed is None:
r = SystemRandom()
else:
r = Random(seed)
if isinstance(end, integer_types):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise errors.AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise errors.AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist, seed=None):
try:
mylist = list(mylist)
if seed:
r = Random(seed)
r.shuffle(mylist)
else:
shuffle(mylist)
except:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try: # see if hash is supported
h = hashlib.new(hashtype)
except:
return None
h.update(data)
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None):
# TODO: find a way to construct dynamically from system
cryptmethod= {
'md5': '1',
'blowfish': '2a',
'sha256': '5',
'sha512': '6',
}
if hashtype in cryptmethod:
if salt is None:
r = SystemRandom()
if hashtype in ['md5']:
saltsize = 8
else:
saltsize = 16
salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(saltsize)])
if not HAS_PASSLIB:
if sys.platform.startswith('darwin'):
raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
encrypted = crypt.crypt(password, saltstring)
else:
if hashtype == 'blowfish':
cls = passlib.hash.bcrypt
else:
cls = getattr(passlib.hash, '%s_crypt' % hashtype)
encrypted = cls.encrypt(password, salt=salt)
return encrypted
return None
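# Illustrative template usage (the hash and salt shown are made up, not real
# output):
#
#   "{{ 'secret' | password_hash('sha512') }}"
#       -> "$6$<16 random alphanumeric chars>$<sha512-crypt digest>"
#   "{{ 'secret' | password_hash('md5', 'mysalt') }}"
#       -> "$1$mysalt$<md5-crypt digest>"
#
# With passlib installed the same format is produced via passlib's
# sha512_crypt / md5_crypt handlers instead of the system crypt().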
def to_uuid(string):
return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
def mandatory(a):
from jinja2.runtime import Undefined
''' Make a variable mandatory '''
if isinstance(a, Undefined):
raise errors.AnsibleFilterError('Mandatory variable not defined.')
return a
def combine(*terms, **kwargs):
recursive = kwargs.get('recursive', False)
if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs):
raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument")
for t in terms:
if not isinstance(t, dict):
raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t))
if recursive:
return reduce(merge_hash, terms)
else:
return dict(itertools.chain(*map(iteritems, terms)))
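# Illustrative template usage (assumed values):
#
#   "{{ {'a': 1, 'b': {'x': 1}} | combine({'b': {'y': 2}}) }}"
#       -> {'a': 1, 'b': {'y': 2}}             (top-level keys are replaced)
#   "{{ {'a': 1, 'b': {'x': 1}} | combine({'b': {'y': 2}}, recursive=True) }}"
#       -> {'a': 1, 'b': {'x': 1, 'y': 2}}     (nested dicts are merged)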
def comment(text, style='plain', **kw):
# Predefined comment types
comment_styles = {
'plain': {
'decoration': '# '
},
'erlang': {
'decoration': '% '
},
'c': {
'decoration': '// '
},
'cblock': {
'beginning': '/*',
'decoration': ' * ',
'end': ' */'
},
'xml': {
'beginning': '<!--',
'decoration': ' - ',
'end': '-->'
}
}
# Pointer to the right comment type
style_params = comment_styles[style]
if 'decoration' in kw:
prepostfix = kw['decoration']
else:
prepostfix = style_params['decoration']
# Default params
p = {
'newline': '\n',
'beginning': '',
'prefix': (prepostfix).rstrip(),
'prefix_count': 1,
'decoration': '',
'postfix': (prepostfix).rstrip(),
'postfix_count': 1,
'end': ''
}
# Update default params
p.update(style_params)
p.update(kw)
# Compose substrings for the final string
str_beginning = ''
if p['beginning']:
str_beginning = "%s%s" % (p['beginning'], p['newline'])
str_prefix = ''
if p['prefix']:
if p['prefix'] != p['newline']:
str_prefix = str(
"%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
else:
str_prefix = str(
"%s" % (p['newline'])) * int(p['prefix_count'])
str_text = ("%s%s" % (
p['decoration'],
# Prepend each line of the text with the decorator
text.replace(
p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
# Remove trailing spaces when only decorator is on the line
"%s%s" % (p['decoration'], p['newline']),
"%s%s" % (p['decoration'].rstrip(), p['newline']))
str_postfix = p['newline'].join(
[''] + [p['postfix'] for x in range(p['postfix_count'])])
str_end = ''
if p['end']:
str_end = "%s%s" % (p['newline'], p['end'])
# Return the final string
return "%s%s%s%s%s" % (
str_beginning,
str_prefix,
str_text,
str_postfix,
str_end)
def extract(item, container, morekeys=None):
from jinja2.runtime import Undefined
value = container[item]
if value is not Undefined and morekeys is not None:
if not isinstance(morekeys, list):
morekeys = [morekeys]
try:
value = reduce(lambda d, k: d[k], morekeys, value)
except KeyError:
value = Undefined()
return value
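# Illustrative template usage (assumed values; `hostvars` is assumed to exist
# in the play context):
#
#   "{{ ['a', 'c'] | map('extract', {'a': 1, 'b': 2, 'c': 3}) | list }}"
#       -> [1, 3]
#   "{{ 'host1' | extract(hostvars, 'ansible_eth0') }}"
#       -> the 'ansible_eth0' fact of host1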
def failed(*a, **kw):
''' Test if task result yields failed '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|failed expects a dictionary")
rc = item.get('rc',0)
failed = item.get('failed',False)
if rc != 0 or failed:
return True
else:
return False
def success(*a, **kw):
''' Test if task result yields success '''
return not failed(*a, **kw)
def changed(*a, **kw):
''' Test if task result yields changed '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|changed expects a dictionary")
if not 'changed' in item:
changed = False
if ('results' in item # some modules return a 'results' key
and type(item['results']) == list
and type(item['results'][0]) == dict):
for result in item['results']:
changed = changed or result.get('changed', False)
else:
changed = item.get('changed', False)
return changed
def skipped(*a, **kw):
''' Test if task result yields skipped '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|skipped expects a dictionary")
skipped = item.get('skipped', False)
return skipped
@environmentfilter
def do_groupby(environment, value, attribute):
"""Overridden groupby filter for jinja2, to address an issue with
jinja2>=2.9.0,<2.9.5 where a namedtuple was returned which
has repr that prevents ansible.template.safe_eval.safe_eval from being
able to parse and eval the data.
jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
>=2.9.5 uses a standard tuple repr on the namedtuple.
The adaptation here is to run the jinja2 `do_groupby` function and
cast all of the namedtuples to regular tuples.
See https://github.com/ansible/ansible/issues/20098
We may be able to remove this in the future.
"""
return [tuple(t) for t in _do_groupby(environment, value, attribute)]
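# Illustrative note (not part of the original source): with jinja2 2.9.0-2.9.4
# the upstream filter yields namedtuple items, something like
#   [_GroupTuple(grouper='a', list=[...])]
# whose repr safe_eval cannot parse; the cast above normalises them to plain
# tuples such as [('a', [...])], matching the pre-2.9 behaviour.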
class FilterModule(object):
''' Ansible core jinja2 filters '''
def filters(self):
return {
# jinja2 overrides
'groupby': do_groupby,
# base 64
'b64decode': partial(unicode_wrap, base64.b64decode),
'b64encode': partial(unicode_wrap, base64.b64encode),
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': from_yaml,
#date
'to_datetime': to_datetime,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
# value as boolean
'bool': to_bool,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digest of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksumming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# file glob
'fileglob': fileglob,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
'regex_search': regex_search,
'regex_findall': regex_findall,
# ? : ;
'ternary': ternary,
# list
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# merge dicts
'combine': combine,
# comment-style decoration
'comment': comment,
# array and dict lookups
'extract': extract,
# failure testing
'failed' : failed,
'failure' : failed,
'success' : success,
'succeeded' : success,
# changed testing
'changed' : changed,
'change' : changed,
# skip testing
'skipped' : skipped,
'skip' : skipped,
# debug
'type_debug': lambda o: o.__class__.__name__,
}
| gpl-3.0 | 7,307,018,940,142,525,000 | 28.736655 | 147 | 0.57725 | false |
dpendl00/headphones | lib/pygazelle/inbox.py | 26 | 3948 | class MailboxMessage(object):
def __init__(self, api, message):
self.id = message['convId']
self.conv = Conversation(api, self.id)
self.subject = message['subject']
self.unread = message['unread']
self.sticky = message['sticky']
self.fwd_id = message['forwardedId']
self.fwd_name = message['forwardedName']
self.sender_id = message['senderId']
self.username = message['username']
self.donor = message['donor']
self.warned = message['warned']
self.enabled = message['enabled']
self.date = message['date']
def __repr__(self):
return "MailboxMessage ID %s - %s %s %s" % (self.id, self.subject, self.sender_id, self.username)
class ConversationMessage(object):
def __init__(self, msg_resp):
self.id = msg_resp['messageId']
self.sender_id = msg_resp['senderId']
self.sender_name = msg_resp['senderName']
self.sent_date = msg_resp['sentDate']
self.bb_body = msg_resp['bbBody']
self.body = msg_resp['body']
def __repr__(self):
return "ConversationMessage ID %s - %s %s" % (self.id, self.sender_name, self.sent_date)
class Conversation(object):
def __init__(self, api, conv_id):
self.id = conv_id
self.parent_api = api
self.subject = None
self.sticky = None
self.messages = []
def __repr__(self):
return "Conversation ID %s - %s" % (self.id, self.subject)
def set_conv_data(self, conv_resp):
assert self.id == conv_resp['convId']
self.subject = conv_resp['subject']
self.sticky = conv_resp['sticky']
self.messages = [ConversationMessage(m) for m in conv_resp['messages']]
def update_conv_data(self):
response = self.parent_api.request(action='inbox',
type='viewconv', id=self.id)
self.set_conv_data(response)
class Mailbox(object):
"""
This class represents the logged in user's inbox/sentbox
"""
def __init__(self, parent_api, boxtype='inbox', page='1', sort='unread'):
self.parent_api = parent_api
self.boxtype = boxtype
self.current_page = page
self.total_pages = None
self.sort = sort
self.messages = None
def set_mbox_data(self, mbox_resp):
"""
Takes parsed JSON response from 'inbox' action on api
and updates the available subset of mailbox information.
"""
self.current_page = mbox_resp['currentPage']
self.total_pages = mbox_resp['pages']
self.messages = \
[MailboxMessage(self.parent_api, m) for m in mbox_resp['messages']]
def update_mbox_data(self):
response = self.parent_api.request(action='inbox',
type=self.boxtype, page=self.current_page, sort=self.sort)
self.set_mbox_data(response)
def next_page(self):
if not self.total_pages:
raise ValueError("call update_mbox_data() first")
total_pages = int(self.total_pages)
cur_page = int(self.current_page)
if cur_page < total_pages:
return Mailbox(self.parent_api, self.boxtype,
str(cur_page + 1), self.sort)
raise ValueError("Already at page %d/%d" % (cur_page, total_pages))
def prev_page(self):
if not self.total_pages:
raise ValueError("call update_mbox_data() first")
total_pages = int(self.total_pages)
cur_page = int(self.current_page)
if cur_page > 1:
return Mailbox(self.parent_api, self.boxtype,
str(cur_page - 1), self.sort)
raise ValueError("Already at page %d/%d" % (cur_page, total_pages))
def __repr__(self):
return "Mailbox: %s %s Page %s/%s" \
% (self.boxtype, self.sort,
self.current_page, self.total_pages)
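# Illustrative usage sketch (assumes an already-configured pygazelle API
# object `api`; anything outside this module is an assumption):
#
#   mbox = Mailbox(api)            # defaults: inbox, page 1, sorted by unread
#   mbox.update_mbox_data()        # fetch and parse the 'inbox' action
#   for msg in mbox.messages:
#       msg.conv.update_conv_data()    # pull the full conversation on demand
#   mbox = mbox.next_page()        # raises ValueError on the last page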
| gpl-3.0 | 6,020,182,143,425,904,000 | 35.897196 | 105 | 0.578267 | false |
crmccreary/openerp_server | openerp/addons/stock/wizard/stock_change_standard_price.py | 9 | 5671 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
import decimal_precision as dp
class change_standard_price(osv.osv_memory):
_name = "stock.change.standard.price"
_description = "Change Standard Price"
_columns = {
'new_price': fields.float('Price', required=True, digits_compute=dp.get_precision('Account'),
help="If cost price is increased, stock variation account will be debited "
"and stock output account will be credited with the value = (difference of amount * quantity available).\n"
"If cost price is decreased, stock variation account will be creadited and stock input account will be debited."),
'stock_account_input':fields.many2one('account.account', 'Stock Input Account'),
'stock_account_output':fields.many2one('account.account', 'Stock Output Account'),
'stock_journal':fields.many2one('account.journal', 'Stock journal', required=True),
'enable_stock_in_out_acc':fields.boolean('Enable Related Account',),
}
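# Illustrative example of the rule described in the 'new_price' help text
# (numbers are hypothetical): raising the cost price of a product with 5
# units on hand from 10.00 to 12.00 posts a move of (12.00 - 10.00) * 5 =
# 10.00, debiting the stock variation account and crediting the stock
# output account; lowering the price posts the reverse entry.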
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
if context is None:
context = {}
product_pool = self.pool.get('product.product')
product_obj = product_pool.browse(cr, uid, context.get('active_id', False))
res = super(change_standard_price, self).default_get(cr, uid, fields, context=context)
accounts = product_pool.get_product_accounts(cr, uid, context.get('active_id', False), context={})
price = product_obj.standard_price
if 'new_price' in fields:
res.update({'new_price': price})
if 'stock_account_input' in fields:
res.update({'stock_account_input': accounts['stock_account_input']})
if 'stock_account_output' in fields:
res.update({'stock_account_output': accounts['stock_account_output']})
if 'stock_journal' in fields:
res.update({'stock_journal': accounts['stock_journal']})
if 'enable_stock_in_out_acc' in fields:
res.update({'enable_stock_in_out_acc': True})
return res
# onchange_price function is not used anywhere
def onchange_price(self, cr, uid, ids, new_price, context=None):
""" Sets stock input and output account according to the difference
of old price and new price.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param new_price: Changed price
@param context: A standard dictionary
@return: Dictionary of values
"""
if context is None:
context = {}
product_obj = self.pool.get('product.product').browse(cr, uid, context.get('active_id', False), context=context)
price = product_obj.standard_price
diff = price - new_price
if diff > 0 :
return {'value' : {'enable_stock_in_out_acc':True}}
else :
return {'value' : {'enable_stock_in_out_acc':False}}
def change_price(self, cr, uid, ids, context=None):
""" Changes the Standard Price of Product.
And creates an account move accordingly.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
rec_id = context and context.get('active_id', False)
assert rec_id, _('Active ID is not set in Context')
prod_obj = self.pool.get('product.product')
res = self.browse(cr, uid, ids, context=context)
datas = {
'new_price' : res[0].new_price,
'stock_output_account' : res[0].stock_account_output.id,
'stock_input_account' : res[0].stock_account_input.id,
'stock_journal' : res[0].stock_journal.id
}
prod_obj.do_change_standard_price(cr, uid, [rec_id], datas, context)
return {'type': 'ir.actions.act_window_close'}
change_standard_price()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,892,312,078,342,729,000 | 45.867769 | 154 | 0.611885 | false |
sekikn/incubator-airflow | airflow/migrations/versions/c8ffec048a3b_add_fields_to_dag.py | 8 | 1395 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add fields to dag
Revision ID: c8ffec048a3b
Revises: 41f5f12752f8
Create Date: 2018-12-23 21:55:46.463634
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'c8ffec048a3b'
down_revision = '41f5f12752f8'
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
op.add_column('dag', sa.Column('description', sa.Text(), nullable=True))
op.add_column('dag', sa.Column('default_view', sa.String(25), nullable=True))
def downgrade(): # noqa: D103
op.drop_column('dag', 'description')
op.drop_column('dag', 'default_view')
| apache-2.0 | 1,705,448,137,080,268,300 | 30.704545 | 81 | 0.739785 | false |
silvansky/pjsip_mod | tests/pjsua/scripts-sendto/171_timer_initiated_by_uas.py | 3 | 1191 | # $Id: 171_timer_initiated_by_uas.py 3307 2010-09-08 05:38:49Z nanang $
import inc_sip as sip
import inc_sdp as sdp
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=pjmedia
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
"""
# RFC 4028 Section 9:
# If the incoming request contains a Supported header field with a
# value 'timer' but does not contain a Session-Expires header, it means
# that the UAS is indicating support for timers but is not requesting
# one. The UAS may request a session timer in the 2XX response by
# including a Session-Expires header field. The value MUST NOT be set
# to a duration lower than the value in the Min-SE header field in the
# request, if it is present.
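# Illustrative 200 OK header this scenario expects (exact values may differ;
# the check below only requires a Session-Expires header carrying a
# refresher parameter):
#
#   Session-Expires: 1800;refresher=uas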
pjsua_args = "--null-audio --auto-answer 200 --use-timer 2 --timer-min-se 90 --timer-se 1800"
extra_headers = "Supported: timer\n"
include = ["Session-Expires: .*;refresher=.*"]
exclude = []
sendto_cfg = sip.SendtoCfg("Session Timer initiated by UAS", pjsua_args, sdp, 200,
extra_headers=extra_headers,
resp_inc=include, resp_exc=exclude)
| gpl-2.0 | -3,742,569,806,358,724,600 | 31.083333 | 93 | 0.68094 | false |
spitfire88/upm | examples/python/grovewfs.py | 7 | 2489 | #!/usr/bin/python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_grovewfs as upmGrovewfs
def main():
# Instantiate a Grove Water Flow Sensor on digital pin D2
myWaterFlow = upmGrovewfs.GroveWFS(2)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit,
# including functions from myWaterFlow
def exitHandler():
myWaterFlow.stopFlowCounter()
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# set the flow counter to 0 and start counting
myWaterFlow.clearFlowCounter()
myWaterFlow.startFlowCounter()
while (1):
# we grab these (millis and flowCount) just for display
# purposes in this example
millis = myWaterFlow.getMillis()
flowCount = myWaterFlow.flowCounter()
fr = myWaterFlow.flowRate()
# output milliseconds passed, flow count, and computed flow rate
outputStr = "Millis: {0} Flow Count: {1} Flow Rate: {2} LPM".format(
millis, flowCount, fr)
print(outputStr)
time.sleep(2)
if __name__ == '__main__':
main()
| mit | -5,607,194,514,976,063,000 | 36.149254 | 76 | 0.714343 | false |
gion86/awlsim | awlsim/core/operators.py | 1 | 24016 | # -*- coding: utf-8 -*-
#
# AWL simulator - operators
#
# Copyright 2012-2015 Michael Buesch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
#from awlsim.core.dynattrs cimport * #@cy
#from awlsim.core.statusword cimport * #@cy
from awlsim.core.dynattrs import * #@nocy
from awlsim.core.datatypes import *
from awlsim.core.statusword import * #@nocy
from awlsim.core.lstack import *
from awlsim.core.util import *
class AwlOperator(DynAttrs):
"""An AWL operator.
An operator is an 'argument' to an instruction.
For example MW 10 in:
L MW 10
"""
EnumGen.start # Operator types
__IMM_START = EnumGen.item
IMM = EnumGen.item # Immediate value (constant)
IMM_REAL = EnumGen.item # Real
IMM_S5T = EnumGen.item # S5T immediate
IMM_TIME = EnumGen.item # T# immediate
IMM_DATE = EnumGen.item # D# immediate
IMM_TOD = EnumGen.item # TOD# immediate
IMM_DT = EnumGen.item # DT# immediate
IMM_PTR = EnumGen.item # Pointer immediate (P#x.y, P#area x.y, P#DBn.DBX x.y)
IMM_STR = EnumGen.item # STRING immediate ('abc')
__IMM_END = EnumGen.item
MEM_E = EnumGen.item # Input
MEM_A = EnumGen.item # Output
MEM_M = EnumGen.item # Flags
MEM_L = EnumGen.item # Localstack
MEM_VL = EnumGen.item # Parent localstack (indirect access)
MEM_DB = EnumGen.item # Global datablock
MEM_DI = EnumGen.item # Instance datablock
MEM_T = EnumGen.item # Timer
MEM_Z = EnumGen.item # Counter
MEM_PA = EnumGen.item # Peripheral output
MEM_PE = EnumGen.item # Peripheral input
MEM_STW = EnumGen.item # Status word bit read
MEM_STW_Z = EnumGen.item # Status word "==0" read
MEM_STW_NZ = EnumGen.item # Status word "<>0" read
MEM_STW_POS = EnumGen.item # Status word ">0" read
MEM_STW_NEG = EnumGen.item # Status word "<0" read
MEM_STW_POSZ = EnumGen.item # Status word ">=0" read
MEM_STW_NEGZ = EnumGen.item # Status word "<=0" read
MEM_STW_UO = EnumGen.item # Status word "UO" read
MEM_DBLG = EnumGen.item # DB-register: DB length
MEM_DBNO = EnumGen.item # DB-register: DB number
MEM_DILG = EnumGen.item # DI-register: DB length
MEM_DINO = EnumGen.item # DI-register: DB number
MEM_AR2 = EnumGen.item # AR2 register
BLKREF_FC = EnumGen.item # FC reference
BLKREF_SFC = EnumGen.item # SFC reference
BLKREF_FB = EnumGen.item # FB reference
BLKREF_SFB = EnumGen.item # SFB reference
BLKREF_UDT = EnumGen.item # UDT reference
BLKREF_DB = EnumGen.item # DB reference
BLKREF_DI = EnumGen.item # DI reference
BLKREF_OB = EnumGen.item # OB reference (only symbol table)
BLKREF_VAT = EnumGen.item # VAT reference (only symbol table)
MULTI_FB = EnumGen.item # FB multiinstance reference
MULTI_SFB = EnumGen.item # SFB multiinstance reference
LBL_REF = EnumGen.item # Label reference
SYMBOLIC = EnumGen.item # Classic symbolic reference ("xyz")
NAMED_LOCAL = EnumGen.item # Named local reference (#abc)
NAMED_LOCAL_PTR = EnumGen.item # Pointer to named local (P##abc)
NAMED_DBVAR = EnumGen.item # Named DB variable reference (DBx.VAR)
INDIRECT = EnumGen.item # Indirect access
UNSPEC = EnumGen.item # Not (yet) specified memory region
# Virtual operators used internally in awlsim, only.
# These operators do not have standard AWL mnemonics.
VIRT_ACCU = EnumGen.item # Accu
VIRT_AR = EnumGen.item # AR
VIRT_DBR = EnumGen.item # DB and DI registers
EnumGen.end # Operator types
# Type to string map
type2str = {
IMM : "IMMEDIATE",
IMM_REAL : "REAL",
IMM_S5T : "S5T",
IMM_TIME : "TIME",
IMM_DATE : "DATE",
IMM_TOD : "TOD",
IMM_DT : "DT",
IMM_PTR : "P#",
MEM_E : "E",
MEM_A : "A",
MEM_M : "M",
MEM_L : "L",
MEM_VL : "VL",
MEM_DB : "DB",
MEM_DI : "DI",
MEM_T : "T",
MEM_Z : "Z",
MEM_PA : "PA",
MEM_PE : "PE",
MEM_DBLG : "DBLG",
MEM_DBNO : "DBNO",
MEM_DILG : "DILG",
MEM_DINO : "DINO",
MEM_AR2 : "AR2",
MEM_STW : "STW",
MEM_STW_Z : "==0",
MEM_STW_NZ : "<>0",
MEM_STW_POS : ">0",
MEM_STW_NEG : "<0",
MEM_STW_POSZ : ">=0",
MEM_STW_NEGZ : "<=0",
MEM_STW_UO : "UO",
LBL_REF : "LABEL",
BLKREF_FC : "BLOCK_FC",
BLKREF_SFC : "BLOCK_SFC",
BLKREF_FB : "BLOCK_FB",
BLKREF_SFB : "BLOCK_SFB",
BLKREF_UDT : "BLOCK_UDT",
BLKREF_DB : "BLOCK_DB",
BLKREF_DI : "BLOCK_DI",
BLKREF_OB : "BLOCK_OB",
BLKREF_VAT : "BLOCK_VAT",
INDIRECT : "__INDIRECT",
VIRT_ACCU : "__ACCU",
VIRT_AR : "__AR",
VIRT_DBR : "__DBR",
}
# Dynamic attributes
dynAttrs = {
# Extended-operator flag.
"isExtended" : False,
# Possible label index.
"labelIndex" : None,
# Interface index number.
# May be set by the symbol resolver.
"interfaceIndex" : None,
# Compound data type flag.
# Set to true for accesses > 32 bit or
# arrays/structs or array/struct elements.
"compound" : False,
# The access data type (AwlDataType), if known.
# Only set for resolved symbolic accesses.
"dataType" : None,
}
def __init__(self, type, width, value, insn=None):
# type -> The operator type ID number. See "Operator types" above.
# width -> The bit width of the access.
# value -> The value. May be an AwlOffset or a string (depends on type).
# insn -> The instruction this operator is used in. May be None.
self.type, self.width, self.value, self.insn =\
type, width, value, insn
# Make a deep copy, except for "insn".
def dup(self):
if isInteger(self.value):
dupValue = self.value
else:
dupValue = self.value.dup()
oper = AwlOperator(type = self.type,
width = self.width,
value = dupValue,
insn = self.insn)
oper.setExtended(self.isExtended)
oper.setLabelIndex(self.labelIndex)
oper.interfaceIndex = self.interfaceIndex
return oper
def setInsn(self, newInsn):
self.insn = newInsn
def setExtended(self, isExtended):
self.isExtended = isExtended
def setLabelIndex(self, newLabelIndex):
self.labelIndex = newLabelIndex
def isImmediate(self):
return self.type > self.__IMM_START and\
self.type < self.__IMM_END
def _raiseTypeError(self, actualType, expectedTypes):
expectedTypes = [ self.type2str[t] for t in sorted(expectedTypes) ]
raise AwlSimError("Invalid operator type. Got %s, but expected %s." %\
(self.type2str[actualType],
listToHumanStr(expectedTypes)),
insn=self.insn)
def assertType(self, types, lowerLimit=None, upperLimit=None, widths=None):
if self.type == AwlOperator.NAMED_LOCAL or\
self.type == AwlOperator.NAMED_LOCAL_PTR:
return #FIXME we should check type for these, too.
types = toSet(types)
if not self.type in types:
self._raiseTypeError(self.type, types)
if lowerLimit is not None:
if self.value < lowerLimit:
raise AwlSimError("Operator value too small",
insn=self.insn)
if upperLimit is not None:
if self.value > upperLimit:
raise AwlSimError("Operator value too big",
insn=self.insn)
if widths is not None:
widths = toSet(widths)
if not self.width in widths:
raise AwlSimError("Invalid operator width. "
"Got %d, but expected %s." %\
(self.width, listToHumanStr(widths)))
def checkDataTypeCompat(self, cpu, dataType):
assert(isinstance(dataType, AwlDataType))
if self.type in (AwlOperator.NAMED_LOCAL,
AwlOperator.NAMED_LOCAL_PTR,
AwlOperator.NAMED_DBVAR,
AwlOperator.SYMBOLIC):
# These are checked again after resolve.
# So don't check them now.
return
def mismatch(dataType, oper, operWidth):
raise AwlSimError("Data type '%s' of width %d bits "
"is not compatible with operator '%s' "
"of width %d bits." %\
(str(dataType), dataType.width, str(oper), operWidth))
if dataType.type == AwlDataType.TYPE_UDT_X:
try:
udt = cpu.udts[dataType.index]
if udt.struct.getSize() * 8 != self.width:
raise ValueError
except (KeyError, ValueError) as e:
mismatch(dataType, self, self.width)
elif dataType.type in {AwlDataType.TYPE_POINTER,
AwlDataType.TYPE_ANY}:
if self.type == AwlOperator.IMM_PTR:
if dataType.type == AwlDataType.TYPE_POINTER and\
self.width > 48:
raise AwlSimError("Invalid immediate pointer "
"assignment to POINTER type.")
else:
if self.isImmediate():
raise AwlSimError("Invalid immediate "
"assignment to '%s' type." %\
str(dataType))
# Try to make pointer from operator.
# This will raise AwlSimError on failure.
self.makePointer()
elif dataType.type == AwlDataType.TYPE_CHAR:
if self.type == AwlOperator.IMM_STR:
if self.width != (2 + 1) * 8:
raise AwlSimError("String to CHAR parameter "
"must be only one single character "
"long.")
else:
if self.isImmediate():
raise AwlSimError("Invalid immediate '%s'"
"for CHAR data type." %\
str(self))
if self.width != dataType.width:
mismatch(dataType, self, self.width)
elif dataType.type == AwlDataType.TYPE_STRING:
if self.type == AwlOperator.IMM_STR:
if self.width > dataType.width:
mismatch(dataType, self, self.width)
else:
if self.isImmediate():
raise AwlSimError("Invalid immediate '%s'"
"for STRING data type." %\
str(self))
assert(self.width <= (254 + 2) * 8)
assert(dataType.width <= (254 + 2) * 8)
if dataType.width != (254 + 2) * 8:
if self.width != dataType.width:
mismatch(dataType, self, self.width)
else:
if self.width != dataType.width:
mismatch(dataType, self, self.width)
# Resolve this indirect operator to a direct operator.
def resolve(self, store=True):
# This already is a direct operator.
if self.type == self.NAMED_LOCAL:
# This is a named-local access (#abc).
# Resolve it to an interface-operator.
return self.insn.cpu.callStackTop.interfRefs[self.interfaceIndex].resolve(store)
return self
# Make an area-spanning Pointer (32 bit) to this memory area.
def makePointer(self):
return Pointer(self.makePointerValue())
# Make an area-spanning pointer value (32 bit) to this memory area.
def makePointerValue(self):
try:
area = AwlIndirectOp.optype2area[self.type]
except KeyError as e:
raise AwlSimError("Could not transform operator '%s' "
"into a pointer." % str(self))
return area | self.value.toPointerValue()
# Make a DBPointer (48 bit) to this memory area.
def makeDBPointer(self):
return DBPointer(self.makePointerValue(),
self.value.dbNumber)
# Make an ANY-pointer to this memory area.
# Returns an ANYPointer().
def makeANYPointer(self, areaShifted=None):
ptrValue = self.makePointerValue()
if areaShifted:
ptrValue &= ~Pointer.AREA_MASK_S
ptrValue |= areaShifted
if ANYPointer.dataTypeIsSupported(self.dataType):
return ANYPointer.makeByAutoType(dataType = self.dataType,
ptrValue = ptrValue,
dbNr = self.value.dbNumber)
return ANYPointer.makeByTypeWidth(bitWidth = self.width,
ptrValue = ptrValue,
dbNr = self.value.dbNumber)
def __repr__(self):
if self.type == self.IMM:
if self.width == 1:
return "TRUE" if (self.value & 1) else "FALSE"
elif self.width == 8:
return str(byteToSignedPyInt(self.value))
elif self.width == 16:
return str(wordToSignedPyInt(self.value))
elif self.width == 32:
return "L#" + str(dwordToSignedPyInt(self.value))
if self.type == self.IMM_REAL:
return str(dwordToPyFloat(self.value))
elif self.type == self.IMM_S5T:
seconds = Timer.s5t_to_seconds(self.value)
return "S5T#" + AwlDataType.formatTime(seconds)
elif self.type == self.IMM_TIME:
return "T#" + AwlDataType.formatTime(self.value / 1000.0)
elif self.type == self.IMM_DATE:
return "D#" #TODO
elif self.type == self.IMM_TOD:
return "TOD#" #TODO
elif self.type == self.IMM_PTR:
return self.value.toPointerString()
elif self.type == self.IMM_STR:
strLen = self.value[1]
import awlsim.core.parser as parser
return "'" + self.value[2:2+strLen].decode(parser.AwlParser.TEXT_ENCODING) + "'"
elif self.type in (self.MEM_A, self.MEM_E,
self.MEM_M, self.MEM_L, self.MEM_VL):
pfx = self.type2str[self.type]
if self.width == 1:
return "%s %d.%d" %\
(pfx, self.value.byteOffset, self.value.bitOffset)
elif self.width == 8:
return "%sB %d" % (pfx, self.value.byteOffset)
elif self.width == 16:
return "%sW %d" % (pfx, self.value.byteOffset)
elif self.width == 32:
return "%sD %d" % (pfx, self.value.byteOffset)
return self.makeANYPointer().toPointerString()
elif self.type == self.MEM_DB:
if self.value.dbNumber is None:
dbPrefix = ""
else:
dbPrefix = "DB%d." % self.value.dbNumber
if self.width == 1:
return "%sDBX %d.%d" % (dbPrefix,
self.value.byteOffset,
self.value.bitOffset)
elif self.width == 8:
return "%sDBB %d" % (dbPrefix, self.value.byteOffset)
elif self.width == 16:
return "%sDBW %d" % (dbPrefix, self.value.byteOffset)
elif self.width == 32:
return "%sDBD %d" % (dbPrefix, self.value.byteOffset)
return self.makeANYPointer().toPointerString()
elif self.type == self.MEM_DI:
if self.width == 1:
return "DIX %d.%d" % (self.value.byteOffset, self.value.bitOffset)
elif self.width == 8:
return "DIB %d" % self.value.byteOffset
elif self.width == 16:
return "DIW %d" % self.value.byteOffset
elif self.width == 32:
return "DID %d" % self.value.byteOffset
return self.makeANYPointer().toPointerString()
elif self.type == self.MEM_T:
return "T %d" % self.value.byteOffset
elif self.type == self.MEM_Z:
return "Z %d" % self.value.byteOffset
elif self.type == self.MEM_PA:
if self.width == 8:
return "PAB %d" % self.value.byteOffset
elif self.width == 16:
return "PAW %d" % self.value.byteOffset
elif self.width == 32:
return "PAD %d" % self.value.byteOffset
return self.makeANYPointer().toPointerString()
elif self.type == self.MEM_PE:
if self.width == 8:
return "PEB %d" % self.value.byteOffset
elif self.width == 16:
return "PEW %d" % self.value.byteOffset
elif self.width == 32:
return "PED %d" % self.value.byteOffset
return self.makeANYPointer().toPointerString()
elif self.type == self.MEM_STW:
return "__STW " + S7StatusWord.nr2name_german[self.value.bitOffset]
elif self.type == self.LBL_REF:
return self.value
elif self.type == self.BLKREF_FC:
return "FC %d" % self.value.byteOffset
elif self.type == self.BLKREF_SFC:
return "SFC %d" % self.value.byteOffset
elif self.type == self.BLKREF_FB:
return "FB %d" % self.value.byteOffset
elif self.type == self.BLKREF_SFB:
return "SFB %d" % self.value.byteOffset
elif self.type == self.BLKREF_UDT:
return "UDT %d" % self.value.byteOffset
elif self.type == self.BLKREF_DB:
return "DB %d" % self.value.byteOffset
elif self.type == self.BLKREF_DI:
return "DI %d" % self.value.byteOffset
elif self.type == self.BLKREF_OB:
return "OB %d" % self.value.byteOffset
elif self.type == self.BLKREF_VAT:
return "VAT %d" % self.value.byteOffset
elif self.type == self.MULTI_FB:
return "#FB<" + self.makeANYPointer(AwlIndirectOp.AREA_DI).toPointerString() + ">"
elif self.type == self.MULTI_SFB:
return "#SFB<" + self.makeANYPointer(AwlIndirectOp.AREA_DI).toPointerString() + ">"
elif self.type == self.SYMBOLIC:
return '"%s"' % self.value.identChain.getString()
elif self.type == self.NAMED_LOCAL:
return "#" + self.value.identChain.getString()
elif self.type == self.NAMED_LOCAL_PTR:
return "P##" + self.value.identChain.getString()
elif self.type == self.NAMED_DBVAR:
return str(self.value) # value is AwlOffset
elif self.type == self.INDIRECT:
assert(0) # Overloaded in AwlIndirectOp
elif self.type == self.VIRT_ACCU:
return "__ACCU %d" % self.value.byteOffset
elif self.type == self.VIRT_AR:
return "__AR %d" % self.value.byteOffset
elif self.type == self.VIRT_DBR:
return "__DBR %d" % self.value.byteOffset
elif self.type == self.UNSPEC:
return "__UNSPEC"
try:
return self.type2str[self.type]
except KeyError:
assert(0)
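# Illustrative sketch (added note, not part of the original awlsim source):
# a direct operator is built from a type constant, a bit width and a value,
# e.g. roughly
#   AwlOperator(type=AwlOperator.IMM, width=16, value=42)
# for a 16 bit immediate, while memory operators carry an AwlOffset as value
# and are rendered by __repr__ as AWL text such as "MW 10" or "DB1.DBX 0.3".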
class AwlIndirectOp(AwlOperator):
"Indirect addressing operand"
# Address register
AR_NONE = 0 # No address register
AR_1 = 1 # Use AR1
AR_2 = 2 # Use AR2
# Pointer area constants
AREA_MASK = Pointer.AREA_MASK_S
EXT_AREA_MASK = Pointer.AREA_MASK_S | 0xFF00000000
# Pointer area encodings
AREA_NONE = 0
AREA_P = Pointer.AREA_P << Pointer.AREA_SHIFT
AREA_E = Pointer.AREA_E << Pointer.AREA_SHIFT
AREA_A = Pointer.AREA_A << Pointer.AREA_SHIFT
AREA_M = Pointer.AREA_M << Pointer.AREA_SHIFT
AREA_DB = Pointer.AREA_DB << Pointer.AREA_SHIFT
AREA_DI = Pointer.AREA_DI << Pointer.AREA_SHIFT
AREA_L = Pointer.AREA_L << Pointer.AREA_SHIFT
AREA_VL = Pointer.AREA_VL << Pointer.AREA_SHIFT
# Extended area encodings. Only used for internal purposes.
# These are not used in the interpreted AWL code.
EXT_AREA_T = 0x01FF000000 # Timer
EXT_AREA_Z = 0x02FF000000 # Counter
EXT_AREA_BLKREF_DB = 0x03FF000000 # DB block reference
EXT_AREA_BLKREF_DI = 0x04FF000000 # DI block reference
EXT_AREA_BLKREF_FB = 0x05FF000000 # FB block reference
EXT_AREA_BLKREF_FC = 0x06FF000000 # FC block reference
# Map for converting area code to operator type for fetch operations
area2optype_fetch = {
AREA_P : AwlOperator.MEM_PE,
AREA_E : AwlOperator.MEM_E,
AREA_A : AwlOperator.MEM_A,
AREA_M : AwlOperator.MEM_M,
AREA_DB : AwlOperator.MEM_DB,
AREA_DI : AwlOperator.MEM_DI,
AREA_L : AwlOperator.MEM_L,
AREA_VL : AwlOperator.MEM_VL,
EXT_AREA_T : AwlOperator.MEM_T,
EXT_AREA_Z : AwlOperator.MEM_Z,
EXT_AREA_BLKREF_DB : AwlOperator.BLKREF_DB,
EXT_AREA_BLKREF_DI : AwlOperator.BLKREF_DI,
EXT_AREA_BLKREF_FB : AwlOperator.BLKREF_FB,
EXT_AREA_BLKREF_FC : AwlOperator.BLKREF_FC,
}
# Map for converting area code to operator type for store operations
area2optype_store = area2optype_fetch.copy()
area2optype_store[AREA_P] = AwlOperator.MEM_PA
# Map for converting operator type to area code
optype2area = pivotDict(area2optype_fetch)
optype2area[AwlOperator.MEM_PA] = AREA_P
optype2area[AwlOperator.MULTI_FB] = AREA_DI
optype2area[AwlOperator.MULTI_SFB] = AREA_DI
optype2area[AwlOperator.NAMED_DBVAR] = AREA_DB
optype2area[AwlOperator.UNSPEC] = AREA_NONE
def __init__(self, area, width, addressRegister, offsetOper, insn=None):
# area -> The area code for this indirect operation.
# AREA_... or EXT_AREA_...
# This corresponds to the area code in AWL pointer format.
# width -> The width (in bits) of the region that is being adressed.
# addressRegister -> One of:
# AR_NONE => This is a memory-indirect access.
# AR_1 => This is a register-indirect access with AR1.
# AR_2 => This is a register-indirect access with AR2.
# offsetOper -> This is the AwlOperator for the offset.
# For memory-indirect access, this must be an AwlOperator
# with "type in __possibleOffsetOperTypes".
# For register-indirect access, this must be an AwlOperator
# with "type==IMM_PTR".
# insn -> The instruction this operator is used in. May be None.
AwlOperator.__init__(self,
type = AwlOperator.INDIRECT,
width = width,
value = None,
insn = insn)
self.area, self.addressRegister, self.offsetOper =\
area, addressRegister, offsetOper
# Make a deep copy, except for "insn".
def dup(self):
return AwlIndirectOp(area = self.area,
width = self.width,
addressRegister = self.addressRegister,
offsetOper = self.offsetOper.dup(),
insn = self.insn)
def setInsn(self, newInsn):
AwlOperator.setInsn(self, newInsn)
self.offsetOper.setInsn(newInsn)
def assertType(self, types, lowerLimit=None, upperLimit=None):
types = toSet(types)
if not self.area2optype_fetch[self.area] in types and\
not self.area2optype_store[self.area] in types:
self._raiseTypeError(self.area2optype_fetch[self.area], types)
assert(lowerLimit is None)
assert(upperLimit is None)
# Possible offset oper types for indirect access
__possibleOffsetOperTypes = (AwlOperator.MEM_M,
AwlOperator.MEM_L,
AwlOperator.MEM_DB,
AwlOperator.MEM_DI)
# Resolve this indirect operator to a direct operator.
def resolve(self, store=True):
bitwiseDirectOffset = True
offsetOper = self.offsetOper
# Construct the pointer
if self.addressRegister == AwlIndirectOp.AR_NONE:
# Memory-indirect access
if self.area == AwlIndirectOp.AREA_NONE:
raise AwlSimError("Area-spanning access not "
"possible in indirect access without "
"address register.")
if self.area > AwlIndirectOp.AREA_MASK:
# Is extended area
possibleWidths = {8, 16, 32}
bitwiseDirectOffset = False
else:
# Is standard area
possibleWidths = {32,}
if offsetOper.type not in self.__possibleOffsetOperTypes:
raise AwlSimError("Offset operator in indirect "
"access is not a valid memory offset.")
if offsetOper.width not in possibleWidths:
raise AwlSimError("Offset operator in indirect "
"access is not of %s bit width." %\
listToHumanStr(possibleWidths))
offsetValue = self.insn.cpu.fetch(offsetOper)
pointer = (self.area | (offsetValue & 0x0007FFFF))
else:
# Register-indirect access
if offsetOper.type != AwlOperator.IMM_PTR:
raise AwlSimError("Offset operator in "
"register-indirect access is not a "
"pointer immediate.")
offsetValue = self.insn.cpu.fetch(offsetOper) & 0x0007FFFF
if self.area == AwlIndirectOp.AREA_NONE:
# Area-spanning access
pointer = (self.insn.cpu.getAR(self.addressRegister).get() +\
offsetValue) & 0xFFFFFFFF
else:
# Area-internal access
pointer = ((self.insn.cpu.getAR(self.addressRegister).get() +
offsetValue) & 0x0007FFFF) |\
self.area
# Create a direct operator
try:
if store:
optype = AwlIndirectOp.area2optype_store[
pointer & AwlIndirectOp.EXT_AREA_MASK]
else:
optype = AwlIndirectOp.area2optype_fetch[
pointer & AwlIndirectOp.EXT_AREA_MASK]
except KeyError:
raise AwlSimError("Invalid area code (%X hex) in indirect addressing" %\
((pointer & AwlIndirectOp.EXT_AREA_MASK) >>\
Pointer.AREA_SHIFT))
if bitwiseDirectOffset:
# 'pointer' has pointer format
directOffset = AwlOffset.fromPointerValue(pointer)
else:
# 'pointer' is a byte offset
directOffset = AwlOffset(pointer & 0x0000FFFF)
if self.width != 1 and directOffset.bitOffset:
raise AwlSimError("Bit offset (lowest three bits) in %d-bit "
"indirect addressing is not zero. "
"(Computed offset is: %s)" %\
(self.width, str(directOffset)))
return AwlOperator(optype, self.width, directOffset, self.insn)
def __pointerError(self):
# This is a programming error.
# The caller should resolve() the operator first.
raise AwlSimBug("Can not transform indirect operator "
"into a pointer. Resolve it first.")
def makePointer(self):
self.__pointerError()
def makePointerValue(self):
self.__pointerError()
def makeDBPointer(self):
self.__pointerError()
def makeANYPointer(self, areaShifted=None):
self.__pointerError()
def __repr__(self):
return "__INDIRECT" #TODO
| gpl-2.0 | -8,560,336,037,900,516,000 | 33.210826 | 86 | 0.677798 | false |
gramps-project/gramps | gramps/gen/plug/docbackend/__init__.py | 10 | 1185 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 B. Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
The docbackend package manages the specific files that an implementation of the
docgen API writes to. It provides common functionality and translates between
gen data intended for output (e.g. markup in gen/lib) and the output format where needed.
"""
from .docbackend import DocBackendError, DocBackend
from .cairobackend import CairoBackend
#__all__ = [ DocBackend, CairoBackend, LaTeXBackend ]
| gpl-2.0 | -5,276,227,395,151,894,000 | 38.5 | 79 | 0.770464 | false |
CompPhysics/MachineLearning | doc/Programs/ProjectsData/Project2_1.py | 1 | 9043 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 10:00:03 2020
@author: Bharat Mishra, Patricia Perez-Martin, Pinelopi Christodoulou
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
# close all previous images
plt.close('all')
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
# ensure the same random numbers appear every time
np.random.seed(0)
# download breast cancer dataset
cancer = load_breast_cancer()
# define inputs and labels
inputs = cancer.data
outputs = cancer.target #Malignant or benign
labels = cancer.feature_names[0:30]
print('The content of the breast cancer dataset is:')
print('-------------------------')
print("inputs = " + str(inputs.shape))
print("outputs = " + str(outputs.shape))
print("labels = "+ str(labels.shape))
n_inputs = len(inputs)
#%% VISUALIZATION
X = inputs
y = outputs
plt.figure()
plt.scatter(X[:,0], X[:,2], s=40, c=y, cmap=plt.cm.Spectral)
plt.xlabel('Mean radius')
plt.ylabel('Mean perimeter')
plt.show()
plt.figure()
plt.scatter(X[:,5], X[:,6], s=40, c=y, cmap=plt.cm.Spectral)
plt.xlabel('Mean compactness')
plt.ylabel('Mean concavity')
plt.show()
plt.figure()
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
plt.xlabel('Mean radius')
plt.ylabel('Mean texture')
plt.show()
plt.figure()
plt.scatter(X[:,2], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
plt.xlabel('Mean perimeter')
plt.ylabel('Mean texture')
plt.show()
# %% COVARIANCE AND CORRELATION
import pandas as pd
import seaborn as sns
# Making a data frame
meanpd = pd.DataFrame(X[:,0:10],columns=labels[0:10])
corr = meanpd.corr().round(1) # Compute pairwise correlation of columns, excluding NA/null values.
# use the heatmap function from seaborn to plot the correlation matrix
plt.figure()
sns.heatmap(corr, cbar = True, annot=False,
xticklabels= labels[0:10], yticklabels= labels[0:10],
cmap= 'YlOrRd')
X_t = X[ : , 1:3]
clf = LogisticRegression()
clf.fit(X_t, y)
# Set min and max values and give it some padding
x_min, x_max = X_t[:, 1].min() - .5, X_t[:, 1].max() + .5
y_min, y_max = X_t[:, 0].min() - .5, X_t[:, 0].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid.
# (clf was fitted on X_t = X[:, 1:3] = (texture, perimeter); xx spans the
# perimeter axis and yy the texture axis, so pass the columns in the same
# order as during fitting.)
Z = clf.predict(np.c_[yy.ravel(), xx.ravel()])
#Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 2], X[:, 1], c=y, cmap=plt.cm.Spectral)
plt.xlabel('Mean perimeter')
plt.ylabel('Mean texture')
plt.title('Logistic Regression')
plt.show()
# %% TRAIN AND TEST DATASET
# Set up training data: from scikit-learn library
train_size = 0.9
test_size = 1 - train_size
X_train, X_test, y_train, y_test = train_test_split(inputs, outputs, train_size=train_size,
test_size=test_size)
# %% LOGISTIC REGRESSION and ACCURACY
print('----------------------')
print('LOGISTIC REGRESSION')
print('----------------------')
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Train set accuracy with Logistic Regression:: {:.2f}".format(logreg.score(X_train,y_train)))
print("Test set accuracy with Logistic Regression:: {:.2f}".format(logreg.score(X_test,y_test)))
# Scale data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
logreg.fit(X_train_scaled, y_train)
print("Train set accuracy Logistic Regression scaled data: {:.2f}".format(logreg.score(X_train_scaled,y_train)))
print("Test set accuracy scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
# %% CROSS VALIDATION FROM SCIKIT-LEARN
from sklearn.linear_model import LogisticRegressionCV
print('----------------------')
print('LOGISTIC REGRESSION with CROSS VALIDATION 5-KFold')
print('----------------------')
logreg = LogisticRegressionCV()
logreg.fit(X_train, y_train)
print("Train set accuracy with Logistic Regression, CV:: {:.2f}".format(logreg.score(X_train,y_train)))
print("Test set accuracy with Logistic Regression, CV:: {:.2f}".format(logreg.score(X_test,y_test)))
# Scale data
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
logreg.fit(X_train_scaled, y_train)
print("Train set accuracy Logistic Regression scaled data: {:.2f}".format(logreg.score(X_train_scaled,y_train)))
print("Test set accuracy scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
# %% CROSS VALIDATION: OUR OWN CODE
"""Implement cross-validation framework (only on liblinear solver)"""
#Initiate k-fold instance for implementing manual cross-validation using KFold
import seaborn as sns
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression as LogReg
import os
"""Generate training and testing datasets"""
x=inputs
#Select features relevant to classification (texture, perimeter, compactness and symmetry)
#and add to input matrix
temp1=np.reshape(inputs[:,1],(len(inputs[:,1]),1))
temp2=np.reshape(inputs[:,2],(len(inputs[:,2]),1))
X=np.hstack((temp1,temp2))
temp=np.reshape(inputs[:,5],(len(inputs[:,5]),1))
X=np.hstack((X,temp))
temp=np.reshape(inputs[:,8],(len(inputs[:,8]),1))
X=np.hstack((X,temp))
lamda=np.logspace(-5,5,11) #Define array of hyperparameters
"""Implement K-fold cross-validation"""
k=5
kfold=KFold(n_splits=k)
train_scores=np.zeros((len(lamda),k))
test_scores=np.zeros((len(lamda),k))
for i in range(len(lamda)):
j=0
for train_inds,test_inds in kfold.split(X):
X_train=X[train_inds]
y_train=y[train_inds]
X_test=X[test_inds]
y_test=y[test_inds]
clf=LogReg(C=1.0/lamda[i],random_state=1,verbose=0,max_iter=1E+3,tol=1E-5,solver='liblinear')
clf.fit(X_train,y_train)
train_scores[i,j]=clf.score(X_train,y_train)
test_scores[i,j]=clf.score(X_test,y_test)
j+=1
train_accuracy_cv_kfold=np.mean(train_scores,axis=1)
test_accuracy_cv_kfold=np.mean(test_scores,axis=1)
"""Plot results after K-fold cross validation"""
plt.figure()
plt.semilogx(lamda,train_accuracy_cv_kfold,'*-b',label='Training')
plt.semilogx(lamda,test_accuracy_cv_kfold,'*-r',label='Test')
plt.legend()
plt.xlabel('Hyperparameters')
plt.ylabel('Accuracy')
plt.title('Accuracy LogReg (5 k-Fold CV)')
plt.show()
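# Illustrative follow-up (not part of the original script): report the
# hyperparameter with the best mean test accuracy from the 5-fold run above.
best_lamda = lamda[np.argmax(test_accuracy_cv_kfold)]
print("Best lambda from 5-fold CV: {:.1e}".format(best_lamda))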
# %% DECISION TREES: CLASSIFICATION and ACCURACY
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# Create the encoder.
encoder = OneHotEncoder(handle_unknown="ignore")
# Assume for simplicity all features are categorical.
encoder.fit(X)
# Apply the encoder. (Note: the encoded X is not used below; the decision tree
# and random forest are trained on the X_train/X_test split computed earlier.)
X = encoder.transform(X)
# Classification tree: with and without scaling (sc)
DEPTH=np.arange(start=1,stop=11,step=1)
test_acc = np.zeros(len(DEPTH))
test_acc_sc = np.zeros(len(DEPTH))
for i in DEPTH:
tree_clf = DecisionTreeClassifier(max_depth= i)
tree_clf.fit(X_train, y_train)
test_acc[i-1] = tree_clf.score(X_test,y_test)
print("Decision Tree (No Max depth): {:.2f}".format(DEPTH[i-1]))
print(" Test accuracy: {:.2f}".format(test_acc[i-1]))
export_graphviz(
tree_clf,
out_file="ride.dot",
rounded=True,
filled=True
)
cmd = 'dot -Tpng ride.dot -o DecisionTree_max_depth_{:.2f}.png'.format(DEPTH[i-1])
os.system(cmd)
#PLOT TEST ACCURACY
fig,p1=plt.subplots()
p1.plot(DEPTH, test_acc, label='Test accuracy')
p1.set_xlabel('Max_depth in Decision Tree')
p1.set_ylabel('Accuracy')
p1.set_title("Decision Tree Test Accuracy", fontsize=18)
p1.legend()
tree_clf = DecisionTreeClassifier(max_depth=None)
tree_clf.fit(X_train, y_train)
print("Test set accuracy with Decision Tree (No Max depth): {:.2f}".format(tree_clf.score(X_test,y_test)))
# %% RANDOM FOREST and ACCURACY
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
print('RANDOM FOREST')
model=RandomForestClassifier(n_estimators= 100)# a simple random forest model
model.fit(X_train,y_train)# now fit our model for training data
y_pred = model.predict(X_test)# predict for the test data
RFtest_acc = metrics.accuracy_score(y_pred,y_test) # to check the accuracy
print("Test set accuracy with RANDOM FOREST: {:.2f}".format(RFtest_acc))
| cc0-1.0 | 5,585,809,186,858,056,000 | 28.143333 | 112 | 0.665266 | false |
bearstech/modoboa | modoboa/admin/forms/account.py | 1 | 21130 | """Forms related to accounts management."""
from __future__ import unicode_literals
from collections import OrderedDict
from functools import reduce
from django import forms
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.utils.translation import ugettext as _, ugettext_lazy
from passwords.fields import PasswordField
from modoboa.core import signals as core_signals
from modoboa.core.models import User
from modoboa.lib.email_utils import split_mailbox
from modoboa.lib import exceptions as lib_exceptions
from modoboa.lib import fields as lib_fields
from modoboa.lib.form_utils import (
DynamicForm, TabForms, WizardForm, WizardStep
)
from modoboa.lib.permissions import get_account_roles
from modoboa.lib.validators import validate_utf8_email
from modoboa.lib.web_utils import render_to_json_response
from modoboa.parameters import tools as param_tools
from .. import models
from .. import signals
class AccountFormGeneral(forms.ModelForm):
"""General account form."""
username = forms.CharField(
label=ugettext_lazy("Username"),
help_text=ugettext_lazy(
"The user's name. Must be a valid e-mail address for simple users "
"or administrators with a mailbox."
)
)
role = forms.ChoiceField(
label=ugettext_lazy("Role"),
choices=[("", ugettext_lazy("Choose"))],
help_text=ugettext_lazy("What level of permission this user will have")
)
password1 = PasswordField(
label=ugettext_lazy("Password"), widget=forms.widgets.PasswordInput
)
password2 = PasswordField(
label=ugettext_lazy("Confirmation"),
widget=forms.widgets.PasswordInput,
help_text=ugettext_lazy(
"Enter the same password as above, for verification."
)
)
class Meta:
model = User
fields = (
"username", "first_name", "last_name", "role", "is_active",
"master_user"
)
labels = {
"is_active": ugettext_lazy("Enabled")
}
def __init__(self, user, *args, **kwargs):
super(AccountFormGeneral, self).__init__(*args, **kwargs)
self.fields = OrderedDict(
(key, self.fields[key]) for key in
["role", "username", "first_name", "last_name", "password1",
"password2", "master_user", "is_active"]
)
self.user = user
condition = (
user.role == "DomainAdmins" or
user.role == "Resellers" and self.instance == user
)
if condition:
self.fields["role"] = forms.CharField(
label="",
widget=forms.HiddenInput(attrs={"class": "form-control"}),
required=False
)
else:
self.fields["role"].choices += (
get_account_roles(user, self.instance)
if self.instance.pk else get_account_roles(user)
)
if not user.is_superuser:
del self.fields["master_user"]
if not self.instance.pk:
return
domain_disabled = (
hasattr(self.instance, "mailbox") and
not self.instance.mailbox.domain.enabled
)
if domain_disabled:
self.fields["is_active"].widget.attrs["disabled"] = "disabled"
if args:
empty_password = (
args[0].get("password1", "") == "" and
args[0].get("password2", "") == "")
if empty_password:
self.fields["password1"].required = False
self.fields["password2"].required = False
if domain_disabled:
del self.fields["is_active"]
self.fields["role"].initial = self.instance.role
condition = (
not self.instance.is_local and
param_tools.get_global_parameter(
"ldap_auth_method", app="core") == "directbind")
if condition:
del self.fields["password1"]
del self.fields["password2"]
def domain_is_disabled(self):
"""Little shortcut to get the domain's state.
We need this information inside a template and the form is the
only object available...
"""
if not hasattr(self.instance, "mailbox"):
return False
return not self.instance.mailbox.domain.enabled
def clean_role(self):
if self.user.role == "DomainAdmins":
if self.instance == self.user:
return "DomainAdmins"
return "SimpleUsers"
elif self.user.role == "Resellers" and self.instance == self.user:
return "Resellers"
return self.cleaned_data["role"]
def clean_username(self):
"""username must be a valid email address for simple users."""
if "role" not in self.cleaned_data:
return self.cleaned_data["username"]
if self.cleaned_data["role"] != "SimpleUsers":
return self.cleaned_data["username"]
uname = self.cleaned_data["username"].lower()
validate_utf8_email(uname)
return uname
def clean_password2(self):
password1 = self.cleaned_data.get("password1", "")
password2 = self.cleaned_data["password2"]
if password1 != password2:
raise forms.ValidationError(
_("The two password fields didn't match."))
return password2
def clean(self):
"""Check master user mode."""
super(AccountFormGeneral, self).clean()
if self.errors:
return self.cleaned_data
condition = (
self.cleaned_data.get("master_user") and
self.cleaned_data["role"] != "SuperAdmins"
)
if condition:
self.add_error(
"master_user",
_("Only super administrators are allowed for this mode")
)
return self.cleaned_data
def save(self, commit=True):
account = super(AccountFormGeneral, self).save(commit=False)
if self.user == account and not self.cleaned_data["is_active"]:
raise lib_exceptions.PermDeniedException(
_("You can't disable your own account"))
if commit:
if "password1" in self.cleaned_data \
and self.cleaned_data["password1"] != "":
account.set_password(self.cleaned_data["password1"])
account.save()
account.role = self.cleaned_data["role"]
return account
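# Descriptive note (added for clarity): AccountFormGeneral pins the role when a
# domain administrator or reseller edits their own account, enforces that
# SimpleUsers use a valid e-mail address as username, and only requires the two
# password fields when at least one of them is filled in on an existing account.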
class AccountFormMail(forms.Form, DynamicForm):
"""Form to handle mail part."""
email = lib_fields.UTF8EmailField(
label=ugettext_lazy("E-mail"), required=False)
quota = forms.IntegerField(
label=ugettext_lazy("Quota"),
required=False,
help_text=_("Quota in MB for this mailbox. Define a custom value or "
"use domain's default one. Leave empty to define an "
"unlimited value (not allowed for domain "
"administrators)."),
widget=forms.widgets.TextInput(attrs={"class": "form-control"})
)
quota_act = forms.BooleanField(required=False)
aliases = lib_fields.UTF8AndEmptyUserEmailField(
label=ugettext_lazy("Alias(es)"),
required=False,
help_text=ugettext_lazy(
"Alias(es) of this mailbox. Indicate only one address per input, "
"press ENTER to add a new input. To create a catchall alias, just "
"enter the domain name (@domain.tld)."
)
)
senderaddress = lib_fields.UTF8AndEmptyUserEmailField(
label=ugettext_lazy("Sender addresses"),
required=False,
help_text=ugettext_lazy(
"Additional sender address(es) for this account. The user will be "
"allowed to send emails using this address, even if it "
"does not exist locally. Indicate one address per input. Press "
"ENTER to add a new input."
)
)
def __init__(self, user, *args, **kwargs):
self.mb = kwargs.pop("instance", None)
self.user = user
super(AccountFormMail, self).__init__(*args, **kwargs)
self.field_widths = {
"quota": 3
}
if self.mb is not None:
self.fields["email"].required = True
qset = self.mb.aliasrecipient_set.filter(alias__internal=False)
for cpt, ralias in enumerate(qset):
name = "aliases_{}".format(cpt + 1)
self._create_field(
lib_fields.UTF8AndEmptyUserEmailField, name,
ralias.alias.address)
for cpt, saddress in enumerate(self.mb.senderaddress_set.all()):
name = "senderaddress_{}".format(cpt + 1)
self._create_field(
lib_fields.UTF8AndEmptyUserEmailField, name,
saddress.address)
self.fields["email"].initial = self.mb.full_address
self.fields["quota_act"].initial = self.mb.use_domain_quota
if not self.mb.use_domain_quota and self.mb.quota:
self.fields["quota"].initial = self.mb.quota
else:
self.fields["quota_act"].initial = True
if len(args) and isinstance(args[0], QueryDict):
self._load_from_qdict(
args[0], "aliases", lib_fields.UTF8AndEmptyUserEmailField)
self._load_from_qdict(
args[0], "senderaddress",
lib_fields.UTF8AndEmptyUserEmailField)
def clean_email(self):
"""Ensure lower case emails"""
email = self.cleaned_data["email"].lower()
self.locpart, domname = split_mailbox(email)
if not domname:
return email
try:
self.domain = models.Domain.objects.get(name=domname)
except models.Domain.DoesNotExist:
raise forms.ValidationError(_("Domain does not exist"))
if not self.mb:
try:
core_signals.can_create_object.send(
sender=self.__class__, context=self.domain,
object_type="mailboxes")
except lib_exceptions.ModoboaException as inst:
raise forms.ValidationError(inst)
return email
def clean(self):
"""Custom fields validation.
Check if quota is >= 0 only when the domain value is not used.
"""
cleaned_data = super(AccountFormMail, self).clean()
use_default_domain_quota = cleaned_data["quota_act"]
condition = (
not use_default_domain_quota and
cleaned_data["quota"] is not None and
cleaned_data["quota"] < 0)
if condition:
self.add_error("quota", _("Must be a positive integer"))
self.aliases = []
self.sender_addresses = []
for name, value in list(cleaned_data.items()):
if value == "":
continue
if name.startswith("aliases"):
local_part, domname = split_mailbox(value)
domain = models.Domain.objects.filter(name=domname).first()
if not domain:
self.add_error(name, _("Local domain does not exist"))
continue
if not self.user.can_access(domain):
self.add_error(
name, _("You don't have access to this domain"))
continue
self.aliases.append(value.lower())
elif name.startswith("senderaddress"):
local_part, domname = split_mailbox(value)
domain = models.Domain.objects.filter(name=domname).first()
if domain and not self.user.can_access(domain):
self.add_error(
name, _("You don't have access to this domain"))
continue
self.sender_addresses.append(value.lower())
return cleaned_data
def create_mailbox(self, user, account):
"""Create a mailbox associated to :kw:`account`."""
if not user.can_access(self.domain):
raise lib_exceptions.PermDeniedException
core_signals.can_create_object.send(
self.__class__, context=user, klass=models.Mailbox)
self.mb = models.Mailbox(
address=self.locpart, domain=self.domain, user=account,
use_domain_quota=self.cleaned_data["quota_act"])
self.mb.set_quota(self.cleaned_data["quota"],
user.has_perm("admin.add_domain"))
self.mb.save(creator=user)
def _update_aliases(self, user, account):
"""Update mailbox aliases."""
qset = self.mb.aliasrecipient_set.select_related("alias").filter(
alias__internal=False)
for ralias in qset:
if ralias.alias.address not in self.aliases:
alias = ralias.alias
ralias.delete()
if alias.recipients_count > 0:
continue
alias.delete()
else:
self.aliases.remove(ralias.alias.address)
if not self.aliases:
return
core_signals.can_create_object.send(
self.__class__, context=user, klass=models.Alias,
count=len(self.aliases))
core_signals.can_create_object.send(
self.__class__, context=self.mb.domain,
object_type="mailbox_aliases", count=len(self.aliases))
for alias in self.aliases:
if self.mb.aliasrecipient_set.select_related("alias").filter(
alias__address=alias).exists():
continue
local_part, domname = split_mailbox(alias)
al = models.Alias(address=alias, enabled=account.is_active)
al.domain = models.Domain.objects.get(name=domname)
al.save()
al.set_recipients([self.mb.full_address])
al.post_create(user)
def _update_sender_addresses(self):
"""Update mailbox sender addresses."""
for saddress in self.mb.senderaddress_set.all():
if saddress.address not in self.sender_addresses:
saddress.delete()
else:
self.sender_addresses.remove(saddress.address)
if not len(self.sender_addresses):
return
to_create = []
for saddress in self.sender_addresses:
to_create.append(
models.SenderAddress(address=saddress, mailbox=self.mb))
models.SenderAddress.objects.bulk_create(to_create)
def save(self, user, account):
"""Save or update account mailbox."""
if self.cleaned_data["email"] == "":
return None
if self.cleaned_data["quota_act"]:
self.cleaned_data["quota"] = None
if not hasattr(self, "mb") or self.mb is None:
self.create_mailbox(user, account)
else:
self.cleaned_data["use_domain_quota"] = (
self.cleaned_data["quota_act"])
self.mb.update_from_dict(user, self.cleaned_data)
account.email = self.cleaned_data["email"]
account.save()
self._update_aliases(user, account)
self._update_sender_addresses()
return self.mb
class AccountPermissionsForm(forms.Form, DynamicForm):
"""A form to assign domain(s) permission."""
domains = lib_fields.DomainNameField(
label=ugettext_lazy("Domain(s)"),
required=False,
help_text=ugettext_lazy("Domain(s) that user administrates")
)
def __init__(self, *args, **kwargs):
if "instance" in kwargs:
self.account = kwargs["instance"]
del kwargs["instance"]
super(AccountPermissionsForm, self).__init__(*args, **kwargs)
if not hasattr(self, "account") or self.account is None:
return
qset = models.Domain.objects.get_for_admin(self.account)
for pos, dom in enumerate(qset):
name = "domains_%d" % (pos + 1)
self._create_field(lib_fields.DomainNameField, name, dom.name)
if len(args) and isinstance(args[0], QueryDict):
self._load_from_qdict(
args[0], "domains", lib_fields.DomainNameField)
def save(self):
current_domains = [
dom.name for dom in
models.Domain.objects.get_for_admin(self.account)
]
for name, value in self.cleaned_data.items():
if not name.startswith("domains"):
continue
if value in ["", None]:
continue
if value not in current_domains:
domain = models.Domain.objects.get(name=value)
domain.add_admin(self.account)
for domain in models.Domain.objects.get_for_admin(self.account):
            if not any(self.cleaned_data[name] == domain.name
                       for name in self.cleaned_data):
domain.remove_admin(self.account)
class AccountForm(TabForms):
"""Account edition form."""
def __init__(self, request, *args, **kwargs):
self.user = request.user
self.forms = [
{"id": "general", "title": _("General"),
"formtpl": "admin/account_general_form.html",
"cls": AccountFormGeneral,
"new_args": [self.user], "mandatory": True},
{"id": "mail",
"title": _("Mail"), "formtpl": "admin/mailform.html",
"cls": AccountFormMail,
"new_args": [self.user]},
{"id": "perms", "title": _("Permissions"),
"formtpl": "admin/permsform.html",
"cls": AccountPermissionsForm}
]
cbargs = {"user": self.user}
if "instances" in kwargs:
cbargs["account"] = kwargs["instances"]["general"]
results = signals.extra_account_forms.send(
sender=self.__class__, **cbargs)
self.forms += reduce(
lambda a, b: a + b, [result[1] for result in results])
super(AccountForm, self).__init__(request, *args, **kwargs)
def extra_context(self, context):
account = self.instances["general"]
context.update({
'title': account.username,
'formid': 'accountform',
'action': reverse("admin:account_change",
args=[account.id]),
})
def check_perms(self, account):
"""Check if perms form must displayed or not."""
return (
self.user.is_superuser and
not account.is_superuser and
account.has_perm("core.add_user")
)
def _before_is_valid(self, form):
if form["id"] == "general":
return True
if hasattr(self, "check_%s" % form["id"]):
if not getattr(self, "check_%s" % form["id"])(self.account):
return False
return True
results = signals.check_extra_account_form.send(
sender=self.__class__, account=self.account, form=form)
results = [result[1] for result in results]
if False in results:
return False
return True
def is_valid(self):
"""Two steps validation."""
self.instances["general"].oldgroup = self.instances["general"].role
if super(AccountForm, self).is_valid(mandatory_only=True):
self.account = self.forms[0]["instance"].save()
return super(AccountForm, self).is_valid(optional_only=True)
return False
def save(self):
"""Custom save method
As forms interact with each other, it is simpler to make
custom code to save them.
"""
self.forms[1]["instance"].save(self.user, self.account)
if len(self.forms) <= 2:
return
for f in self.forms[2:]:
f["instance"].save()
def done(self):
return render_to_json_response(_("Account updated"))
class AccountWizard(WizardForm):
"""Account creation wizard."""
def __init__(self, request):
super(AccountWizard, self).__init__(request)
self.add_step(
WizardStep(
"general", AccountFormGeneral, _("General"),
new_args=[request.user]
)
)
self.add_step(
WizardStep(
"mail", AccountFormMail, _("Mail"),
"admin/mailform.html",
new_args=[request.user]
)
)
def extra_context(self, context):
context.update({
'title': _("New account"),
'action': reverse("admin:account_add"),
'formid': 'newaccount_form'
})
def done(self):
from modoboa.lib.web_utils import render_to_json_response
account = self.first_step.form.save()
account.post_create(self.request.user)
mailform = self.steps[1].form
mailform.save(self.request.user, account)
return render_to_json_response(_("Account created"))
| isc | -4,428,757,062,902,145,500 | 36.266314 | 79 | 0.567818 | false |
Frankkkkk/arctic | tests/unit/test_compression.py | 1 | 1778 | from mock import patch, Mock, sentinel, call
from arctic._compression import compress, compress_array, decompress, decompress_array, enable_parallel_lz4
def test_compress():
assert len(compress(b'foobar')) > 0
def test_compress_LZ4():
cfn = Mock()
with patch('arctic._compression.clz4.compress', cfn):
compress(b"foo")
assert cfn.call_count == 1
def test_compressarr():
assert len(compress_array([b"foobar"*10])) > 0
assert isinstance(compress_array([b"foobar"*10]), list)
def test_compress_array_usesLZ4():
cfn = Mock()
with patch('arctic._compression.clz4.compressarr', cfn):
compress_array([b"foo"] * 100)
assert cfn.call_count == 1
def test_compress_array_LZ4_sequential():
cfn = Mock()
with patch('arctic._compression.clz4.compress', cfn):
compress_array([b"foo"] * 49)
assert cfn.call_count == 49
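# Note (added for clarity): 49 items is presumably below the parallel
# threshold, so compress_array() falls back to per-element clz4.compress calls.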
def test_decompress():
assert decompress(compress(b"foo")) == b"foo"
def test_decompress_array():
ll = [('foo%s' % i).encode('ascii') for i in range(100)]
assert decompress_array(compress_array(ll)) == ll
def test_compression_equal_regardless_parallel_mode():
a = [b'spam '] * 666
with patch('arctic._compression.ENABLE_PARALLEL', True):
parallel = compress_array(a)
with patch('arctic._compression.ENABLE_PARALLEL', False):
serial = compress_array(a)
assert serial == parallel
def test_enable_parallel_lz4():
enable_parallel_lz4(True)
from arctic._compression import ENABLE_PARALLEL
assert(ENABLE_PARALLEL is True)
enable_parallel_lz4(False)
from arctic._compression import ENABLE_PARALLEL
assert(ENABLE_PARALLEL is False)
def test_compress_empty_string():
assert(decompress(compress(b'')) == b'')
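# Illustrative extra check (not part of the original suite), assuming the same
# compress/decompress helpers imported above: round-tripping a larger binary
# payload should also be lossless.
def test_compress_decompress_roundtrip_large():
    data = b'\x00\x01\x02spam' * 10000
    assert decompress(compress(data)) == data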
| lgpl-2.1 | -5,804,132,935,893,217,000 | 27.222222 | 107 | 0.669854 | false |
douban/dpark | docs/en/conf.py | 2 | 9255 | # -*- coding: utf-8 -*-
#
# DPark documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 25 17:28:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DPark'
copyright = u'2016, Davies Liu, Zhongbo Tian'
author = u'Davies Liu, Zhongbo Tian'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2.6'
# The full version, including alpha/beta/rc tags.
release = u'0.2.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DParkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DPark.tex', u'DPark Documentation',
u'Davies Liu, Zhongbo Tian', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dpark', u'DPark Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DPark', u'DPark Documentation',
author, 'DPark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause | 2,472,581,576,041,837,600 | 31.024221 | 79 | 0.706105 | false |
ashang/calibre | src/calibre/gui2/convert/comic_input.py | 14 | 1289 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.convert.comic_input_ui import Ui_Form
from calibre.gui2.convert import Widget
class PluginWidget(Widget, Ui_Form):
TITLE = _('Comic Input')
HELP = _('Options specific to')+' comic '+_('input')
COMMIT_NAME = 'comic_input'
ICON = I('mimetypes/png.png')
def __init__(self, parent, get_option, get_help, db=None, book_id=None):
Widget.__init__(self, parent,
['colors', 'dont_normalize', 'keep_aspect_ratio', 'right2left',
'despeckle', 'no_sort', 'no_process', 'landscape',
'dont_sharpen', 'disable_trim', 'wide', 'output_format',
'dont_grayscale', 'comic_image_size',
'dont_add_comic_pages_to_toc']
)
self.db, self.book_id = db, book_id
for x in get_option('output_format').option.choices:
self.opt_output_format.addItem(x)
self.initialize_options(get_option, get_help, db, book_id)
self.opt_no_process.toggle()
self.opt_no_process.toggle()
| gpl-3.0 | 6,021,926,588,658,968,000 | 35.828571 | 79 | 0.596587 | false |
T-Tony-T/mainframe-env-simulator | zPE/base/core/IO_JES2.py | 1 | 2606 | # this is a simplification of the "Job Entry Subsystem - IO Component"
# it is used to manage the SPOOL files
from zPE.util import spool_encode
from zPE.util.global_config import CONFIG_PATH, JCL, SP_DEFAULT_OUT_STEP
import os
import re
import sqlite3
class JES_DB(object):
@staticmethod
def append(src, dest):
return src + dest
def __init__(self, job_id, job_name, owner, spool_key):
self.__job_id = job_id
self.__job_name = job_name
self.__owner = owner
self.__spool_key = spool_key
self.__buffer = '' # the buffer for output
# connect db
self.__db = sqlite3.connect(CONFIG_PATH['SPOOL'])
self.__db.create_function( # register `append(src, dest)` to SQLite
"append", 2, JES_DB.append
)
self.__db.text_factory = str # map TEXT to str instead of unicode
self.__db_opened = True
self.__c = self.__db.cursor()
# insert JOB information
self.__c.execute(
'''SELECT Job_ID FROM JOB WHERE Job_ID = ?''',
(self.__job_id,)
)
if not self.__c.fetchone():
self.__c.execute(
'''INSERT INTO JOB VALUES (?, ?, ?)''',
( self.__job_id, self.__job_name, self.__owner, )
)
# initiate SPOOL information
self.__c.execute(
'''INSERT INTO SPOOL VALUES (NULL, ?, ?, ?, ?)''',
( self.__job_id, self.__spool_key,
SP_DEFAULT_OUT_STEP[self.__spool_key], '', )
)
self.__db.commit()
def __del__(self):
if self.__db_opened:
self.close()
def close(self):
self.flush()
self.__c.close()
self.__db.close()
self.__db_opened = False
def flush(self):
if not self.__buffer:
return # no need to flush, early return
self.__c.execute(
'''
UPDATE SPOOL
SET Content = append(Content, ?)
WHERE Job_id = ?
AND Spool_key = ?
''',
( spool_encode(self.__buffer), self.__job_id, self.__spool_key )
)
# clear buffer
self.__buffer = ''
self.__db.commit()
def write(self, line, force_flush = False):
self.__buffer += line
if force_flush:
self.flush()
# open the target file in regardless of the existance
def open_file(dsn, mode):
return JES_DB(
JCL['jobid'],
JCL['jobname'],
JCL['owner'],
os.path.join(* dsn)
)
def rm_file(dsn):
return None
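# Illustrative usage sketch (added note, not part of the original module; the
# DSN tuple below is hypothetical): a SPOOL writer is obtained with open_file(),
# written to, and flushed to the SQLite SPOOL table on close(), e.g.
#   sp = open_file(('JESMSGLG',), 'w')
#   sp.write('HELLO, JES2\n')
#   sp.close()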
| bsd-3-clause | -3,982,466,559,243,623,000 | 23.819048 | 77 | 0.512663 | false |
factorlibre/odoomrp-wip | product_variants_no_automatic_creation/models/product_product.py | 8 | 4841 | # -*- coding: utf-8 -*-
# © 2015 Oihane Crucelaegui - AvanzOSC
# © 2016 Pedro M. Baeza <[email protected]>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3
from openerp import api, exceptions, fields, models, _
class ProductProduct(models.Model):
_inherit = ['product.product', 'product.configurator']
_name = "product.product"
# This is needed as the AbstractModel removes the delegated related field
name = fields.Char(related="product_tmpl_id.name")
@api.multi
@api.depends('attribute_value_ids', 'attribute_value_ids.price_ids',
'attribute_value_ids.price_ids.price_extra',
'attribute_value_ids.price_ids.product_tmpl_id',
'product_tmpl_id')
def _compute_price_extra(self):
for record in self:
record.price_extra = sum(
record.mapped('attribute_value_ids.price_ids').filtered(
lambda x: (x.product_tmpl_id == record.product_tmpl_id)
).mapped('price_extra'))
@api.multi
def _get_product_attributes_values_dict(self):
# Retrieve first the attributes from template to preserve order
res = self.product_tmpl_id._get_product_attributes_dict()
for val in res:
value = self.attribute_value_ids.filtered(
lambda x: x.attribute_id.id == val['attribute_id'])
val['value_id'] = value.id
return res
@api.multi
def _get_product_attributes_values_text(self):
description = self.attribute_value_ids.mapped(
lambda x: "%s: %s" % (x.attribute_id.name, x.name))
return "%s\n%s" % (self.product_tmpl_id.name, "\n".join(description))
@api.model
def _build_attributes_domain(self, product_template, product_attributes):
domain = []
cont = 0
if product_template:
domain.append(('product_tmpl_id', '=', product_template.id))
for attr_line in product_attributes:
if isinstance(attr_line, dict):
value_id = attr_line.get('value_id')
else:
value_id = attr_line.value_id.id
if value_id:
domain.append(('attribute_value_ids', '=', value_id))
cont += 1
return domain, cont
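    # For example (illustrative values, not from the original file): for a
    # template with id 7 whose selected attribute value ids are 3 and 9,
    # this method returns
    #   ([('product_tmpl_id', '=', 7),
    #     ('attribute_value_ids', '=', 3),
    #     ('attribute_value_ids', '=', 9)], 2)
    # i.e. the search domain plus the number of value conditions it contains.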
@api.model
def _product_find(self, product_template, product_attributes):
if product_template:
domain, cont = self._build_attributes_domain(
product_template, product_attributes)
products = self.search(domain)
            # Keep only the product whose number of attribute values matches exactly
for product in products:
if len(product.attribute_value_ids) == cont:
return product
return False
@api.constrains('product_tmpl_id', 'attribute_value_ids')
def _check_duplicity(self):
for product in self:
domain = [('product_tmpl_id', '=', product.product_tmpl_id.id)]
for value in product.attribute_value_ids:
domain.append(('attribute_value_ids', '=', value.id))
other_products = self.search(domain)
            # Compare only products whose number of attribute values matches exactly
cont = len(product.attribute_value_ids)
for other_product in other_products:
if (len(other_product.attribute_value_ids) == cont and
other_product != product):
raise exceptions.ValidationError(
_("There's another product with the same attributes."))
@api.constrains('product_tmpl_id', 'attribute_value_ids')
def _check_configuration_validity(self):
"""This method checks that the current selection values are correct
according rules. As default, the validity means that all the attributes
values are set. This can be overridden to set another rules.
:raises: exceptions.ValidationError: If the check is not valid.
"""
for product in self:
            if bool(product.product_tmpl_id.attribute_line_ids.mapped(
                    'attribute_id') -
                    product.attribute_value_ids.mapped('attribute_id')):
                raise exceptions.ValidationError(
                    _("You have to fill all the attribute values."))
@api.model
def create(self, vals):
if (not vals.get('attribute_value_ids') and
vals.get('product_attribute_ids')):
            vals['attribute_value_ids'] = [
                (4, x[2]['value_id'])
                for x in vals.pop('product_attribute_ids')
                if x[2].get('value_id')]
obj = self.with_context(product_name=vals.get('name', ''))
return super(ProductProduct, obj).create(vals)
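    # Illustrative example of the conversion performed in create() above
    # (hypothetical values): given
    #   vals = {'product_attribute_ids': [(0, 0, {'attribute_id': 1,
    #                                             'value_id': 3})]}
    # the 'product_attribute_ids' commands are turned into "link" commands on
    # 'attribute_value_ids', equivalent to
    #   vals['attribute_value_ids'] = [(4, 3)]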
| agpl-3.0 | 1,425,943,806,410,973,400 | 42.594595 | 79 | 0.585865 | false |
HKUST-SING/tensorflow | tensorflow/python/kernel_tests/metrics_test.py | 9 | 139350 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
NAN = float('nan')
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
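# For example (illustrative): _enqueue_vector(sess, queue, [0, 1]) enqueues a
# constant of shape (1, 2), using the dtype declared by the queue itself.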
def _binary_2d_label_to_2d_sparse_value(labels):
"""Convert dense 2D binary indicator to sparse ID.
  Only `1` values in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator, shape [batch_size, num_classes].
Returns:
`SparseTensorValue` of shape [batch_size, num_classes], where num_classes
is the number of `1` values in each row of `labels`. Values are indices
of `1` values along the last dimension of `labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
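# Worked example (illustrative, not part of the original tests):
#   _binary_2d_label_to_2d_sparse_value([[0, 1, 1], [1, 0, 0]])
# returns indices [[0, 0], [0, 1], [1, 0]], values [1, 2, 0] and
# dense shape [2, 3].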
def _binary_2d_label_to_1d_sparse_value(labels):
"""Convert dense 2D binary indicator to sparse ID.
  Only `1` values in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator, shape [batch_size, num_classes]. Each
row must contain exactly 1 `1` value.
Returns:
`SparseTensorValue` of shape [batch_size]. Values are indices of `1` values
along the last dimension of `labels`.
Raises:
ValueError: if there is not exactly 1 `1` value per row of `labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
if indices != [[i] for i in range(len(labels))]:
raise ValueError('Expected 1 label/example, got %s.' % indices)
shape = [len(labels)]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
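# Worked example (illustrative):
#   _binary_2d_label_to_1d_sparse_value([[0, 1, 0], [1, 0, 0]])
# returns indices [[0], [1]], values [1, 0] and dense shape [2]; a row with
# zero or more than one `1` value raises ValueError.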
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
  Only `1` values in `labels` are included in the result.
Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_local_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape)), newshape=shape)
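# For instance (illustrative), _test_values((2, 3)) returns
# [[1., 2., 3.], [4., 5., 6.]].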
class MeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUnweighted(self):
values = _test_values((3, 2, 4))
mean_results = (
metrics.mean(values),
metrics.mean(values, weights=1.0),
metrics.mean(values, weights=np.ones((1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 4))),
metrics.mean(values, weights=np.ones((1, 2, 1))),
metrics.mean(values, weights=np.ones((1, 2, 4))),
metrics.mean(values, weights=np.ones((3, 1, 1))),
metrics.mean(values, weights=np.ones((3, 1, 4))),
metrics.mean(values, weights=np.ones((3, 2, 1))),
metrics.mean(values, weights=np.ones((3, 2, 4))),
metrics.mean(values, weights=np.ones((3, 2, 4, 1))),)
expected = np.mean(values)
with self.test_session():
variables.local_variables_initializer().run()
for mean_result in mean_results:
mean, update_op = mean_result
self.assertAlmostEqual(expected, update_op.eval())
self.assertAlmostEqual(expected, mean.eval())
def _test_3d_weighted(self, values, weights):
expected = (
np.sum(np.multiply(weights, values)) /
np.sum(np.multiply(weights, np.ones_like(values)))
)
mean, update_op = metrics.mean(values, weights=weights)
with self.test_session():
variables.local_variables_initializer().run()
self.assertAlmostEqual(expected, update_op.eval(), places=5)
self.assertAlmostEqual(expected, mean.eval(), places=5)
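  # Illustrative hand computation of `expected` above (2-D shown for
  # brevity): for values [[1., 2.], [3., 4.]] and weights [[2.], [1.]]
  # (broadcast over the last axis), the weighted mean is
  #   (2*1 + 2*2 + 1*3 + 1*4) / (2 + 2 + 1 + 1) = 13. / 6.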
def test1x1x1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5,)).reshape((1, 1, 1)))
def test1x1xNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)))
def test1xNx1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 11)).reshape((1, 2, 1)))
def test1xNxNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)))
def testNx1x1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)))
def testNx1xNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)))
def testNxNxNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)))
def testInvalidWeights(self):
values_placeholder = array_ops.placeholder(dtype=dtypes_lib.float32)
values = _test_values((3, 2, 4, 1))
invalid_weights = (
(1,),
(1, 1),
(1, 1, 1),
(3, 2),
(3, 2, 4),
(2, 4, 1),
(4, 2, 4, 1),
(3, 3, 4, 1),
(3, 2, 5, 1),
(3, 2, 4, 2),
(1, 1, 1, 1, 1))
expected_error_msg = 'weights can not be broadcast to values'
for invalid_weight in invalid_weights:
# Static shapes.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
metrics.mean(values, invalid_weight)
# Dynamic shapes.
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
with self.test_session():
_, update_op = metrics.mean(values_placeholder, invalid_weight)
variables.local_variables_initializer().run()
update_op.eval(feed_dict={values_placeholder: values})
class MeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_tensor(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/total_tensor:0',
'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class AccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_local_variables(self, ('my_accuracy/count:0',
'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.accuracy(labels, predictions)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.accuracy(labels, predictions, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
accuracy, update_op = metrics.accuracy(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.accuracy(labels, predictions)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
def testEffectivelyEquivalentSizesWithScalarWeight(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions, weights=2.0)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions, weights)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.accuracy(labels, predictions,
weights_placeholder)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.accuracy(labels, predictions, weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class PrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect_multipleInputDtypes(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeightedScalar_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(labels, predictions, weights=2)
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 2.0
weighted_positives = (2.0 + 2.0) + (2.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
labels, predictions, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.precision(
labels,
predictions,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.precision(
labels,
predictions,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.precision(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class RecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('recall/false_negatives/count:0',
'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect_multipleInputDtypes(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_t
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_precision = weighted_tp / weighted_t
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.recall(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class AUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)))
_assert_local_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
auc, update_op = metrics.auc(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.auc(labels, predictions, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect_multipleLabelDtypes(self):
with self.test_session() as sess:
for label_dtype in (
dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.auc(labels, predictions, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def np_auc(self, predictions, labels, weights):
"""Computes the AUC explicitely using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
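  # Worked example (illustrative): for predictions [0.9, 0.1, 0.8, 0.3],
  # labels [1, 0, 1, 0] and unit weights, sorting by descending prediction
  # gives labels [1, 1, 0, 0]; the cumulative true-positive rate evaluated at
  # the two negatives is 1.0 and 1.0, so the returned AUC is
  # (1.0 + 1.0) / 2 = 1.0 (perfect separation).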
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = self.np_auc(predictions, labels, weights)
with self.test_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.auc(tf_labels,
tf_predictions,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a 6-digit match,
        # although with a higher number of samples/thresholds we should see
        # the accuracy improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class SpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_local_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.specificity_at_sensitivity(
labels, predictions, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class SensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_local_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
sensitivity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [
0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.sensitivity_at_specificity(
labels, predictions, weights=weights, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class PrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_local_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates, then verify idempotency.
sess.run([prec_op, rec_op])
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
sess.run([prec_op, rec_op])
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect_multipleLabelDtypes(self):
with self.test_session() as sess:
for label_dtype in (
dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
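# Compute the expected precision/recall by hand from the tally above; the
# small epsilon below keeps the division well-defined and appears to mirror
# the epsilon-stabilized denominators used inside the metric ops.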
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
# Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.precision_at_thresholds(tf_labels, tf_predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(tf_labels, tf_predictions,
thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
# Since this is only approximate, we can't expect a 6-digit match.
# With a higher number of samples/thresholds we should see the accuracy
# improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
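# Helper: evaluates sparse_precision_at_k in a fresh graph and checks both the
# update op and the metric value against `expected`; a NAN expectation asserts
# that the metric is undefined (e.g. a class with no predictions).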
def _test_sparse_precision_at_k(predictions,
labels,
k,
expected,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
def _test_sparse_average_precision_at_k(predictions,
labels,
k,
expected,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.sparse_average_precision_at_k(
labels, predictions, k, weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertAlmostEqual(expected, update.eval())
test_case.assertAlmostEqual(expected, metric.eval())
class SingleLabelSparsePrecisionTest(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
class_labels = (3, 2)
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
_binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
class_labels, dtype=np.int64), np.array(
[[class_id] for class_id in class_labels], dtype=np.int64))
self._test_sparse_precision_at_k = functools.partial(
_test_sparse_precision_at_k, test_case=self)
self._test_sparse_average_precision_at_k = functools.partial(
_test_sparse_average_precision_at_k, test_case=self)
def test_at_k1_nan(self):
for labels in self._labels:
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_sparse_precision_at_k(
self._predictions, labels, k=1, expected=NAN, class_id=class_id)
def test_at_k1(self):
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2)
class MultiLabelSparsePrecisionTest(test.TestCase):
def setUp(self):
self._test_sparse_precision_at_k = functools.partial(
_test_sparse_precision_at_k, test_case=self)
self._test_sparse_average_precision_at_k = functools.partial(
_test_sparse_average_precision_at_k, test_case=self)
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
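# Expected values: ranking classes by descending score gives 5, 3, 6, 0 as the
# top-4; only ranks 2 and 4 fall in the label set {0..4}, so P@k = hits / k and
# AP@k is the sum of P@i over the hit ranks i <= k, divided by k.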
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
# With both examples, we expect both precision and average precision to be
# the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
self._test_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
# With weighted examples, we expect the streaming average precision to be
# the weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
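# Helper: evaluates recall_at_k in a fresh graph and checks both the update op
# and the metric value against `expected`; a NAN expectation asserts that the
# metric is undefined (e.g. a class with no labels).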
def _test_recall_at_k(predictions,
labels,
k,
expected,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
class SingleLabelRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
class_labels = (3, 2)
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
_binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
class_labels, dtype=np.int64), np.array(
[[class_id] for class_id in class_labels], dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
def test_at_k1_nan(self):
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in self._labels:
for class_id in (-1, 0, 1, 4):
self._test_recall_at_k(
self._predictions, labels, k=1, expected=NAN, class_id=class_id)
def test_at_k1_no_predictions(self):
for labels in self._labels:
# Class 2: 0 predictions.
self._test_recall_at_k(
self._predictions, labels, k=1, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_recall_at_k(
self._predictions, labels, k=1, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_recall_at_k(self._predictions, labels, k=1, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = self._predictions
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
class MultiLabel2dRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = ((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
(0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6))
indicator_labels = ((0, 0, 1, 0, 0, 0, 0, 1, 1, 0),
(0, 1, 1, 0, 0, 1, 0, 0, 0, 0))
class_labels = ((2, 7, 8), (1, 2, 5))
# Sparse vs dense labels should be handled the same.
self._labels = (_binary_2d_label_to_2d_sparse_value(indicator_labels),
np.array(
class_labels, dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
def test_at_k5_nan(self):
for labels in self._labels:
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_recall_at_k(
self._predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_at_k5_no_predictions(self):
for labels in self._labels:
# Class 8: 1 label, no predictions.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=8)
def test_at_k5(self):
for labels in self._labels:
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 6)
def test_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 8)
class MultiLabel3dRecallAtKTest(test.TestCase):
def setUp(self):
self._predictions = (((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
(0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6)),
((0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6),
(0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9)))
# Note: We don't test dense labels here, since examples have different
# numbers of labels.
self._labels = _binary_3d_label_to_sparse_value(((
(0, 0, 1, 0, 0, 0, 0, 1, 1, 0), (0, 1, 1, 0, 0, 1, 0, 0, 0, 0)), (
(0, 1, 1, 0, 0, 1, 0, 1, 0, 0), (0, 0, 1, 0, 0, 0, 0, 0, 1, 0))))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
def test_3d_nan(self):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=0.0, class_id=class_id)
def test_3d(self):
# Class 2: 4 labels, all correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=7.0 / 12)
def test_3d_ignore_all(self):
for class_id in xrange(10):
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=NAN,
weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_recall_at_k(
self._predictions,
self._labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
class MeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_absolute_error/count:0',
'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_absolute_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.mean_absolute_error(labels, predictions, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class MeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_relative_error/count:0',
'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.mean_relative_error(labels, predictions,
normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.mean_relative_error(
labels, predictions, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_relative_error(
labels, predictions, normalizer=array_ops.zeros_like(labels))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_squared_error/count:0',
'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.mean_squared_error(labels, predictions, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
error, update_op = metrics.mean_squared_error(labels, predictions)
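# Squared errors: batch 1 gives 81 + 25 + 16, batch 2 gives 36 + 1 + 49,
# i.e. 208 over 6 elements.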
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates one set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.mean_squared_error(
labels0, predictions0, name='msd0')
mse1, update_op1 = metrics.mean_squared_error(
labels1, predictions1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.mean_absolute_error(labels, predictions)
mse, ms_update_op = metrics.mean_squared_error(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class RootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('root_mean_squared_error/count:0',
'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.root_mean_squared_error(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.root_mean_squared_error(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.root_mean_squared_error(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
rmse, update_op = metrics.root_mean_squared_error(labels, predictions,
weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
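# Expands each prediction/label by its integer weight so that unweighted
# numpy formulas can be applied to weighted test data.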
def _reweight(predictions, labels, weights):
return (np.concatenate([[p] * int(w) for p, w in zip(predictions, weights)]),
np.concatenate([[l] * int(w) for l, w in zip(labels, weights)]))
class MeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_local_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
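# Per-row cosine distances (1 - dot product of the unit-norm rows): 0 for the
# identical row, 2 for the opposite row, 1 for the orthogonal row -> mean 1.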
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_cosine_distance(
labels, predictions, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
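# Weights [0, 1, 1] keep only the rows with distances 2 and 1, so the
# weighted mean is 1.5.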
error, update_op = metrics.mean_cosine_distance(
labels, predictions, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.percentage_below(values=array_ops.ones((10,)), threshold=2)
_assert_local_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.percentage_below(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.percentage_below(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(values, 100, name='high')
pcnt1, update_op1 = metrics.percentage_below(values, 7, name='medium')
pcnt2, update_op2 = metrics.percentage_below(values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.percentage_below(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.percentage_below(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
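# With weights [1, 0, 0, 1] only the values 2 and 8 count: both are below
# 100, only 2 is below 7, and neither is below 1.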
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class MeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_local_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.mean_iou(labels, predictions, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.mean_iou(labels, predictions, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
mean_iou, update_op = metrics.mean_iou(
labels, predictions, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_mean_iou = mean_iou.eval()
for _ in range(10):
self.assertEqual(initial_mean_iou, mean_iou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
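# Confusion matrix (rows=labels, cols=predictions) is
# [[1, 0, 0], [1, 1, 1], [0, 1, 0]]; per-class IoU =
# diag / (row_sum + col_sum - diag) = [1/2, 1/4, 0].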
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
mean_iou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
variables.local_variables_initializer().run()
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, mean_iou.eval())
def testMultipleUpdatesWithMissingClass(self):
# Test the case where there are no predictions or labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
# There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0, 0.])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
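# Per-class IoU from the confusion matrix: diag / (row_sum + col_sum - diag),
# i.e. [3/5, 5/7].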
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
with self.test_session() as sess:
miou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
class MeanPerClassAccuracyTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_local_variables(self, ('mean_accuracy/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_accuracy, _ = metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [mean_accuracy])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_per_class_accuracy(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.mean_per_class_accuracy(labels, predictions, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.mean_per_class_accuracy(
labels, predictions, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_mean_accuracy = mean_accuracy.eval()
for _ in range(10):
self.assertEqual(initial_mean_accuracy, mean_accuracy.eval())
  def testMultipleUpdates(self):
    num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
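      # per-class accuracy: class 0 -> 1/1, class 1 -> 1/3 (only one of its three
      # labels is predicted correctly), class 2 -> 0/1.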
desired_output = np.mean([1.0, 1.0 / 3.0, 0.0])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes, weights=weights)
variables.local_variables_initializer().run()
for _ in range(6):
sess.run(update_op)
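      # the zero weights drop samples 4 and 6, leaving class 0 with 2 correct of 2
      # weighted labels and class 1 with 1 correct of 2.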
desired_output = np.mean([2.0 / 2.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
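      # per-class accuracy: class 0 -> 1/2, class 1 -> 2/3, and class 2 contributes
      # zero because it never appears in the labels.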
desired_output = np.mean([1.0 / 2.0, 2.0 / 3.0, 0.])
self.assertAlmostEqual(desired_output, mean_accuracy.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]), constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]), constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
with self.test_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
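      # per-class accuracy from [[3, 0], [2, 5]]: class 0 -> 3/3, class 1 -> 5/7.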
desired_mean_accuracy = np.mean([3. / 3., 5. / 7.])
self.assertAlmostEqual(desired_mean_accuracy, mean_accuracy.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, mean_accuracy.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., mean_accuracy.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]), constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]), constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
weights = array_ops.concat([
constant_op.constant(0, shape=[1]), constant_op.constant(1, shape=[8]),
constant_op.constant(0, shape=[1])
], 0)
with self.test_session() as sess:
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_mean_accuracy = np.mean([2. / 2., 4. / 6.])
self.assertAlmostEqual(desired_mean_accuracy, mean_accuracy.eval())
if __name__ == '__main__':
test.main()
| apache-2.0 | -8,894,845,595,615,712,000 | 37.188545 | 80 | 0.617022 | false |
regebro/tzlocal | tests/test_tzlocal.py | 1 | 5185 | import os
import sys
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import MagicMock, Mock
import pytest
import tzlocal.unix
import tzlocal.utils
if sys.version_info >= (3, 9):
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError
else:
from backports.zoneinfo import ZoneInfo, ZoneInfoNotFoundError
@pytest.fixture(scope='session', autouse=True)
def clear_tz_env_variable():
os.environ.pop('TZ', None)
def tz_path(zonefile: str = None) -> str:
path = Path(__file__).parent.joinpath('test_data')
if zonefile:
return str(path / zonefile)
else:
return str(path)
def test_env(monkeypatch):
tz_harare = tzlocal.unix._tz_from_env(':Africa/Harare')
assert tz_harare.key == 'Africa/Harare'
# Some Unices allow this as well, so we must allow it:
tz_harare = tzlocal.unix._tz_from_env('Africa/Harare')
assert tz_harare.key == 'Africa/Harare'
tz_local = tzlocal.unix._tz_from_env(':' + tz_path('Harare'))
assert tz_local.key == 'local'
# Make sure the local timezone is the same as the Harare one above.
# We test this with a past date, so that we don't run into future changes
# of the Harare timezone.
dt = datetime(2012, 1, 1, 5)
assert dt.replace(tzinfo=tz_harare) == dt.replace(tzinfo=tz_local)
# Non-zoneinfo timezones are not supported in the TZ environment.
pytest.raises(ZoneInfoNotFoundError, tzlocal.unix._tz_from_env, 'GMT+03:00')
# Test the _try function
monkeypatch.setenv('TZ', 'Africa/Harare')
tz_harare = tzlocal.unix._try_tz_from_env()
assert tz_harare.key == 'Africa/Harare'
# With a zone that doesn't exist
monkeypatch.setenv('TZ', 'Just Nonsense')
tz_harare = tzlocal.unix._try_tz_from_env()
assert tz_harare is None
def test_timezone():
# Most versions of Ubuntu
tz = tzlocal.unix._get_localzone(_root=tz_path('timezone'))
assert tz.key == 'Africa/Harare'
def test_timezone_top_line_comment():
tz = tzlocal.unix._get_localzone(_root=tz_path('top_line_comment'))
assert tz.key == 'Africa/Harare'
def test_zone_setting():
# A ZONE setting in /etc/sysconfig/clock, f ex CentOS
tz = tzlocal.unix._get_localzone(_root=tz_path('zone_setting'))
assert tz.key == 'Africa/Harare'
def test_timezone_setting():
# A ZONE setting in /etc/conf.d/clock, f ex Gentoo
tz = tzlocal.unix._get_localzone(_root=tz_path('timezone_setting'))
assert tz.key == 'Africa/Harare'
def test_symlink_localtime():
# A ZONE setting in the target path of a symbolic linked localtime, f ex systemd distributions
tz = tzlocal.unix._get_localzone(_root=tz_path('symlink_localtime'))
assert tz.key == 'Africa/Harare'
def test_vardbzoneinfo_setting():
# A ZONE setting in /etc/conf.d/clock, f ex Gentoo
tz = tzlocal.unix._get_localzone(_root=tz_path('vardbzoneinfo'))
assert tz.key == 'Africa/Harare'
def test_only_localtime():
tz = tzlocal.unix._get_localzone(_root=tz_path('localtime'))
assert tz.key == 'local'
dt = datetime(2012, 1, 1, 5)
assert dt.replace(tzinfo=ZoneInfo('Africa/Harare')) == dt.replace(tzinfo=tz)
def test_get_reload(mocker, monkeypatch):
mocker.patch('tzlocal.utils.assert_tz_offset')
# Clear any cached zone
monkeypatch.setattr(tzlocal.unix, '_cache_tz', None)
monkeypatch.setenv('TZ', 'Africa/Harare')
tz_harare = tzlocal.unix.get_localzone()
assert tz_harare.key == 'Africa/Harare'
# Changing the TZ makes no difference, because it's cached
monkeypatch.setenv('TZ', 'Africa/Johannesburg')
tz_harare = tzlocal.unix.get_localzone()
assert tz_harare.key == 'Africa/Harare'
# So we reload it
tz_harare = tzlocal.unix.reload_localzone()
assert tz_harare.key == 'Africa/Johannesburg'
def test_fail(recwarn):
with pytest.warns(UserWarning, match='Can not find any timezone configuration'):
tz = tzlocal.unix._get_localzone(_root=tz_path())
assert tz == timezone.utc
def test_assert_tz_offset():
# The local zone should be the local zone:
local = tzlocal.get_localzone()
tzlocal.utils.assert_tz_offset(local)
# Get a non local zone. Let's use Chatham, population 600.
other = ZoneInfo('Pacific/Chatham')
pytest.raises(ValueError, tzlocal.utils.assert_tz_offset, other)
def test_win32(mocker):
if sys.platform == 'win32':
import tzlocal.win32
tzlocal.win32.get_localzone()
return
# Yes, winreg is all mocked out, but this test means we at least
# catch syntax errors, etc.
mocker.patch('tzlocal.utils.assert_tz_offset')
winreg = MagicMock()
    winreg.EnumValue.configure_mock(return_value=('TimeZoneKeyName', 'Belarus Standard Time'))
winreg.EnumKey.configure_mock(return_value='Bahia Standard Time')
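    # 'Belarus Standard Time' should map to Europe/Minsk on the first lookup, while
    # the enumerated 'Bahia Standard Time' key drives America/Bahia after the reload.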
sys.modules['winreg'] = winreg
import tzlocal.win32
tz = tzlocal.win32.get_localzone()
assert tz.key == 'Europe/Minsk'
tzlocal.win32.valuestodict = Mock(return_value={
'StandardName': 'Mocked Standard Time',
'Std': 'Mocked Standard Time',
})
tz = tzlocal.win32.reload_localzone()
assert tz.key == 'America/Bahia'
| mit | -6,251,496,927,820,954,000 | 31.006173 | 98 | 0.682353 | false |
InQuest/ThreatKB | app/models/c2ip.py | 1 | 14333 | import distutils
import re
from ipaddr import IPAddress, IPNetwork
from sqlalchemy.event import listens_for
from sqlalchemy.orm import Session
import app
from app import db, current_user, ENTITY_MAPPING, ACTIVITY_TYPE
from app.geo_ip_helper import get_geo_for_ip
from app.models.comments import Comments
from app.models.whitelist import Whitelist
from app.routes import tags_mapping
from app.models.metadata import Metadata, MetadataMapping
from app.models import cfg_states, cfg_settings, activity_log
from flask import abort
import time
class C2ip(db.Model):
__tablename__ = "c2ip"
id = db.Column(db.Integer, primary_key=True)
date_created = db.Column(db.DateTime(timezone=True), default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime(timezone=True), default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
ip = db.Column(db.String(15), index=True, unique=True)
asn = db.Column(db.String(128))
country = db.Column(db.String(64))
state = db.Column(db.String(32), index=True)
description = db.Column(db.TEXT())
references = db.Column(db.TEXT())
expiration_timestamp = db.Column(db.DateTime(timezone=True))
active = db.Column(db.Boolean, nullable=False, default=True, index=True)
created_user_id = db.Column(db.Integer, db.ForeignKey('kb_users.id'), nullable=False)
created_user = db.relationship('KBUser', foreign_keys=created_user_id,
primaryjoin="KBUser.id==C2ip.created_user_id")
modified_user_id = db.Column(db.Integer, db.ForeignKey('kb_users.id'), nullable=False)
modified_user = db.relationship('KBUser', foreign_keys=modified_user_id,
primaryjoin="KBUser.id==C2ip.modified_user_id")
owner_user_id = db.Column(db.Integer, db.ForeignKey('kb_users.id'), nullable=True)
owner_user = db.relationship('KBUser', foreign_keys=owner_user_id,
primaryjoin="KBUser.id==C2ip.owner_user_id")
comments = db.relationship("Comments", foreign_keys=[id],
primaryjoin="and_(Comments.entity_id==C2ip.id, Comments.entity_type=='%s')" % (
ENTITY_MAPPING["IP"]), uselist=True)
tags = []
WHITELIST_CACHE = None
WHITELIST_CACHE_LAST_UPDATE = None
@property
def metadata_fields(self):
return db.session.query(Metadata).filter(Metadata.artifact_type == ENTITY_MAPPING["IP"]).all()
@property
def metadata_values(self):
return db.session.query(MetadataMapping)\
.join(Metadata, Metadata.id == MetadataMapping.metadata_id)\
.filter(Metadata.active > 0) \
.filter(Metadata.artifact_type == ENTITY_MAPPING["IP"])\
.filter(MetadataMapping.artifact_id == self.id)\
.all()
def to_dict(self, include_metadata=True, include_tags=True, include_comments=True):
d = dict(
active=self.active,
date_created=self.date_created.isoformat() if self.date_created else None,
date_modified=self.date_modified.isoformat() if self.date_modified else None,
ip=self.ip,
asn=self.asn,
country=self.country,
state=self.state,
description=self.description,
references=self.references,
expiration_timestamp=self.expiration_timestamp.isoformat() if self.expiration_timestamp else None,
id=self.id,
created_user=self.created_user.to_dict(),
modified_user=self.modified_user.to_dict(),
owner_user=self.owner_user.to_dict() if self.owner_user else None,
)
if include_tags:
d["tags"] = tags_mapping.get_tags_for_source(self.__tablename__, self.id)
if include_comments:
d["comments"] = [comment.to_dict() for comment in Comments.query.filter_by(entity_id=self.id).filter_by(
entity_type=ENTITY_MAPPING["IP"]).all()]
if include_metadata:
metadata_values_dict = {}
metadata_keys = Metadata.get_metadata_keys("IP")
metadata_values_dict = {m["metadata"]["key"]: m for m in
[entity.to_dict() for entity in self.metadata_values]}
for key in list(set(metadata_keys) - set(metadata_values_dict.keys())):
metadata_values_dict[key] = {}
d.update(dict(metadata=Metadata.get_metadata_dict("IP"), metadata_values=metadata_values_dict))
return d
def to_release_dict(self, metadata_cache, user_cache):
return dict(
date_created=self.date_created.isoformat() if self.date_created else None,
date_modified=self.date_modified.isoformat() if self.date_modified else None,
ip=self.ip,
asn=self.asn,
country=self.country,
state=self.state,
description=self.description,
references=self.references,
expiration_timestamp=self.expiration_timestamp.isoformat() if self.expiration_timestamp else None,
id=self.id,
created_user=user_cache[self.created_user_id],
modified_user=user_cache[self.modified_user_id],
owner_user=user_cache[self.owner_user_id] if self.owner_user_id else None,
metadata=metadata_cache["IP"][self.id]["metadata"] if metadata_cache["IP"].get(self.id, None) and
metadata_cache["IP"][self.id].get("metadata",
None) else {},
metadata_values=metadata_cache["IP"][self.id]["metadata_values"] if metadata_cache["IP"].get(self.id,
None) and
metadata_cache["IP"][self.id].get(
"metadata_values", None) else {},
)
def save_metadata(self, metadata):
for name, val in metadata.iteritems():
val = val if not type(val) == dict else val.get("value", None)
if not val:
continue
m = db.session.query(MetadataMapping).join(Metadata, Metadata.id == MetadataMapping.metadata_id).filter(
            Metadata.key == name).filter(Metadata.artifact_type == ENTITY_MAPPING["IP"]).filter(
MetadataMapping.artifact_id == self.id).first()
if m:
m.value = val
db.session.add(m)
dirty = True
else:
m = db.session.query(Metadata).filter(Metadata.key == name).filter(
Metadata.artifact_type == ENTITY_MAPPING["IP"]).first()
db.session.add(MetadataMapping(value=val, metadata_id=m.id, artifact_id=self.id,
created_user_id=current_user.id))
try:
db.session.commit()
except Exception as e:
app.logger.exception(e)
@staticmethod
def get_metadata_to_save(artifact, metadata, metadata_cache={}, user_cache={}):
metadata_to_save = []
metas = {}
for meta in db.session.query(Metadata).all():
if not meta.artifact_type in metas.keys():
metas[meta.artifact_type] = {}
metas[meta.artifact_type][meta.key] = meta
for name, val in metadata.iteritems():
val = val if not type(val) == dict else val["value"]
if metadata_cache:
m = metadata_cache["IP"].get(artifact.id, {}).get("metadata_values", {}).get(name, None)
else:
m = db.session.query(MetadataMapping).join(Metadata, Metadata.id == MetadataMapping.metadata_id).filter(
Metadata.key == name).filter(Metadata.artifact_type == ENTITY_MAPPING["IP"]).filter(
MetadataMapping.artifact_id == artifact.id).first()
if m:
m.value = val
metadata_to_save.append(m)
else:
m = metas.get(ENTITY_MAPPING["IP"], {}).get(name, None)
# m = db.session.query(Metadata).filter(Metadata.key == name).filter(
# Metadata.artifact_type == ENTITY_MAPPING["IP"]).first()
if m:
metadata_to_save.append(MetadataMapping(value=val, metadata_id=m.id, artifact_id=artifact.id,
created_user_id=current_user.id))
return metadata_to_save
@classmethod
def get_c2ip_from_ip(cls, ip, metadata_field_mapping):
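        # Accepts either a bare IP string or an importer artifact dict; metadata
        # fields are copied via metadata_field_mapping, the ASN falls back to a
        # GeoIP lookup when not supplied, and the country always comes from GeoIP.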
artifact = None
if type(ip) is dict:
artifact = ip
ip = ip["artifact"]
geo_ip = get_geo_for_ip(str(ip))
c2ip = C2ip()
c2ip.ip = ip
if artifact and metadata_field_mapping:
for key, val in metadata_field_mapping.iteritems():
try:
setattr(c2ip, val, artifact["metadata"][key])
except:
pass
if hasattr(c2ip, "asn") and not c2ip.asn:
c2ip.asn = geo_ip["asn"]
c2ip.country = geo_ip["country_code"]
return c2ip
def __repr__(self):
return '<C2ip %r>' % (self.id)
@listens_for(C2ip, "before_insert")
def run_against_whitelist(mapper, connect, target):
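    # Reject new C2 IPs that match the whitelist: the check only runs for the
    # configured states, reuses a 60 second cache of whitelist entries, and tries
    # each entry as an exact IP, a CIDR network, and finally a regex.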
whitelist_enabled = cfg_settings.Cfg_settings.get_setting("ENABLE_IP_WHITELIST_CHECK_ON_SAVE")
whitelist_states = cfg_settings.Cfg_settings.get_setting("WHITELIST_STATES")
if whitelist_enabled and distutils.util.strtobool(whitelist_enabled) and whitelist_states:
states = []
for s in whitelist_states.split(","):
if hasattr(cfg_states.Cfg_states, s):
result = cfg_states.Cfg_states.query.filter(getattr(cfg_states.Cfg_states, s) > 0).first()
if result:
states.append(result.state)
if target.state in states:
new_ip = target.ip
abort_import = False
if not C2ip.WHITELIST_CACHE_LAST_UPDATE or not C2ip.WHITELIST_CACHE or (
time.time() - C2ip.WHITELIST_CACHE_LAST_UPDATE) > 60:
C2ip.WHITELIST_CACHE = Whitelist.query.all()
C2ip.WHITELIST_CACHE_LAST_UPDATE = time.time()
whitelists = C2ip.WHITELIST_CACHE
for whitelist in whitelists:
wa = str(whitelist.whitelist_artifact)
try:
if str(IPAddress(new_ip)) == str(IPAddress(wa)):
abort_import = True
break
except ValueError:
pass
try:
if IPAddress(new_ip) in IPNetwork(wa):
abort_import = True
break
except ValueError:
pass
try:
regex = re.compile(wa)
result = regex.search(new_ip)
except:
result = False
if result:
abort_import = True
break
if abort_import:
raise Exception('Failed Whitelist Validation')
# Verify the ip is well formed
IPAddress(new_ip)
if not current_user.admin:
release_state = cfg_states.Cfg_states.query.filter(cfg_states.Cfg_states.is_release_state > 0).first()
if release_state and target.state == release_state.state:
abort(403)
@listens_for(C2ip, "before_update")
def c2ip_before_update(mapper, connect, target):
if current_user and not current_user.admin:
release_state = cfg_states.Cfg_states.query.filter(cfg_states.Cfg_states.is_release_state > 0).first()
if release_state and target.state == release_state.state:
abort(403)
@listens_for(C2ip, "after_insert")
def ip_created(mapper, connection, target):
activity_log.log_activity(connection=connection,
activity_type=ACTIVITY_TYPE.keys()[ACTIVITY_TYPE.keys().index("ARTIFACT_CREATED")],
activity_text=target.ip,
activity_date=target.date_created,
entity_type=ENTITY_MAPPING["IP"],
entity_id=target.id,
user_id=target.created_user_id)
@listens_for(C2ip, "after_update")
def ip_modified(mapper, connection, target):
session = Session.object_session(target)
if session.is_modified(target, include_collections=False):
state_activity_text = activity_log.get_state_change(target, target.ip)
if state_activity_text:
activity_log.log_activity(connection=connection,
activity_type=app.ACTIVITY_TYPE.keys()[ACTIVITY_TYPE.keys().index("STATE_TOGGLED")],
activity_text=state_activity_text,
activity_date=target.date_modified,
entity_type=ENTITY_MAPPING["IP"],
entity_id=target.id,
user_id=target.modified_user_id)
changes = activity_log.get_modified_changes(target)
if changes.__len__() > 0:
activity_log.log_activity(connection=connection,
activity_type=ACTIVITY_TYPE.keys()[ACTIVITY_TYPE.keys().index("ARTIFACT_MODIFIED")],
activity_text="'%s' modified with changes: %s"
% (target.ip, ', '.join(map(str, changes))),
activity_date=target.date_modified,
entity_type=ENTITY_MAPPING["IP"],
entity_id=target.id,
user_id=target.modified_user_id)
| gpl-2.0 | -3,743,784,552,797,139,500 | 42.831804 | 122 | 0.550897 | false |
ahuarte47/QGIS | python/plugins/processing/algs/gdal/gdalcalc.py | 9 | 11704 | # -*- coding: utf-8 -*-
"""
***************************************************************************
gdalcalc.py
---------------------
    Date                 : January 2015
Copyright : (C) 2015 by Giovanni Manghi
Email : giovanni dot manghi at naturalgis dot pt
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giovanni Manghi'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Giovanni Manghi'
from qgis.core import (QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterNumber,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import isWindows
class gdalcalc(GdalAlgorithm):
INPUT_A = 'INPUT_A'
INPUT_B = 'INPUT_B'
INPUT_C = 'INPUT_C'
INPUT_D = 'INPUT_D'
INPUT_E = 'INPUT_E'
INPUT_F = 'INPUT_F'
BAND_A = 'BAND_A'
BAND_B = 'BAND_B'
BAND_C = 'BAND_C'
BAND_D = 'BAND_D'
BAND_E = 'BAND_E'
BAND_F = 'BAND_F'
FORMULA = 'FORMULA'
OUTPUT = 'OUTPUT'
NO_DATA = 'NO_DATA'
OPTIONS = 'OPTIONS'
EXTRA = 'EXTRA'
RTYPE = 'RTYPE'
TYPE = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64']
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT_A,
self.tr('Input layer A'),
optional=False))
self.addParameter(
QgsProcessingParameterBand(
self.BAND_A,
self.tr('Number of raster band for A'),
parentLayerParameterName=self.INPUT_A))
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT_B,
self.tr('Input layer B'),
optional=True))
self.addParameter(
QgsProcessingParameterBand(
self.BAND_B,
self.tr('Number of raster band for B'),
parentLayerParameterName=self.INPUT_B,
optional=True))
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT_C,
self.tr('Input layer C'),
optional=True))
self.addParameter(
QgsProcessingParameterBand(self.BAND_C,
self.tr('Number of raster band for C'),
parentLayerParameterName=self.INPUT_C,
optional=True))
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT_D,
self.tr('Input layer D'),
optional=True))
self.addParameter(
QgsProcessingParameterBand(
self.BAND_D,
self.tr('Number of raster band for D'),
parentLayerParameterName=self.INPUT_D,
optional=True))
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT_E,
self.tr('Input layer E'),
optional=True))
self.addParameter(
QgsProcessingParameterBand(
self.BAND_E,
self.tr('Number of raster band for E'),
parentLayerParameterName=self.INPUT_E,
optional=True))
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT_F,
self.tr('Input layer F'),
optional=True))
self.addParameter(
QgsProcessingParameterBand(
self.BAND_F,
self.tr('Number of raster band for F'),
parentLayerParameterName=self.INPUT_F,
optional=True))
self.addParameter(
QgsProcessingParameterString(
self.FORMULA,
self.tr('Calculation in gdalnumeric syntax using +-/* or any numpy array functions (i.e. logical_and())'),
'A*2',
optional=False))
self.addParameter(
QgsProcessingParameterNumber(
self.NO_DATA,
self.tr('Set output nodata value'),
type=QgsProcessingParameterNumber.Double,
defaultValue=None,
optional=True))
self.addParameter(
QgsProcessingParameterEnum(
self.RTYPE,
self.tr('Output raster type'),
options=self.TYPE,
defaultValue=5))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
self.addParameter(
QgsProcessingParameterRasterDestination(
self.OUTPUT,
self.tr('Calculated')))
def name(self):
return 'rastercalculator'
def displayName(self):
return self.tr('Raster calculator')
def group(self):
return self.tr('Raster miscellaneous')
def groupId(self):
return 'rastermiscellaneous'
def commandName(self):
return 'gdal_calc' if isWindows() else 'gdal_calc.py'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
formula = self.parameterAsString(parameters, self.FORMULA, context)
if self.NO_DATA in parameters and parameters[self.NO_DATA] is not None:
noData = self.parameterAsDouble(parameters, self.NO_DATA, context)
else:
noData = None
arguments = []
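        # The list below is assembled into a gdal_calc invocation roughly of the
        # form (illustrative file names):
        #   gdal_calc.py --calc "A*2" --format GTiff --type Float32 -A in.tif --A_band 1 --outfile out.tif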
arguments.append('--calc "{}"'.format(formula))
arguments.append('--format')
arguments.append(GdalUtils.getFormatShortNameFromFilename(out))
arguments.append('--type')
arguments.append(self.TYPE[self.parameterAsEnum(parameters, self.RTYPE, context)])
if noData is not None:
arguments.append('--NoDataValue')
arguments.append(noData)
layer = self.parameterAsRasterLayer(parameters, self.INPUT_A, context)
if layer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT_A))
arguments.append('-A')
arguments.append(layer.source())
if self.parameterAsString(parameters, self.BAND_A, context):
arguments.append('--A_band ' + self.parameterAsString(parameters, self.BAND_A, context))
if self.INPUT_B in parameters and parameters[self.INPUT_B] is not None:
layer = self.parameterAsRasterLayer(parameters, self.INPUT_B, context)
if layer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT_B))
arguments.append('-B')
arguments.append(layer.source())
if self.parameterAsString(parameters, self.BAND_B, context):
arguments.append('--B_band ' + self.parameterAsString(parameters, self.BAND_B, context))
if self.INPUT_C in parameters and parameters[self.INPUT_C] is not None:
layer = self.parameterAsRasterLayer(parameters, self.INPUT_C, context)
if layer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT_C))
arguments.append('-C')
arguments.append(layer.source())
if self.parameterAsString(parameters, self.BAND_C, context):
arguments.append('--C_band ' + self.parameterAsString(parameters, self.BAND_C, context))
if self.INPUT_D in parameters and parameters[self.INPUT_D] is not None:
layer = self.parameterAsRasterLayer(parameters, self.INPUT_D, context)
if layer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT_D))
arguments.append('-D')
arguments.append(layer.source())
if self.parameterAsString(parameters, self.BAND_D, context):
arguments.append('--D_band ' + self.parameterAsString(parameters, self.BAND_D, context))
if self.INPUT_E in parameters and parameters[self.INPUT_E] is not None:
layer = self.parameterAsRasterLayer(parameters, self.INPUT_E, context)
if layer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT_E))
arguments.append('-E')
arguments.append(layer.source())
if self.parameterAsString(parameters, self.BAND_E, context):
arguments.append('--E_band ' + self.parameterAsString(parameters, self.BAND_E, context))
if self.INPUT_F in parameters and parameters[self.INPUT_F] is not None:
layer = self.parameterAsRasterLayer(parameters, self.INPUT_F, context)
if layer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT_F))
arguments.append('-F')
arguments.append(layer.source())
if self.parameterAsString(parameters, self.BAND_F, context):
arguments.append('--F_band ' + self.parameterAsString(parameters, self.BAND_F, context))
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
parts = options.split('|')
for p in parts:
arguments.append('--co ' + p)
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
arguments.append('--outfile')
arguments.append(out)
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 | -6,716,286,052,499,562,000 | 42.509294 | 122 | 0.561176 | false |
SUTFutureCoder/intelligence_server | A_Imoto/jieba/__main__.py | 4 | 1848 | """Jieba command line interface."""
import sys
import jieba
from argparse import ArgumentParser
parser = ArgumentParser(usage="%s -m jieba [options] filename" % sys.executable, description="Jieba command line interface.", epilog="If no filename specified, use STDIN instead.")
parser.add_argument("-d", "--delimiter", metavar="DELIM", default=' / ',
nargs='?', const=' ',
help="use DELIM instead of ' / ' for word delimiter; or a space if it is used without DELIM")
parser.add_argument("-D", "--dict", help="use DICT as dictionary")
parser.add_argument("-u", "--user-dict",
help="use USER_DICT together with the default dictionary or DICT (if specified)")
parser.add_argument("-a", "--cut-all",
action="store_true", dest="cutall", default=False,
help="full pattern cutting")
parser.add_argument("-n", "--no-hmm", dest="hmm", action="store_false",
default=True, help="don't use the Hidden Markov Model")
parser.add_argument("-q", "--quiet", action="store_true", default=False,
help="don't print loading messages to stderr")
parser.add_argument("-V", '--version', action='version',
version="Jieba " + jieba.__version__)
parser.add_argument("filename", nargs='?', help="input file")
args = parser.parse_args()
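# Typical invocation (illustrative): python -m jieba -d ' | ' input.txt
# segments each line of input.txt and joins the tokens with ' | '.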
if args.quiet:
jieba.setLogLevel(60)
delim = unicode(args.delimiter)
cutall = args.cutall
hmm = args.hmm
fp = open(args.filename, 'r') if args.filename else sys.stdin
if args.dict:
jieba.initialize(args.dict)
else:
jieba.initialize()
if args.user_dict:
jieba.load_userdict(args.user_dict)
ln = fp.readline()
while ln:
l = ln.rstrip('\r\n')
    print(delim.join(jieba.cut(l, cutall, hmm)).encode('utf-8'))
ln = fp.readline()
fp.close()
| gpl-2.0 | 8,288,706,801,230,907,000 | 39.173913 | 180 | 0.635281 | false |
lsanzdiaz/MITK-BiiG | Build/Tools/Python/renameClass.py | 4 | 7545 | #!/usr/bin/python
# mitk c++ class rename script by Marco Nolden and Michael Mueller
#
# There are two ways to use this:
#
# 1. renameClass <dir> <oldClassName> <newClassName>
#
# 2. renameClass <dir> <csvFileOfClassNameReplaces>
#
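# For example (hypothetical directory and class names):
#   renameClass.py ./Modules/Core OldImageFilter NewImageFilter
# or, for mode 2, a csv file containing one "OldName,NewName" pair per line.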
#
# Always backup your code before using this! It has only been tested on a few cases for a special purpose!
# It does not parse the c++, but just does a text replace on ClassName, mitkClassName, m_ClassName, GetClassName
# and SetClassName and renames files accordingly. There is some basic mechanism to avoid name clashes but better
# double check the results.
#
# using the commitCommandString and the renameFileCommand you can commit your rename results directly to your
# favourite version control.
#
# some source code was taken from regexplace by Stefano Spinucci
#
import os;
import fileinput;
import re;
import sys;
import stat;
import os.path;
import csv;
commitCommandString = None
renameFileCommand = None
#
# uncomment and adapt this to commit after each rename. Parameters will be oldname, newname
# commitCommandString = "git commit -a -m \"CHG (#3669): renamed %s to %s\""
# uncomment and adapt this for renaming files. If undefined, a normal file rename will be performed
# using python commands
# renameFileCommand = "git mv %s %s "
class FileList:
def __init__(self,dir):
self.filelist = [];
for root,dirs,files in os.walk(dir):
if ".svn" in dirs:
dirs.remove(".svn")
if ".git" in dirs:
dirs.remove(".git")
for name in files:
self.filelist.append((root,name))
def contains(self,filename):
for root,xfilename in self.filelist:
if (xfilename == filename):
return (root,filename)
return None
def rename_file(self,source,dest):
self.filelist.remove(source)
xroot,xfile = source
self.filelist.append((xroot,dest))
if renameFileCommand:
os.system(renameFileCommand % (os.path.join(xroot,xfile),os.path.join(xroot,dest) ) )
else:
os.rename(os.path.join(xroot,xfile),os.path.join(xroot,dest))
def exists_somewhere(self,stringlist):
exists = False
regexString = str(stringlist.pop())
for string in stringlist:
regexString = regexString + "|" + string
regexString = "\\b(" + regexString + ")\\b"
regex = re.compile(regexString)
for root,filename in self.filelist:
xfile = os.path.join(root,filename)
# open file for read
readlines=open(xfile,'r').readlines()
      # search the current file, warning the user about any name that matches
      for currentline in readlines:
        match = regex.search(currentline)
        if match:
          print "warning: %s found in %s" % (match.group(0),xfile)
          exists = True
return exists
def find_all(dir):
filelist = [];
for root,dirs,files in os.walk(dir):
if ".svn" in dirs:
dirs.remove(".svn")
if ".git" in dirs:
dirs.remove(".git")
for name in files:
filelist.append((root,name))
return filelist
# in all files in 'fileslist' search the regexp 'searchregx' and replace
# with 'replacestring'; real substitution in files only if 'simulation' = 0;
# real substitution may also be step by step (if 'stepbystep' = 1)
def replace_in_files(fileslist, searchregx, replacestring, simulation, stepbystep):
# compile regexp
cregex=re.compile(searchregx)
# print message to the user
if simulation == 1:
print '\nReplaced (simulation):\n'
else:
print '\nReplaced:\n'
# loop on all files
for root,filename in fileslist:
xfile = os.path.join(root,filename)
# initialize the replace flag
replaceflag=0
fileAtt = os.stat(xfile)[0]
if (not fileAtt & stat.S_IWRITE):
continue
# open file for read
readlines=open(xfile,'r').readlines()
    # initialize the list counter
listindex = -1
# search and replace in current file printing to the user changed lines
for currentline in readlines:
# increment the list counter
listindex = listindex + 1
# if the regexp is found
if cregex.search(currentline):
# make the substitution
f=re.sub(searchregx,replacestring,currentline)
# print the current filename, the old string and the new string
print '\n' + xfile
print '- ' + currentline ,
if currentline[-1:]!='\n': print '\n' ,
print '+ ' + f ,
if f[-1:]!='\n': print '\n' ,
# if substitution is real
if simulation == 0:
# if substitution is step by step
if stepbystep == 1:
# ask user if the current line must be replaced
question = raw_input('write(Y), skip (n), quit (q) ? ')
            question = question.lower()
# if quit
if question=='q':
sys.exit('\ninterrupted by the user !!!')
# if skip
elif question=='n':
pass
# if write
else:
# update the whole file variable ('readlines')
readlines[listindex] = f
replaceflag=1
# if substitution is not step by step
else:
# update the whole file variable ('readlines')
readlines[listindex] = f
replaceflag=1
# if some text was replaced
# overwrite the original file
if replaceflag==1:
      # open the file for writing
write_file=open(xfile,'w')
# overwrite the file
for line in readlines:
write_file.write(line)
# close the file
write_file.close()
def replace_word_in_files(fileslist, searchword, replaceword, simulation = False, stepbystep = False):
replace_in_files(fileslist,"\\b" + searchword + "\\b",replaceword,simulation,stepbystep)
def rename_class(filelist, oldname, newname,classPrefix = "mitk" ):
suffixes = [ "h","cpp","txx" ]
for suffix in suffixes:
origName = classPrefix + oldname + "." + suffix
newName = classPrefix + newname + "." + suffix
fileName = filelist.contains(origName)
if fileName:
replace_word_in_files(filelist.filelist,origName,newName)
filelist.rename_file(fileName,newName)
replace_word_in_files(filelist.filelist,oldname,newname)
prefixes = [ "Get" , "Set" , "m_" ]
newnames = map(lambda x : x + newname , prefixes)
if filelist.exists_somewhere(newnames):
print "Skipping member variable and getter/setter renaming due to name conflict"
return
for prefix in prefixes:
replace_word_in_files(filelist.filelist,prefix + oldname, prefix + newname)
x = FileList(sys.argv[1])
if len(sys.argv) == 4:
rename_class(x,sys.argv[2],sys.argv[3])
if len(sys.argv) == 3:
csvReader = csv.reader(open(sys.argv[2],'r'))
for row in csvReader:
print row
rename_class(x,row[0],row[1])
if commitCommandString:
os.system(commitCommandString % ( row[0],row[1] ) )
| bsd-3-clause | -2,524,685,303,388,429,300 | 30.049383 | 113 | 0.589529 | false |
osh/gnuradio | gr-qtgui/examples/pyqt_waterfall_f.py | 58 | 6051 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
def __init__(self, display, control):
QtGui.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.boxlayout.addWidget(control)
self.resize(800, 500)
class control_box(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle('Control Panel')
self.setToolTip('Control the signals')
QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))
self.layout = QtGui.QFormLayout(self)
# Control the first signal
self.freq1Edit = QtGui.QLineEdit(self)
self.freq1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
self.freq1EditText)
self.amp1Edit = QtGui.QLineEdit(self)
self.amp1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
self.amp1EditText)
# Control the second signal
self.freq2Edit = QtGui.QLineEdit(self)
self.freq2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
self.connect(self.freq2Edit, QtCore.SIGNAL("editingFinished()"),
self.freq2EditText)
self.amp2Edit = QtGui.QLineEdit(self)
self.amp2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
self.amp2EditText)
self.quit = QtGui.QPushButton('Close', self)
self.quit.setMinimumWidth(100)
self.layout.addWidget(self.quit)
self.connect(self.quit, QtCore.SIGNAL('clicked()'),
QtGui.qApp, QtCore.SLOT('quit()'))
def attach_signal1(self, signal):
self.signal1 = signal
self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))
def attach_signal2(self, signal):
self.signal2 = signal
self.freq2Edit.setText(QtCore.QString("%1").arg(self.signal2.frequency()))
self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))
def freq1EditText(self):
try:
newfreq = float(self.freq1Edit.text())
self.signal1.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp1EditText(self):
try:
newamp = float(self.amp1Edit.text())
self.signal1.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
def freq2EditText(self):
try:
newfreq = float(self.freq2Edit.text())
self.signal2.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp2EditText(self):
try:
newamp = float(self.amp2Edit.text())
self.signal2.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
Rs = 8000
f1 = 100
f2 = 2000
npts = 2048
self.qapp = QtGui.QApplication(sys.argv)
src1 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
src2 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
src = blocks.add_ff()
thr = blocks.throttle(gr.sizeof_float, 100*npts)
self.snk1 = qtgui.waterfall_sink_f(npts, filter.firdes.WIN_BLACKMAN_hARRIS,
0, Rs,
"Real Waterfall Example", 2)
self.connect(src1, (src,0))
self.connect(src2, (src,1))
self.connect(src, thr, (self.snk1, 0))
self.connect(src1, (self.snk1, 1))
self.ctrl_win = control_box()
self.ctrl_win.attach_signal1(src1)
self.ctrl_win.attach_signal2(src2)
# Get the reference pointer to the SpectrumDisplayForm QWidget
pyQt = self.snk1.pyqwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt4.QtGui.QWidget
pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)
#pyWin.show()
self.main_box = dialog_box(pyWin, self.ctrl_win)
self.main_box.show()
if __name__ == "__main__":
tb = my_top_block();
tb.start()
tb.qapp.exec_()
tb.stop()
| gpl-3.0 | 6,714,310,063,018,431,000 | 32.247253 | 83 | 0.630474 | false |