repo_name
stringlengths 5
100
| path
stringlengths 4
299
| copies
stringclasses 990
values | size
stringlengths 4
7
| content
stringlengths 666
1.03M
| license
stringclasses 15
values | hash
int64 -9,223,351,895,964,839,000
9,223,297,778B
| line_mean
float64 3.17
100
| line_max
int64 7
1k
| alpha_frac
float64 0.25
0.98
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
feigames/Odoo | openerp/models.py | 3 | 277182 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Object Relational Mapping module:
* Hierarchical structure
* Constraints consistency and validation
* Object metadata depends on its status
* Optimised processing by complex query (multiple actions at once)
* Default field values
* Permissions optimisation
* Persistent object: DB postgresql
* Data conversion
* Multi-level caching system
* Two different inheritance mechanisms
* Rich set of field types:
- classical (varchar, integer, boolean, ...)
- relational (one2many, many2one, many2many)
- functional
"""
import datetime
import functools
import itertools
import logging
import operator
import pickle
import pytz
import re
import time
from collections import defaultdict, MutableMapping
from inspect import getmembers
import babel.dates
import dateutil.relativedelta
import psycopg2
from lxml import etree
import openerp
from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .osv import fields
from .osv.query import Query
from .tools import lazy_property, ormcache
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _
_logger = logging.getLogger(__name__)
# dedicated logger for database-schema operations (table/column changes)
_schema = logging.getLogger(__name__ + '.schema')

# validates ORDER BY specifications: comma-separated column names, each
# optionally double-quoted and followed by an asc/desc modifier
regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
# valid model (_name) identifiers: lowercase alphanumerics, '_' and '.'
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
# parses v7-style onchange specifications such as "onchange_partner(partner_id)"
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")

# batch size for recomputing stored function fields during automatic
# schema initialization
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

    The _name attribute in osv and osv_memory object is subject to
    some restrictions. This function returns True or False whether
    the given name is allowed or not.

    TODO: this is an approximation. The goal in this approximation
    is to disallow uppercase characters (in some places, we quote
    table/column names and in other not, which leads to this kind
    of errors:

        psycopg2.ProgrammingError: relation "xxx" does not exist).

    The same restriction should apply to both osv and osv_memory
    objects for consistency.

    :param str name: candidate model name
    :return: True if the name is allowed, False otherwise
    :rtype: bool
    """
    # match() returns None on rejection; collapse that directly to a boolean
    # instead of the verbose if/return-False/return-True ladder
    return regex_object_name.match(name) is not None
def raise_on_invalid_object_name(name):
    """ Log and raise an except_orm error when `name` is not a valid
    openerp object name (see check_object_name). """
    if check_object_name(name):
        return
    msg = "The _name attribute %s is not valid." % name
    _logger.error(msg)
    raise except_orm('ValueError', msg)
# Mapping from the ON DELETE action names used in field definitions to the
# single-character codes PostgreSQL stores in pg_constraint.confdeltype.
POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}
def intersect(la, lb):
    """ Return the elements of `la` that also occur in `lb`, preserving
    the order (and duplicates) of `la`.

    :param la: iterable whose elements are kept when present in `lb`
    :param lb: collection of allowed elements (elements must be hashable)
    :return: list of retained elements
    """
    # build the membership set once: O(len(la) + len(lb)) instead of the
    # O(len(la) * len(lb)) cost of scanning the list `lb` per element
    lb_set = set(lb)
    return [x for x in la if x in lb_set]
def same_name(f, g):
    """ Test whether functions `f` and `g` are identical or have the same name """
    # objects without a __name__ get distinct sentinels (0 vs 1), so two
    # nameless objects never compare equal on the name criterion alone
    names_match = getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
    return f == g or names_match
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # a ".id" (database id) or ":id" (external id) suffix becomes its own
    # path segment, unless it is already preceded by a '/'
    path = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    path = re.sub(r'([^/]):id', r'\1/id', path)
    return path.split('/')
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    # falsy size (0, None, '') means unbounded
    if not size:
        return 'VARCHAR'
    if not isinstance(size, int):
        raise TypeError("VARCHAR parameter should be an int, got %s"
                        % type(size))
    if size > 0:
        return 'VARCHAR(%d)' % size
    # negative sizes also fall back to the unbounded declaration
    return 'VARCHAR'
# Direct mapping from simple _column classes to their PostgreSQL column
# type. Field types needing computed declarations (float, char, selection,
# function, ...) are resolved dynamically by get_pg_type() instead.
FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification),
              or None when the field type is not supported
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        base = FIELDS_TO_PGTYPES[field_type]
        return (base, base)

    if issubclass(field_type, fields.float):
        # digits-constrained floats are stored as NUMERIC, others as float8
        if f.digits:
            return ('numeric', 'NUMERIC')
        return ('float8', 'DOUBLE PRECISION')

    if issubclass(field_type, (fields.char, fields.reference)):
        return ('varchar', pg_varchar(f.size))

    if issubclass(field_type, fields.selection):
        # integer-keyed selections (or explicit size == -1) use an int column
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            return ('int4', 'INTEGER')
        return ('varchar', pg_varchar(getattr(f, 'size', None)))

    if issubclass(field_type, fields.function):
        if f._type == 'selection':
            return ('varchar', pg_varchar())
        # dispatch again on the field type the function field emulates
        return get_pg_type(f, getattr(fields, f._type))

    _logger.warning('%s type not supported!', field_type)
    return None
class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the model's
    ``_register`` attribute to False.
    """

    # maps an addon name to the model classes it declares
    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            # base/abstract class: mark it registered and stop here
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons`
            # namespace or not. For instance, module `sale` can be imported
            # as `openerp.addons.sale` (the right way) or `sale` (for
            # backward compatibility).
            parts = self.__module__.split('.')
            if len(parts) > 2 and parts[:2] == ['openerp', 'addons']:
                self._module = parts[2]
            else:
                self._module = parts[0]

        # Remember which models to instantiate for this module.
        if not self._custom:
            self.module_to_models.setdefault(self._module, []).append(self)

        # transform columns into new-style fields (enables field inheritance)
        for fname, column in self._columns.iteritems():
            if not hasattr(self, fname):
                setattr(self, fname, column.to_field())
class NewId(object):
    """ Pseudo-ids for records that are not stored yet; always falsy. """

    def __nonzero__(self):
        # Python 2 truth-value protocol: a NewId evaluates to False so
        # code can test "if record.id:" uniformly
        return False
# accepted types for record ids (NewId covers not-yet-saved records)
IdType = (int, long, basestring, NewId)

# maximum number of prefetched records
PREFETCH_MAX = 200

# special columns automatically created by the ORM when _log_access is enabled
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
# every column the ORM manages itself: 'id' plus the log-access ones
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * :class:`Model` for regular database-persisted models
    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often
    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel

    _auto = True # create database backend
    _register = False # Set to false if the model shouldn't be automatically discovered.
    _name = None                 # technical name of the model ('res.partner')
    _columns = {}                # old-style (osv.fields) column definitions
    _constraints = []            # python constraints: (function, message, fields)
    _custom = False              # True for models created via the user interface
    _defaults = {}               # field name -> default value or callable
    _rec_name = None             # field used by name_get (defaults to 'name')
    _parent_name = 'parent_id'   # many2one field used for record hierarchies
    _parent_store = False        # enable nested-set (parent_left/right) storage
    _parent_order = False
    _date_name = 'date'          # default field for calendar views
    _order = 'id'                # default ORDER BY clause for search()
    _sequence = None             # PostgreSQL sequence backing the id column
    _description = None          # human-readable model description
    _needaction = False
    _translate = True # set to False to disable translations export for this model

    # dict of {field:method}, with method returning the (name_get of records, {id: fold})
    # to include in the _read_group, if grouped on this field
    _group_by_full = {}

    # Transience
    _transient = False # True in a TransientModel

    # structure:
    # { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _all_columns = {}

    _table = None                # database table name (derived from _name)
    _log_create = False
    _sql_constraints = []        # (key, sql_definition, message) triples

    # model dependencies, for models backed up by sql views:
    # {model_name: field_names, ...}
    _depends = {}

    # name of the pseudo-field used for optimistic-locking concurrency checks
    CONCURRENCY_CHECK_FIELD = '__last_update'
def log(self, cr, uid, id, message, secondary=False, context=None):
    """ Deprecated: emits a warning pointing callers to OpenChatter;
    all arguments are ignored. """
    warning = "log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism."
    return _logger.warning(warning)
def view_init(self, cr, uid, fields_list, context=None):
    """ Hook invoked when a view on the object is opened; override it to
    perform model-specific setup. The default implementation does nothing.
    """
    pass
def _field_create(self, cr, context=None):
    """ Create entries in ir_model_fields for all the model's fields.

    If necessary, also create an entry in ir_model, and if called from the
    modules loading scheme (by receiving 'module' in the context), also
    create entries in ir_model_data (for the model and the fields).

    - create an entry in ir_model (if there is not already one),
    - create an entry in ir_model_data (if there is not already one, and if
      'module' is in the context),
    - update ir_model_fields with the fields found in _columns
      (TODO there is some redundancy as _columns is updated from
      ir_model_fields in __init__).
    """
    if context is None:
        context = {}
    # ensure the model itself is reflected in ir_model
    cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
    if not cr.rowcount:
        # not reflected yet: draw a fresh id and insert the row
        cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
        model_id = cr.fetchone()[0]
        cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
    else:
        model_id = cr.fetchone()[0]
    if 'module' in context:
        # give the model an xml id in the installing module's namespace
        name_id = 'model_'+self._name.replace('.', '_')
        cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
        if not cr.rowcount:
            cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                (name_id, context['module'], 'ir.model', model_id)
            )

    # snapshot of the currently reflected fields, keyed by field name
    cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
    cols = {}
    for rec in cr.dictfetchall():
        cols[rec['name']] = rec

    ir_model_fields_obj = self.pool.get('ir.model.fields')

    # sparse field should be created at the end, as it depends on its serialized field already existing
    model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
    for (k, f) in model_fields:
        # values to reflect for this column in ir_model_fields
        vals = {
            'model_id': model_id,
            'model': self._name,
            'name': k,
            'field_description': f.string,
            'ttype': f._type,
            'relation': f._obj or '',
            'select_level': tools.ustr(int(f.select)),
            'readonly': (f.readonly and 1) or 0,
            'required': (f.required and 1) or 0,
            'selectable': (f.selectable and 1) or 0,
            'translate': (f.translate and 1) or 0,
            'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
            'serialization_field_id': None,
        }
        if getattr(f, 'serialization_field', None):
            # resolve link to serialization_field if specified by name
            serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
            if not serialization_field_id:
                raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
            vals['serialization_field_id'] = serialization_field_id[0]

        # When its a custom field,it does not contain f.select
        if context.get('field_state', 'base') == 'manual':
            if context.get('field_name', '') == k:
                vals['select_level'] = context.get('select', '0')
            #setting value to let the problem NOT occur next time
            elif k in cols:
                vals['select_level'] = cols[k]['select_level']

        if k not in cols:
            # new field: reflect it with a freshly drawn id
            cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
            id = cr.fetchone()[0]
            vals['id'] = id
            cr.execute("""INSERT INTO ir_model_fields (
                id, model_id, model, name, field_description, ttype,
                relation,state,select_level,relation_field, translate, serialization_field_id
            ) VALUES (
                %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
            )""", (
                id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                vals['relation'], 'base',
                vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
            ))
            if 'module' in context:
                # also give the new field an xml id; suffix with the row id
                # when the plain name is already taken
                name1 = 'field_' + self._table + '_' + k
                cr.execute("select name from ir_model_data where name=%s", (name1,))
                if cr.fetchone():
                    name1 = name1 + "_" + str(id)
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name1, context['module'], 'ir.model.fields', id)
                )
        else:
            # existing field: rewrite the reflection as soon as ANY attribute
            # differs from the stored row (NOTE: the first UPDATE of
            # field_description alone is redundant with the full UPDATE below)
            for key, val in vals.items():
                if cols[k][key] != vals[key]:
                    cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                    cr.execute("""UPDATE ir_model_fields SET
                        model_id=%s, field_description=%s, ttype=%s, relation=%s,
                        select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                    WHERE
                        model=%s AND name=%s""", (
                        vals['model_id'], vals['field_description'], vals['ttype'],
                        vals['relation'],
                        vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                    ))
                    # one UPDATE covers every column, so stop at the first
                    # detected difference
                    break
    self.invalidate_cache(cr, SUPERUSER_ID)
@classmethod
def _add_field(cls, name, field):
    """ Add the given `field` under the given `name` in the class """
    field.set_class_name(cls, name)

    # expose the field for reflection
    cls._fields[name] = field

    # bind the field as a class attribute, unless the slot is already
    # occupied by something that is not a field
    current = getattr(cls, name, field)
    if isinstance(current, Field):
        setattr(cls, name, field)
    else:
        _logger.warning("In model %r, member %r is not a field", cls._name, name)

    # stored fields are mirrored as old-style columns; non-stored fields
    # must not leave a stale column behind
    if field.store:
        cls._columns[name] = field.to_column()
    else:
        cls._columns.pop(name, None)
@classmethod
def _pop_field(cls, name):
    """ Remove the field with the given `name` from the model.
    This method should only be used for manual fields.
    """
    field = cls._fields.pop(name)
    # wipe every trace of the field from the column registries
    for registry in (cls._columns, cls._all_columns):
        registry.pop(name, None)
    if hasattr(cls, name):
        delattr(cls, name)
    return field
@classmethod
def _add_magic_fields(cls):
    """ Introduce magic fields on the current class

    * id is a "normal" field (with a specific getter)
    * create_uid, create_date, write_uid and write_date have become
      "normal" fields
    * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
      method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
      to get the same structure as the previous
      ``(now() at time zone 'UTC')::timestamp``::

          # select (now() at time zone 'UTC')::timestamp;
                    timezone
          ----------------------------
           2013-06-18 08:30:37.292809

          >>> str(datetime.datetime.utcnow())
          '2013-06-18 08:31:32.821177'
    """
    def add(name, field):
        """ add `field` with the given `name` if it does not exist yet """
        if name not in cls._columns and name not in cls._fields:
            cls._add_field(name, field)

    # cyclic import
    from . import fields

    # this field 'id' must override any other column or field
    cls._add_field('id', fields.Id(automatic=True))

    add('display_name', fields.Char(string='Display Name', automatic=True,
        compute='_compute_display_name'))

    if cls._log_access:
        add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
        add('create_date', fields.Datetime(string='Created on', automatic=True))
        add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
        add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
        # with log access the concurrency field can be derived from the
        # create/write dates instead of being recomputed every time
        last_modified_name = 'compute_concurrency_field_with_access'
    else:
        last_modified_name = 'compute_concurrency_field'

    # this field must override any other column or field
    cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
        string='Last Modified on', compute=last_modified_name, automatic=True))
@api.one
def compute_concurrency_field(self):
    """ Fill the concurrency-check pseudo-field with the current UTC time
    (used when the model has no log-access columns to rely on). """
    timestamp = datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    self[self.CONCURRENCY_CHECK_FIELD] = timestamp
@api.one
@api.depends('create_date', 'write_date')
def compute_concurrency_field_with_access(self):
    """ Fill the concurrency-check pseudo-field from write_date (or
    create_date), falling back to the current UTC time when neither
    is set yet. """
    last_modified = self.write_date or self.create_date
    if not last_modified:
        last_modified = datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    self[self.CONCURRENCY_CHECK_FIELD] = last_modified
#
# Goal: try to apply inheritance at the instanciation level and
# put objects in the pool var
#
@classmethod
def _build_model(cls, pool, cr):
    """ Instanciate a given model.

    This class method instanciates the class of some model (i.e. a class
    deriving from osv or osv_memory). The class might be the class passed
    in argument or, if it inherits from another class, a class constructed
    by combining the two classes.
    """
    # IMPORTANT: the registry contains an instance for each model. The class
    # of each model carries inferred metadata that is shared among the
    # model's instances for this registry, but not among registries. Hence
    # we cannot use that "registry class" for combining model classes by
    # inheritance, since it confuses the metadata inference process.

    # Keep links to non-inherited constraints in cls; this is useful for
    # instance when exporting translations
    cls._local_constraints = cls.__dict__.get('_constraints', [])
    cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

    # determine inherited models (_inherit may be a string or a list)
    parents = getattr(cls, '_inherit', [])
    parents = [parents] if isinstance(parents, basestring) else (parents or [])

    # determine the model's name: explicit _name, else the single parent's
    # name (pure extension), else the Python class name
    name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

    # determine the module that introduced the model
    original_module = pool[name]._original_module if name in parents else cls._module

    # build the class hierarchy for the model
    for parent in parents:
        if parent not in pool:
            raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                'You may need to add a dependency on the parent class\' module.' % (name, parent))
        parent_model = pool[parent]

        # do no use the class of parent_model, since that class contains
        # inferred metadata; use its ancestor instead
        parent_class = type(parent_model).__base__

        # don't inherit custom fields
        columns = dict((key, val)
            for key, val in parent_class._columns.iteritems()
            if not val.manual
        )
        columns.update(cls._columns)

        defaults = dict(parent_class._defaults)
        defaults.update(cls._defaults)

        inherits = dict(parent_class._inherits)
        inherits.update(cls._inherits)

        # merge dependency field lists per model
        depends = dict(parent_class._depends)
        for m, fs in cls._depends.iteritems():
            depends[m] = depends.get(m, []) + fs

        old_constraints = parent_class._constraints
        new_constraints = cls._constraints
        # filter out from old_constraints the ones overridden by a
        # constraint with the same function name in new_constraints
        constraints = new_constraints + [oldc
            for oldc in old_constraints
            if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                       for newc in new_constraints)
        ]

        sql_constraints = cls._sql_constraints + \
                          parent_class._sql_constraints

        attrs = {
            '_name': name,
            '_register': False,
            '_columns': columns,
            '_defaults': defaults,
            '_inherits': inherits,
            '_depends': depends,
            '_constraints': constraints,
            '_sql_constraints': sql_constraints,
        }
        # chain the combined class in front of the parent's ancestor class
        cls = type(name, (cls, parent_class), attrs)

    # introduce the "registry class" of the model;
    # duplicate some attributes so that the ORM can modify them
    attrs = {
        '_name': name,
        '_register': False,
        '_columns': dict(cls._columns),
        '_defaults': dict(cls._defaults),
        '_inherits': dict(cls._inherits),
        '_depends': dict(cls._depends),
        '_constraints': list(cls._constraints),
        '_sql_constraints': list(cls._sql_constraints),
        '_original_module': original_module,
    }
    cls = type(cls._name, (cls,), attrs)

    # instantiate the model, and initialize it
    model = object.__new__(cls)
    model.__init__(pool, cr)
    return model
@classmethod
def _init_function_fields(cls, pool, cr):
    """ Register the model's function fields on the registry: non-stored
    ones for cache invalidation, stored ones as recomputation triggers
    in pool._store_function. """
    # initialize the list of non-stored function fields for this model
    pool._pure_function_fields[cls._name] = []
    # process store of low-level function fields
    for fname, column in cls._columns.iteritems():
        if hasattr(column, 'digits_change'):
            # give float-like columns a chance to refresh their digits
            column.digits_change(cr)
        # filter out existing store about this field
        pool._store_function[cls._name] = [
            stored
            for stored in pool._store_function.get(cls._name, [])
            if (stored[0], stored[1]) != (cls._name, fname)
        ]
        if not isinstance(column, fields.function):
            continue
        if not column.store:
            # register it on the pool for invalidation
            pool._pure_function_fields[cls._name].append(fname)
            continue
        # process store parameter
        store = column.store
        if store is True:
            # store=True is shorthand for "recompute from this model's own ids"
            get_ids = lambda self, cr, uid, ids, c={}: ids
            store = {cls._name: (get_ids, None, column.priority, None)}
        for model, spec in store.iteritems():
            # spec is (fnct, fields, priority[, length])
            if len(spec) == 4:
                (fnct, fields2, order, length) = spec
            elif len(spec) == 3:
                (fnct, fields2, order) = spec
                length = None
            else:
                raise except_orm('Error',
                    ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
            pool._store_function.setdefault(model, [])
            t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
            if t not in pool._store_function[model]:
                pool._store_function[model].append(t)
                # keep the triggers sorted by priority (5th tuple item)
                pool._store_function[model].sort(key=lambda x: x[4])
@classmethod
def _init_manual_fields(cls, pool, cr):
    """ Load the model's custom ('manual') fields from ir_model_fields
    and add the corresponding columns to cls._columns. """
    # Check whether the query is already done
    if pool.fields_by_model is not None:
        manual_fields = pool.fields_by_model.get(cls._name, [])
    else:
        cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
        manual_fields = cr.dictfetchall()
    for field in manual_fields:
        # never override a field that is already defined in code
        if field['name'] in cls._columns:
            continue
        # NOTE: eval here is openerp's safe_eval (aliased at module level),
        # so stored domains/selections are evaluated in a restricted context
        attrs = {
            'string': field['field_description'],
            'required': bool(field['required']),
            'readonly': bool(field['readonly']),
            'domain': eval(field['domain']) if field['domain'] else None,
            'size': field['size'] or None,
            'ondelete': field['on_delete'],
            'translate': (field['translate']),
            'manual': True,
            '_prefetch': False,
            #'select': int(field['select_level'])
        }
        if field['serialization_field_id']:
            # the field lives inside another (serialized) field -> sparse
            cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
            attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
            if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                attrs.update({'relation': field['relation']})
            cls._columns[field['name']] = fields.sparse(**attrs)
        elif field['ttype'] == 'selection':
            cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
        elif field['ttype'] == 'reference':
            cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
        elif field['ttype'] == 'many2one':
            cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
        elif field['ttype'] == 'one2many':
            cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
        elif field['ttype'] == 'many2many':
            # derive a deterministic relation table name for the m2m
            _rel1 = field['relation'].replace('.', '_')
            _rel2 = field['model'].replace('.', '_')
            _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
            cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
        else:
            # simple scalar types map directly onto a fields.<ttype> class
            cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
@classmethod
def _init_constraints_onchanges(cls):
    """ Register sql-constraint error messages on the registry and collect
    the class's @constrains and @onchange methods. """
    # map "<table>_<key>" to its human-readable error message
    for (key, _, msg) in cls._sql_constraints:
        cls.pool._sql_error[cls._table + '_' + key] = msg

    # collect constraint and onchange methods
    constraint_methods = []
    onchange_methods = defaultdict(list)
    for _attr, func in getmembers(cls, callable):
        if hasattr(func, '_constrains'):
            if not all(fname in cls._fields for fname in func._constrains):
                _logger.warning("@constrains%r parameters must be field names", func._constrains)
            constraint_methods.append(func)
        if hasattr(func, '_onchange'):
            if not all(fname in cls._fields for fname in func._onchange):
                _logger.warning("@onchange%r parameters must be field names", func._onchange)
            for fname in func._onchange:
                onchange_methods[fname].append(func)
    cls._constraint_methods = constraint_methods
    cls._onchange_methods = onchange_methods
def __new__(cls):
    """ Prevent direct instantiation: always returns None.

    In the past, this method was registering the model class in the server.
    This job is now done entirely by the metaclass MetaModel.

    Do not create an instance here. Model instances are created by method
    _build_model().
    """
    return None
def __init__(self, pool, cr):
    """ Initialize a model and make it part of the given registry.

    - copy the stored fields' functions in the registry,
    - retrieve custom fields and add them in the model,
    - ensure there is a many2one for each _inherits'd parent,
    - update the children's _columns,
    - give a chance to each field to initialize itself.
    """
    cls = type(self)

    # link the class to the registry, and update the registry
    cls.pool = pool
    cls._model = self # backward compatibility
    pool.add(cls._name, self)

    # determine description, table, sequence and log_access
    if not cls._description:
        cls._description = cls._name
    if not cls._table:
        cls._table = cls._name.replace('.', '_')
    if not cls._sequence:
        cls._sequence = cls._table + '_id_seq'
    if not hasattr(cls, '_log_access'):
        # If _log_access is not specified, it is the same value as _auto.
        cls._log_access = cls._auto

    # Transience
    if cls.is_transient():
        cls._transient_check_count = 0
        cls._transient_max_count = config.get('osv_memory_count_limit')
        cls._transient_max_hours = config.get('osv_memory_age_limit')
        assert cls._log_access, \
            "TransientModels must have log_access turned on, " \
            "in order to implement their access rights policy"

    # retrieve new-style fields and duplicate them (to avoid clashes with
    # inheritance between different models)
    cls._fields = {}
    # Field.__instancecheck__ is used as the getmembers predicate: it keeps
    # only members that are Field instances
    for attr, field in getmembers(cls, Field.__instancecheck__):
        if not field.inherited:
            cls._add_field(attr, field.copy())

    # introduce magic fields
    cls._add_magic_fields()

    # register stuff about low-level function fields and custom fields
    cls._init_function_fields(pool, cr)
    cls._init_manual_fields(pool, cr)

    # process _inherits
    cls._inherits_check()
    cls._inherits_reload()

    # register constraints and onchange methods
    cls._init_constraints_onchanges()

    # check defaults
    for k in cls._defaults:
        assert k in cls._fields, \
            "Model %s has a default for nonexiting field %s" % (cls._name, k)

    # restart columns
    for column in cls._columns.itervalues():
        column.restart()

    # validate rec_name
    if cls._rec_name:
        assert cls._rec_name in cls._fields, \
            "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
    elif 'name' in cls._fields:
        cls._rec_name = 'name'

    # prepare ormcache, which must be shared by all instances of the model
    cls._ormcache = {}
@api.model
@ormcache()
def _is_an_ordinary_table(self):
    """ Return whether the model's backing table is an ordinary table
    (PostgreSQL relkind 'r'), as opposed to e.g. a view. The result is
    cached per registry via @ormcache. """
    self.env.cr.execute("""\
        SELECT  1
        FROM    pg_class
        WHERE   relname = %s
        AND     relkind = %s""", [self._table, 'r'])
    return bool(self.env.cr.fetchone())
def __export_xml_id(self):
    """ Return a valid xml_id for the record `self`. """
    if not self._is_an_ordinary_table():
        raise Exception(
            "You can not export the column ID of model %s, because the "
            "table %s is not an ordinary table."
            % (self._name, self._table))
    ir_model_data = self.sudo().env['ir.model.data']
    data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
    if data:
        # reuse the record's existing external id
        entry = data[0]
        if entry.module:
            return '%s.%s' % (entry.module, entry.name)
        return entry.name

    # no external id yet: generate a unique one under the __export__ module,
    # appending an increasing numeric suffix until the name is free
    postfix = 0
    name = '%s_%s' % (self._table, self.id)
    while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
        postfix += 1
        name = '%s_%s_%s' % (self._table, self.id, postfix)
    ir_model_data.create({
        'model': self._name,
        'res_id': self.id,
        'module': '__export__',
        'name': name,
    })
    return '__export__.' + name
@api.multi
def __export_rows(self, fields):
    """ Export fields of the records in `self`.

    :param fields: list of lists of fields to traverse
    :return: list of lists of corresponding values
    """
    lines = []
    for record in self:
        # main line of record, initially empty
        current = [''] * len(fields)
        lines.append(current)

        # list of primary fields followed by secondary field(s)
        primary_done = []

        # process column by column
        for i, path in enumerate(fields):
            if not path:
                continue
            name = path[0]
            if name in primary_done:
                continue
            if name == '.id':
                # '.id' exports the raw database id
                current[i] = str(record.id)
            elif name == 'id':
                # 'id' exports the external (xml) id, creating one if needed
                current[i] = record.__export_xml_id()
            else:
                field = record._fields[name]
                value = record[name]

                # this part could be simpler, but it has to be done this way
                # in order to reproduce the former behavior
                if not isinstance(value, BaseModel):
                    current[i] = field.convert_to_export(value, self.env)
                else:
                    primary_done.append(name)

                    # This is a special case, its strange behavior is intended!
                    if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                        xml_ids = [r.__export_xml_id() for r in value]
                        current[i] = ','.join(xml_ids) or False
                        continue

                    # recursively export the fields that follow name
                    fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                    lines2 = value.__export_rows(fields2)
                    if lines2:
                        # merge first line with record's main line
                        for j, val in enumerate(lines2[0]):
                            if val:
                                current[j] = val
                        # check value of current field
                        if not current[i]:
                            # assign xml_ids, and forget about remaining lines
                            xml_ids = [item[1] for item in value.name_get()]
                            current[i] = ','.join(xml_ids)
                        else:
                            # append the other lines at the end
                            lines += lines2[1:]
                    else:
                        current[i] = False
    return lines
@api.multi
def export_data(self, fields_to_export, raw_data=False):
    """ Export fields for selected objects

    :param fields_to_export: list of fields
    :param raw_data: True to return value in native Python type
    :rtype: dictionary with a *datas* matrix

    This method is used when exporting data via client menu
    """
    # normalize every field path ('.id', ':id', '/' separators) up front
    fields_to_export = [fix_import_export_id_paths(path) for path in fields_to_export]
    records = self.with_context(export_raw_data=True) if raw_data else self
    return {'datas': records.__export_rows(fields_to_export)}
    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead
        Import given data in given module
        This method is used when importing data via client menu.
        Example of fields to import for a sale.order::
            .id, (=database_id)
            partner_id, (=name_search)
            order_line/.id, (=database_id)
            order_line/name,
            order_line/product_id/id, (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id (=xml_id)
        This method returns a 4-tuple with the following structure::
            (return_code, errored_resource, error_message, unused)
        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics
        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module
        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')
        # any error-level message reported by the extractor/converter aborts
        # the whole import by raising
        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)
        # NOTE(review): the resume position loaded just above is discarded
        # here, so the import always restarts from row 0 — confirm whether
        # resuming from the saved position was intended
        position = 0
        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                # periodically checkpoint progress so a crashed import can be
                # resumed from the partial-import state file
                if config.get('import_partial') and filename and (not (position%100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
            if context.get('defer_parent_store_computation'):
                self._parent_store_compute(cr)
            cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0
def load(self, cr, uid, fields, data, context=None):
"""
Attempts to load the data matrix, and returns a list of ids (or
``False`` if there was an error and no id could be generated) and a
list of messages.
The ids are those of the records created and saved (in database), in
the same order they were extracted from the file. They can be passed
directly to :meth:`~read`
:param fields: list of fields to import, at the same index as the corresponding data
:type fields: list(str)
:param data: row-major matrix of data to import
:type data: list(list(str))
:param dict context:
:returns: {ids: list(int)|False, messages: [Message]}
"""
cr.execute('SAVEPOINT model_load')
messages = []
fields = map(fix_import_export_id_paths, fields)
ModelData = self.pool['ir.model.data'].clear_caches()
fg = self.fields_get(cr, uid, context=context)
mode = 'init'
current_module = ''
noupdate = False
ids = []
for id, xid, record, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, data,
context=context, log=messages.append),
context=context, log=messages.append):
try:
cr.execute('SAVEPOINT model_load_save')
except psycopg2.InternalError, e:
# broken transaction, exit and hope the source error was
# already logged
if not any(message['type'] == 'error' for message in messages):
messages.append(dict(info, type='error',message=
u"Unknown database error: '%s'" % e))
break
try:
ids.append(ModelData._update(cr, uid, self._name,
current_module, record, mode=mode, xml_id=xid,
noupdate=noupdate, res_id=id, context=context))
cr.execute('RELEASE SAVEPOINT model_load_save')
except psycopg2.Warning, e:
messages.append(dict(info, type='warning', message=str(e)))
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except psycopg2.Error, e:
messages.append(dict(
info, type='error',
**PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
# Failed to write, log to messages, rollback savepoint (to
# avoid broken transaction) and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except Exception, e:
message = (_('Unknown error during import:') +
' %s: %s' % (type(e), unicode(e)))
moreinfo = _('Resolve other errors first')
messages.append(dict(info, type='error',
message=message,
moreinfo=moreinfo))
# Failed for some reason, perhaps due to invalid data supplied,
# rollback savepoint and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
if any(message['type'] == 'error' for message in messages):
cr.execute('ROLLBACK TO SAVEPOINT model_load')
ids = False
return {'ids': ids, 'messages': messages}
    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.
        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.
        For relational fields, if sub-fields were provided the value will be
        a list of sub-records
        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        :param fields_: list of field paths (lists of path components)
        :param data: row-major matrix of raw cell values
        :param log: callable invoked with message dicts for non-fatal issues
        """
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')
        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                  if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                  if columns[field[0]]._type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))
        # walk the matrix: each main row plus its following o2m-only rows
        # form the span of one record
        index = 0
        while True:
            if index >= len(data): return
            row = data[index]
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))
            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                             if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]
                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))
                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                # recurse on the comodel to extract the sub-records
                record[relfield] = [subrecord
                    for subrecord, _subinfo in Model._extract_records(
                        cr, uid, subfields, relfield_data,
                        context=context, log=log)]
            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1
            }}
            index += len(record_span)
    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)
        :param records: iterable of (record_dict, extras) pairs, as produced
            by :meth:`_extract_records`
        :param log: callable invoked with message dicts for conversion issues
        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        # map each field to its (possibly translated) label, for human-readable
        # error messages
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or column.string))
            for f, column in columns.iteritems())
        convert = Converter.for_model(cr, uid, self, context=context)
        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)
        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record: pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                # a database id pointing at a non-existing record is an error
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                        type='error',
                        record=stream.index,
                        field='.id',
                        message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False
            converted = convert(record, lambda field, err:\
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
            yield dbid, xid, converted, dict(extras, record=stream.index)
    @api.multi
    def _validate_fields(self, field_names):
        """ Run both old-style ``_constraints`` and new-style
            ``@api.constrains`` methods that depend on any of `field_names`
            over the records in `self`.
            :param field_names: iterable of modified field names
            :raise ValidationError: if any constraint fails
        """
        field_names = set(field_names)
        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                # build the error message: either computed by a callable, or
                # a (translated) static string
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise ValidationError('\n'.join(errors))
        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                try:
                    check(self)
                except ValidationError, e:
                    raise
                except Exception, e:
                    # wrap unexpected failures so callers get a ValidationError
                    raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
def default_get(self, cr, uid, fields_list, context=None):
""" default_get(fields) -> default_values
Return default values for the fields in `fields_list`. Default
values are determined by the context, user defaults, and the model
itself.
:param fields_list: a list of field names
:return: a dictionary mapping each field name to its corresponding
default value; the keys of the dictionary are the fields in
`fields_list` that have a default value different from ``False``.
This method should not be overridden. In order to change the
mechanism for determining default values, you should override method
:meth:`add_default_value` instead.
"""
# trigger view init hook
self.view_init(cr, uid, fields_list, context)
# use a new record to determine default values; evaluate fields on the
# new record and put default values in result
record = self.new(cr, uid, {}, context=context)
result = {}
for name in fields_list:
if name in self._fields:
value = record[name]
if name in record._cache:
result[name] = value # it really is a default value
# convert default values to the expected format
result = self._convert_to_write(result)
return result
def add_default_value(self, field):
""" Set the default value of `field` to the new record `self`.
The value must be assigned to `self`.
"""
assert not self.id, "Expected new record: %s" % self
cr, uid, context = self.env.args
name = field.name
# 1. look up context
key = 'default_' + name
if key in context:
self[name] = context[key]
return
# 2. look up ir_values
# Note: performance is good, because get_defaults_dict is cached!
ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
if name in ir_values_dict:
self[name] = ir_values_dict[name]
return
# 3. look up property fields
# TODO: get rid of this one
column = self._columns.get(name)
if isinstance(column, fields.property):
self[name] = self.env['ir.property'].get(name, self._name)
return
# 4. look up _defaults
if name in self._defaults:
value = self._defaults[name]
if callable(value):
value = value(self._model, cr, uid, context)
self[name] = value
return
# 5. delegate to field
field.determine_default(self)
def fields_get_keys(self, cr, user, context=None):
res = self._columns.keys()
# TODO I believe this loop can be replace by
# res.extend(self._inherit_fields.key())
for parent in self._inherits:
res.extend(self.pool[parent].fields_get_keys(cr, user, context))
return res
def _rec_name_fallback(self, cr, uid, context=None):
rec_name = self._rec_name
if rec_name not in self._columns:
rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
return rec_name
    #
    # Overload this method if you need a window title which depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        """ Return a custom window title for the given view, or ``False`` to
            keep the default one. Meant to be overridden. """
        return False
def user_has_groups(self, cr, uid, groups, context=None):
"""Return true if the user is at least member of one of the groups
in groups_str. Typically used to resolve `groups` attribute
in view and model definitions.
:param str groups: comma-separated list of fully-qualified group
external IDs, e.g.: ``base.group_user,base.group_system``
:return: True if the current user is a member of one of the
given groups
"""
return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
for group_ext_id in groups.split(','))
def _get_default_form_view(self, cr, user, context=None):
""" Generates a default single-line form view using all fields
of the current model except the m2m and o2m ones.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a form view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('form', string=self._description)
group = etree.SubElement(view, 'group', col="4")
for fname, field in self._fields.iteritems():
if field.automatic or field.type in ('one2many', 'many2many'):
continue
etree.SubElement(group, 'field', name=fname)
if field.type == 'text':
etree.SubElement(group, 'newline')
return view
def _get_default_search_view(self, cr, user, context=None):
""" Generates a single-field search view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('search', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_tree_view(self, cr, user, context=None):
""" Generates a single-field tree view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('tree', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names
        Note: as a side effect, ``self._date_name`` is updated when the
        configured date field does not exist on the model.
        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.
            Returns whether it's found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False
        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        # find a usable start-date field, trying conventional names in order
        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break
            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)
        # optional color field
        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')
        # an end is required: either an end date, or a duration field
        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
        return view
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])
        Get the detailed composition of the requested view like fields, model, view architecture
        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
            * if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']
        result = {
            'model': self._name,
            'field_parent': False,
        }
        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)
            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)
        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields
        # Add related action information if asked
        if toolbar:
            # report payloads are stripped from toolbar actions
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            #When multi="True" set it will display only in More of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']
            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate
            }
        return result
    def get_formview_id(self, cr, uid, id, context=None):
        """ Return an view id to open the document with. This method is meant to be
            overridden in addons that want to give specific view ids for example.
            :param int id: id of the document to open
            :return: a form view id, or ``False`` to let the client use the
                default form view
        """
        return False
def get_formview_action(self, cr, uid, id, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
view_id = self.get_formview_id(cr, uid, id, context=context)
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_type': 'form',
'view_mode': 'form',
'views': [(view_id, 'form')],
'target': 'current',
'res_id': id,
}
    def get_access_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific access to the document.
            By default it opens the formview of the document.
            :param int id: id of the document to open
        """
        return self.get_formview_action(cr, uid, id, context=context)
    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        """ Delegate view architecture post-processing (field extraction,
            access rights, modifiers) to ``ir.ui.view``. """
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)
def search_count(self, cr, user, args, context=None):
""" search_count(args) -> int
Returns the number of records in the current model matching :ref:`the
provided domain <reference/orm/domains>`.
"""
res = self.search(cr, user, args, context=context, count=True)
if isinstance(res, list):
return len(res)
return res
    @api.returns('self')
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """ search(args[, offset=0][, limit=None][, order=None][, count=False])
        Searches for records based on the ``args``
        :ref:`search domain <reference/orm/domains>`.
        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param int offset: number of results to ignore (default: none)
        :param int limit: maximum number of records to return (default: all)
        :param str order: sort string
        :param bool count: if ``True``, the call should return the number of
                           records matching ``args`` rather than the records
                           themselves.
        :returns: at most ``limit`` records matching the search criteria
        :raise AccessError: * if user tries to bypass access rules for read on the requested object.
        """
        # delegate to the private implementation, which applies access rules
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
    #
    # display_name, name_get, name_create, name_search
    #
    @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
    def _compute_display_name(self):
        """ Compute ``display_name`` for every record in `self` from the
            result of :meth:`name_get`. """
        names = dict(self.name_get())
        for record in self:
            record.display_name = names.get(record.id, False)
@api.multi
def name_get(self):
""" name_get() -> [(id, name), ...]
Returns a textual representation for the records in ``self``.
By default this is the value of the ``display_name`` field.
:return: list of pairs ``(id, text_repr)`` for each records
:rtype: list(tuple)
"""
result = []
name = self._rec_name
if name in self._fields:
convert = self._fields[name].convert_to_display_name
for record in self:
result.append((record.id, convert(record[name])))
else:
for record in self:
result.append((record.id, "%s,%s" % (record._name, record.id)))
return result
@api.model
def name_create(self, name):
""" name_create(name) -> record
Create a new record by calling :meth:`~.create` with only one value
provided: the display name of the new record.
The new record will be initialized with any default values
applicable to this model, or provided through the context. The usual
behavior of :meth:`~.create` applies.
:param name: display name of the record to create
:rtype: tuple
:return: the :meth:`~.name_get` pair value of the created record
"""
if self._rec_name:
record = self.create({self._rec_name: name})
return record.name_get()[0]
else:
_logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
return False
    @api.model
    def name_search(self, name='', args=None, operator='ilike', limit=100):
        """ name_search(name='', args=None, operator='ilike', limit=100) -> records
        Search for records that have a display name matching the given
        `name` pattern when compared with the given `operator`, while also
        matching the optional search domain (`args`).
        This is used for example to provide suggestions based on a partial
        value for a relational field. Sometimes be seen as the inverse
        function of :meth:`~.name_get`, but it is not guaranteed to be.
        This method is equivalent to calling :meth:`~.search` with a search
        domain based on ``display_name`` and then :meth:`~.name_get` on the
        result of the search.
        :param str name: the name pattern to match
        :param list args: optional search domain (see :meth:`~.search` for
                          syntax), specifying further restrictions
        :param str operator: domain operator for matching `name`, such as
                             ``'like'`` or ``'='``.
        :param int limit: optional max number of records to return
        :rtype: list
        :return: list of pairs ``(id, text_repr)`` for all matching records.
        """
        # delegate to the private implementation (old-API signature)
        return self._name_search(name, args, operator, limit=limit)
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
# private implementation of name_search, allows passing a dedicated user
# for the name_get part to solve some access rights issues
args = list(args or [])
# optimize out the default criterion of ``ilike ''`` that matches everything
if not self._rec_name:
_logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
elif not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
res = self.name_get(cr, access_rights_uid, ids, context)
return res
    def read_string(self, cr, uid, id, langs, fields=None, context=None):
        """ Return the (possibly translated) labels of the given fields for
            each language in `langs`.
            :return: dict mapping each lang code to a dict of field labels
                (plus a 'code' key holding the lang itself)
        """
        res = {}
        res2 = {}
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
        if not fields:
            fields = self._columns.keys() + self._inherit_fields.keys()
        #FIXME: collect all calls to _get_source into one SQL call.
        for lang in langs:
            res[lang] = {'code': lang}
            for f in fields:
                if f in self._columns:
                    res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
                    if res_trans:
                        res[lang][f] = res_trans
                    else:
                        # no translation: fall back on the raw field label
                        res[lang][f] = self._columns[f].string
        for table in self._inherits:
            # NOTE(review): `cols` does not depend on `table`; it could be
            # hoisted out of this loop — confirm before changing
            cols = intersect(self._inherit_fields.keys(), fields)
            res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
            for lang in res2:
                if lang in res:
                    res[lang]['code'] = lang
                for f in res2[lang]:
                    res[lang][f] = res2[lang][f]
        return res
    def write_string(self, cr, uid, id, langs, vals, context=None):
        """ Store translated labels for the given fields, for each language
            in `langs`, and propagate to ``_inherits`` parents.
            :param vals: dict mapping field names to their translated label
            :return: True
        """
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
        #FIXME: try to only call the translation in one SQL
        for lang in langs:
            for field in vals:
                if field in self._columns:
                    src = self._columns[field].string
                    self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), vals)
            if cols:
                # delegate inherited field labels to the parent model
                self.pool[table].write_string(cr, uid, id, langs, vals, context)
        return True
    def _add_missing_default_values(self, cr, uid, values, context=None):
        """ Complete `values` with default values for the fields it does not
            set, converting x2many defaults to command triplets.
            :param values: dict of field values provided by the caller; it is
                never overridden by defaults
            :return: a new dict combining defaults and `values`
        """
        # avoid overriding inherited values when parent is set
        avoid_tables = []
        for tables, parent_field in self._inherits.items():
            if parent_field in values:
                avoid_tables.append(tables)
        # compute missing fields
        missing_defaults = set()
        for field in self._columns.keys():
            if not field in values:
                missing_defaults.add(field)
        for field in self._inherit_fields.keys():
            if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
                missing_defaults.add(field)
        # discard magic fields
        missing_defaults -= set(MAGIC_COLUMNS)
        if missing_defaults:
            # override defaults with the provided values, never allow the other way around
            defaults = self.default_get(cr, uid, list(missing_defaults), context)
            for dv in defaults:
                # m2m defaults given as a list of ids become a (6, 0, ids) command
                if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
                        or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
                        and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
                    defaults[dv] = [(6, 0, defaults[dv])]
                # o2m defaults given as a list of value dicts become (0, 0, vals) commands
                if (dv in self._columns and self._columns[dv]._type == 'one2many' \
                        or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
                        and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
                    defaults[dv] = [(0, 0, x) for x in defaults[dv]]
            defaults.update(values)
            values = defaults
        return values
    def clear_caches(self):
        """ Clear the caches
        This clears the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi``.
        """
        try:
            self._ormcache.clear()
            self.pool._any_cache_cleared = True
        except AttributeError:
            # models without any ormcache-decorated method have no _ormcache
            # attribute; nothing to clear in that case
            pass
    def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
                                 aggregated_fields, count_field,
                                 read_group_result, read_group_order=None, context=None):
        """Helper method for filling in empty groups for all possible values of
           the field being grouped by.

        Merges the groups actually returned by the SQL query with the full
        list of groups provided by the model's ``_group_by_full`` hook, so
        that empty groups (e.g. empty kanban columns) still appear. Both
        lists are assumed to follow the same ordering.
        """
        # self._group_by_full should map groupable fields to a method that returns
        # a list of all aggregated values that we want to display for this field,
        # in the form of a m2o-like pair (key,label).
        # This is useful to implement kanban views for instance, where all columns
        # should be displayed even if they don't contain any record.
        # Grab the list of all groups that should be displayed, including all present groups
        present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
        all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
                                                  read_group_order=read_group_order,
                                                  access_rights_uid=openerp.SUPERUSER_ID,
                                                  context=context)
        # template for groups absent from the SQL results: all aggregates False, count 0
        result_template = dict.fromkeys(aggregated_fields, False)
        result_template[groupby + '_count'] = 0
        if remaining_groupbys:
            result_template['__context'] = {'group_by': remaining_groupbys}
        # Merge the left_side (current results as dicts) with the right_side (all
        # possible values as m2o pairs). Both lists are supposed to be using the
        # same ordering, and can be merged in one pass.
        result = []
        known_values = {}
        def append_left(left_side):
            # take a populated group from the SQL results (dedup on group key)
            grouped_value = left_side[groupby] and left_side[groupby][0]
            if not grouped_value in known_values:
                result.append(left_side)
                known_values[grouped_value] = left_side
            else:
                known_values[grouped_value].update({count_field: left_side[count_field]})
        def append_right(right_side):
            # take an empty group from the _group_by_full list, built from the template
            grouped_value = right_side[0]
            if not grouped_value in known_values:
                line = dict(result_template)
                line[groupby] = right_side
                line['__domain'] = [(groupby,'=',grouped_value)] + domain
                result.append(line)
                known_values[grouped_value] = line
        # single-pass ordered merge of the two lists
        while read_group_result or all_groups:
            left_side = read_group_result[0] if read_group_result else None
            right_side = all_groups[0] if all_groups else None
            assert left_side is None or left_side[groupby] is False \
                 or isinstance(left_side[groupby], (tuple,list)), \
                'M2O-like pair expected, got %r' % left_side[groupby]
            assert right_side is None or isinstance(right_side, (tuple,list)), \
                'M2O-like pair expected, got %r' % right_side
            if left_side is None:
                append_right(all_groups.pop(0))
            elif right_side is None:
                append_left(read_group_result.pop(0))
            elif left_side[groupby] == right_side:
                append_left(read_group_result.pop(0))
                all_groups.pop(0) # discard right_side
            elif not left_side[groupby] or not left_side[groupby][0]:
                # left side == "Undefined" entry, not present on right_side
                append_left(read_group_result.pop(0))
            else:
                append_right(all_groups.pop(0))
        if folded:
            # mark groups that should be displayed folded (kanban columns)
            for r in result:
                r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
        return result
    def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
        """
        Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
        to the query if order should be computed against m2o field.
        :param orderby: the orderby definition in the form "%(field)s %(order)s"
        :param aggregated_fields: list of aggregated fields in the query
        :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
                These dictionaries contains the qualified name of each groupby
                (fully qualified SQL name for the corresponding field),
                and the (non raw) field name.
        :param osv.Query query: the query under construction
        :return: (groupby_terms, orderby_terms)
        """
        orderby_terms = []
        groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
        groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
        if not orderby:
            return groupby_terms, orderby_terms

        # orderby has been validated by _check_qorder, so it is safe to split/inline
        self._check_qorder(orderby)
        for order_part in orderby.split(','):
            order_split = order_part.split()      # [field_spec, optional direction]
            order_field = order_split[0]
            if order_field in groupby_fields:
                # strip a possible ':granularity' suffix before looking up the column
                if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
                    # ordering on a m2o needs a JOIN on the co-model; the joined
                    # order columns must also be added to the GROUP BY clause
                    order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                    if order_clause:
                        orderby_terms.append(order_clause)
                        groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
                else:
                    order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
                    orderby_terms.append(order)
            elif order_field in aggregated_fields:
                orderby_terms.append(order_part)
            else:
                # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
                _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
                             self._name, order_part)

        return groupby_terms, orderby_terms
    def _read_group_process_groupby(self, gb, query, context):
        """
        Helper method to collect important information about groupbys: raw
        field name, type, time informations, qualified name, ...

        ``gb`` is either a plain field name or 'field:granularity' where
        granularity is one of day/week/month/quarter/year (date/datetime
        fields only). May add JOINs to ``query`` for inherited fields.
        """
        split = gb.split(':')
        field_type = self._all_columns[split[0]].column._type
        gb_function = split[1] if len(split) == 2 else None
        temporal = field_type in ('date', 'datetime')
        # convert to the user timezone only when a valid tz is in the context
        tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
        qualified_field = self._inherits_join_calc(split[0], query)
        if temporal:
            # babel date patterns used for the displayed group labels
            display_formats = {
                'day': 'dd MMM YYYY', 
                'week': "'W'w YYYY", 
                'month': 'MMMM YYYY', 
                'quarter': 'QQQ YYYY', 
                'year': 'YYYY'
            }
            # width of one group, used to build the [start, start+interval) domain
            time_intervals = {
                'day': dateutil.relativedelta.relativedelta(days=1),
                'week': datetime.timedelta(days=7),
                'month': dateutil.relativedelta.relativedelta(months=1),
                'quarter': dateutil.relativedelta.relativedelta(months=3),
                'year': dateutil.relativedelta.relativedelta(years=1)
            }
            if tz_convert:
                # shift to user tz before truncating, so day/week/... boundaries are local
                qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
            qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
        if field_type == 'boolean':
            # group NULL together with false
            qualified_field = "coalesce(%s,false)" % qualified_field
        return {
            'field': split[0],
            'groupby': gb,
            'type': field_type, 
            'display_format': display_formats[gb_function or 'month'] if temporal else None,
            'interval': time_intervals[gb_function or 'month'] if temporal else None,                
            'tz_convert': tz_convert,
            'qualified_field': qualified_field
        }
def _read_group_prepare_data(self, key, value, groupby_dict, context):
"""
Helper method to sanitize the data received by read_group. The None
values are converted to False, and the date/datetime are formatted,
and corrected according to the timezones.
"""
value = False if value is None else value
gb = groupby_dict.get(key)
if gb and gb['type'] in ('date', 'datetime') and value:
if isinstance(value, basestring):
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
value = datetime.datetime.strptime(value, dt_format)
if gb['tz_convert']:
value = pytz.timezone(context['tz']).localize(value)
return value
def _read_group_get_domain(self, groupby, value):
"""
Helper method to construct the domain corresponding to a groupby and
a given value. This is mostly relevant for date/datetime.
"""
if groupby['type'] in ('date', 'datetime') and value:
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
domain_dt_begin = value
domain_dt_end = value + groupby['interval']
if groupby['tz_convert']:
domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
domain_dt_end = domain_dt_end.astimezone(pytz.utc)
return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
(groupby['field'], '<', domain_dt_end.strftime(dt_format))]
if groupby['type'] == 'many2one' and value:
value = value[0]
return [(groupby['field'], '=', value)]
    def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
        """
        Helper method to format the data contained in the dictionary data by 
        adding the domain corresponding to its values, the groupbys in the 
        context and by properly formatting the date/datetime values.

        Mutates ``data`` in place (drops the internal 'id' key) and returns it.
        """
        domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
        for k,v in data.iteritems():
            gb = groupby_dict.get(k)
            if gb and gb['type'] in ('date', 'datetime') and v:
                # render the group label in the user's language via babel
                data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))

        data['__domain'] = domain_group + domain 
        if len(groupby) - len(annotated_groupbys) >= 1:
            # groupbys not processed in this pass are forwarded for lazy drill-down
            data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
        del data['id']
        return data
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
"""
Get the list of records in list view grouped by the given ``groupby`` fields
:param cr: database cursor
:param uid: current user id
:param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
:param list fields: list of fields present in the list view specified on the object
:param list groupby: list of groupby descriptions by which the records will be grouped.
A groupby description is either a field (then it will be grouped by that field)
or a string 'field:groupby_function'. Right now, the only functions supported
are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
date/datetime fields.
:param int offset: optional number of records to skip
:param int limit: optional max number of records to return
:param dict context: context arguments, like lang, time zone.
:param list orderby: optional ``order by`` specification, for
overriding the natural sort ordering of the
groups, see also :py:meth:`~osv.osv.osv.search`
(supported only for many2one fields currently)
:param bool lazy: if true, the results are only grouped by the first groupby and the
remaining groupbys are put in the __context key. If false, all the groupbys are
done in one call.
:return: list of dictionaries(one dictionary for each record) containing:
* the values of fields grouped by the fields in ``groupby`` argument
* __domain: list of tuples specifying the search criteria
* __context: dictionary with argument like ``groupby``
:rtype: [{'field_name_1': value, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
if context is None:
context = {}
self.check_access_rights(cr, uid, 'read')
query = self._where_calc(cr, uid, domain, context=context)
fields = fields or self._columns.keys()
groupby = [groupby] if isinstance(groupby, basestring) else groupby
groupby_list = groupby[:1] if lazy else groupby
annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
for gb in groupby_list]
groupby_fields = [g['field'] for g in annotated_groupbys]
order = orderby or ','.join([g for g in groupby_list])
groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
self._apply_ir_rules(cr, uid, query, 'read', context=context)
for gb in groupby_fields:
assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
if not (gb in self._all_columns):
# Don't allow arbitrary values, as this would be a SQL injection vector!
raise except_orm(_('Invalid group_by'),
_('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
aggregated_fields = [
f for f in fields
if f not in ('id', 'sequence')
if f not in groupby_fields
if f in self._all_columns
if self._all_columns[f].column._type in ('integer', 'float')
if getattr(self._all_columns[f].column, '_classic_write')]
field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
for gb in annotated_groupbys:
select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
from_clause, where_clause, where_clause_params = query.get_sql()
if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
else:
count_field = '_'
count_field += '_count'
prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
query = """
SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
FROM %(from)s
%(where)s
%(groupby)s
%(orderby)s
%(limit)s
%(offset)s
""" % {
'table': self._table,
'count_field': count_field,
'extra_fields': prefix_terms(',', select_terms),
'from': from_clause,
'where': prefix_term('WHERE', where_clause),
'groupby': prefix_terms('GROUP BY', groupby_terms),
'orderby': prefix_terms('ORDER BY', orderby_terms),
'limit': prefix_term('LIMIT', int(limit) if limit else None),
'offset': prefix_term('OFFSET', int(offset) if limit else None),
}
cr.execute(query, where_clause_params)
fetched_data = cr.dictfetchall()
if not groupby_fields:
return fetched_data
many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
if many2onefields:
data_ids = [r['id'] for r in fetched_data]
many2onefields = list(set(many2onefields))
data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
for d in fetched_data:
d.update(data_dict[d['id']])
data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
if lazy and groupby_fields[0] in self._group_by_full:
# Right now, read_group only fill results in lazy mode (by default).
# If you need to have the empty groups in 'eager' mode, then the
# method _read_group_fill_results need to be completely reimplemented
# in a sane way
result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
aggregated_fields, count_field, result, read_group_order=order,
context=context)
return result
def _inherits_join_add(self, current_model, parent_model_name, query):
"""
Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
:param current_model: current model object
:param parent_model_name: name of the parent model for which the clauses should be added
:param query: query object on which the JOIN should be added
"""
inherits_field = current_model._inherits[parent_model_name]
parent_model = self.pool[parent_model_name]
parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
return parent_alias
def _inherits_join_calc(self, field, query):
"""
Adds missing table select and join clause(s) to ``query`` for reaching
the field coming from an '_inherits' parent table (no duplicates).
:param field: name of inherited field to reach
:param query: query object on which the JOIN should be added
:return: qualified name of field, to be used in SELECT clause
"""
current_table = self
parent_alias = '"%s"' % current_table._table
while field in current_table._inherit_fields and not field in current_table._columns:
parent_model_name = current_table._inherit_fields[field][0]
parent_table = self.pool[parent_model_name]
parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
current_table = parent_table
return '%s."%s"' % (parent_alias, field)
    def _parent_store_compute(self, cr):
        """ Recompute the nested-set columns (parent_left/parent_right) for
        the whole table, walking the hierarchy depth-first from the root
        records. No-op unless the model declares _parent_store.

        NOTE(review): issues one SELECT per node plus one UPDATE per node;
        expensive on large hierarchies.
        """
        if not self._parent_store:
            return
        _logger.info('Computing parent left and right for table %s...', self._table)
        def browse_rec(root, pos=0):
            # TODO: set order
            # assign [pos, pos2] to `root` so that all descendants fall inside
            where = self._parent_name+'='+str(root)
            if not root:
                where = self._parent_name+' IS NULL'
            if self._parent_order:
                where += ' order by '+self._parent_order
            cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
            pos2 = pos + 1
            for id in cr.fetchall():
                pos2 = browse_rec(id[0], pos2)
            cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
            return pos2 + 1
        query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
        if self._parent_order:
            query += ' order by ' + self._parent_order
        pos = 0
        cr.execute(query)
        for (root,) in cr.fetchall():
            pos = browse_rec(root, pos)
        # the columns were rewritten behind the ORM's back: drop cached values
        self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
        return True
def _update_store(self, cr, f, k):
_logger.info("storing computed values of fields.function '%s'", k)
ss = self._columns[k]._symbol_set
update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
cr.execute('select id from '+self._table)
ids_lst = map(lambda x: x[0], cr.fetchall())
while ids_lst:
iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
for key, val in res.items():
if f._multi:
val = val[k]
# if val is a many2one, just write the ID
if type(val) == tuple:
val = val[0]
if val is not False:
cr.execute(update_query, (ss[1](val), key))
def _check_selection_field_value(self, cr, uid, field, value, context=None):
"""Raise except_orm if value is not among the valid values for the selection field"""
if self._columns[field]._type == 'reference':
val_model, val_id_str = value.split(',', 1)
val_id = False
try:
val_id = long(val_id_str)
except ValueError:
pass
if not val_id:
raise except_orm(_('ValidateError'),
_('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
val = val_model
else:
val = value
if isinstance(self._columns[field].selection, (tuple, list)):
if val in dict(self._columns[field].selection):
return
elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
return
raise except_orm(_('ValidateError'),
_('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
def _check_removed_columns(self, cr, log=False):
# iterate on the database columns to drop the NOT NULL constraints
# of fields which were required but have been removed (or will be added by another module)
columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
columns += MAGIC_COLUMNS
cr.execute("SELECT a.attname, a.attnotnull"
" FROM pg_class c, pg_attribute a"
" WHERE c.relname=%s"
" AND c.oid=a.attrelid"
" AND a.attisdropped=%s"
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
" AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
for column in cr.dictfetchall():
if log:
_logger.debug("column %s is in the table %s but not in the corresponding object %s",
column['attname'], self._table, self._name)
if column['attnotnull']:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
    def _save_constraint(self, cr, constraint_name, type):
        """
        Record the creation of a constraint for this model, to make it possible
        to delete it later when the module is uninstalled. Type can be either
        'f' or 'u' depending on the constraint being a foreign key or not.

        Idempotent: nothing is inserted if the (module, constraint) pair is
        already registered in ir_model_constraint.
        """
        if not self._module:
            # no need to save constraints for custom models as they're not part
            # of any module
            return
        assert type in ('f', 'u')
        cr.execute("""
            SELECT 1 FROM ir_model_constraint, ir_module_module
            WHERE ir_model_constraint.module=ir_module_module.id
                AND ir_model_constraint.name=%s
                AND ir_module_module.name=%s
            """, (constraint_name, self._module))
        if not cr.rowcount:
            cr.execute("""
                INSERT INTO ir_model_constraint
                    (name, date_init, date_update, module, model, type)
                VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                    (SELECT id FROM ir_module_module WHERE name=%s),
                    (SELECT id FROM ir_model WHERE model=%s), %s)""",
                (constraint_name, self._module, self._name, type))
    def _save_relation_table(self, cr, relation_table):
        """
        Record the creation of a many2many for this model, to make it possible
        to delete it later when the module is uninstalled.

        Idempotent: nothing is inserted if the (module, relation) pair is
        already registered in ir_model_relation.
        """
        cr.execute("""
            SELECT 1 FROM ir_model_relation, ir_module_module
            WHERE ir_model_relation.module=ir_module_module.id
                AND ir_model_relation.name=%s
                AND ir_module_module.name=%s
            """, (relation_table, self._module))
        if not cr.rowcount:
            cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
                                 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                    (SELECT id FROM ir_module_module WHERE name=%s),
                    (SELECT id FROM ir_model WHERE model=%s))""",
                       (relation_table, self._module, self._name))
            self.invalidate_cache(cr, SUPERUSER_ID)
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
assert self.is_transient() or not dest_model.is_transient(), \
'Many2One relationships from non-transient Model to TransientModel are forbidden'
if self.is_transient() and not dest_model.is_transient():
# TransientModel relationships to regular Models are annoying
# usually because they could block deletion due to the FKs.
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or 'cascade'
fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
def _drop_constraint(self, cr, source_table, constraint_name):
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
    def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
        """ Reconcile the database FK constraint(s) on a many2one column with
        the current field definition: keep a matching one, drop stale or
        duplicate ones, and queue re-creation when needed (see _auto_end).
        """
        # Find FK constraint(s) currently established for the m2o field,
        # and see whether they are stale or not
        cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
                             cl2.relname as foreign_table
                      FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                           pg_attribute as att1, pg_attribute as att2
                      WHERE con.conrelid = cl1.oid
                        AND cl1.relname = %s
                        AND con.confrelid = cl2.oid
                        AND array_lower(con.conkey, 1) = 1
                        AND con.conkey[1] = att1.attnum
                        AND att1.attrelid = cl1.oid
                        AND att1.attname = %s
                        AND array_lower(con.confkey, 1) = 1
                        AND con.confkey[1] = att2.attnum
                        AND att2.attrelid = cl2.oid
                        AND att2.attname = %s
                        AND con.contype = 'f'""", (source_table, source_field, 'id'))
        constraints = cr.dictfetchall()
        if constraints:
            if len(constraints) == 1:
                # Is it the right constraint?
                cons, = constraints
                if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
                    or cons['foreign_table'] != dest_model._table:
                    # Wrong FK: drop it and recreate
                    _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
                                  source_table, cons['constraint_name'])
                    self._drop_constraint(cr, source_table, cons['constraint_name'])
                else:
                    # it's all good, nothing to do!
                    return
            else:
                # Multiple FKs found for the same field, drop them all, and re-create
                for cons in constraints:
                    _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
                                  source_table, cons['constraint_name'])
                    self._drop_constraint(cr, source_table, cons['constraint_name'])

        # (re-)create the FK
        self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
    def _set_default_value_on_column(self, cr, column_name, context=None):
        """ Backfill rows where ``column_name`` is NULL with the field's
        default value, looking first at the old-style _defaults mapping and
        falling back to the new-style field default.
        """
        # ideally should use add_default_value but fails
        # due to ir.values not being ready

        # get old-style default
        default = self._defaults.get(column_name)
        if callable(default):
            default = default(self, cr, SUPERUSER_ID, context)

        # get new_style default if no old-style
        if default is None:
            record = self.new(cr, SUPERUSER_ID, context=context)
            field = self._fields[column_name]
            field.determine_default(record)
            defaults = dict(record._cache)
            if column_name in defaults:
                default = field.convert_to_write(defaults[column_name])

        column = self._columns[column_name]
        ss = column._symbol_set
        db_default = ss[1](default)
        # Write default if non-NULL, except for booleans for which False means
        # the same as NULL - this saves us an expensive query on large tables.
        write_default = (db_default is not None if column._type != 'boolean'
                            else db_default)
        if write_default:
            _logger.debug("Table '%s': setting default value of new column %s to %r",
                          self._table, column_name, default)
            query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
                self._table, column_name, ss[0], column_name)
            cr.execute(query, (db_default,))
            # this is a disgrace
            cr.commit()
    def _auto_init(self, cr, context=None):
        """

        Call _field_create and, unless _auto is False:

        - create the corresponding table in database for the model,
        - possibly add the parent columns in database,
        - possibly add the columns 'create_uid', 'create_date', 'write_uid',
          'write_date' in database if _log_access is True (the default),
        - report on database columns no more existing in _columns,
        - remove no more existing not null constraints,
        - alter existing database columns to match _columns,
        - create database tables to match _columns,
        - add database indices to match _columns,
        - save in self._foreign_keys a list a foreign keys to create (see
          _auto_end).

        :return: list of (order, callable, args) hooks to run once every
                 model has been initialized (e.g. stored-field recomputations)
        """
        self._foreign_keys = set()
        raise_on_invalid_object_name(self._name)
        if context is None:
            context = {}
        store_compute = False
        stored_fields = []              # new-style stored fields with compute
        todo_end = []
        update_custom_fields = context.get('update_custom_fields', False)
        self._field_create(cr, context=context)
        create = not self._table_exist(cr)
        if self._auto:

            if create:
                self._create_table(cr)
                has_rows = False
            else:
                # has_rows decides whether defaults must be backfilled when a
                # required column is added on an existing table
                cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
                has_rows = cr.rowcount

            cr.commit()
            if self._parent_store:
                if not self._parent_columns_exist(cr):
                    self._create_parent_columns(cr)
                    store_compute = True

            self._check_removed_columns(cr, log=False)

            # iterate on the "object columns"
            column_data = self._select_column_data(cr)

            for k, f in self._columns.iteritems():
                if k == 'id': # FIXME: maybe id should be a regular column?
                    continue
                # Don't update custom (also called manual) fields
                if f.manual and not update_custom_fields:
                    continue

                if isinstance(f, fields.one2many):
                    self._o2m_raise_on_missing_reference(cr, f)

                elif isinstance(f, fields.many2many):
                    self._m2m_raise_or_create_relation(cr, f)

                else:
                    res = column_data.get(k)

                    # The field is not found as-is in database, try if it
                    # exists with an old name.
                    if not res and hasattr(f, 'oldname'):
                        res = column_data.get(f.oldname)
                        if res:
                            cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                            res['attname'] = k
                            column_data[k] = res
                            _schema.debug("Table '%s': renamed column '%s' to '%s'",
                                          self._table, f.oldname, k)

                    # The field already exists in database. Possibly
                    # change its type, rename it, drop it or change its
                    # constraints.
                    if res:
                        f_pg_type = res['typname']
                        f_pg_size = res['size']
                        f_pg_notnull = res['attnotnull']
                        if isinstance(f, fields.function) and not f.store and\
                                not getattr(f, 'nodrop', False):
                            # column converted to a non-stored function field:
                            # its database column is obsolete
                            _logger.info('column %s (%s) converted to a function, removed from table %s',
                                         k, f.string, self._table)
                            cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                            cr.commit()
                            _schema.debug("Table '%s': dropped column '%s' with cascade",
                                          self._table, k)
                            f_obj_type = None
                        else:
                            f_obj_type = get_pg_type(f) and get_pg_type(f)[0]

                        if f_obj_type:
                            ok = False
                            # (db_type, field_type, new_sql_type, cast_suffix):
                            # known lossless in-place conversions
                            casts = [
                                ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
                                ('varchar', 'text', 'TEXT', ''),
                                ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                                ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
                                ('timestamp', 'date', 'date', '::date'),
                                ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                                ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                            ]
                            # varchar widened (or made unlimited): enlarge in place when possible
                            if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
                                try:
                                    with cr.savepoint():
                                        cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
                                except psycopg2.NotSupportedError:
                                    # In place alter table cannot be done because a view is depending of this field.
                                    # Do a manual copy. This will drop the view (that will be recreated later)
                                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
                                    cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
                                    cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                                cr.commit()
                                _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
                                              self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
                            for c in casts:
                                if (f_pg_type==c[0]) and (f._type==c[1]):
                                    if f_pg_type != f_obj_type:
                                        # convert through a temporary column so existing data is kept
                                        ok = True
                                        cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
                                        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
                                        cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
                                        cr.execute('ALTER TABLE "%s" DROP COLUMN  __temp_type_cast CASCADE' % (self._table,))
                                        cr.commit()
                                        _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                                      self._table, k, c[0], c[1])
                                    break

                            if f_pg_type != f_obj_type:
                                if not ok:
                                    # incompatible type change with no known cast:
                                    # keep the old data in a renamed '<k>_moved<i>' column
                                    i = 0
                                    while True:
                                        newname = k + '_moved' + str(i)
                                        cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
                                            "WHERE c.relname=%s " \
                                            "AND a.attname=%s " \
                                            "AND c.oid=a.attrelid ", (self._table, newname))
                                        if not cr.fetchone()[0]:
                                            break
                                        i += 1
                                    if f_pg_notnull:
                                        cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
                                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                                    cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                                    _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
                                                  self._table, k, f_pg_type, f._type, newname)

                            # if the field is required and hasn't got a NOT NULL constraint
                            if f.required and f_pg_notnull == 0:
                                if has_rows:
                                    self._set_default_value_on_column(cr, k, context=context)
                                # add the NOT NULL constraint
                                try:
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                    cr.commit()
                                    _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                                                  self._table, k)
                                except Exception:
                                    msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                                        "If you want to have it, you should update the records and execute manually:\n"\
                                        "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                    _schema.warning(msg, self._table, k, self._table, k)
                                cr.commit()
                            elif not f.required and f_pg_notnull == 1:
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                                cr.commit()
                                _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                                              self._table, k)
                            # Verify index
                            indexname = '%s_%s_index' % (self._table, k)
                            cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
                            res2 = cr.dictfetchall()
                            if not res2 and f.select:
                                cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                                cr.commit()
                                if f._type == 'text':
                                    # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
                                    msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
                                        "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
                                        " because there is a length limit for indexable btree values!\n"\
                                        "Use a search view instead if you simply want to make the field searchable."
                                    _schema.warning(msg, self._table, f._type, k)
                            if res2 and not f.select:
                                cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
                                cr.commit()
                                msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
                                _schema.debug(msg, self._table, k, f._type)

                            if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                                dest_model = self.pool[f._obj]
                                if dest_model._auto and dest_model._table != 'ir_actions':
                                    self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)

                    # The field doesn't exist in database. Create it if necessary.
                    else:
                        if not isinstance(f, fields.function) or f.store:
                            # add the missing field
                            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                            cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                            _schema.debug("Table '%s': added column '%s' with definition=%s",
                                          self._table, k, get_pg_type(f)[1])

                            # initialize it
                            if has_rows:
                                self._set_default_value_on_column(cr, k, context=context)

                            # remember the functions to call for the stored fields
                            if isinstance(f, fields.function):
                                order = 10
                                if f.store is not True: # i.e. if f.store is a dict
                                    order = f.store[f.store.keys()[0]][2]
                                todo_end.append((order, self._update_store, (f, k)))

                            # remember new-style stored fields with compute method
                            if k in self._fields and self._fields[k].depends:
                                stored_fields.append(self._fields[k])

                            # and add constraints if needed
                            if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                                if f._obj not in self.pool:
                                    raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
                                dest_model = self.pool[f._obj]
                                ref = dest_model._table
                                # ir_actions is inherited so foreign key doesn't work on it
                                if dest_model._auto and ref != 'ir_actions':
                                    self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
                            if f.select:
                                cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                            if f.required:
                                try:
                                    cr.commit()
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
                                    _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                                  self._table, k)
                                except Exception:
                                    msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                        "Try to re-run: openerp-server --update=module\n"\
                                        "If it doesn't work, update records and execute manually:\n"\
                                        "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                    _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
                                cr.commit()

        else:
            # no automatic table management: just detect whether the backing
            # table/view already exists, to know if the init SQL must run
            cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
            create = not bool(cr.fetchone())

        cr.commit()     # start a new transaction

        if self._auto:
            self._add_sql_constraints(cr)

        if create:
            self._execute_sql(cr)

        if store_compute:
            self._parent_store_compute(cr)
            cr.commit()

        if stored_fields:
            # trigger computation of new-style stored fields with a compute
            def func(cr):
                _logger.info("Storing computed values of %s fields %s",
                    self._name, ', '.join(sorted(f.name for f in stored_fields)))
                recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
                recs = recs.search([])
                if recs:
                    map(recs._recompute_todo, stored_fields)
                    recs.recompute()

            todo_end.append((1000, func, ()))

        return todo_end
def _auto_end(self, cr, context=None):
""" Create the foreign keys recorded by _auto_init. """
for t, k, r, d in self._foreign_keys:
cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
cr.commit()
del self._foreign_keys
def _table_exist(self, cr):
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
return cr.rowcount
def _create_table(self, cr):
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
_schema.debug("Table '%s': created", self._table)
    def _parent_columns_exist(self, cr):
        """ Return a truthy row count when the ``parent_left`` column (used by
            the parent_store hierarchy optimization) already exists on the
            model's table.
        """
        cr.execute("""SELECT c.relname
            FROM pg_class c, pg_attribute a
            WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
            """, (self._table, 'parent_left'))
        return cr.rowcount
    def _create_parent_columns(self, cr):
        """ Add the ``parent_left``/``parent_right`` integer columns used by
            the parent_store hierarchy optimization, and log errors when the
            model definition does not declare them with the expected options
            (indexed columns, cascading/restricting parent field).
        """
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
        if 'parent_left' not in self._columns:
            # column created at SQL level only; the model should declare it too
            _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
                          self._table)
            _schema.debug("Table '%s': added column '%s' with definition=%s",
                          self._table, 'parent_left', 'INTEGER')
        elif not self._columns['parent_left'].select:
            _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
                          self._table)
        if 'parent_right' not in self._columns:
            _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
                          self._table)
            _schema.debug("Table '%s': added column '%s' with definition=%s",
                          self._table, 'parent_right', 'INTEGER')
        elif not self._columns['parent_right'].select:
            _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
                          self._table)
        if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
            _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
                          self._parent_name, self._name)
        cr.commit()
def _select_column_data(self, cr):
# attlen is the number of bytes necessary to represent the type when
# the type has a fixed size. If the type has a varying size attlen is
# -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
"FROM pg_class c,pg_attribute a,pg_type t " \
"WHERE c.relname=%s " \
"AND c.oid=a.attrelid " \
"AND a.atttypid=t.oid", (self._table,))
return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
def _o2m_raise_on_missing_reference(self, cr, f):
# TODO this check should be a method on fields.one2many.
if f._obj in self.pool:
other = self.pool[f._obj]
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
    def _m2m_raise_or_create_relation(self, cr, f):
        """ Ensure the relation table behind the many2many field `f` exists,
            creating it with its unique constraint, foreign keys (unless the
            endpoints are SQL views) and indexes when missing.
            :raise except_orm: if the comodel of `f` is not in the registry
        """
        m2m_tbl, col1, col2 = f._sql_names(self)
        # do not create relations for custom fields as they do not belong to a module
        # they will be automatically removed when dropping the corresponding ir.model.field
        # table name for custom relation all starts with x_, see __init__
        if not m2m_tbl.startswith('x_'):
            self._save_relation_table(cr, m2m_tbl)
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
        if not cr.dictfetchall():
            if f._obj not in self.pool:
                raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
            dest_model = self.pool[f._obj]
            ref = dest_model._table
            cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
            # create foreign key references with ondelete=cascade, unless the targets are SQL views
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
            # index both link columns for fast joins in either direction
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
            cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
            cr.commit()
            _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
"""
Modify this model's database table constraints so they match the one in
_sql_constraints.
"""
def unify_cons_text(txt):
return txt.lower().replace(', ',',').replace(' (','(')
for (key, con, _) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
self._save_constraint(cr, conname, 'u')
cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
existing_constraints = cr.dictfetchall()
sql_actions = {
'drop': {
'execute': False,
'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
self._table, conname, con),
'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
'order': 1,
},
'add': {
'execute': False,
'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
self._table, con),
'order': 2,
},
}
if not existing_constraints:
# constraint does not exists:
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
# constraint exists but its definition has changed:
sql_actions['drop']['execute'] = True
sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
# we need to add the constraint:
sql_actions = [item for item in sql_actions.values()]
sql_actions.sort(key=lambda x: x['order'])
for sql_action in [action for action in sql_actions if action['execute']]:
try:
cr.execute(sql_action['query'])
cr.commit()
_schema.debug(sql_action['msg_ok'])
except:
_schema.warning(sql_action['msg_err'])
cr.rollback()
def _execute_sql(self, cr):
""" Execute the SQL code from the _sql attribute (if any)."""
if hasattr(self, "_sql"):
for line in self._sql.split(';'):
line2 = line.replace('\n', '').strip()
if line2:
cr.execute(line2)
cr.commit()
    #
    # Update objects that use this one to update their _inherits fields
    #
@classmethod
def _inherits_reload_src(cls):
""" Recompute the _inherit_fields mapping on each _inherits'd child model."""
for model in cls.pool.values():
if cls._name in model._inherits:
model._inherits_reload()
    @classmethod
    def _inherits_reload(cls):
        """ Recompute the _inherit_fields mapping.
        This will also call itself on each inherits'd child model.
        """
        res = {}
        for table in cls._inherits:
            other = cls.pool[table]
            # map each column of the parent model (direct or itself inherited)
            # to (parent model, link field, column, original declaring model)
            for col in other._columns.keys():
                res[col] = (table, cls._inherits[table], other._columns[col], table)
            for col in other._inherit_fields.keys():
                res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
        cls._inherit_fields = res
        cls._all_columns = cls._get_column_infos()
        # interface columns with new-style fields
        for attr, column in cls._columns.items():
            if attr not in cls._fields:
                cls._add_field(attr, column.to_field())
        # interface inherited fields with new-style fields (note that the
        # reverse order is for being consistent with _all_columns above)
        for parent_model, parent_field in reversed(cls._inherits.items()):
            for attr, field in cls.pool[parent_model]._fields.iteritems():
                if attr not in cls._fields:
                    cls._add_field(attr, field.copy(
                        inherited=True,
                        related=(parent_field, attr),
                        related_sudo=False,
                    ))
        # propagate the reload to models that _inherits from this one
        cls._inherits_reload_src()
@classmethod
def _get_column_infos(cls):
"""Returns a dict mapping all fields names (direct fields and
inherited field via _inherits) to a ``column_info`` struct
giving detailed columns """
result = {}
# do not inverse for loops, since local fields may hide inherited ones!
for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
result[k] = fields.column_info(k, col, parent, m2o, original_parent)
for k, col in cls._columns.iteritems():
result[k] = fields.column_info(k, col)
return result
    @classmethod
    def _inherits_check(cls):
        """ Verify that each _inherits reference field exists, is required and
            cascades (or restricts) on delete, fixing or creating the field
            definition otherwise; also register fields declared with
            ``delegate=True`` into cls._inherits.
        """
        for table, field_name in cls._inherits.items():
            if field_name not in cls._columns:
                # auto-create the missing link field
                _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
                cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
                                                           required=True, ondelete="cascade")
            elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
                # force a safe definition on a badly-declared link field
                _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
                cls._columns[field_name].required = True
                cls._columns[field_name].ondelete = "cascade"
        # reflect fields with delegate=True in dictionary cls._inherits
        for field in cls._fields.itervalues():
            if field.type == 'many2one' and not field.related and field.delegate:
                if not field.required:
                    _logger.warning("Field %s with delegate=True must be required.", field)
                    field.required = True
                if field.ondelete.lower() not in ('cascade', 'restrict'):
                    field.ondelete = 'cascade'
                cls._inherits[field.comodel_name] = field.name
@api.model
def _prepare_setup_fields(self):
""" Prepare the setup of fields once the models have been loaded. """
for field in self._fields.itervalues():
field.reset()
@api.model
def _setup_fields(self, partial=False):
""" Setup the fields (dependency triggers, etc). """
for field in self._fields.itervalues():
if partial and field.manual and \
field.relational and \
(field.comodel_name not in self.pool or \
(field.type == 'one2many' and field.inverse_name not in self.pool[field.comodel_name]._fields)):
# do not set up manual fields that refer to unknown models
continue
field.setup(self.env)
# group fields by compute to determine field.computed_fields
fields_by_compute = defaultdict(list)
for field in self._fields.itervalues():
if field.compute:
field.computed_fields = fields_by_compute[field.compute]
field.computed_fields.append(field)
else:
field.computed_fields = []
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
""" fields_get([fields])
Return the definition of each field.
The returned value is a dictionary (indiced by field name) of
dictionaries. The _inherits'd fields are included. The string, help,
and selection (if present) attributes are translated.
:param cr: database cursor
:param user: current user id
:param allfields: list of fields
:param context: context arguments, like lang, time zone
:return: dictionary of field dictionaries, each one describing a field of the business object
:raise AccessError: * if user has no create/write rights on the requested object
"""
recs = self.browse(cr, user, [], context)
res = {}
for fname, field in self._fields.iteritems():
if allfields and fname not in allfields:
continue
if not field.setup_done:
continue
if field.groups and not recs.user_has_groups(field.groups):
continue
res[fname] = field.get_description(recs.env)
# if user cannot create or modify records, make all fields readonly
has_access = functools.partial(recs.check_access_rights, raise_exception=False)
if not (has_access('write') or has_access('create')):
for description in res.itervalues():
description['readonly'] = True
description['states'] = {}
return res
def get_empty_list_help(self, cr, user, help, context=None):
""" Generic method giving the help message displayed when having
no result to display in a list or kanban view. By default it returns
the help given in parameter that is generally the help message
defined in the action.
"""
return help
    def check_field_access_rights(self, cr, user, operation, fields, context=None):
        """
        Check the user access rights on the given fields. This raises Access
        Denied if the user does not have the rights. Otherwise it returns the
        fields (as is if the fields is not falsy, or the readable/writable
        fields if fields is falsy).
        :param operation: the operation being checked (used in warnings/errors)
        :param fields: list of field names to check, or a falsy value to get
                       the list of all accessible fields instead
        :raise AccessError: if `fields` contains a field restricted to groups
                            the user does not belong to
        """
        # the superuser bypasses field-level group restrictions
        if user == SUPERUSER_ID:
            return fields or list(self._fields)
        def valid(fname):
            """ determine whether user has access to field `fname` """
            field = self._fields.get(fname)
            if field and field.groups:
                return self.user_has_groups(cr, user, groups=field.groups, context=context)
            else:
                return True
        if not fields:
            # no explicit list given: return every field the user may access
            fields = filter(valid, self._fields)
        else:
            invalid_fields = set(filter(lambda name: not valid(name), fields))
            if invalid_fields:
                _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
                    operation, user, self._name, ', '.join(invalid_fields))
                raise AccessError(
                    _('The requested operation cannot be completed due to security restrictions. '
                      'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                    (self._description, operation))
        return fields
# add explicit old-style implementation to read()
@api.v7
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
records = self.browse(cr, user, ids, context)
result = BaseModel.read(records, fields, load=load)
return result if isinstance(ids, list) else (bool(result) and result[0])
    # new-style implementation of read()
    @api.v8
    def read(self, fields=None, load='_classic_read'):
        """ read([fields])
        Reads the requested fields for the records in `self`, low-level/RPC
        method. In Python code, prefer :meth:`~.browse`.
        :param fields: list of field names to return (default is all fields)
        :param load: loading mode; values other than '_classic_read' disable
            the name_get-style conversion flag passed to ``convert_to_read``
        :return: a list of dictionaries mapping field names to their values,
            with one dictionary per record
        :raise AccessError: if user has no read rights on some of the given
            records
        """
        # check access rights
        self.check_access_rights('read')
        fields = self.check_field_access_rights('read', fields)
        # split fields into stored and computed fields
        stored, computed = [], []
        for name in fields:
            if name in self._columns:
                stored.append(name)
            elif name in self._fields:
                computed.append(name)
            else:
                _logger.warning("%s.read() with unknown field '%s'", self._name, name)
        # fetch stored fields from the database to the cache
        self._read_from_database(stored)
        # retrieve results from records; this takes values from the cache and
        # computes remaining fields
        result = []
        name_fields = [(name, self._fields[name]) for name in (stored + computed)]
        use_name_get = (load == '_classic_read')
        for record in self:
            try:
                values = {'id': record.id}
                for name, field in name_fields:
                    values[name] = field.convert_to_read(record[name], use_name_get)
                result.append(values)
            except MissingError:
                # records that no longer exist are silently skipped
                pass
        return result
    @api.multi
    def _prefetch_field(self, field):
        """ Read from the database in order to fetch `field` (:class:`Field`
            instance) for `self` in cache.
            Other records of the same model missing this field in cache, and
            other prefetchable fields, are opportunistically fetched in the
            same query.
        """
        # fetch the records of this model without field_name in their cache
        records = self._in_cache_without(field)
        if len(records) > PREFETCH_MAX:
            # cap the batch size, but always keep self in the batch
            records = records[:PREFETCH_MAX] | self
        # by default, simply fetch field
        fnames = {field.name}
        if self.env.in_draft:
            # we may be doing an onchange, do not prefetch other fields
            pass
        elif self.env.field_todo(field):
            # field must be recomputed, do not prefetch records to recompute
            records -= self.env.field_todo(field)
        elif not self._context.get('prefetch_fields', True):
            # do not prefetch other fields
            pass
        elif self._columns[field.name]._prefetch:
            # here we can optimize: prefetch all classic and many2one fields
            fnames = set(fname
                for fname, fcolumn in self._columns.iteritems()
                if fcolumn._prefetch
                if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
            )
        # fetch records with read()
        assert self in records and field.name in fnames
        result = []
        try:
            result = records.read(list(fnames), load='_classic_write')
        except AccessError:
            # best-effort: a FailedValue is stored below if the field is
            # still missing from the cache
            pass
        # check the cache, and update it if necessary
        if not self._cache.contains(field):
            for values in result:
                record = self.browse(values.pop('id'))
                record._cache.update(record._convert_to_cache(values, validate=False))
            if not self._cache.contains(field):
                e = AccessError("No value found for %s.%s" % (self, field.name))
                self._cache[field] = FailedValue(e)
    @api.multi
    def _read_from_database(self, field_names):
        """ Read the given fields of the records in `self` from the database,
            and store them in cache. Access errors are also stored in cache.
            Classic (column-backed) fields are fetched with a single SQL query
            filtered by the applicable ir.rule domains; non-classic (function)
            fields are then computed via their column ``get()``.
        """
        env = self.env
        cr, user, context = env.args
        # FIXME: The query construction needs to be rewritten using the internal Query
        # object, as in search(), to avoid ambiguous column references when
        # reading/sorting on a table that is auto_joined to another table with
        # common columns (e.g. the magical columns)
        # Construct a clause for the security rules.
        # 'tables' holds the list of tables necessary for the SELECT, including
        # the ir.rule clauses, and contains at least self._table.
        rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
        # determine the fields that are stored as columns in self._table
        fields_pre = [f for f in field_names if self._columns[f]._classic_write]
        # we need fully-qualified column names in case len(tables) > 1
        def qualify(f):
            if isinstance(self._columns.get(f), fields.binary) and \
                    context.get('bin_size_%s' % f, context.get('bin_size')):
                # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
                return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
            else:
                return '%s."%s"' % (self._table, f)
        qual_names = map(qualify, set(fields_pre + ['id']))
        query = """ SELECT %(qual_names)s FROM %(tables)s
                    WHERE %(table)s.id IN %%s AND (%(extra)s)
                    ORDER BY %(order)s
                """ % {
                    'qual_names': ",".join(qual_names),
                    'tables': ",".join(tables),
                    'table': self._table,
                    'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
                    'order': self._parent_order or self._order,
                }
        result = []
        for sub_ids in cr.split_for_in_conditions(self.ids):
            cr.execute(query, [tuple(sub_ids)] + rule_params)
            result.extend(cr.dictfetchall())
        ids = [vals['id'] for vals in result]
        if ids:
            # translate the fields if necessary
            if context.get('lang'):
                ir_translation = env['ir.translation']
                for f in fields_pre:
                    if self._columns[f].translate:
                        #TODO: optimize out of this loop
                        res_trans = ir_translation._get_ids(
                            '%s,%s' % (self._name, f), 'model', context['lang'], ids)
                        for vals in result:
                            vals[f] = res_trans.get(vals['id'], False) or vals[f]
            # apply the symbol_get functions of the fields we just read
            for f in fields_pre:
                symbol_get = self._columns[f]._symbol_get
                if symbol_get:
                    for vals in result:
                        vals[f] = symbol_get(vals[f])
            # store result in cache for POST fields
            for vals in result:
                record = self.browse(vals['id'])
                record._cache.update(record._convert_to_cache(vals, validate=False))
            # determine the fields that must be processed now
            fields_post = [f for f in field_names if not self._columns[f]._classic_write]
            # Compute POST fields, grouped by multi
            by_multi = defaultdict(list)
            for f in fields_post:
                by_multi[self._columns[f]._multi].append(f)
            for multi, fs in by_multi.iteritems():
                if multi:
                    # one get() call computes all fields sharing the same multi
                    res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
                    assert res2 is not None, \
                        'The function field "%s" on the "%s" model returned None\n' \
                        '(a dictionary was expected).' % (fs[0], self._name)
                    for vals in result:
                        # TOCHECK : why got string instend of dict in python2.6
                        # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
                        multi_fields = res2.get(vals['id'], {})
                        if multi_fields:
                            for f in fs:
                                vals[f] = multi_fields.get(f, [])
                else:
                    for f in fs:
                        res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
                        for vals in result:
                            if res2:
                                vals[f] = res2[vals['id']]
                            else:
                                vals[f] = []
        # Warn about deprecated fields now that fields_pre and fields_post are computed
        for f in field_names:
            column = self._columns[f]
            if column.deprecated:
                _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
        # store result in cache
        for vals in result:
            record = self.browse(vals.pop('id'))
            record._cache.update(record._convert_to_cache(vals, validate=False))
        # store failed values in cache for the records that could not be read
        fetched = self.browse(ids)
        missing = self - fetched
        if missing:
            extras = fetched - self
            if extras:
                raise AccessError(
                    _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
                        ', '.join(map(repr, missing._ids)),
                        ', '.join(map(repr, extras._ids)),
                    ))
            # store an access error exception in existing records
            exc = AccessError(
                _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                (self._name, 'read')
            )
            forbidden = missing.exists()
            forbidden._cache.update(FailedValue(exc))
            # store a missing error exception in non-existing records
            exc = MissingError(
                _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
            )
            (missing - forbidden)._cache.update(FailedValue(exc))
    @api.multi
    def get_metadata(self):
        """
        Returns some metadata about the given records.
        :return: list of ownership dictionaries for each requested record
        :rtype: list of dictionaries with the following keys:
                    * id: object id
                    * create_uid: user who created the record
                    * create_date: date when the record was created
                    * write_uid: last user who changed the record
                    * write_date: date of the last change to the record
                    * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
        """
        fields = ['id']
        if self._log_access:
            fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
        quoted_table = '"%s"' % self._table
        fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
        # join on ir_model_data to recover each record's external (XML) id
        query = '''SELECT %s, __imd.module, __imd.name
                   FROM %s LEFT JOIN ir_model_data __imd
                       ON (__imd.model = %%s and __imd.res_id = %s.id)
                   WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
        self._cr.execute(query, (self._name, tuple(self.ids)))
        res = self._cr.dictfetchall()
        # resolve all referenced user ids to display names in one name_get call
        uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
        names = dict(self.env['res.users'].browse(uids).name_get())
        for r in res:
            for key in r:
                value = r[key] = r[key] or False
                if key in ('write_uid', 'create_uid') and value in names:
                    r[key] = (value, names[value])
            r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
            del r['name'], r['module']
        return res
    def _check_concurrency(self, cr, ids, context):
        """ Optimistic concurrency check: raise when any record in `ids` was
            modified in the database after the timestamp the client supplied
            in ``context[self.CONCURRENCY_CHECK_FIELD]``.
            Does nothing when the context carries no check data or the model
            has no log_access columns.
        """
        if not context:
            return
        if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
            return
        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
        for sub_ids in cr.split_for_in_conditions(ids):
            ids_to_check = []
            for id in sub_ids:
                id_ref = "%s,%s" % (self._name, id)
                # pop() so each supplied timestamp is only checked once
                update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
                if update_date:
                    ids_to_check.extend([id, update_date])
            if not ids_to_check:
                continue
            cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
            res = cr.fetchone()
            if res:
                # mention the first one only to keep the error message readable
                raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
    def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
        """Verify the returned rows after applying record rules matches
        the length of `ids`, and raise an appropriate exception if it does not.
        """
        if context is None:
            context = {}
        ids, result_ids = set(ids), set(result_ids)
        missing_ids = ids - result_ids
        if missing_ids:
            # Attempt to distinguish record rule restriction vs deleted records,
            # to provide a more specific error message - check if the missing
            # ids are still present in the database
            cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
            forbidden_ids = [x[0] for x in cr.fetchall()]
            if forbidden_ids:
                # the missing ids are (at least partially) hidden by access rules
                if uid == SUPERUSER_ID:
                    # the superuser is never restricted by record rules
                    return
                _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
                raise except_orm(_('Access Denied'),
                                 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                                    (self._description, operation))
            else:
                # If we get here, the missing_ids are not in the database
                if operation in ('read','unlink'):
                    # No need to warn about deleting an already deleted record.
                    # And no error when reading a record that was deleted, to prevent spurious
                    # errors for non-transactional search/read sequences coming from clients
                    return
                _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
                raise except_orm(_('Missing document(s)'),
                                 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
"""Verifies that the operation given by ``operation`` is allowed for the user
according to the access rights."""
return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """Verifies that the operation given by ``operation`` is allowed for the user
        according to ir.rules.
        :param operation: one of ``write``, ``unlink``
        :raise except_orm: * if current ir.rules do not permit this operation.
        :return: None if the operation is allowed
        """
        if uid == SUPERUSER_ID:
            # the superuser bypasses record rules entirely
            return
        if self.is_transient():
            # Only one single implicit access rule for transient models: owner only!
            # This is ok to hardcode because we assert that TransientModels always
            # have log_access enabled so that the create_uid column is always there.
            # And even with _inherits, these fields are always present in the local
            # table too, so no need for JOINs.
            cr.execute("""SELECT distinct create_uid
                          FROM %s
                          WHERE id IN %%s""" % self._table, (tuple(ids),))
            uids = [x[0] for x in cr.fetchall()]
            if len(uids) != 1 or uids[0] != uid:
                raise except_orm(_('Access Denied'),
                                 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
        else:
            # fetch the ir.rule domain for this operation and re-select the
            # ids through it; any id filtered out triggers an error below
            where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
            if where_clause:
                where_clause = ' and ' + ' and '.join(where_clause)
                for sub_ids in cr.split_for_in_conditions(ids):
                    cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                               ' WHERE ' + self._table + '.id IN %s' + where_clause,
                               [sub_ids] + where_params)
                    returned_ids = [x['id'] for x in cr.dictfetchall()]
                    self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def create_workflow(self, cr, uid, ids, context=None):
"""Create a workflow instance for each given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_create(uid, self._name, res_id, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return True
def delete_workflow(self, cr, uid, ids, context=None):
"""Delete the workflow instances bound to the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_delete(uid, self._name, res_id, cr)
self.invalidate_cache(cr, uid, context=context)
return True
def step_workflow(self, cr, uid, ids, context=None):
"""Reevaluate the workflow instances of the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_write(uid, self._name, res_id, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return True
def signal_workflow(self, cr, uid, ids, signal, context=None):
"""Send given workflow signal and return a dict mapping ids to workflow results"""
from openerp import workflow
result = {}
for res_id in ids:
result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return result
def redirect_workflow(self, cr, uid, old_new_ids, context=None):
""" Rebind the workflow instance bound to the given 'old' record IDs to
the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
"""
from openerp import workflow
for old_id, new_id in old_new_ids:
workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
self.invalidate_cache(cr, uid, context=context)
return True
    def unlink(self, cr, uid, ids, context=None):
        """ unlink()
        Deletes the records of the current set
        :raise AccessError: * if user has no unlink rights on the requested object
                            * if user tries to bypass access rules for unlink on the requested object
        :raise UserError: if the record is default property for other records
        """
        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]
        # capture stored-function-field dependencies before the rows disappear
        result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
        # for recomputing new-style fields
        recs = self.browse(cr, uid, ids, context)
        recs.modified(self._fields)
        self._check_concurrency(cr, ids, context)
        self.check_access_rights(cr, uid, 'unlink')
        ir_property = self.pool.get('ir.property')
        # Check if the records are used as default properties.
        domain = [('res_id', '=', False),
                  ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
                 ]
        if ir_property.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
        # Delete the records' properties.
        property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
        ir_property.unlink(cr, uid, property_ids, context=context)
        self.delete_workflow(cr, uid, ids, context=context)
        self.check_access_rule(cr, uid, ids, 'unlink', context=context)
        pool_model_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        ir_attachment_obj = self.pool.get('ir.attachment')
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('delete from ' + self._table + ' ' \
                       'where id IN %s', (sub_ids,))
            # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
            # as these are not connected with real database foreign keys, and would be dangling references.
            # Note: following steps performed as admin to avoid access rights restrictions, and with no context
            #       to avoid possible side-effects during admin calls.
            # Step 1. Calling unlink of ir_model_data only for the affected IDS
            reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
            # Step 2. Marching towards the real deletion of referenced records
            if reference_ids:
                pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
            # For the same reason, removing the record relevant to ir_values
            ir_value_ids = ir_values_obj.search(cr, uid,
                ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
                context=context)
            if ir_value_ids:
                ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
            # For the same reason, removing the record relevant to ir_attachment
            # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
            cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
            ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
            if ir_attachment_ids:
                ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
        # invalidate the *whole* cache, since the orm does not handle all
        # changes made in the database, like cascading delete!
        recs.invalidate_cache()
        # re-trigger the stored function fields that depended on the deleted
        # rows, on the records that survive
        for order, obj_name, store_ids, fields in result_store:
            if obj_name == self._name:
                effective_store_ids = set(store_ids) - set(ids)
            else:
                effective_store_ids = store_ids
            if effective_store_ids:
                obj = self.pool[obj_name]
                cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
                rids = map(lambda x: x[0], cr.fetchall())
                if rids:
                    obj._store_set_values(cr, uid, rids, fields, context)
        # recompute new-style fields
        recs.recompute()
        return True
#
# TODO: Validate
#
@api.multi
def write(self, vals):
""" write(vals)
Updates all records in the current set with the provided values.
:param dict vals: fields to update and the value to set on them e.g::
{'foo': 1, 'bar': "Qux"}
will set the field ``foo`` to ``1`` and the field ``bar`` to
``"Qux"`` if those are valid (otherwise it will trigger an error).
:raise AccessError: * if user has no write rights on the requested object
* if user tries to bypass access rules for write on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
.. _openerp/models/relationals/format:
.. note:: Relational fields use a special "commands" format to manipulate their values
This format is a list of command triplets executed sequentially,
possible command triplets are:
``(0, _, values: dict)``
links to a new record created from the provided values
``(1, id, values: dict)``
updates the already-linked record of id ``id`` with the
provided ``values``
``(2, id, _)``
unlinks and deletes the linked record of id ``id``
``(3, id, _)``
unlinks the linked record of id ``id`` without deleting it
``(4, id, _)``
links to an existing record of id ``id``
``(5, _, _)``
unlinks all records in the relation, equivalent to using
the command ``3`` on every linked record
``(6, _, ids)``
replaces the existing list of linked records by the provoded
ones, equivalent to using ``5`` then ``4`` for each id in
``ids``)
(in command triplets, ``_`` values are ignored and can be
anything, generally ``0`` or ``False``)
Any command can be used on :class:`~openerp.fields.Many2many`,
only ``0``, ``1`` and ``2`` can be used on
:class:`~openerp.fields.One2many`.
"""
if not self:
return True
self._check_concurrency(self._ids)
self.check_access_rights('write')
# No user-driven update of these columns
for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
vals.pop(field, None)
# split up fields into old-style and pure new-style ones
old_vals, new_vals, unknown = {}, {}, []
for key, val in vals.iteritems():
if key in self._columns:
old_vals[key] = val
elif key in self._fields:
new_vals[key] = val
else:
unknown.append(key)
if unknown:
_logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
# write old-style fields with (low-level) method _write
if old_vals:
self._write(old_vals)
# put the values of pure new-style fields into cache, and inverse them
if new_vals:
for record in self:
record._cache.update(record._convert_to_cache(new_vals, update=True))
for key in new_vals:
self._fields[key].determine_inverse(self)
return True
    def _write(self, cr, user, ids, vals, context=None):
        """ Low-level implementation of write(): updates the old-style
        columns of records ``ids`` with ``vals``.

        Handles, in order: per-field group-based write permissions, the
        direct SQL UPDATE of classic columns, translations, non-classic
        (function/relational) column setters, propagation of inherited
        (_inherits) values to parent records, parent_left/parent_right
        maintenance for _parent_store models, Python constraints and the
        recomputation of stored function fields.

        :param cr: database cursor
        :param user: current user id
        :param ids: list of ids of the records to update
        :param dict vals: old-style column values to write
        :param dict context: optional context (notably ``lang``,
            ``defer_parent_store_computation`` and ``recompute``)
        :return: True
        :raise MissingError: if one of the records was concurrently deleted
        """
        # low-level implementation of write()
        if not context:
            context = {}
        readonly = None
        self.check_field_access_rights(cr, user, 'write', vals.keys())
        # collect the ids of related records removed through "(2, id)"
        # commands: their stored-function recomputation is skipped below,
        # since they will no longer exist
        deleted_related = defaultdict(list)
        for field in vals.keys():
            fobj = None
            if field in self._columns:
                fobj = self._columns[field]
            elif field in self._inherit_fields:
                fobj = self._inherit_fields[field][2]
            if not fobj:
                continue
            if fobj._type in ['one2many', 'many2many'] and vals[field]:
                for wtuple in vals[field]:
                    if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
                        deleted_related[fobj._obj].append(wtuple[1])
            # column-level 'write' groups: silently drop the value if the
            # user belongs to none of the groups allowed to write the field
            groups = fobj.write
            if groups:
                edit = False
                for group in groups:
                    module = group.split(".")[0]
                    grp = group.split(".")[1]
                    cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                               (grp, module, 'res.groups', user))
                    readonly = cr.fetchall()
                    if readonly[0][0] >= 1:
                        edit = True
                        break

                if not edit:
                    vals.pop(field)
        result = self._store_get_values(cr, user, ids, vals.keys(), context) or []

        # for recomputing new-style fields
        recs = self.browse(cr, user, ids, context)
        modified_fields = list(vals)
        if self._log_access:
            modified_fields += ['write_date', 'write_uid']
        recs.modified(modified_fields)

        parents_changed = []
        parent_order = self._parent_order or self._order
        if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
            # The parent_left/right computation may take up to
            # 5 seconds. No need to recompute the values if the
            # parent is the same.
            # Note: to respect parent_order, nodes must be processed in
            # order, so ``parents_changed`` must be ordered properly.
            parent_val = vals[self._parent_name]
            if parent_val:
                query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
                                (self._table, self._parent_name, self._parent_name, parent_order)
                cr.execute(query, (tuple(ids), parent_val))
            else:
                query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
                                (self._table, self._parent_name, parent_order)
                cr.execute(query, (tuple(ids),))
            parents_changed = map(operator.itemgetter(0), cr.fetchall())

        upd0 = []          # SQL "col=..." fragments for the UPDATE statement
        upd1 = []          # corresponding parameter values
        upd_todo = []      # non-classic columns, written via their set() method
        updend = []        # inherited (_inherits) columns, written on the parents
        direct = []        # classic columns written directly (candidates for translation)
        totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
        for field in vals:
            field_column = self._all_columns.get(field) and self._all_columns.get(field).column
            if field_column and field_column.deprecated:
                _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
            if field in self._columns:
                if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
                    if (not totranslate) or not self._columns[field].translate:
                        upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
                        upd1.append(self._columns[field]._symbol_set[1](vals[field]))
                    direct.append(field)
                else:
                    upd_todo.append(field)
            else:
                updend.append(field)
            if field in self._columns \
                    and hasattr(self._columns[field], 'selection') \
                    and vals[field]:
                self._check_selection_field_value(cr, user, field, vals[field], context=context)

        if self._log_access:
            upd0.append('write_uid=%s')
            upd0.append("write_date=(now() at time zone 'UTC')")
            upd1.append(user)

        if len(upd0):
            self.check_access_rule(cr, user, ids, 'write', context=context)
            for sub_ids in cr.split_for_in_conditions(ids):
                cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
                           'where id IN %s', upd1 + [sub_ids])
                # a rowcount mismatch means some record vanished concurrently
                if cr.rowcount != len(sub_ids):
                    raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)

            if totranslate:
                # TODO: optimize
                for f in direct:
                    if self._columns[f].translate:
                        src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
                        if not src_trans:
                            src_trans = vals[f]
                            # Inserting value to DB
                            context_wo_lang = dict(context, lang=None)
                            self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
                        self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)

        # call the 'set' method of fields which are not classic_write
        upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)

        # default element in context must be removed when call a one2many or many2many
        rel_context = context.copy()
        for c in context.items():
            if c[0].startswith('default_'):
                del rel_context[c[0]]

        for field in upd_todo:
            for id in ids:
                result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []

        # propagate the values of inherited fields to the parent records
        unknown_fields = updend[:]
        for table in self._inherits:
            col = self._inherits[table]
            nids = []
            for sub_ids in cr.split_for_in_conditions(ids):
                cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
                           'where id IN %s', (sub_ids,))
                nids.extend([x[0] for x in cr.fetchall()])

            v = {}
            for val in updend:
                if self._inherit_fields[val][0] == table:
                    v[val] = vals[val]
                    unknown_fields.remove(val)
            if v:
                self.pool[table].write(cr, user, nids, v, context)

        if unknown_fields:
            _logger.warning(
                'No such field(s) in model %s: %s.',
                self._name, ', '.join(unknown_fields))

        # check Python constraints
        recs._validate_fields(vals)

        # TODO: use _order to set dest at the right position and not first node of parent
        # We can't defer parent_store computation because the stored function
        # fields that are computer may refer (directly or indirectly) to
        # parent_left/right (via a child_of domain)
        if parents_changed:
            if self.pool._init:
                self.pool._init_parent[self._name] = True
            else:
                order = self._parent_order or self._order
                parent_val = vals[self._parent_name]
                if parent_val:
                    clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
                else:
                    clause, params = '%s IS NULL' % (self._parent_name,), ()

                for id in parents_changed:
                    cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
                    pleft, pright = cr.fetchone()
                    # width of the subtree being moved
                    distance = pright - pleft + 1

                    # Positions of current siblings, to locate proper insertion point;
                    # this can _not_ be fetched outside the loop, as it needs to be refreshed
                    # after each update, in case several nodes are sequentially inserted one
                    # next to the other (i.e computed incrementally)
                    cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
                    parents = cr.fetchall()

                    # Find Position of the element
                    position = None
                    for (parent_pright, parent_id) in parents:
                        if parent_id == id:
                            break
                        position = parent_pright and parent_pright + 1 or 1

                    # It's the first node of the parent
                    if not position:
                        if not parent_val:
                            position = 1
                        else:
                            cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
                            position = cr.fetchone()[0] + 1

                    if pleft < position <= pright:
                        raise except_orm(_('UserError'), _('Recursivity Detected.'))

                    # shift the surrounding intervals, then move the subtree
                    # [pleft, pright] into its new slot
                    if pleft < position:
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
                    else:
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))

                recs.invalidate_cache(['parent_left', 'parent_right'])

        result += self._store_get_values(cr, user, ids, vals.keys(), context)
        result.sort()

        # for recomputing new-style fields
        recs.modified(modified_fields)

        done = {}
        for order, model_name, ids_to_update, fields_to_recompute in result:
            key = (model_name, tuple(fields_to_recompute))
            done.setdefault(key, {})
            # avoid to do several times the same computation
            todo = []
            for id in ids_to_update:
                if id not in done[key]:
                    done[key][id] = True
                    if id not in deleted_related[model_name]:
                        todo.append(id)
            self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)

        # recompute new-style fields
        if context.get('recompute', True):
            recs.recompute()

        self.step_workflow(cr, user, ids, context=context)
        return True
#
# TODO: Should set perm to user.xxx
#
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
""" create(vals) -> record
Creates a new record for the model.
The new record is initialized using the values from ``vals`` and
if necessary those from :meth:`~.default_get`.
:param dict vals:
values for the model's fields, as a dictionary::
{'field_name': field_value, ...}
see :meth:`~.write` for details
:return: new record created
:raise AccessError: * if user has no create rights on the requested object
* if user tries to bypass access rules for create on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
"""
self.check_access_rights('create')
# add missing defaults, and drop fields that may not be set by user
vals = self._add_missing_default_values(vals)
for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
vals.pop(field, None)
# split up fields into old-style and pure new-style ones
old_vals, new_vals, unknown = {}, {}, []
for key, val in vals.iteritems():
if key in self._all_columns:
old_vals[key] = val
elif key in self._fields:
new_vals[key] = val
else:
unknown.append(key)
if unknown:
_logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
# create record with old-style fields
record = self.browse(self._create(old_vals))
# put the values of pure new-style fields into cache, and inverse them
record._cache.update(record._convert_to_cache(new_vals))
for key in new_vals:
self._fields[key].determine_inverse(record)
return record
def _create(self, cr, user, vals, context=None):
# low-level implementation of create()
if not context:
context = {}
if self.is_transient():
self._transient_vacuum(cr, user)
tocreate = {}
for v in self._inherits:
if self._inherits[v] not in vals:
tocreate[v] = {}
else:
tocreate[v] = {'id': vals[self._inherits[v]]}
updates = [
# list of column assignments defined as tuples like:
# (column_name, format_string, column_value)
# (column_name, sql_formula)
# Those tuples will be used by the string formatting for the INSERT
# statement below.
('id', "nextval('%s')" % self._sequence),
]
upd_todo = []
unknown_fields = []
for v in vals.keys():
if v in self._inherit_fields and v not in self._columns:
(table, col, col_detail, original_parent) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
else:
if (v not in self._inherit_fields) and (v not in self._columns):
del vals[v]
unknown_fields.append(v)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
for table in tocreate:
if self._inherits[table] in vals:
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
if record_id is None or not record_id:
record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
else:
self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
updates.append((self._inherits[table], '%s', record_id))
#Start : Set bool fields to be False if they are not touched(to make search more powerful)
bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
for bool_field in bool_fields:
if bool_field not in vals:
vals[bool_field] = False
#End
for field in vals.keys():
fobj = None
if field in self._columns:
fobj = self._columns[field]
else:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
vals.pop(field)
for field in vals:
current_field = self._columns[field]
if current_field._classic_write:
updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
#for the function fields that receive a value, we set them directly in the database
#(they may be required), but we also need to trigger the _fct_inv()
if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
#TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
#one week of the release candidate. It seems the only good way to handle correctly this is to add an
#attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
#if, for example, the related has a default value (for usability) then the fct_inv is called and it
#may raise some access rights error. Changing this is a too big change for now, and is thus postponed
#after the release but, definitively, the behavior shouldn't be different for related and function
#fields.
upd_todo.append(field)
else:
#TODO: this `if´ statement should be removed because there is no good reason to special case the fields
#related. See the above TODO comment for further explanations.
if not isinstance(current_field, fields.related):
upd_todo.append(field)
if field in self._columns \
and hasattr(current_field, 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
updates.append(('create_uid', '%s', user))
updates.append(('write_uid', '%s', user))
updates.append(('create_date', "(now() at time zone 'UTC')"))
updates.append(('write_date', "(now() at time zone 'UTC')"))
# the list of tuples used in this formatting corresponds to
# tuple(field_name, format, value)
# In some case, for example (id, create_date, write_date) we does not
# need to read the third value of the tuple, because the real value is
# encoded in the second value (the format).
cr.execute(
"""INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
self._table,
', '.join('"%s"' % u[0] for u in updates),
', '.join(u[1] for u in updates)
),
tuple([u[2] for u in updates if len(u) > 2])
)
id_new, = cr.fetchone()
recs = self.browse(cr, user, id_new, context)
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
if self._parent_store and not context.get('defer_parent_store_computation'):
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
parent = vals.get(self._parent_name, False)
if parent:
cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
pleft_old = None
result_p = cr.fetchall()
for (pleft,) in result_p:
if not pleft:
break
pleft_old = pleft
if not pleft_old:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
pleft_old = cr.fetchone()[0]
pleft = pleft_old
else:
cr.execute('select max(parent_right) from '+self._table)
pleft = cr.fetchone()[0] or 0
cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
recs.invalidate_cache(['parent_left', 'parent_right'])
# default element in context must be remove when call a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
result = []
for field in upd_todo:
result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
# check Python constraints
recs._validate_fields(vals)
# invalidate and mark new-style fields to recompute
modified_fields = list(vals)
if self._log_access:
modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
recs.modified(modified_fields)
if context.get('recompute', True):
result += self._store_get_values(cr, user, [id_new],
list(set(vals.keys() + self._inherits.values())),
context)
result.sort()
done = []
for order, model_name, ids, fields2 in result:
if not (model_name, ids, fields2) in done:
self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
done.append((model_name, ids, fields2))
# recompute new-style fields
recs.recompute()
if self._log_create and context.get('recompute', True):
message = self._description + \
" '" + \
self.name_get(cr, user, [id_new], context=context)[0][1] + \
"' " + _("created.")
self.log(cr, user, id_new, message, True, context=context)
self.check_access_rule(cr, user, [id_new], 'create', context=context)
self.create_workflow(cr, user, [id_new], context=context)
return id_new
    def _store_get_values(self, cr, uid, ids, fields, context):
        """Returns an ordered list of fields.function to call due to
           an update operation on ``fields`` of records with ``ids``,
           obtained by calling the 'store' triggers of these fields,
           as setup by their 'store' attribute.

           :param cr: database cursor
           :param uid: current user id
           :param ids: ids of the records that were written to
           :param fields: names of the fields being written (may be None or empty)
           :return: [(priority, model_name, [record_ids,], [function_fields,])]
        """
        if fields is None: fields = []
        stored_functions = self.pool._store_function.get(self._name, [])

        # use indexed names for the details of the stored_functions:
        model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)

        # only keep store triggers that should be triggered for the ``fields``
        # being written to.
        triggers_to_compute = (
            f for f in stored_functions
            if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
        )

        to_compute_map = {}
        target_id_results = {}
        for store_trigger in triggers_to_compute:
            # memoize each target-ids function on its identity, so a function
            # shared by several triggers is only evaluated once
            target_func_id_ = id(store_trigger[target_ids_func_])
            if target_func_id_ not in target_id_results:
                # use admin user for accessing objects having rules defined on store fields
                target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
            target_ids = target_id_results[target_func_id_]

            # the compound key must consider the priority and model name
            key = (store_trigger[priority_], store_trigger[model_name_])
            for target_id in target_ids:
                to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))

        # Here to_compute_map looks like:
        # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
        #   (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
        #   (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
        # }

        # Now we need to generate the batch function calls list
        # call_map =
        #   { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
        call_map = {}
        for ((priority,model), id_map) in to_compute_map.iteritems():
            # group the target ids by identical trigger sets so each batch
            # recomputes the same function fields
            trigger_ids_maps = {}
            # function_ids_maps =
            #   { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
            for target_id, triggers in id_map.iteritems():
                trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
            for triggers, target_ids in trigger_ids_maps.iteritems():
                call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
                                                                 [t[func_field_to_compute_] for t in triggers]))
        result = []
        if call_map:
            # flatten, ordered by (priority, model)
            result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
        return result
def _store_set_values(self, cr, uid, ids, fields, context):
"""Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
respecting ``multi`` attributes), and stores the resulting values in the database directly."""
if not ids:
return True
field_flag = False
field_dict = {}
if self._log_access:
cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[1]:
field_dict.setdefault(r[0], [])
res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
for i in self.pool._store_function.get(self._name, []):
if i[5]:
up_write_date = write_date + datetime.timedelta(hours=i[5])
if datetime.datetime.now() < up_write_date:
if i[1] in fields:
field_dict[r[0]].append(i[1])
if not field_flag:
field_flag = True
todo = {}
keys = []
for f in fields:
if self._columns[f]._multi not in keys:
keys.append(self._columns[f]._multi)
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key in keys:
val = todo[key]
if key:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
for id, value in result.items():
if field_flag:
for f in value.keys():
if f in field_dict[id]:
value.pop(f)
upd0 = []
upd1 = []
for v in value:
if v not in val:
continue
if self._columns[v]._type == 'many2one':
try:
value[v] = value[v][0]
except:
pass
upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
upd1.append(self._columns[v]._symbol_set[1](value[v]))
upd1.append(id)
if upd0 and upd1:
cr.execute('update "' + self._table + '" set ' + \
','.join(upd0) + ' where id = %s', upd1)
else:
for f in val:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
for r in result.keys():
if field_flag:
if r in field_dict.keys():
if f in field_dict[r]:
result.pop(r)
for id, value in result.items():
if self._columns[f]._type == 'many2one':
try:
value = value[0]
except:
pass
cr.execute('update "' + self._table + '" set ' + \
'"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
# invalidate and mark new-style fields to recompute
self.browse(cr, uid, ids, context).modified(fields)
return True
# TODO: ameliorer avec NULL
def _where_calc(self, cr, user, domain, active_test=True, context=None):
"""Computes the WHERE clause needed to implement an OpenERP domain.
:param domain: the domain to compute
:type domain: list
:param active_test: whether the default filtering of records with ``active``
field set to ``False`` should be applied.
:return: the query expressing the given domain as provided in domain
:rtype: osv.query.Query
"""
if not context:
context = {}
domain = domain[:]
# if the object has a field named 'active', filter out all inactive
# records unless they were explicitely asked for
if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
if domain:
# the item[0] trick below works for domain items and '&'/'|'/'!'
# operators too
if not any(item[0] == 'active' for item in domain):
domain.insert(0, ('active', '=', 1))
else:
domain = [('active', '=', 1)]
if domain:
e = expression.expression(cr, user, domain, self, context)
tables = e.get_tables()
where_clause, where_params = e.to_sql()
where_clause = where_clause and [where_clause] or []
else:
where_clause, where_params, tables = [], [], ['"%s"' % self._table]
return Query(tables, where_clause, where_params)
def _check_qorder(self, word):
if not regex_order.match(word):
raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
return True
def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
"""Add what's missing in ``query`` to implement all appropriate ir.rules
(using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
:param query: the current query object
"""
if uid == SUPERUSER_ID:
return
def apply_rule(added_clause, added_params, added_tables, parent_model=None):
""" :param parent_model: name of the parent model, if the added
clause comes from a parent model
"""
if added_clause:
if parent_model:
# as inherited rules are being applied, we need to add the missing JOIN
# to reach the parent table (if it was not JOINed yet in the query)
parent_alias = self._inherits_join_add(self, parent_model, query)
# inherited rules are applied on the external table -> need to get the alias and replace
parent_table = self.pool[parent_model]._table
added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
# change references to parent_table to parent_alias, because we now use the alias to refer to the table
new_tables = []
for table in added_tables:
# table is just a table name -> switch to the full alias
if table == '"%s"' % parent_table:
new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
# table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
else:
new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
added_tables = new_tables
query.where_clause += added_clause
query.where_clause_params += added_params
for table in added_tables:
if table not in query.tables:
query.tables.append(table)
return True
return False
# apply main rules on the object
rule_obj = self.pool.get('ir.rule')
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
# apply ir.rules from the parents (through _inherits)
for inherited_model in self._inherits:
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
parent_model=inherited_model)
def _generate_m2o_order_by(self, order_field, query):
"""
Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
either native m2o fields or function/related fields that are stored, including
intermediate JOINs for inheritance if required.
:return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
"""
if order_field not in self._columns and order_field in self._inherit_fields:
# also add missing joins for reaching the table containing the m2o field
qualified_field = self._inherits_join_calc(order_field, query)
order_field_column = self._inherit_fields[order_field][2]
else:
qualified_field = '"%s"."%s"' % (self._table, order_field)
order_field_column = self._columns[order_field]
assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
_logger.debug("Many2one function/related fields must be stored " \
"to be used as ordering fields! Ignoring sorting for %s.%s",
self._name, order_field)
return
# figure out the applicable order_by for the m2o
dest_model = self.pool[order_field_column._obj]
m2o_order = dest_model._order
if not regex_order.match(m2o_order):
# _order is complex, can't use it here, so we default to _rec_name
m2o_order = dest_model._rec_name
else:
# extract the field names, to be able to qualify them and add desc/asc
m2o_order_list = []
for order_part in m2o_order.split(","):
m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
m2o_order = m2o_order_list
# Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
# as we don't want to exclude results that have NULL values for the m2o
src_table, src_field = qualified_field.replace('"', '').split('.', 1)
dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
def _generate_order_by(self, order_spec, query):
"""
Attempt to consruct an appropriate ORDER BY clause based on order_spec, which must be
a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
:raise" except_orm in case order_spec is malformed
"""
order_by_clause = ''
order_spec = order_spec or self._order
if order_spec:
order_by_elements = []
self._check_qorder(order_spec)
for order_part in order_spec.split(','):
order_split = order_part.strip().split(' ')
order_field = order_split[0].strip()
order_direction = order_split[1].strip() if len(order_split) == 2 else ''
order_column = None
inner_clause = None
if order_field == 'id':
order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
elif order_field in self._columns:
order_column = self._columns[order_field]
if order_column._classic_read:
inner_clause = '"%s"."%s"' % (self._table, order_field)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
elif order_field in self._inherit_fields:
parent_obj = self.pool[self._inherit_fields[order_field][3]]
order_column = parent_obj._columns[order_field]
if order_column._classic_read:
inner_clause = self._inherits_join_calc(order_field, query)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
else:
raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
if order_column and order_column._type == 'boolean':
inner_clause = "COALESCE(%s, false)" % inner_clause
if inner_clause:
if isinstance(inner_clause, list):
for clause in inner_clause:
order_by_elements.append("%s %s" % (clause, order_direction))
else:
order_by_elements.append("%s %s" % (inner_clause, order_direction))
if order_by_elements:
order_by_clause = ",".join(order_by_elements)
return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
    def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        """
        Private implementation of search() method, allowing specifying the uid to use for the access right check.
        This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
        by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
        This is ok at the security level because this method is private and not callable through XML-RPC.

        :param access_rights_uid: optional user ID to use when checking access rights
                                  (not for ir.rules, this is only for ir.model.access)
        :return: the number of matching records when ``count`` is True,
                 otherwise the list of matching record ids (duplicates removed,
                 order preserved)
        """
        if context is None:
            context = {}
        self.check_access_rights(cr, access_rights_uid or user, 'read')

        # For transient models, restrict access to the current user, except for the super-user
        if self.is_transient() and self._log_access and user != SUPERUSER_ID:
            args = expression.AND(([('create_uid', '=', user)], args or []))

        # build the SQL pieces: WHERE from the domain + ir.rules, then ORDER BY
        query = self._where_calc(cr, user, args, context=context)
        self._apply_ir_rules(cr, user, query, 'read', context=context)
        order_by = self._generate_order_by(order, query)
        from_clause, where_clause, where_clause_params = query.get_sql()

        where_str = where_clause and (" WHERE %s" % where_clause) or ''

        if count:
            # Ignore order, limit and offset when just counting, they don't make sense and could
            # hurt performance
            query_str = 'SELECT count(1) FROM ' + from_clause + where_str
            cr.execute(query_str, where_clause_params)
            res = cr.fetchone()
            return res[0]

        limit_str = limit and ' limit %d' % limit or ''
        offset_str = offset and ' offset %d' % offset or ''
        query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
        cr.execute(query_str, where_clause_params)
        res = cr.fetchall()

        # TDE note: with auto_join, we could have several lines about the same result
        # i.e. a lead with several unread messages; we uniquify the result using
        # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
        def _uniquify_list(seq):
            # seen.add() returns None (falsy), so each id passes the filter once
            seen = set()
            return [x for x in seq if x not in seen and not seen.add(x)]
        return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
if not args:
args = []
if field in self._inherit_fields:
return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
else:
return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
    def copy_data(self, cr, uid, id, default=None, context=None):
        """
        Copy given record's data with all its fields values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: field values to override in the original values of the copied record
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: dictionary containing all the field values, or ``None`` when
            the record was already copied within the current copy chain
            (protection against circular relationships)
        """
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_data_seen', {})
        if id in seen_map.setdefault(self._name, []):
            return
        seen_map[self._name].append(id)

        if default is None:
            default = {}
        # reset 'state' to its default value on the copy, unless the caller
        # explicitly provided one in `default`
        if 'state' not in default:
            if 'state' in self._defaults:
                if callable(self._defaults['state']):
                    default['state'] = self._defaults['state'](self, cr, uid, context)
                else:
                    default['state'] = self._defaults['state']

        # build a black list of fields that should not be copied
        blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
        def blacklist_given_fields(obj):
            # blacklist the fields that are given by inheritance
            for other, field_to_other in obj._inherits.items():
                blacklist.add(field_to_other)
                if field_to_other in default:
                    # all the fields of 'other' are given by the record: default[field_to_other],
                    # except the ones redefined in self
                    blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
                else:
                    blacklist_given_fields(self.pool[other])
            # blacklist deprecated fields
            for name, field in obj._columns.items():
                if field.deprecated:
                    blacklist.add(name)

        blacklist_given_fields(self)

        # keep only fields flagged copy=True, not overridden, not blacklisted
        fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
                                     if fi.column.copy
                                     if f not in default
                                     if f not in blacklist)

        data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
        if data:
            data = data[0]
        else:
            raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))

        res = dict(default)
        for f, colinfo in fields_to_copy.iteritems():
            field = colinfo.column
            if field._type == 'many2one':
                # many2one is read as a (id, display_name) pair: keep only the id
                res[f] = data[f] and data[f][0]
            elif field._type == 'one2many':
                other = self.pool[field._obj]
                # duplicate following the order of the ids because we'll rely on
                # it later for copying translations in copy_translation()!
                lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
                # the lines are duplicated using the wrong (old) parent, but then
                # are reassigned to the correct one thanks to the (0, 0, ...)
                res[f] = [(0, 0, line) for line in lines if line]
            elif field._type == 'many2many':
                # link the copy to the same target records (SET command)
                res[f] = [(6, 0, data[f])]
            else:
                res[f] = data[f]

        return res
    def copy_translations(self, cr, uid, old_id, new_id, context=None):
        """ Duplicate the ir.translation entries of record `old_id` onto the
        freshly copied record `new_id`, recursing into one2many children.
        Children are matched pairwise by sorted id, which relies on
        copy_data() having duplicated them in id order.
        """
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_translations_seen',{})
        if old_id in seen_map.setdefault(self._name,[]):
            return
        seen_map[self._name].append(old_id)

        trans_obj = self.pool.get('ir.translation')
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        fields = self.fields_get(cr, uid, context=context)

        for field_name, field_def in fields.items():
            # removing the lang to compare untranslated values
            context_wo_lang = dict(context, lang=None)
            old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
            # we must recursively copy the translations for o2o and o2m
            if field_def['type'] == 'one2many':
                target_obj = self.pool[field_def['relation']]
                # here we rely on the order of the ids to match the translations
                # as foreseen in copy_data()
                old_children = sorted(r.id for r in old_record[field_name])
                new_children = sorted(r.id for r in new_record[field_name])
                for (old_child, new_child) in zip(old_children, new_children):
                    target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
            # and for translatable fields we keep them for copy
            elif field_def.get('translate'):
                if field_name in self._columns:
                    # field owned by this model: translations attach directly
                    trans_name = self._name + "," + field_name
                    target_id = new_id
                    source_id = old_id
                elif field_name in self._inherit_fields:
                    # inherited field: translations attach to the parent record
                    trans_name = self._inherit_fields[field_name][0] + "," + field_name
                    # get the id of the parent record to set the translation
                    inherit_field_name = self._inherit_fields[field_name][1]
                    target_id = new_record[inherit_field_name].id
                    source_id = old_record[inherit_field_name].id
                else:
                    continue

                trans_ids = trans_obj.search(cr, uid, [
                        ('name', '=', trans_name),
                        ('res_id', '=', source_id)
                ])
                user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    del record['id']
                    # remove source to avoid triggering _set_src
                    del record['source']
                    record.update({'res_id': target_id})
                    if user_lang and user_lang == record['lang']:
                        # 'source' to force the call to _set_src
                        # 'value' needed if value is changed in copy(), want to see the new_value
                        record['source'] = old_record[field_name]
                        record['value'] = new_record[field_name]
                    trans_obj.create(cr, uid, record, context=context)
@api.returns('self', lambda value: value.id)
def copy(self, cr, uid, id, default=None, context=None):
""" copy(default=None)
Duplicate record with given id updating it with default values
:param dict default: dictionary of field values to override in the
original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
:returns: new record
"""
if context is None:
context = {}
context = context.copy()
data = self.copy_data(cr, uid, id, default, context)
new_id = self.create(cr, uid, data, context)
self.copy_translations(cr, uid, id, new_id, context)
return new_id
    @api.multi
    @api.returns('self')
    def exists(self):
        """ exists() -> records

        Returns the subset of records in `self` that exist, and marks deleted
        records as such in cache. It can be used as a test on records::

            if record.exists():
                ...

        By convention, new records are returned as existing.
        """
        # falsy ids are NewId placeholders: they never hit the database
        ids = filter(None, self._ids)           # ids to check in database
        if not ids:
            return self
        query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
        self._cr.execute(query, (ids,))
        ids = ([r[0] for r in self._cr.fetchall()] +  # ids in database
               [id for id in self._ids if not id])    # new ids
        existing = self.browse(ids)
        if len(existing) < len(self):
            # mark missing records in cache with a failed value
            exc = MissingError(_("Record does not exist or has been deleted."))
            (self - existing)._cache.update(FailedValue(exc))
        return existing
    def check_recursion(self, cr, uid, ids, context=None, parent=None):
        # Deprecated public wrapper: log a warning and delegate to
        # _check_recursion().
        _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
                        self._name)
        assert parent is None or parent in self._columns or parent in self._inherit_fields,\
                    "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
        return self._check_recursion(cr, uid, ids, context, parent)
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param parent: optional parent field name (default: ``self._parent_name = parent_id``)
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
if not parent:
parent = self._parent_name
# must ignore 'active' flag, ir.rules, etc. => direct SQL query
query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
for id in ids:
current_id = id
while current_id is not None:
cr.execute(query, (current_id,))
result = cr.fetchone()
current_id = result[0] if result else None
if current_id == id:
return False
return True
def _check_m2m_recursion(self, cr, uid, ids, field_name):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param field_name: field to check
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
field = self._all_columns.get(field_name)
field = field.column if field else None
if not field or field._type != 'many2many' or field._obj != self._name:
# field must be a many2many on itself
raise ValueError('invalid field_name: %r' % (field_name,))
query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
ids_parent = ids[:]
while ids_parent:
ids_parent2 = []
for i in range(0, len(ids_parent), cr.IN_MAX):
j = i + cr.IN_MAX
sub_ids_parent = ids_parent[i:j]
cr.execute(query, (tuple(sub_ids_parent),))
ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
ids_parent = ids_parent2
for i in ids_parent:
if i in ids:
return False
return True
def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID(s) of any database record.
**Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
:return: map of ids to the list of their fully qualified External IDs
in the form ``module.key``, or an empty list when there's no External
ID for a record, e.g.::
{ 'id': ['module.ext_id', 'module.ext_id_bis'],
'id2': [] }
"""
ir_model_data = self.pool.get('ir.model.data')
data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
result = {}
for id in ids:
# can't use dict.fromkeys() as the list would be shared!
result[id] = []
for record in data_results:
result[record['res_id']].append('%(module)s.%(name)s' % record)
return result
def get_external_id(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID of any database record, if there
is one. This method works as a possible implementation
for a function field, to be able to add it to any
model object easily, referencing it as ``Model.get_external_id``.
When multiple External IDs exist for a record, only one
of them is returned (randomly).
:return: map of ids to their fully qualified XML ID,
defaulting to an empty string when there's none
(to be usable as a function field),
e.g.::
{ 'id': 'module.ext_id',
'id2': '' }
"""
results = self._get_xml_ids(cr, uid, ids)
for k, v in results.iteritems():
if results[k]:
results[k] = v[0]
else:
results[k] = ''
return results
# backwards compatibility
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
def print_report(self, cr, uid, ids, name, data, context=None):
"""
Render the report `name` for the given IDs. The report must be defined
for this model, not another.
"""
report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
assert self._name == report.table
return report.create(cr, uid, ids, data, context)
# Transience
    @classmethod
    def is_transient(cls):
        """ Return whether the model is transient.

        See :class:`TransientModel`.
        """
        # `_transient` is a class attribute set on the model class
        return cls._transient
    def _transient_clean_rows_older_than(self, cr, seconds):
        """ Unlink the rows of this transient model whose last activity
        (write_date, else create_date) is older than `seconds` seconds.
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        # Never delete rows used in last 5 minutes
        seconds = max(seconds, 300)
        query = ("SELECT id FROM " + self._table + " WHERE"
            " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
            " < ((now() at time zone 'UTC') - interval %s)")
        cr.execute(query, ("%s seconds" % seconds,))
        ids = [x[0] for x in cr.fetchall()]
        # unlink as superuser so record rules cannot block the vacuum
        self.unlink(cr, SUPERUSER_ID, ids)
def _transient_clean_old_rows(self, cr, max_count):
# Check how many rows we have in the table
cr.execute("SELECT count(*) AS row_count FROM " + self._table)
res = cr.fetchall()
if res[0][0] <= max_count:
return # max not reached, nothing to do
self._transient_clean_rows_older_than(cr, 300)
def _transient_vacuum(self, cr, uid, force=False):
"""Clean the transient records.
This unlinks old records from the transient model tables whenever the
"_transient_max_count" or "_max_age" conditions (if any) are reached.
Actual cleaning will happen only once every "_transient_check_time" calls.
This means this method can be called frequently called (e.g. whenever
a new record is created).
Example with both max_hours and max_count active:
Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
5 and 10 minutes ago, the rest created/changed more then 12 minutes ago.
- age based vacuum will leave the 22 rows created/changed in the last 12 minutes
- count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
would immediately cause the maximum to be reached again.
- the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
"""
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
_transient_check_time = 20 # arbitrary limit on vacuum executions
self._transient_check_count += 1
if not force and (self._transient_check_count < _transient_check_time):
return True # no vacuum cleaning this time
self._transient_check_count = 0
# Age-based expiration
if self._transient_max_hours:
self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
# Count-based expiration
if self._transient_max_count:
self._transient_clean_old_rows(cr, self._transient_max_count)
return True
def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
""" Serializes one2many and many2many commands into record dictionaries
(as if all the records came from the database via a read()). This
method is aimed at onchange methods on one2many and many2many fields.
Because commands might be creation commands, not all record dicts
will contain an ``id`` field. Commands matching an existing record
will have an ``id``.
:param field_name: name of the one2many or many2many field matching the commands
:type field_name: str
:param commands: one2many or many2many commands to execute on ``field_name``
:type commands: list((int|False, int|False, dict|False))
:param fields: list of fields to read from the database, when applicable
:type fields: list(str)
:returns: records in a shape similar to that returned by ``read()``
(except records may be missing the ``id`` field if they don't exist in db)
:rtype: list(dict)
"""
result = [] # result (list of dict)
record_ids = [] # ids of records to read
updates = {} # {id: dict} of updates on particular records
for command in commands or []:
if not isinstance(command, (list, tuple)):
record_ids.append(command)
elif command[0] == 0:
result.append(command[2])
elif command[0] == 1:
record_ids.append(command[1])
updates.setdefault(command[1], {}).update(command[2])
elif command[0] in (2, 3):
record_ids = [id for id in record_ids if id != command[1]]
elif command[0] == 4:
record_ids.append(command[1])
elif command[0] == 5:
result, record_ids = [], []
elif command[0] == 6:
result, record_ids = [], list(command[2])
# read the records and apply the updates
other_model = self.pool[self._all_columns[field_name].column._obj]
for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
record.update(updates.get(record['id'], {}))
result.append(record)
return result
# for backward compatibility
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
"""
Performs a ``search()`` followed by a ``read()``.
:param cr: database cursor
:param user: current user id
:param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
:param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
:param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
:param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
:param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
:param context: context arguments.
:return: List of dictionaries containing the asked fields.
:rtype: List of dictionaries.
"""
record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
if not record_ids:
return []
if fields and fields == ['id']:
# shortcut read if we only want the ids
return [{'id': id} for id in record_ids]
# read() ignores active_test, but it would forward it to any downstream search call
# (e.g. for x2m or function fields), and this is not the desired behavior, the flag
# was presumably only meant for the main search().
# TODO: Move this to read() directly?
read_ctx = dict(context or {})
read_ctx.pop('active_test', None)
result = self.read(cr, uid, record_ids, fields, context=read_ctx)
if len(result) <= 1:
return result
# reorder read
index = dict((r['id'], r) for r in result)
return [index[x] for x in record_ids if x in index]
    def _register_hook(self, cr):
        """ stuff to do right after the registry is built """
        # default implementation does nothing; models override this to run
        # one-time setup (e.g. method patching) once the registry is ready
        pass
@classmethod
def _patch_method(cls, name, method):
""" Monkey-patch a method for all instances of this model. This replaces
the method called `name` by `method` in the given class.
The original method is then accessible via ``method.origin``, and it
can be restored with :meth:`~._revert_method`.
Example::
@api.multi
def do_write(self, values):
# do stuff, and call the original method
return do_write.origin(self, values)
# patch method write of model
model._patch_method('write', do_write)
# this will call do_write
records = model.search([...])
records.write(...)
# restore the original method
model._revert_method('write')
"""
origin = getattr(cls, name)
method.origin = origin
# propagate decorators from origin to method, and apply api decorator
wrapped = api.guess(api.propagate(origin, method))
wrapped.origin = origin
setattr(cls, name, wrapped)
@classmethod
def _revert_method(cls, name):
""" Revert the original method called `name` in the given class.
See :meth:`~._patch_method`.
"""
method = getattr(cls, name)
setattr(cls, name, method.origin)
#
# Instance creation
#
# An instance represents an ordered collection of records in a given
# execution environment. The instance object refers to the environment, and
# the records themselves are represented by their cache dictionary. The 'id'
# of each record is found in its corresponding cache dictionary.
#
# This design has the following advantages:
# - cache access is direct and thus fast;
# - one can consider records without an 'id' (see new records);
# - the global cache is only an index to "resolve" a record 'id'.
#
    @classmethod
    def _browse(cls, env, ids):
        """ Create an instance attached to `env`; `ids` is a tuple of record
            ids.
        """
        # bypass __init__: a recordset is fully defined by its env and ids
        records = object.__new__(cls)
        records.env = env
        records._ids = ids
        # register the ids for prefetching: later field accesses may fetch
        # all prefetch-registered records of this model at once
        env.prefetch[cls._name].update(ids)
        return records
    @api.v7
    def browse(self, cr, uid, arg=None, context=None):
        """ browse([ids]) -> records (old-style API variant). """
        # old-style API: build a fresh Environment from cr/uid/context
        ids = _normalize_ids(arg)
        #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
        return self._browse(Environment(cr, uid, context or {}), ids)
    @api.v8
    def browse(self, arg=None):
        """ browse([ids]) -> records

        Returns a recordset for the ids provided as parameter in the current
        environment.

        Can take no ids, a single id or a sequence of ids.
        """
        # new-style API: reuse the current environment
        ids = _normalize_ids(arg)
        #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
        return self._browse(self.env, ids)
#
# Internal properties, for manipulating the instance's implementation
#
@property
def ids(self):
""" List of actual record ids in this recordset (ignores placeholder
ids for records to create)
"""
return filter(None, list(self._ids))
# backward-compatibility with former browse records
_cr = property(lambda self: self.env.cr)
_uid = property(lambda self: self.env.uid)
_context = property(lambda self: self.env.context)
#
# Conversion methods
#
def ensure_one(self):
""" Verifies that the current recorset holds a single record. Raises
an exception otherwise.
"""
if len(self) == 1:
return self
raise except_orm("ValueError", "Expected singleton: %s" % self)
    def with_env(self, env):
        """ Returns a new version of this recordset attached to the provided
            environment

            :type env: :class:`~openerp.api.Environment`
        """
        # same ids, different environment (cursor/uid/context)
        return self._browse(env, self._ids)
    def sudo(self, user=SUPERUSER_ID):
        """ sudo([user=SUPERUSER])

        Returns a new version of this recordset attached to the provided
        user.
        """
        # shortcut for with_env() with only the uid changed
        return self.with_env(self.env(user=user))
def with_context(self, *args, **kwargs):
""" with_context([context][, **overrides]) -> records
Returns a new version of this recordset attached to an extended
context.
The extended context is either the provided ``context`` in which
``overrides`` are merged or the *current* context in which
``overrides`` are merged e.g.::
# current context is {'key1': True}
r2 = records.with_context({}, key2=True)
# -> r2._context is {'key2': True}
r2 = records.with_context(key2=True)
# -> r2._context is {'key1': True, 'key2': True}
"""
context = dict(args[0] if args else self._context, **kwargs)
return self.with_env(self.env(context=context))
def _convert_to_cache(self, values, update=False, validate=True):
""" Convert the `values` dictionary into cached values.
:param update: whether the conversion is made for updating `self`;
this is necessary for interpreting the commands of *2many fields
:param validate: whether values must be checked
"""
fields = self._fields
target = self if update else self.browse()
return {
name: fields[name].convert_to_cache(value, target, validate=validate)
for name, value in values.iteritems()
if name in fields
}
def _convert_to_write(self, values):
""" Convert the `values` dictionary into the format of :meth:`write`. """
fields = self._fields
result = {}
for name, value in values.iteritems():
if name in fields:
value = fields[name].convert_to_write(value)
if not isinstance(value, NewId):
result[name] = value
return result
#
# Record traversal and update
#
def _mapped_func(self, func):
""" Apply function `func` on all records in `self`, and return the
result as a list or a recordset (if `func` return recordsets).
"""
vals = [func(rec) for rec in self]
val0 = vals[0] if vals else func(self)
if isinstance(val0, BaseModel):
return reduce(operator.or_, vals, val0)
return vals
def mapped(self, func):
""" Apply `func` on all records in `self`, and return the result as a
list or a recordset (if `func` return recordsets). In the latter
case, the order of the returned recordset is arbritrary.
:param func: a function or a dot-separated sequence of field names
"""
if isinstance(func, basestring):
recs = self
for name in func.split('.'):
recs = recs._mapped_func(operator.itemgetter(name))
return recs
else:
return self._mapped_func(func)
    def _mapped_cache(self, name_seq):
        """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
            field names, and only cached values are used.
        """
        recs = self
        for name in name_seq.split('.'):
            field = recs._fields[name]
            # fall back on the field's null value for uncached records, so
            # the traversal never triggers a database fetch
            null = field.null(self.env)
            recs = recs.mapped(lambda rec: rec._cache.get(field, null))
        return recs
def filtered(self, func):
""" Select the records in `self` such that `func(rec)` is true, and
return them as a recordset.
:param func: a function or a dot-separated sequence of field names
"""
if isinstance(func, basestring):
name = func
func = lambda rec: filter(None, rec.mapped(name))
return self.browse([rec.id for rec in self if func(rec)])
def sorted(self, key=None):
""" Return the recordset `self` ordered by `key` """
if key is None:
return self.search([('id', 'in', self.ids)])
else:
return self.browse(map(int, sorted(self, key=key)))
    def update(self, values):
        """ Update record `self[0]` with `values`. """
        # item assignment goes through the field descriptors' setters
        for name, value in values.iteritems():
            self[name] = value
#
# New records - represent records that do not exist in the database yet;
# they are used to compute default values and perform onchanges.
#
@api.model
def new(self, values={}):
""" new([values]) -> record
Return a new record instance attached to the current environment and
initialized with the provided ``value``. The record is *not* created
in database, it only exists in memory.
"""
record = self.browse([NewId()])
record._cache.update(record._convert_to_cache(values, update=True))
if record.env.in_onchange:
# The cache update does not set inverse fields, so do it manually.
# This is useful for computing a function field on secondary
# records, if that field depends on the main record.
for name in values:
field = self._fields.get(name)
if field:
for invf in field.inverse_fields:
invf._update(record[name], record)
return record
#
# Dirty flag, to mark records modified (in draft mode)
#
    @property
    def _dirty(self):
        """ Return whether any record in `self` is dirty (modified in draft
            mode and registered in the environment's dirty set).
        """
        dirty = self.env.dirty
        return any(record in dirty for record in self)

    @_dirty.setter
    def _dirty(self, value):
        """ Mark the records in `self` as dirty. """
        # NOTE: relies on Python 2's eager map() for its side effect
        if value:
            map(self.env.dirty.add, self)
        else:
            map(self.env.dirty.discard, self)
#
# "Dunder" methods
#
    def __nonzero__(self):
        """ Test whether `self` is nonempty. """
        # an object without `_ids` (the model class itself) counts as truthy
        return bool(getattr(self, '_ids', True))
    def __len__(self):
        """ Return the size of `self`. """
        # counts all ids, including NewId placeholders of unsaved records
        return len(self._ids)
    def __iter__(self):
        """ Return an iterator over `self`. """
        # yields singleton recordsets, one per id, sharing the environment
        for id in self._ids:
            yield self._browse(self.env, (id,))
    def __contains__(self, item):
        """ Test whether `item` (record or field name) is an element of `self`.
            In the first case, the test is fully equivalent to::

                any(item == record for record in self)
        """
        if isinstance(item, BaseModel) and self._name == item._name:
            return len(item) == 1 and item.id in self._ids
        elif isinstance(item, basestring):
            # a string tests field membership, e.g. `'name' in record`
            return item in self._fields
        else:
            raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
    def __add__(self, other):
        """ Return the concatenation of two recordsets. """
        # unlike | and &, concatenation preserves order and keeps duplicates
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
        return self.browse(self._ids + other._ids)
    def __sub__(self, other):
        """ Return the recordset of all the records in `self` that are not in `other`. """
        # preserves the order of `self`
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
        other_ids = set(other._ids)
        return self.browse([id for id in self._ids if id not in other_ids])
    def __and__(self, other):
        """ Return the intersection of two recordsets.
            Note that recordset order is not preserved.
        """
        # set semantics: duplicates collapse and ordering is lost
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
        return self.browse(set(self._ids) & set(other._ids))
    def __or__(self, other):
        """ Return the union of two recordsets.
            Note that recordset order is not preserved.
        """
        # set semantics: duplicates collapse and ordering is lost
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
        return self.browse(set(self._ids) | set(other._ids))
    def __eq__(self, other):
        """ Test whether two recordsets are equivalent (up to reordering). """
        if not isinstance(other, BaseModel):
            # comparing against a non-recordset is flagged as a likely
            # programming error, but any truthy value just compares unequal
            if other:
                _logger.warning("Comparing apples and oranges: %s == %s", self, other)
            return False
        return self._name == other._name and set(self._ids) == set(other._ids)
    def __ne__(self, other):
        # keep `!=` consistent with the custom __eq__ above (Python 2 does
        # not derive one from the other)
        return not self == other
    def __lt__(self, other):
        # strict subset comparison on the underlying id sets
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
        return set(self._ids) < set(other._ids)
    def __le__(self, other):
        # inclusive subset comparison on the underlying id sets
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
        return set(self._ids) <= set(other._ids)
    def __gt__(self, other):
        # strict superset comparison on the underlying id sets
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
        return set(self._ids) > set(other._ids)
    def __ge__(self, other):
        # inclusive superset comparison on the underlying id sets
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
        return set(self._ids) >= set(other._ids)
    def __int__(self):
        # the `id` of the record; meaningful for singleton recordsets
        return self.id
    def __str__(self):
        # e.g. "res.partner(1, 2)"; falls back on the model name alone when
        # `_ids` is not set (the model class itself)
        return "%s%s" % (self._name, getattr(self, '_ids', ""))
    def __unicode__(self):
        # Python 2 unicode conversion, delegating to __str__
        return unicode(str(self))

    __repr__ = __str__
    def __hash__(self):
        # consistent with __eq__, which ignores ordering: hash the model name
        # together with the frozenset of ids
        if hasattr(self, '_ids'):
            return hash((self._name, frozenset(self._ids)))
        else:
            return hash(self._name)
    def __getitem__(self, key):
        """ If `key` is an integer or a slice, return the corresponding record
            selection as an instance (attached to `self.env`).
            Otherwise read the field `key` of the first record in `self`.

            Examples::

                inst = model.search(dom)    # inst is a recordset
                r4 = inst[3]                # fourth record in inst
                rs = inst[10:20]            # subset of inst
                nm = rs['name']             # name of first record in inst
        """
        if isinstance(key, basestring):
            # important: one must call the field's getter
            return self._fields[key].__get__(self, type(self))
        elif isinstance(key, slice):
            # slicing keeps a tuple of ids, so no extra wrapping is needed
            return self._browse(self.env, self._ids[key])
        else:
            # integer index: wrap the single id in a tuple
            return self._browse(self.env, (self._ids[key],))
    def __setitem__(self, key, value):
        """ Assign the field `key` to `value` in record `self`. """
        # important: one must call the field's setter (the descriptor handles
        # cache updates and recomputation bookkeeping)
        return self._fields[key].__set__(self, value)
    #
    # Cache and recomputation management
    #
    @lazy_property
    def _cache(self):
        """ Return the cache of `self`, mapping field names to values. """
        # lazy_property: the RecordCache proxy is built once per instance
        return RecordCache(self)
    @api.model
    def _in_cache_without(self, field):
        """ Make sure `self` is present in cache (for prefetching), and return
            the records of model `self` in cache that have no value for `field`
            (:class:`Field` instance).
        """
        env = self.env
        prefetch_ids = env.prefetch[self._name]
        prefetch_ids.update(self._ids)
        # drop falsy ids and the ids already cached for `field`
        ids = filter(None, prefetch_ids - set(env.cache[field]))
        return self.browse(ids)
    @api.model
    def refresh(self):
        """ Clear the records cache.

            .. deprecated:: 8.0
                The record cache is automatically invalidated.
        """
        # kept for backward compatibility with the pre-8.0 API
        self.invalidate_cache()
    @api.model
    def invalidate_cache(self, fnames=None, ids=None):
        """ Invalidate the record caches after some records have been modified.
            If both `fnames` and `ids` are ``None``, the whole cache is cleared.

            :param fnames: the list of modified fields, or ``None`` for all fields
            :param ids: the list of modified record ids, or ``None`` for all
        """
        if fnames is None:
            if ids is None:
                return self.env.invalidate_all()
            fields = self._fields.values()
        else:
            fields = map(self._fields.__getitem__, fnames)
        # invalidate fields and inverse fields, too; a spec entry of
        # (field, None) means "all records" for that field
        spec = [(f, ids) for f in fields] + \
               [(invf, None) for f in fields for invf in f.inverse_fields]
        self.env.invalidate(spec)
    @api.multi
    def modified(self, fnames):
        """ Notify that fields have been modified on `self`. This invalidates
            the cache, and prepares the recomputation of stored function fields
            (new-style fields only).

            :param fnames: iterable of field names that have been modified on
                records `self`
        """
        # each field knows what to invalidate and recompute
        spec = []
        for fname in fnames:
            spec += self._fields[fname].modified(self)
        # set of every field currently present in any environment's cache
        cached_fields = {
            field
            for env in self.env.all
            for field in env.cache
        }
        # invalidate non-stored fields.function which are currently cached
        spec += [(f, None) for f in self.pool.pure_function_fields
                 if f in cached_fields]
        self.env.invalidate(spec)
    def _recompute_check(self, field):
        """ If `field` must be recomputed on some record in `self`, return the
            corresponding records that must be recomputed.
        """
        # delegates to the environment's recomputation todo-list
        return self.env.check_todo(field, self)
    def _recompute_todo(self, field):
        """ Mark `field` to be recomputed on the records of `self`. """
        self.env.add_todo(field, self)
    def _recompute_done(self, field):
        """ Mark `field` as recomputed on the records of `self`. """
        self.env.remove_todo(field, self)
    @api.model
    def recompute(self):
        """ Recompute stored function fields. The fields and records to
            recompute have been determined by method :meth:`modified`.
        """
        while self.env.has_todo():
            field, recs = self.env.get_todo()
            # evaluate the fields to recompute, and save them to database;
            # rec1 carries recompute=False so _write does not re-trigger
            for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
                try:
                    values = rec._convert_to_write({
                        f.name: rec[f.name] for f in field.computed_fields
                    })
                    rec1._write(values)
                except MissingError:
                    # record was deleted in the meantime: nothing to store
                    pass
            # mark the computed fields as done
            map(recs._recompute_done, field.computed_fields)
    #
    # Generic onchange method
    #
    def _has_onchange(self, field, other_fields):
        """ Return whether `field` should trigger an onchange event in the
            presence of `other_fields`.
        """
        # test whether self has an onchange method for field, or field is a
        # dependency of any field in other_fields
        return field.name in self._onchange_methods or \
            any(dep in other_fields for dep in field.dependents)
    @api.model
    def _onchange_spec(self, view_info=None):
        """ Return the onchange spec from a view description; if not given, the
            result of ``self.fields_view_get()`` is used. The result maps
            (possibly dotted) field names to their ``on_change`` attribute.
        """
        result = {}
        # for traversing the XML arch and populating result
        def process(node, info, prefix):
            if node.tag == 'field':
                name = node.attrib['name']
                # dotted names refer to fields of embedded subviews
                names = "%s.%s" % (prefix, name) if prefix else name
                if not result.get(names):
                    result[names] = node.attrib.get('on_change')
                # traverse the subviews included in relational fields
                for subinfo in info['fields'][name].get('views', {}).itervalues():
                    process(etree.fromstring(subinfo['arch']), subinfo, names)
            else:
                for child in node:
                    process(child, info, prefix)
        if view_info is None:
            view_info = self.fields_view_get()
        process(etree.fromstring(view_info['arch']), view_info, '')
        return result
    def _onchange_eval(self, field_name, onchange, result):
        """ Apply onchange method(s) for field `field_name` with spec `onchange`
            on record `self`. Value assignments are applied on `self`, while
            domain and warning messages are put in dictionary `result`.
        """
        onchange = onchange.strip()
        # onchange V8: new-style @api.onchange methods registered on the field
        if onchange in ("1", "true"):
            for method in self._onchange_methods.get(field_name, ()):
                method_res = method(self)
                if not method_res:
                    continue
                if 'domain' in method_res:
                    result.setdefault('domain', {}).update(method_res['domain'])
                if 'warning' in method_res:
                    result['warning'] = method_res['warning']
            return
        # onchange V7: "onchange_xxx(arg, ...)" spec from the view definition
        match = onchange_v7.match(onchange)
        if match:
            method, params = match.groups()
            # evaluate params -> tuple
            global_vars = {'context': self._context, 'uid': self._uid}
            if self._context.get('field_parent'):
                # wrapper exposing the parent record's values in the raw
                # format expected by old-style onchange methods
                class RawRecord(object):
                    def __init__(self, record):
                        self._record = record
                    def __getattr__(self, name):
                        field = self._record._fields[name]
                        value = self._record[name]
                        return field.convert_to_onchange(value)
                record = self[self._context['field_parent']]
                global_vars['parent'] = RawRecord(record)
            field_vars = {
                key: self._fields[key].convert_to_onchange(val)
                for key, val in self._cache.iteritems()
            }
            # NOTE: `params` originates from the view definition (trusted
            # database content, not end-user input) and is eval'd as Python
            params = eval("[%s]" % params, global_vars, field_vars)
            # call onchange method with the old-style cr/uid/ids signature
            args = (self._cr, self._uid, self._origin.ids) + tuple(params)
            method_res = getattr(self._model, method)(*args)
            if not isinstance(method_res, dict):
                return
            if 'value' in method_res:
                method_res['value'].pop('id', None)
                self.update(self._convert_to_cache(method_res['value'], validate=False))
            if 'domain' in method_res:
                result.setdefault('domain', {}).update(method_res['domain'])
            if 'warning' in method_res:
                result['warning'] = method_res['warning']
    @api.multi
    def onchange(self, values, field_name, field_onchange):
        """ Perform an onchange on the given field.

            :param values: dictionary mapping field names to values, giving the
                current state of modification
            :param field_name: name of the modified field
            :param field_onchange: dictionary mapping field names to their
                on_change attribute
            :return: dict with a 'value' mapping of modified fields, and
                possibly 'domain' and 'warning' entries
        """
        env = self.env
        if field_name and field_name not in self._fields:
            return {}
        # determine subfields for field.convert_to_write() below
        secondary = []
        subfields = defaultdict(set)
        for dotname in field_onchange:
            if '.' in dotname:
                secondary.append(dotname)
                name, subname = dotname.split('.')
                subfields[name].add(subname)
        # create a new record with values, and attach `self` to it
        with env.do_in_onchange():
            record = self.new(values)
            values = dict(record._cache)
            # attach `self` with a different context (for cache consistency)
            record._origin = self.with_context(__onchange=True)
        # determine which field should be triggered an onchange
        todo = set([field_name]) if field_name else set(values)
        done = set()
        # dummy assignment: trigger invalidations on the record
        for name in todo:
            value = record[name]
            field = self._fields[name]
            if not field_name and field.type == 'many2one' and field.delegate and not value:
                # do not nullify all fields of parent record for new records
                continue
            record[name] = value
        result = {'value': {}}
        # fixpoint loop: each onchange may modify fields, which in turn may
        # trigger further onchanges; `done` prevents infinite cycles
        while todo:
            name = todo.pop()
            if name in done:
                continue
            done.add(name)
            with env.do_in_onchange():
                # apply field-specific onchange methods
                if field_onchange.get(name):
                    record._onchange_eval(name, field_onchange[name], result)
                # force re-evaluation of function fields on secondary records
                for field_seq in secondary:
                    record.mapped(field_seq)
                # determine which fields have been modified
                for name, oldval in values.iteritems():
                    field = self._fields[name]
                    newval = record[name]
                    if field.type in ('one2many', 'many2many'):
                        if newval != oldval or newval._dirty:
                            # put new value in result
                            result['value'][name] = field.convert_to_write(
                                newval, record._origin, subfields.get(name),
                            )
                            todo.add(name)
                        else:
                            # keep result: newval may have been dirty before
                            pass
                    else:
                        if newval != oldval:
                            # put new value in result
                            result['value'][name] = field.convert_to_write(
                                newval, record._origin, subfields.get(name),
                            )
                            todo.add(name)
                        else:
                            # clean up result to not return another value
                            result['value'].pop(name, None)
        # At the moment, the client does not support updates on a *2many field
        # while this one is modified by the user.
        if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
            result['value'].pop(field_name, None)
        return result
class RecordCache(MutableMapping):
    """ Implements a proxy dictionary to read/update the cache of a record.
        Upon iteration, it looks like a dictionary mapping field names to
        values. However, fields may be used as keys as well.
    """
    def __init__(self, records):
        # the recordset whose cache entries this proxy exposes
        self._recs = records
    def contains(self, field):
        """ Return whether `records[0]` has a value for `field` in cache. """
        # unlike __contains__, this also counts SpecialValue placeholders
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        return self._recs.id in self._recs.env.cache[field]
    def __contains__(self, field):
        """ Return whether `records[0]` has a regular value for `field` in cache. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        dummy = SpecialValue(None)
        value = self._recs.env.cache[field].get(self._recs.id, dummy)
        return not isinstance(value, SpecialValue)
    def __getitem__(self, field):
        """ Return the cached value of `field` for `records[0]`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        value = self._recs.env.cache[field][self._recs.id]
        # SpecialValue.get() returns the value or raises (e.g. FailedValue)
        return value.get() if isinstance(value, SpecialValue) else value
    def __setitem__(self, field, value):
        """ Assign the cached value of `field` for all records in `records`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        values = dict.fromkeys(self._recs._ids, value)
        self._recs.env.cache[field].update(values)
    def update(self, *args, **kwargs):
        """ Update the cache of all records in `records`. If the argument is a
            `SpecialValue`, update all fields (except "magic" columns).
        """
        if args and isinstance(args[0], SpecialValue):
            values = dict.fromkeys(self._recs._ids, args[0])
            for name, field in self._recs._fields.iteritems():
                if name != 'id':
                    self._recs.env.cache[field].update(values)
        else:
            # regular dict-style update, handled by MutableMapping
            return super(RecordCache, self).update(*args, **kwargs)
    def __delitem__(self, field):
        """ Remove the cached value of `field` for all `records`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        field_cache = self._recs.env.cache[field]
        for id in self._recs._ids:
            field_cache.pop(id, None)
    def __iter__(self):
        """ Iterate over the field names with a regular value in cache. """
        cache, id = self._recs.env.cache, self._recs.id
        dummy = SpecialValue(None)
        for name, field in self._recs._fields.iteritems():
            if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
                yield name
    def __len__(self):
        """ Return the number of fields with a regular value in cache. """
        return sum(1 for name in self)
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _auto = True  # automatically create the database backend (table)
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False # True in a TransientModel
class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _auto = True  # transient models still get a database table
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = True
class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False
def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
    """
    if not items:
        return lambda _gettable: ()
    if len(items) == 1:
        # wrap the single extracted value in a 1-tuple
        key = items[0]
        return lambda gettable: (gettable[key],)
    # operator.itemgetter already returns a tuple for 2+ keys
    return operator.itemgetter(*items)
def convert_pgerror_23502(model, fields, info, e):
    """ Convert a PostgreSQL not-null violation (pgcode 23502) into a
    user-readable error dictionary with 'message' and 'field' keys.
    """
    match = re.match(r'^null value in column "(?P<field>\w+)" violates '
                     r'not-null constraint\n',
                     str(e))
    field_name = match and match.group('field')
    if not match or field_name not in fields:
        # unparseable message or unknown field: fall back to the raw error
        return {'message': unicode(e)}
    field = fields.get(field_name)
    if field:
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    else:
        message = _(u"Missing required value for the field '%s'.") % field_name
    return {
        'message': message,
        'field': field_name,
    }
def convert_pgerror_23505(model, fields, info, e):
    """ Convert a PostgreSQL unique-constraint violation (pgcode 23505) into a
    user-readable error dictionary with 'message' and 'field' keys.
    """
    match = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                     str(e))
    field_name = match and match.group('field')
    if not match or field_name not in fields:
        # unparseable message or unknown field: fall back to the raw error
        return {'message': unicode(e)}
    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        # refine the message with the field's user-facing label
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
# Map PostgreSQL error codes (pgcode) to converters producing user-friendly
# error dictionaries; unknown codes fall back to the raw message.
PGERROR_TO_OE = defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique constraint error
    '23505': convert_pgerror_23505,
})
def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
    """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.

    Various implementations were tested on the corpus of all browse() calls
    performed during a full crawler run (after having installed all website_*
    modules) and this one was the most efficient overall.

    A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    some kind.

    :rtype: tuple
    """
    # NOTE: the mutable-looking default `atoms` is safe: it is only read.
    # `long` and `unicode` are Python 2 builtins.
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()
    # `type in set` is significantly faster (because more restrictive) than
    # isinstance(arg, set) or issubclass(type, set); and for new-style classes
    # obj.__class__ is equivalent to but faster than type(obj). Not relevant
    # (and looks much worse) in most cases, but over millions of calls it
    # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,
    return tuple(arg)
# keep those imports here to avoid dependency cycle errors
from .osv import expression
from .fields import Field, SpecialValue, FailedValue
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,451,201,501,197,821,000 | 45.592705 | 247 | 0.551519 | false |
pahans/nototools | third_party/spiro/curves/polymat-bad.py | 15 | 1664 | from Numeric import *
import LinearAlgebra as la
import sys
# Build and invert the constraint matrix for a piecewise-polynomial spline
# with n segments, 4 coefficients per knot (Python 2 / Numeric script).
n = 15
m = zeros(((n + 1) * 4, (n + 1) * 4), Float)
# Fill four constraint rows per interior knot. The coefficients
# (.5, 1./12, 1./48, 1./480, .125 ...) presumably come from evaluating the
# polynomial basis and its derivatives at the segment endpoints -- TODO
# confirm against the spiro curve derivation.
for i in range(n):
    m[4 * i + 2][4 * i + 0] = .5
    m[4 * i + 2][4 * i + 1] = -1./12
    m[4 * i + 2][4 * i + 2] = 1./48
    m[4 * i + 2][4 * i + 3] = -1./480
    m[4 * i + 2][4 * i + 4] = .5
    m[4 * i + 2][4 * i + 5] = 1./12
    m[4 * i + 2][4 * i + 6] = 1./48
    m[4 * i + 2][4 * i + 7] = 1./480
    m[4 * i + 3][4 * i + 0] = 1
    m[4 * i + 3][4 * i + 1] = .5
    m[4 * i + 3][4 * i + 2] = .125
    m[4 * i + 3][4 * i + 3] = 1./48
    m[4 * i + 3][4 * i + 4] = -1
    m[4 * i + 3][4 * i + 5] = .5
    m[4 * i + 3][4 * i + 6] = -.125
    m[4 * i + 3][4 * i + 7] = 1./48
    m[4 * i + 4][4 * i + 0] = 0
    m[4 * i + 4][4 * i + 1] = 1
    m[4 * i + 4][4 * i + 2] = .5
    m[4 * i + 4][4 * i + 3] = .125
    m[4 * i + 4][4 * i + 4] = 0
    m[4 * i + 4][4 * i + 5] = -1
    m[4 * i + 4][4 * i + 6] = .5
    m[4 * i + 4][4 * i + 7] = -.125
    m[4 * i + 5][4 * i + 0] = 0
    m[4 * i + 5][4 * i + 1] = 0
    m[4 * i + 5][4 * i + 2] = 1
    m[4 * i + 5][4 * i + 3] = .5
    m[4 * i + 5][4 * i + 4] = 0
    m[4 * i + 5][4 * i + 5] = 0
    m[4 * i + 5][4 * i + 6] = -1
    m[4 * i + 5][4 * i + 7] = .5
# boundary conditions linking the first and last knots
m[n * 4 + 2][2] = 1
m[n * 4 + 3][3] = 1
m[0][n * 4 + 2] = 1
m[1][n * 4 + 3] = 1
def printarr(m):
    # debug helper: dump the matrix with fixed-width columns (Python 2 print)
    for j in range(n * 4 + 4):
        for i in range(n * 4 + 4):
            print '%6.1f' % m[j][i],
        print ''
sys.output_line_width = 160
#print array2string(m, precision = 3)
# invert the constraint matrix and print the column corresponding to the
# middle knot's third coefficient
mi = la.inverse(m)
#printarr(mi)
print ''
for j in range(n + 1):
    for k in range(4):
        print '%7.2f' % mi[j * 4 + k][(n / 2) * 4 + 2],
    print ''
| apache-2.0 | -7,583,907,392,730,143,000 | 25 | 55 | 0.340144 | false |
janusnic/dj-21v | unit_10/mysite/userprofiles/utils.py | 6 | 2613 | from django.core.exceptions import ImproperlyConfigured
# -*- coding: utf-8 -*-
import functools
try:
import urlparse
except ImportError:
from urllib import parse as urlparse # python3 support
from django.core.exceptions import SuspiciousOperation
def default_redirect(request, fallback_url, **kwargs):
    """
    Evaluates a redirect url by consulting GET, POST and the session.
    """
    redirect_field_name = kwargs.get("redirect_field_name", "next")
    # NOTE(review): request.REQUEST (merged POST/GET) was removed in
    # Django 1.9; this code targets an older Django -- confirm the version.
    next = request.REQUEST.get(redirect_field_name)
    if not next:
        # try the session if available
        if hasattr(request, "session"):
            session_key_value = kwargs.get("session_key_value", "redirect_to")
            next = request.session.get(session_key_value)
    is_safe = functools.partial(
        ensure_safe_url,
        allowed_protocols=kwargs.get("allowed_protocols"),
        allowed_host=request.get_host()
    )
    redirect_to = next if next and is_safe(next) else fallback_url
    # perform one last check to ensure the URL is safe to redirect to. if it
    # is not then we should bail here as it is likely developer error and
    # they should be notified
    is_safe(redirect_to, raise_on_fail=True)
    return redirect_to
def ensure_safe_url(url, allowed_protocols=None, allowed_host=None, raise_on_fail=False):
    """
    Check that `url` is safe to redirect to.

    A URL is unsafe when its scheme is not one of `allowed_protocols`
    (http/https by default) or when it targets a host different from
    `allowed_host`. Returns True/False; raises SuspiciousOperation instead
    of returning False when `raise_on_fail` is set.
    """
    protocols = allowed_protocols if allowed_protocols is not None else ["http", "https"]
    parts = urlparse.urlparse(url)
    # perform security checks to ensure no malicious intent
    # (i.e., an XSS attack with a data URL)
    if parts.scheme and parts.scheme not in protocols:
        if raise_on_fail:
            raise SuspiciousOperation("Unsafe redirect to URL with protocol '%s'" % parts.scheme)
        return False
    if allowed_host and parts.netloc and parts.netloc != allowed_host:
        if raise_on_fail:
            raise SuspiciousOperation("Unsafe redirect to URL not matching host '%s'" % allowed_host)
        return False
    return True
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
def get_form_class(path):
    """Resolve a dotted *path* (e.g. "app.forms.MyForm") to the form class.

    Raises ImproperlyConfigured when the module cannot be imported or does
    not define the named attribute.
    """
    dot = path.rfind('.')
    module, attr = path[:dot], path[dot + 1:]
    try:
        mod = import_module(module)
    # except ImportError, e: # python 2.7
    except ImportError as e: # python 3.4
        raise ImproperlyConfigured( 'Error loading module %s: "%s"' % (module, e))
    try:
        return getattr(mod, attr)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a form named "%s"' % (module, attr))
varunarya10/python-glanceclient | glanceclient/common/https.py | 3 | 11205 | # Copyright 2014 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import struct
import OpenSSL
from requests import adapters
try:
from requests.packages.urllib3 import connectionpool
from requests.packages.urllib3 import poolmanager
except ImportError:
from urllib3 import connectionpool
from urllib3 import poolmanager
import six
import ssl
from glanceclient.common import utils
try:
from eventlet import patcher
# Handle case where we are running in a monkey patched environment
if patcher.is_monkey_patched('socket'):
from eventlet.green.httplib import HTTPSConnection
from eventlet.green.OpenSSL.SSL import GreenConnection as Connection
from eventlet.greenio import GreenSocket
# TODO(mclaren): A getsockopt workaround: see 'getsockopt' doc string
GreenSocket.getsockopt = utils.getsockopt
else:
raise ImportError
except ImportError:
try:
from httplib import HTTPSConnection
except ImportError:
from http.client import HTTPSConnection
from OpenSSL.SSL import Connection as Connection
from glanceclient import exc
from glanceclient.openstack.common import strutils
def to_bytes(s):
    """Encode text to bytes; return anything non-textual unchanged."""
    return six.b(s) if isinstance(s, six.string_types) else s
class HTTPSAdapter(adapters.HTTPAdapter):
    """
    This adapter will be used just when
    ssl compression should be disabled.

    The init method overwrites the default
    https pool by setting glanceclient's
    one (see HTTPSConnectionPool).
    """
    def __init__(self, *args, **kwargs):
        # NOTE(flaper87): This line forces poolmanager to use
        # glanceclient HTTPSConnection
        classes_by_scheme = poolmanager.pool_classes_by_scheme
        classes_by_scheme["glance+https"] = HTTPSConnectionPool
        super(HTTPSAdapter, self).__init__(*args, **kwargs)
    def request_url(self, request, proxies):
        # NOTE(flaper87): Make sure the url is encoded, otherwise
        # python's standard httplib will fail with a TypeError.
        url = super(HTTPSAdapter, self).request_url(request, proxies)
        return strutils.safe_encode(url)
    def cert_verify(self, conn, url, verify, cert):
        # `verify` is indexed as a (ca_certs, insecure) pair here rather than
        # the plain boolean requests normally passes -- presumably set up by
        # the glanceclient HTTP layer; confirm against the caller.
        super(HTTPSAdapter, self).cert_verify(conn, url, verify, cert)
        conn.ca_certs = verify[0]
        conn.insecure = verify[1]
class HTTPSConnectionPool(connectionpool.HTTPSConnectionPool):
    """
    HTTPSConnectionPool will be instantiated when a new
    connection is requested to the HTTPSAdapter. This
    implementation overrides the _new_conn method and
    returns an instance of glanceclient's VerifiedHTTPSConnection
    which handles no compression.

    ssl_compression is hard-coded to False because this will
    be used just when the user sets --no-ssl-compression.
    """
    scheme = 'glance+https'
    def _new_conn(self):
        """ Build a VerifiedHTTPSConnection with SSL compression disabled. """
        self.num_connections += 1
        return VerifiedHTTPSConnection(host=self.host,
                                       port=self.port,
                                       key_file=self.key_file,
                                       cert_file=self.cert_file,
                                       cacert=self.ca_certs,
                                       insecure=self.insecure,
                                       ssl_compression=False)
class OpenSSLConnectionDelegator(object):
    """
    An OpenSSL.SSL.Connection delegator.

    Supplies an additional 'makefile' method which httplib requires
    and is not present in OpenSSL.SSL.Connection.

    Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
    a delegator must be used.
    """
    def __init__(self, *args, **kwargs):
        self.connection = Connection(*args, **kwargs)
    def __getattr__(self, name):
        # forward everything else to the wrapped OpenSSL connection
        return getattr(self.connection, name)
    def makefile(self, *args, **kwargs):
        # NOTE(review): socket._fileobject is a Python 2 internal; this
        # path would need a different implementation on Python 3 -- confirm.
        return socket._fileobject(self.connection, *args, **kwargs)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Extended HTTPSConnection which uses the OpenSSL library
    for enhanced SSL support.
    Note: Much of this functionality can eventually be replaced
    with native Python 3.3 code.
    """
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 cacert=None, timeout=None, insecure=False,
                 ssl_compression=True):
        # List of exceptions reported by Python3 instead of
        # SSLConfigurationError
        if six.PY3:
            excp_lst = (TypeError, FileNotFoundError, ssl.SSLError)
        else:
            # empty tuple: the except clause below never matches on Python 2
            excp_lst = ()
        try:
            HTTPSConnection.__init__(self, host, port,
                                     key_file=key_file,
                                     cert_file=cert_file)
            self.key_file = key_file
            self.cert_file = cert_file
            self.timeout = timeout
            self.insecure = insecure
            # NOTE(flaper87): `is_verified` is needed for
            # requests' urllib3. If insecure is True then
            # the request is not `verified`, hence `not insecure`
            self.is_verified = not insecure
            self.ssl_compression = ssl_compression
            self.cacert = None if cacert is None else str(cacert)
            self.set_context()
            # ssl exceptions are reported in various form in Python 3
            # so to be compatible, we report the same kind as under
            # Python2
        except excp_lst as e:
            raise exc.SSLConfigurationError(str(e))
    @staticmethod
    def host_matches_cert(host, x509):
        """
        Verify that the x509 certificate we have received
        from 'host' correctly identifies the server we are
        connecting to, ie that the certificate's Common Name
        or a Subject Alternative Name matches 'host'.
        """
        def check_match(name):
            # Directly match the name
            if name == host:
                return True
            # Support single wildcard matching (e.g. *.example.com)
            if name.startswith('*.') and host.find('.') > 0:
                if name[2:] == host.split('.', 1)[1]:
                    return True
        common_name = x509.get_subject().commonName
        # First see if we can match the CN
        if check_match(common_name):
            return True
        # Also try Subject Alternative Names for a match
        san_list = None
        for i in range(x509.get_extension_count()):
            ext = x509.get_extension(i)
            if ext.get_short_name() == b'subjectAltName':
                san_list = str(ext)
                for san in ''.join(san_list.split()).split(','):
                    if san.startswith('DNS:'):
                        if check_match(san.split(':', 1)[1]):
                            return True
        # Server certificate does not match host
        msg = ('Host "%s" does not match x509 certificate contents: '
               'CommonName "%s"' % (host, common_name))
        if san_list is not None:
            msg = msg + ', subjectAltName "%s"' % san_list
        raise exc.SSLCertificateError(msg)
    def verify_callback(self, connection, x509, errnum,
                        depth, preverify_ok):
        # reject any certificate in the chain that is past its notAfter date
        if x509.has_expired():
            msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
            raise exc.SSLCertificateError(msg)
        if depth == 0 and preverify_ok:
            # We verify that the host matches against the last
            # certificate in the chain
            return self.host_matches_cert(self.host, x509)
        else:
            # Pass through OpenSSL's default result
            return preverify_ok
    def set_context(self):
        """
        Set up the OpenSSL context.
        """
        self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
        if self.ssl_compression is False:
            self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION
        if self.insecure is not True:
            self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
                                    self.verify_callback)
        else:
            # insecure mode: accept any peer certificate
            self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
                                    lambda *args: True)
        if self.cert_file:
            try:
                self.context.use_certificate_file(self.cert_file)
            except Exception as e:
                msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
                raise exc.SSLConfigurationError(msg)
            if self.key_file is None:
                # We support having key and cert in same file
                try:
                    self.context.use_privatekey_file(self.cert_file)
                except Exception as e:
                    msg = ('No key file specified and unable to load key '
                           'from "%s" %s' % (self.cert_file, e))
                    raise exc.SSLConfigurationError(msg)
        if self.key_file:
            try:
                self.context.use_privatekey_file(self.key_file)
            except Exception as e:
                msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
                raise exc.SSLConfigurationError(msg)
        if self.cacert:
            try:
                self.context.load_verify_locations(to_bytes(self.cacert))
            except Exception as e:
                msg = 'Unable to load CA from "%s" %s' % (self.cacert, e)
                raise exc.SSLConfigurationError(msg)
        else:
            # fall back to the system's default CA certificate locations
            self.context.set_default_verify_paths()
    def connect(self):
        """
        Connect to an SSL port using the OpenSSL library and apply
        per-connection parameters.
        """
        result = socket.getaddrinfo(self.host, self.port, 0,
                                    socket.SOCK_STREAM)
        if result:
            socket_family = result[0][0]
            if socket_family == socket.AF_INET6:
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            # If due to some reason the address lookup fails - we still connect
            # to IPv4 socket. This retains the older behavior.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.timeout is not None:
            # '0' microseconds
            # NOTE(review): packing 'LL' for struct timeval is platform
            # dependent (field widths vary across OSes) -- confirm targets.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
                            struct.pack('LL', self.timeout, 0))
        self.sock = OpenSSLConnectionDelegator(self.context, sock)
        self.sock.connect((self.host, self.port))
| apache-2.0 | 552,029,750,450,214,300 | 36.600671 | 79 | 0.596609 | false |
amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_vpn_certificate_ocsp_server.py | 14 | 11082 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_vpn_certificate_ocsp_server
short_description: OCSP server configuration in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify vpn_certificate feature and ocsp_server category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
vpn_certificate_ocsp_server:
description:
- OCSP server configuration.
default: null
type: dict
suboptions:
cert:
description:
- OCSP server certificate. Source vpn.certificate.remote.name vpn.certificate.ca.name.
type: str
name:
description:
- OCSP server entry name.
required: true
type: str
secondary_cert:
description:
- Secondary OCSP server certificate. Source vpn.certificate.remote.name vpn.certificate.ca.name.
type: str
secondary_url:
description:
- Secondary OCSP server URL.
type: str
source_ip:
description:
- Source IP address for communications to the OCSP server.
type: str
unavail_action:
description:
- Action when server is unavailable (revoke the certificate or ignore the result of the check).
type: str
choices:
- revoke
- ignore
url:
description:
- OCSP server URL.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: OCSP server configuration.
fortios_vpn_certificate_ocsp_server:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
vpn_certificate_ocsp_server:
cert: "<your_own_value> (source vpn.certificate.remote.name vpn.certificate.ca.name)"
name: "default_name_4"
secondary_cert: "<your_own_value> (source vpn.certificate.remote.name vpn.certificate.ca.name)"
secondary_url: "<your_own_value>"
source_ip: "84.230.14.43"
unavail_action: "revoke"
url: "myurl.com"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open an authenticated session on the FortiGate described by *data*.

    Enables API debugging, selects HTTP or HTTPS transport based on the
    optional ``https`` flag (HTTPS by default), then logs in with the
    supplied credentials.

    :param data: module parameters (host, username, password, ssl_verify,
        optionally https)
    :param fos: FortiOSAPI-like handler exposing debug/https/login
    """
    host = data['host']
    user = data['username']
    pwd = data['password']
    verify = data['ssl_verify']

    fos.debug('on')
    # Plain HTTP only when 'https' is present and explicitly falsy.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')

    fos.login(host, user, pwd, verify=verify)
def filter_vpn_certificate_ocsp_server_data(json):
    """Return a copy of *json* reduced to the options this endpoint accepts.

    Keys that are absent or explicitly None are dropped so they are not
    sent to the FortiGate API.
    """
    option_list = ['cert', 'name', 'secondary_cert',
                   'secondary_url', 'source_ip', 'unavail_action',
                   'url']

    return dict((key, json[key]) for key in option_list
                if key in json and json[key] is not None)
def underscore_to_hyphen(data):
    """Recursively rewrite dictionary keys, replacing '_' with '-'.

    FortiOS API field names use hyphens while Ansible argument names use
    underscores; this converts a parsed argument structure into the form
    the API expects. Dict values are converted recursively, list elements
    are converted in place, and scalars are returned unchanged.
    """
    if isinstance(data, list):
        # Write the converted element back into the list: the previous
        # implementation rebound only the loop variable, so dictionaries
        # nested inside lists kept their underscore keys.
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def vpn_certificate_ocsp_server(data, fos):
    """Create/update or delete an OCSP server entry on the FortiGate.

    Builds the API payload from the module parameters, then issues a
    ``set`` (state == present) or ``delete`` (state == absent) against the
    vpn.certificate/ocsp-server endpoint and returns the raw API reply.
    """
    vdom = data['vdom']
    state = data['state']
    raw_config = data['vpn_certificate_ocsp_server']
    payload = underscore_to_hyphen(
        filter_vpn_certificate_ocsp_server_data(raw_config))

    if state == "present":
        return fos.set('vpn.certificate',
                       'ocsp-server',
                       data=payload,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('vpn.certificate',
                          'ocsp-server',
                          mkey=payload['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Return True when the API reply denotes success.

    A DELETE answered with HTTP 404 also counts as success: the object is
    already gone, which is the desired end state.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_vpn_certificate(data, fos):
    # Dispatch to the vpn.certificate sub-resource handler and normalize the
    # reply into the (is_error, changed, result) triple that main() expects.
    # NOTE(review): if 'vpn_certificate_ocsp_server' is falsy this returns
    # None, which would break unpacking in main(); presumably the argument
    # spec guarantees the key is a populated dict -- confirm.
    if data['vpn_certificate_ocsp_server']:
        resp = vpn_certificate_ocsp_server(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point.

    Builds the Ansible argument spec, connects to the FortiGate either
    through the persistent HTTPAPI connection or -- in legacy mode, when
    host/username/password are all given -- through the fortiosapi
    library, applies the requested configuration and reports the result.
    """
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "vpn_certificate_ocsp_server": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "cert": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "secondary_cert": {"required": False, "type": "str"},
                "secondary_url": {"required": False, "type": "str"},
                "source_ip": {"required": False, "type": "str"},
                "unavail_action": {"required": False, "type": "str",
                                   "choices": ["revoke", "ignore"]},
                "url": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI path: reuse the persistent connection plugin socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_vpn_certificate(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: drive the device directly with fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_vpn_certificate(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| gpl-3.0 | -4,706,880,943,874,238,000 | 30.57265 | 116 | 0.593575 | false |
remh/dd-agent | checks.d/vsphere.py | 27 | 33304 | # stdlib
from copy import deepcopy
from datetime import datetime, timedelta
from hashlib import md5
from Queue import Empty, Queue
import re
import time
import traceback
# 3p
from pyVim import connect
from pyVmomi import vim
# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from checks.libs.vmware.basic_metrics import BASIC_METRICS
from util import Timer
SOURCE_TYPE = 'vsphere'
REAL_TIME_INTERVAL = 20  # Default vCenter sampling interval

# The size of the ThreadPool used to process the request queue
DEFAULT_SIZE_POOL = 4
# The interval in seconds between two refresh of the entities list
REFRESH_MORLIST_INTERVAL = 3 * 60
# The interval in seconds between two refresh of metrics metadata (id<->name)
REFRESH_METRICS_METADATA_INTERVAL = 10 * 60
# The amount of jobs batched at the same time in the queue to query available metrics
BATCH_MORLIST_SIZE = 50

# Time after which we reap the jobs that clog the queue
# TODO: use it
JOB_TIMEOUT = 10

# Event types we may report, each mapped to a list of regexes: an event
# whose fullFormattedMessage matches one of its type's regexes is dropped,
# and event types absent from this map are dropped entirely.
EXCLUDE_FILTERS = {
    'AlarmStatusChangedEvent': [r'Gray'],
    'TaskEvent': [
        r'Initialize powering On',
        r'Power Off virtual machine',
        r'Power On virtual machine',
        r'Reconfigure virtual machine',
        r'Relocate virtual machine',
        r'Suspend virtual machine',
        r'Migrate virtual machine',
    ],
    'VmBeingHotMigratedEvent': [],
    'VmMessageEvent': [],
    'VmMigratedEvent': [],
    'VmPoweredOnEvent': [],
    'VmPoweredOffEvent': [],
    'VmReconfiguredEvent': [],
    'VmResumedEvent': [],
    'VmSuspendedEvent': [],
}

# Keys of the self.cache_times bookkeeping structure.
MORLIST = 'morlist'
METRICS_METADATA = 'metrics_metadata'
LAST = 'last'
INTERVAL = 'interval'
class VSphereEvent(object):
    """Wraps a raw pyVmomi event and turns it into a Datadog event payload."""

    UNKNOWN = 'unknown'

    def __init__(self, raw_event, event_config=None):
        self.raw_event = raw_event
        # Derive a short type name from the pyVmomi class, e.g.
        # 'vim.event.VmPoweredOnEvent' -> 'VmPoweredOnEvent'.
        if self.raw_event and self.raw_event.__class__.__name__.startswith('vim.event'):
            self.event_type = self.raw_event.__class__.__name__[10:]
        else:
            self.event_type = VSphereEvent.UNKNOWN

        # Event creation time as a Unix timestamp (tz info dropped).
        self.timestamp = int((self.raw_event.createdTime.replace(tzinfo=None) - datetime(1970, 1, 1)).total_seconds())
        # Base payload shared by all transformations below.
        self.payload = {
            "timestamp": self.timestamp,
            "event_type": SOURCE_TYPE,
            "source_type_name": SOURCE_TYPE,
        }
        if event_config is None:
            self.event_config = {}
        else:
            self.event_config = event_config
def _is_filtered(self):
# Filter the unwanted types
if self.event_type not in EXCLUDE_FILTERS:
return True
filters = EXCLUDE_FILTERS[self.event_type]
for f in filters:
if re.search(f, self.raw_event.fullFormattedMessage):
return True
return False
    def get_datadog_payload(self):
        # Drop filtered-out events early; callers treat None as "skip".
        if self._is_filtered():
            return None

        # Dispatch to a transform_<eventtype> method when one exists, e.g.
        # transform_vmpoweredonevent for a VmPoweredOnEvent.
        transform_method = getattr(self, 'transform_%s' % self.event_type.lower(), None)
        if callable(transform_method):
            return transform_method()

        # Default event transformation
        self.payload["msg_title"] = u"{0}".format(self.event_type)
        self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)

        return self.payload
    def transform_vmbeinghotmigratedevent(self):
        # Payload for a VmBeingHotMigratedEvent: summarizes which of
        # host / datacenter / datastore actually changes in the migration,
        # listing the real changes first.
        self.payload["msg_title"] = u"VM {0} is being migrated".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"{user} has launched a hot migration of this virtual machine:\n".format(user=self.raw_event.userName)
        changes = []
        pre_host = self.raw_event.host.name
        new_host = self.raw_event.destHost.name
        pre_dc = self.raw_event.datacenter.name
        new_dc = self.raw_event.destDatacenter.name
        pre_ds = self.raw_event.ds.name
        new_ds = self.raw_event.destDatastore.name
        if pre_host == new_host:
            changes.append(u"- No host migration: still {0}".format(new_host))
        else:
            # Insert in front if it's a change
            changes = [u"- Host MIGRATION: from {0} to {1}".format(pre_host, new_host)] + changes
        if pre_dc == new_dc:
            changes.append(u"- No datacenter migration: still {0}".format(new_dc))
        else:
            # Insert in front if it's a change
            changes = [u"- Datacenter MIGRATION: from {0} to {1}".format(pre_dc, new_dc)] + changes
        if pre_ds == new_ds:
            changes.append(u"- No datastore migration: still {0}".format(new_ds))
        else:
            # Insert in front if it's a change
            changes = [u"- Datastore MIGRATION: from {0} to {1}".format(pre_ds, new_ds)] + changes

        self.payload["msg_text"] += "\n".join(changes)

        # Tag with both source and destination so the event shows on each.
        self.payload['host'] = self.raw_event.vm.name
        self.payload['tags'] = [
            'vsphere_host:%s' % pre_host,
            'vsphere_host:%s' % new_host,
            'vsphere_datacenter:%s' % pre_dc,
            'vsphere_datacenter:%s' % new_dc,
        ]
        return self.payload
    def transform_alarmstatuschangedevent(self):
        # vCenter alarms are opt-in: only report when enabled in the config.
        if self.event_config.get('collect_vcenter_alarms') is None:
            return None

        def get_transition(before, after):
            # Map alarm colors to severities to decide whether the alarm is
            # escalating (Triggered) or de-escalating (Recovered).
            vals = {
                'gray': -1,
                'green': 0,
                'yellow': 1,
                'red': 2
            }
            before = before.lower()
            after = after.lower()
            if before not in vals or after not in vals:
                return None
            if vals[before] < vals[after]:
                return 'Triggered'
            else:
                return 'Recovered'

        # Map the new alarm color to a Datadog alert type.
        TO_ALERT_TYPE = {
            'green': 'success',
            'yellow': 'warning',
            'red': 'error'
        }

        def get_agg_key(alarm_event):
            # Aggregation key so transitions of the same alarm on the same
            # entity/datacenter group together in the event stream.
            return 'h:{0}|dc:{1}|a:{2}'.format(
                md5(alarm_event.entity.name).hexdigest()[:10],
                md5(alarm_event.datacenter.name).hexdigest()[:10],
                md5(alarm_event.alarm.name).hexdigest()[:10]
            )

        # Get the entity type/name; only VM and host alarms are reported.
        if self.raw_event.entity.entity.__class__ == vim.VirtualMachine:
            host_type = 'VM'
        elif self.raw_event.entity.entity.__class__ == vim.HostSystem:
            host_type = 'host'
        else:
            return None
        host_name = self.raw_event.entity.name

        # Need a getattr because from is a reserved keyword...
        trans_before = getattr(self.raw_event, 'from')
        trans_after = self.raw_event.to
        transition = get_transition(trans_before, trans_after)
        # Bad transition, we shouldn't have got this transition
        if transition is None:
            return None

        self.payload['msg_title'] = u"[{transition}] {monitor} on {host_type} {host_name} is now {status}".format(
            transition=transition,
            monitor=self.raw_event.alarm.name,
            host_type=host_type,
            host_name=host_name,
            status=trans_after
        )
        self.payload['alert_type'] = TO_ALERT_TYPE[trans_after]
        self.payload['event_object'] = get_agg_key(self.raw_event)
        self.payload['msg_text'] = u"""vCenter monitor status changed on this alarm, it was {before} and it's now {after}.""".format(
            before=trans_before,
            after=trans_after
        )
        self.payload['host'] = host_name
        return self.payload
def transform_vmmessageevent(self):
self.payload["msg_title"] = u"VM {0} is reporting".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmmigratedevent(self):
self.payload["msg_title"] = u"VM {0} has been migrated".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
self.payload['host'] = self.raw_event.vm.name
return self.payload
    def transform_vmpoweredoffevent(self):
        # Payload for a VmPoweredOffEvent: who powered off the VM and where
        # it was running.
        self.payload["msg_title"] = u"VM {0} has been powered OFF".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has powered off this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmpoweredonevent(self):
        # Payload for a VmPoweredOnEvent: who powered on the VM and where
        # it is running.
        self.payload["msg_title"] = u"VM {0} has been powered ON".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has powered on this virtual machine. It is running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
def transform_vmresumingevent(self):
self.payload["msg_title"] = u"VM {0} is RESUMING".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has resumed {vm}. It will soon be powered on.""".format(
user=self.raw_event.userName,
vm=self.raw_event.vm.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
    def transform_vmsuspendedevent(self):
        # Payload for a VmSuspendedEvent: who suspended the VM and where it
        # was running.
        self.payload["msg_title"] = u"VM {0} has been SUSPENDED".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has suspended this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
def transform_vmreconfiguredevent(self):
self.payload["msg_title"] = u"VM {0} configuration has been changed".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"{user} saved the new configuration:\n@@@\n".format(user=self.raw_event.userName)
# Add lines for configuration change don't show unset, that's hacky...
config_change_lines = [line for line in self.raw_event.configSpec.__repr__().splitlines() if 'unset' not in line]
self.payload["msg_text"] += u"\n".join(config_change_lines)
self.payload["msg_text"] += u"\n@@@"
self.payload['host'] = self.raw_event.vm.name
return self.payload
def atomic_method(method):
    """Decorator for tasks that run detached in the thread pool.

    Any exception raised by the wrapped method is swallowed and its
    traceback is pushed onto the owning object's ``exceptionq`` (the first
    positional argument is assumed to be that object), so worker crashes
    surface in the main check instead of dying silently in a thread.
    """
    def wrapper(*args, **kwargs):
        try:
            method(*args, **kwargs)
        except Exception:
            owner = args[0]
            owner.exceptionq.put("A worker thread crashed:\n" + traceback.format_exc())
    return wrapper
class VSphereCheck(AgentCheck):
    """ Get performance metrics from a vCenter server and upload them to Datadog
    References:
        http://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.PerformanceManager.html

    *_atomic jobs perform one single task asynchronously in the ThreadPool, we
    don't know exactly when they will finish, but we reap them if they're stuck.
    The other calls are performed synchronously.
    """

    SERVICE_CHECK_NAME = 'vcenter.can_connect'

    def __init__(self, name, init_config, agentConfig, instances):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        self.time_started = time.time()
        self.pool_started = False
        # Exceptions raised inside @atomic_method jobs land here and are
        # drained at the end of every check() run.
        self.exceptionq = Queue()

        # Connections open to vCenter instances
        self.server_instances = {}

        # Event configuration
        self.event_config = {}

        # Caching resources, timeouts
        # cache_times[i_key][MORLIST|METRICS_METADATA] tracks, per instance,
        # when each cache was last refreshed and how often it should be.
        self.cache_times = {}
        for instance in self.instances:
            i_key = self._instance_key(instance)
            self.cache_times[i_key] = {
                MORLIST: {
                    LAST: 0,
                    INTERVAL: init_config.get('refresh_morlist_interval',
                                              REFRESH_MORLIST_INTERVAL)
                },
                METRICS_METADATA: {
                    LAST: 0,
                    INTERVAL: init_config.get('refresh_metrics_metadata_interval',
                                              REFRESH_METRICS_METADATA_INTERVAL)
                }
            }

            self.event_config[i_key] = instance.get('event_config')

        # First layer of cache (get entities from the tree)
        self.morlist_raw = {}
        # Second layer, processed from the first one
        self.morlist = {}
        # Metrics metadata, basically perfCounterId -> {name, group, description}
        self.metrics_metadata = {}

        # Timestamp of the last event query, per instance, to avoid duplicates.
        self.latest_event_query = {}
    def stop(self):
        # Called by the collector on shutdown.
        self.stop_pool()

    def start_pool(self):
        self.log.info("Starting Thread Pool")
        self.pool_size = int(self.init_config.get('threads_count', DEFAULT_SIZE_POOL))

        self.pool = Pool(self.pool_size)
        self.pool_started = True
        # job name -> start time; inspected by _clean() to reap stuck jobs.
        self.jobs_status = {}

    def stop_pool(self):
        self.log.info("Stopping Thread Pool")
        if self.pool_started:
            self.pool.terminate()
            self.pool.join()
            self.jobs_status.clear()
            assert self.pool.get_nworkers() == 0
            self.pool_started = False

    def restart_pool(self):
        self.stop_pool()
        self.start_pool()
def _clean(self):
now = time.time()
# TODO: use that
for name in self.jobs_status.keys():
start_time = self.jobs_status[name]
if now - start_time > JOB_TIMEOUT:
self.log.critical("Restarting Pool. One check is stuck.")
self.restart_pool()
break
    def _query_event(self, instance):
        """Fetch vCenter events newer than the last poll and emit them."""
        i_key = self._instance_key(instance)
        last_time = self.latest_event_query.get(i_key)

        server_instance = self._get_server_instance(instance)
        event_manager = server_instance.content.eventManager

        # Be sure we don't duplicate any event, never query the "past"
        if not last_time:
            last_time = self.latest_event_query[i_key] = \
                event_manager.latestEvent.createdTime + timedelta(seconds=1)

        query_filter = vim.event.EventFilterSpec()
        time_filter = vim.event.EventFilterSpec.ByTime(beginTime=self.latest_event_query[i_key])
        query_filter.time = time_filter

        try:
            new_events = event_manager.QueryEvents(query_filter)
            self.log.debug("Got {0} events from vCenter event manager".format(len(new_events)))
            for event in new_events:
                normalized_event = VSphereEvent(event, self.event_config[i_key])
                # Can return None if the event if filtered out
                event_payload = normalized_event.get_datadog_payload()
                if event_payload is not None:
                    self.event(event_payload)
                # Advance the watermark past each event we processed.
                last_time = event.createdTime + timedelta(seconds=1)
        except Exception as e:
            # Don't get stuck on a failure to fetch an event
            # Ignore them for next pass
            self.log.warning("Unable to fetch Events %s", e)
            last_time = event_manager.latestEvent.createdTime + timedelta(seconds=1)

        self.latest_event_query[i_key] = last_time
def _instance_key(self, instance):
i_key = instance.get('name')
if i_key is None:
raise Exception("Must define a unique 'name' per vCenter instance")
return i_key
def _should_cache(self, instance, entity):
i_key = self._instance_key(instance)
now = time.time()
return now - self.cache_times[i_key][entity][LAST] > self.cache_times[i_key][entity][INTERVAL]
    def _get_server_instance(self, instance):
        """Return a connected pyVmomi ServiceInstance, opening one if needed.

        Emits the vcenter.can_connect service check on both the initial
        connection and the liveness probe, and raises on failure.
        """
        i_key = self._instance_key(instance)

        service_check_tags = [
            'vcenter_server:{0}'.format(instance.get('name')),
            'vcenter_host:{0}'.format(instance.get('host')),
        ]

        if i_key not in self.server_instances:
            try:
                server_instance = connect.SmartConnect(
                    host=instance.get('host'),
                    user=instance.get('username'),
                    pwd=instance.get('password')
                )
            except Exception as e:
                err_msg = "Connection to %s failed: %s" % (instance.get('host'), e)
                self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                                   tags=service_check_tags, message=err_msg)
                raise Exception(err_msg)

            self.server_instances[i_key] = server_instance

        # Test if the connection is working
        try:
            self.server_instances[i_key].RetrieveContent()
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                               tags=service_check_tags)
        except Exception as e:
            err_msg = "Connection to %s died unexpectedly: %s" % (instance.get('host'), e)
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                               tags=service_check_tags, message=err_msg)
            raise Exception(err_msg)

        return self.server_instances[i_key]
def _compute_needed_metrics(self, instance, available_metrics):
""" Compare the available metrics for one MOR we have computed and intersect them
with the set of metrics we want to report
"""
if instance.get('all_metrics', False):
return available_metrics
i_key = self._instance_key(instance)
wanted_metrics = []
# Get only the basic metrics
for metric in available_metrics:
# No cache yet, skip it for now
if (i_key not in self.metrics_metadata
or metric.counterId not in self.metrics_metadata[i_key]):
continue
if self.metrics_metadata[i_key][metric.counterId]['name'] in BASIC_METRICS:
wanted_metrics.append(metric)
return wanted_metrics
def get_external_host_tags(self):
""" Returns a list of tags for every host that is detected by the vSphere
integration.
List of pairs (hostname, list_of_tags)
"""
self.log.info("Sending external_host_tags now")
external_host_tags = []
for instance in self.instances:
i_key = self._instance_key(instance)
mor_list = self.morlist[i_key].items()
for mor_name, mor in mor_list:
external_host_tags.append((mor['hostname'], {SOURCE_TYPE: mor['tags']}))
return external_host_tags
    @atomic_method
    def _cache_morlist_raw_atomic(self, i_key, obj_type, obj, tags, regexes=None):
        """ Compute tags for a single node in the vCenter rootFolder
        and queue other such jobs for children nodes.
        Usual hierarchy:
        rootFolder
        - datacenter1
          - compute_resource1 == cluster
            - host1
            - host2
            - host3
          - compute_resource2
            - host5
              - vm1
              - vm2
        If it's a node we want to query metric for, queue it in self.morlist_raw
        that will be processed by another job.
        """
        ### <TEST-INSTRUMENTATION>
        t = Timer()
        self.log.debug("job_atomic: Exploring MOR {0} (type={1})".format(obj, obj_type))
        ### </TEST-INSTRUMENTATION>

        # Each recursion level works on its own copy so sibling branches
        # don't share (and pollute) the same tag list.
        tags_copy = deepcopy(tags)

        if obj_type == 'rootFolder':
            for datacenter in obj.childEntity:
                # Skip non-datacenter
                if not hasattr(datacenter, 'hostFolder'):
                    continue
                self.pool.apply_async(
                    self._cache_morlist_raw_atomic,
                    args=(i_key, 'datacenter', datacenter, tags_copy, regexes)
                )

        elif obj_type == 'datacenter':
            dc_tag = "vsphere_datacenter:%s" % obj.name
            tags_copy.append(dc_tag)
            for compute_resource in obj.hostFolder.childEntity:
                # Skip non-compute resource
                if not hasattr(compute_resource, 'host'):
                    continue
                self.pool.apply_async(
                    self._cache_morlist_raw_atomic,
                    args=(i_key, 'compute_resource', compute_resource, tags_copy, regexes)
                )

        elif obj_type == 'compute_resource':
            # Only clustered compute resources get a cluster tag.
            if obj.__class__ == vim.ClusterComputeResource:
                cluster_tag = "vsphere_cluster:%s" % obj.name
                tags_copy.append(cluster_tag)
            for host in obj.host:
                # Skip non-host
                if not hasattr(host, 'vm'):
                    continue
                self.pool.apply_async(
                    self._cache_morlist_raw_atomic,
                    args=(i_key, 'host', host, tags_copy, regexes)
                )

        elif obj_type == 'host':
            if regexes and regexes.get('host_include') is not None:
                match = re.search(regexes['host_include'], obj.name)
                if not match:
                    self.log.debug(u"Filtered out VM {0} because of host_include_only_regex".format(obj.name))
                    return
            watched_mor = dict(mor_type='host', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:host'])
            self.morlist_raw[i_key].append(watched_mor)

            host_tag = "vsphere_host:%s" % obj.name
            tags_copy.append(host_tag)
            for vm in obj.vm:
                # Only powered-on VMs are worth querying for metrics.
                if vm.runtime.powerState != 'poweredOn':
                    continue
                self.pool.apply_async(
                    self._cache_morlist_raw_atomic,
                    args=(i_key, 'vm', vm, tags_copy, regexes)
                )

        elif obj_type == 'vm':
            if regexes and regexes.get('vm_include') is not None:
                match = re.search(regexes['vm_include'], obj.name)
                if not match:
                    self.log.debug(u"Filtered out VM {0} because of vm_include_only_regex".format(obj.name))
                    return
            watched_mor = dict(mor_type='vm', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:vm'])
            self.morlist_raw[i_key].append(watched_mor)

        ### <TEST-INSTRUMENTATION>
        self.histogram('datadog.agent.vsphere.morlist_raw_atomic.time', t.total())
        ### </TEST-INSTRUMENTATION>
    def _cache_morlist_raw(self, instance):
        """ Initiate the first layer to refresh self.morlist by queueing
        _cache_morlist_raw_atomic on the rootFolder in a recursive/asncy approach
        """
        i_key = self._instance_key(instance)
        self.log.debug("Caching the morlist for vcenter instance %s" % i_key)
        # Don't kick off a new discovery while the previous RAW batch is
        # still being consumed by _cache_morlist_process.
        if i_key in self.morlist_raw and len(self.morlist_raw[i_key]) > 0:
            self.log.debug(
                "Skipping morlist collection now, RAW results "
                "processing not over (latest refresh was {0}s ago)".format(
                    time.time() - self.cache_times[i_key][MORLIST][LAST])
            )
            return
        self.morlist_raw[i_key] = []

        server_instance = self._get_server_instance(instance)
        root_folder = server_instance.content.rootFolder

        instance_tag = "vcenter_server:%s" % instance.get('name')
        # Optional whitelist regexes from the instance configuration.
        regexes = {
            'host_include': instance.get('host_include_only_regex'),
            'vm_include': instance.get('vm_include_only_regex')
        }
        self.pool.apply_async(
            self._cache_morlist_raw_atomic,
            args=(i_key, 'rootFolder', root_folder, [instance_tag], regexes)
        )
        self.cache_times[i_key][MORLIST][LAST] = time.time()
    @atomic_method
    def _cache_morlist_process_atomic(self, instance, mor):
        """ Process one item of the self.morlist_raw list by querying the available
        metrics for this MOR and then putting it in self.morlist
        """
        ### <TEST-INSTRUMENTATION>
        t = Timer()
        ### </TEST-INSTRUMENTATION>
        i_key = self._instance_key(instance)
        server_instance = self._get_server_instance(instance)
        perfManager = server_instance.content.perfManager

        self.log.debug(
            "job_atomic: Querying available metrics"
            " for MOR {0} (type={1})".format(mor['mor'], mor['mor_type'])
        )

        available_metrics = perfManager.QueryAvailablePerfMetric(
            mor['mor'], intervalId=REAL_TIME_INTERVAL)

        # Keep only the metrics we actually report (or all of them when the
        # instance asks for 'all_metrics').
        mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)

        mor_name = str(mor['mor'])
        if mor_name in self.morlist[i_key]:
            # Was already here last iteration
            self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
        else:
            self.morlist[i_key][mor_name] = mor

        # 'last_seen' drives eviction in _vacuum_morlist.
        self.morlist[i_key][mor_name]['last_seen'] = time.time()

        ### <TEST-INSTRUMENTATION>
        self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', t.total())
        ### </TEST-INSTRUMENTATION>
    def _cache_morlist_process(self, instance):
        """ Empties the self.morlist_raw by popping items and running asynchronously
        the _cache_morlist_process_atomic operation that will get the available
        metrics for this MOR and put it in self.morlist
        """
        i_key = self._instance_key(instance)
        if i_key not in self.morlist:
            self.morlist[i_key] = {}

        # Only a bounded batch per check run to keep the queue manageable.
        batch_size = self.init_config.get('batch_morlist_size', BATCH_MORLIST_SIZE)

        for i in xrange(batch_size):
            try:
                mor = self.morlist_raw[i_key].pop()
                self.pool.apply_async(self._cache_morlist_process_atomic, args=(instance, mor))

            except (IndexError, KeyError):
                self.log.debug("No more work to process in morlist_raw")
                return
def _vacuum_morlist(self, instance):
""" Check if self.morlist doesn't have some old MORs that are gone, ie
we cannot get any metrics from them anyway (or =0)
"""
i_key = self._instance_key(instance)
morlist = self.morlist[i_key].items()
for mor_name, mor in morlist:
last_seen = mor['last_seen']
if (time.time() - last_seen) > 2 * REFRESH_MORLIST_INTERVAL:
del self.morlist[i_key][mor_name]
    def _cache_metrics_metadata(self, instance):
        """ Get from the server instance, all the performance counters metadata
        meaning name/group/description... attached with the corresponding ID
        """
        ### <TEST-INSTRUMENTATION>
        t = Timer()
        ### </TEST-INSTRUMENTATION>
        i_key = self._instance_key(instance)
        self.log.info("Warming metrics metadata cache for instance {0}".format(i_key))
        server_instance = self._get_server_instance(instance)
        perfManager = server_instance.content.perfManager

        new_metadata = {}
        for counter in perfManager.perfCounter:
            # e.g. name 'cpu.usage', unit 'percent'.
            d = dict(
                name = "%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
                unit = counter.unitInfo.key,
                instance_tag = 'instance'  # FIXME: replace by what we want to tag!
            )
            new_metadata[counter.key] = d
        self.cache_times[i_key][METRICS_METADATA][LAST] = time.time()

        self.log.info("Finished metadata collection for instance {0}".format(i_key))
        # Reset metadata
        self.metrics_metadata[i_key] = new_metadata

        ### <TEST-INSTRUMENTATION>
        self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total())
        ### </TEST-INSTRUMENTATION>
def _transform_value(self, instance, counter_id, value):
""" Given the counter_id, look up for the metrics metadata to check the vsphere
type of the counter and apply pre-reporting transformation if needed.
"""
i_key = self._instance_key(instance)
if counter_id in self.metrics_metadata[i_key]:
unit = self.metrics_metadata[i_key][counter_id]['unit']
if unit == 'percent':
return float(value) / 100
# Defaults to return the value without transformation
return value
    @atomic_method
    def _collect_metrics_atomic(self, instance, mor):
        """ Task that collects the metrics listed in the morlist for one MOR
        """
        ### <TEST-INSTRUMENTATION>
        t = Timer()
        ### </TEST-INSTRUMENTATION>
        i_key = self._instance_key(instance)
        server_instance = self._get_server_instance(instance)
        perfManager = server_instance.content.perfManager

        # maxSample=1: only the most recent real-time (20s) sample per counter.
        query = vim.PerformanceManager.QuerySpec(maxSample=1,
                                                 entity=mor['mor'],
                                                 metricId=mor['metrics'],
                                                 intervalId=20,
                                                 format='normal')
        results = perfManager.QueryPerf(querySpec=[query])
        if results:
            for result in results[0].value:
                if result.id.counterId not in self.metrics_metadata[i_key]:
                    self.log.debug("Skipping this metric value, because there is no metadata about it")
                    continue
                instance_name = result.id.instance or "none"
                value = self._transform_value(instance, result.id.counterId, result.value[0])
                self.gauge(
                    "vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
                    value,
                    hostname=mor['hostname'],
                    tags=['instance:%s' % instance_name]
                )

        ### <TEST-INSTRUMENTATION>
        self.histogram('datadog.agent.vsphere.metric_colection.time', t.total())
        ### </TEST-INSTRUMENTATION>
def collect_metrics(self, instance):
""" Calls asynchronously _collect_metrics_atomic on all MORs, as the
job queue is processed the Aggregator will receive the metrics.
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.log.debug("Not collecting metrics for this instance, nothing to do yet: {0}".format(i_key))
return
mors = self.morlist[i_key].items()
self.log.debug("Collecting metrics of %d mors" % len(mors))
vm_count = 0
for mor_name, mor in mors:
if mor['mor_type'] == 'vm':
vm_count += 1
if 'metrics' not in mor:
# self.log.debug("Skipping entity %s collection because we didn't cache its metrics yet" % mor['hostname'])
continue
self.pool.apply_async(self._collect_metrics_atomic, args=(instance, mor))
self.gauge('vsphere.vm.count', vm_count, tags=["vcenter_server:%s" % instance.get('name')])
    def check(self, instance):
        """Main entry point, called once per instance per collection cycle."""
        if not self.pool_started:
            self.start_pool()
        ### <TEST-INSTRUMENTATION>
        self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:initial'])
        ### </TEST-INSTRUMENTATION>

        # First part: make sure our object repository is neat & clean
        if self._should_cache(instance, METRICS_METADATA):
            self._cache_metrics_metadata(instance)

        if self._should_cache(instance, MORLIST):
            self._cache_morlist_raw(instance)
        self._cache_morlist_process(instance)
        self._vacuum_morlist(instance)

        # Second part: do the job
        self.collect_metrics(instance)
        self._query_event(instance)

        # For our own sanity
        self._clean()

        # Surface any exception raised inside @atomic_method worker jobs.
        thread_crashed = False
        try:
            while True:
                self.log.critical(self.exceptionq.get_nowait())
                thread_crashed = True
        except Empty:
            pass
        if thread_crashed:
            self.stop_pool()
            raise Exception("One thread in the pool crashed, check the logs")

        ### <TEST-INSTRUMENTATION>
        self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:final'])
        ### </TEST-INSTRUMENTATION>
if __name__ == '__main__':
    # Ad-hoc manual test harness: run the check in a loop against the local
    # conf.d/vsphere.yaml (Python 2 script, hence print statements/xrange).
    check, _instances = VSphereCheck.from_yaml('conf.d/vsphere.yaml')
    try:
        for i in xrange(200):
            print "Loop %d" % i
            for instance in check.instances:
                check.check(instance)
                if check.has_events():
                    print 'Events: %s' % (check.get_events())
                print 'Metrics: %d' % (len(check.get_metrics()))
            time.sleep(10)
    except Exception as e:
        print "Whoops something happened {0}".format(traceback.format_exc())
    finally:
        check.stop()
| bsd-3-clause | 8,482,297,408,791,674,000 | 38.694875 | 137 | 0.57996 | false |
openstack/ironic | ironic/objects/port.py | 1 | 23131 | # coding=utf-8
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import netutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
from oslo_versionedobjects import base as object_base
from ironic.common import exception
from ironic.common import utils
from ironic.db import api as dbapi
from ironic.objects import base
from ironic.objects import fields as object_fields
from ironic.objects import notification
@base.IronicObjectRegistry.register
class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Add get() and get_by_id() and get_by_address() and
    #              make get_by_uuid() only work with a uuid
    # Version 1.2: Add create() and destroy()
    # Version 1.3: Add list()
    # Version 1.4: Add list_by_node_id()
    # Version 1.5: Add list_by_portgroup_id() and new fields
    #              local_link_connection, portgroup_id and pxe_enabled
    # Version 1.6: Add internal_info field
    # Version 1.7: Add physical_network field
    # Version 1.8: Migrate/copy extra['vif_port_id'] to
    #              internal_info['tenant_vif_port_id'] (not an explicit db
    #              change)
    # Version 1.9: Add support for Smart NIC port
    # Version 1.10: Add name field
    VERSION = '1.10'

    # Shared DB API handle, resolved once at class-definition time.
    dbapi = dbapi.get_instance()

    # Object schema: field name -> versioned-object field type.
    fields = {
        'id': object_fields.IntegerField(),
        'uuid': object_fields.UUIDField(nullable=True),
        'node_id': object_fields.IntegerField(nullable=True),
        'address': object_fields.MACAddressField(nullable=True),
        'extra': object_fields.FlexibleDictField(nullable=True),
        'local_link_connection': object_fields.FlexibleDictField(
            nullable=True),
        'portgroup_id': object_fields.IntegerField(nullable=True),
        'pxe_enabled': object_fields.BooleanField(),
        'internal_info': object_fields.FlexibleDictField(nullable=True),
        'physical_network': object_fields.StringField(nullable=True),
        'is_smartnic': object_fields.BooleanField(nullable=True,
                                                  default=False),
        'name': object_fields.StringField(nullable=True),
    }
def _convert_name_field(self, target_version,
remove_unavailable_fields=True):
name_is_set = self.obj_attr_is_set('name')
if target_version >= (1, 10):
# Target version supports name. Set it to its default
# value if it is not set.
if not name_is_set:
self.name = None
elif name_is_set:
# Target version does not support name, and it is set.
if remove_unavailable_fields:
# (De)serialising: remove unavailable fields.
delattr(self, 'name')
elif self.name is not None:
# DB: set unavailable fields to their default.
self.name = None
def _convert_to_version(self, target_version,
remove_unavailable_fields=True):
"""Convert to the target version.
Convert the object to the target version. The target version may be
the same, older, or newer than the version of the object. This is
used for DB interactions as well as for serialization/deserialization.
Version 1.7: physical_network field was added. Its default value is
None. For versions prior to this, it should be set to None (or
removed).
Version 1.8: if extra['vif_port_id'] is specified (non-null) and
internal_info['tenant_vif_port_id'] is not specified, copy the
.extra value to internal_info. There is nothing to do here when
downgrading to an older version.
Version 1.9: remove is_smartnic field for unsupported versions if
remove_unavailable_fields is True.
Version 1.10: remove name field for unsupported versions if
remove_unavailable_fields is True.
:param target_version: the desired version of the object
:param remove_unavailable_fields: True to remove fields that are
unavailable in the target version; set this to True when
(de)serializing. False to set the unavailable fields to appropriate
values; set this to False for DB interactions.
"""
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version >= (1, 8):
if self.obj_attr_is_set('extra'):
vif = self.extra.get('vif_port_id')
if vif:
internal_info = (self.internal_info
if self.obj_attr_is_set('internal_info')
else {})
if 'tenant_vif_port_id' not in internal_info:
internal_info['tenant_vif_port_id'] = vif
self.internal_info = internal_info
# Convert the physical_network field.
physnet_is_set = self.obj_attr_is_set('physical_network')
if target_version >= (1, 7):
# Target version supports physical_network. Set it to its default
# value if it is not set.
if not physnet_is_set:
self.physical_network = None
elif physnet_is_set:
# Target version does not support physical_network, and it is set.
if remove_unavailable_fields:
# (De)serialising: remove unavailable fields.
delattr(self, 'physical_network')
elif self.physical_network is not None:
# DB: set unavailable fields to their default.
self.physical_network = None
# Convert is_smartnic field.
is_smartnic_set = self.obj_attr_is_set('is_smartnic')
if target_version >= (1, 9):
# Target version supports is_smartnic. Set it to its default
# value if it is not set.
if not is_smartnic_set:
self.is_smartnic = False
# handle is_smartnic field in older version
elif is_smartnic_set:
# Target version does not support is_smartnic, and it is set.
if remove_unavailable_fields:
# (De)serialising: remove unavailable fields.
delattr(self, 'is_smartnic')
elif self.is_smartnic is not False:
# DB: set unavailable fields to their default.
self.is_smartnic = False
# Convert the name field.
self._convert_name_field(target_version, remove_unavailable_fields)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get(cls, context, port_id):
"""Find a port.
Find a port based on its id or uuid or name or MAC address and return
a Port object.
:param context: Security context
:param port_id: the id *or* uuid *or* name *or* MAC address of a port.
:returns: a :class:`Port` object.
:raises: InvalidIdentity
"""
if strutils.is_int_like(port_id):
return cls.get_by_id(context, port_id)
elif uuidutils.is_uuid_like(port_id):
return cls.get_by_uuid(context, port_id)
elif netutils.is_valid_mac(port_id):
return cls.get_by_address(context, port_id)
elif utils.is_valid_logical_name(port_id):
return cls.get_by_name(context, port_id)
else:
raise exception.InvalidIdentity(identity=port_id)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_id(cls, context, port_id):
"""Find a port based on its integer ID and return a Port object.
:param cls: the :class:`Port`
:param context: Security context
:param port_id: the ID of a port.
:returns: a :class:`Port` object.
:raises: PortNotFound
"""
db_port = cls.dbapi.get_port_by_id(port_id)
port = cls._from_db_object(context, cls(), db_port)
return port
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_uuid(cls, context, uuid):
"""Find a port based on UUID and return a :class:`Port` object.
:param cls: the :class:`Port`
:param context: Security context
:param uuid: the UUID of a port.
:returns: a :class:`Port` object.
:raises: PortNotFound
"""
db_port = cls.dbapi.get_port_by_uuid(uuid)
port = cls._from_db_object(context, cls(), db_port)
return port
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_address(cls, context, address, owner=None, project=None):
"""Find a port based on address and return a :class:`Port` object.
:param cls: the :class:`Port`
:param context: Security context
:param address: the address of a port.
:param owner: DEPRECATED a node owner to match against
:param project: a node owner or lessee to match against
:returns: a :class:`Port` object.
:raises: PortNotFound
"""
if owner and not project:
project = owner
db_port = cls.dbapi.get_port_by_address(address, project=project)
port = cls._from_db_object(context, cls(), db_port)
return port
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_name(cls, context, name):
"""Find a port based on name and return a :class:`Port` object.
:param cls: the :class:`Port`
:param context: Security context
:param name: the name of a port.
:returns: a :class:`Port` object.
:raises: PortNotFound
"""
db_port = cls.dbapi.get_port_by_name(name)
port = cls._from_db_object(context, cls(), db_port)
return port
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
sort_key=None, sort_dir=None, owner=None, project=None):
"""Return a list of Port objects.
:param context: Security context.
:param limit: maximum number of resources to return in a single result.
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
:param owner: DEPRECATED a node owner to match against
:param project: a node owner or lessee to match against
:returns: a list of :class:`Port` object.
:raises: InvalidParameterValue
"""
if owner and not project:
project = owner
db_ports = cls.dbapi.get_port_list(limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir,
project=project)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None, owner=None,
project=None):
"""Return a list of Port objects associated with a given node ID.
:param context: Security context.
:param node_id: the ID of the node.
:param limit: maximum number of resources to return in a single result.
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
:param owner: DEPRECATED a node owner to match against
:param project: a node owner or lessee to match against
:returns: a list of :class:`Port` object.
"""
if owner and not project:
project = owner
db_ports = cls.dbapi.get_ports_by_node_id(node_id, limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir,
project=project)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list_by_portgroup_id(cls, context, portgroup_id, limit=None,
marker=None, sort_key=None, sort_dir=None,
owner=None, project=None):
"""Return a list of Port objects associated with a given portgroup ID.
:param context: Security context.
:param portgroup_id: the ID of the portgroup.
:param limit: maximum number of resources to return in a single result.
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
:param owner: DEPRECATED a node owner to match against
:param project: a node owner or lessee to match against
:returns: a list of :class:`Port` object.
"""
if owner and not project:
project = owner
db_ports = cls.dbapi.get_ports_by_portgroup_id(portgroup_id,
limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir,
project=project)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def create(self, context=None):
"""Create a Port record in the DB.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Port(context)
:raises: MACAlreadyExists if 'address' column is not unique
:raises: PortAlreadyExists if 'uuid' column is not unique
"""
values = self.do_version_changes_for_db()
db_port = self.dbapi.create_port(values)
self._from_db_object(self._context, self, db_port)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def destroy(self, context=None):
"""Delete the Port from the DB.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Port(context)
:raises: PortNotFound
"""
self.dbapi.destroy_port(self.uuid)
self.obj_reset_changes()
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def save(self, context=None):
"""Save updates to this Port.
Updates will be made column by column based on the result
of self.what_changed().
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Port(context)
:raises: PortNotFound
:raises: MACAlreadyExists if 'address' column is not unique
"""
updates = self.do_version_changes_for_db()
updated_port = self.dbapi.update_port(self.uuid, updates)
self._from_db_object(self._context, self, updated_port)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def refresh(self, context=None):
"""Loads updates for this Port.
Loads a port with the same uuid from the database and
checks for updated attributes. Updates are applied from
the loaded port column by column, if there are any updates.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Port(context)
:raises: PortNotFound
"""
current = self.get_by_uuid(self._context, uuid=self.uuid)
self.obj_refresh(current)
self.obj_reset_changes()
@classmethod
def supports_physical_network(cls):
"""Return whether the physical_network field is supported.
:returns: Whether the physical_network field is supported
:raises: ovo_exception.IncompatibleObjectVersion
"""
return cls.supports_version((1, 7))
@classmethod
def supports_is_smartnic(cls):
"""Return whether is_smartnic field is supported.
:returns: Whether is_smartnic field is supported
:raises: ovo_exception.IncompatibleObjectVersion
"""
return cls.supports_version((1, 9))
@base.IronicObjectRegistry.register
class PortCRUDNotification(notification.NotificationBase):
    """Notification emitted when ironic creates, updates or deletes a port."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        # The event body; see PortCRUDPayload for the carried attributes.
        'payload': object_fields.ObjectField('PortCRUDPayload')
    }
@base.IronicObjectRegistry.register
class PortCRUDPayload(notification.NotificationPayloadBase):
    """Payload carried by PortCRUDNotification.

    SCHEMA maps payload fields to attributes of the source Port object;
    populate_schema() copies them across at construction time.
    """
    # Version 1.0: Initial version
    # Version 1.1: Add "portgroup_uuid" field
    # Version 1.2: Add "physical_network" field
    # Version 1.3: Add "is_smartnic" field
    # Version 1.4: Add "name" field
    VERSION = '1.4'
    SCHEMA = {
        'address': ('port', 'address'),
        'extra': ('port', 'extra'),
        'local_link_connection': ('port', 'local_link_connection'),
        'pxe_enabled': ('port', 'pxe_enabled'),
        'physical_network': ('port', 'physical_network'),
        'created_at': ('port', 'created_at'),
        'updated_at': ('port', 'updated_at'),
        'uuid': ('port', 'uuid'),
        'is_smartnic': ('port', 'is_smartnic'),
        'name': ('port', 'name'),
    }
    fields = {
        'address': object_fields.MACAddressField(nullable=True),
        'extra': object_fields.FlexibleDictField(nullable=True),
        'local_link_connection': object_fields.FlexibleDictField(
            nullable=True),
        'pxe_enabled': object_fields.BooleanField(nullable=True),
        # node_uuid/portgroup_uuid are not in SCHEMA: they are passed to
        # __init__ explicitly rather than copied from the Port object.
        'node_uuid': object_fields.UUIDField(),
        'portgroup_uuid': object_fields.UUIDField(nullable=True),
        'physical_network': object_fields.StringField(nullable=True),
        'created_at': object_fields.DateTimeField(nullable=True),
        'updated_at': object_fields.DateTimeField(nullable=True),
        'uuid': object_fields.UUIDField(),
        'is_smartnic': object_fields.BooleanField(nullable=True,
                                                  default=False),
        'name': object_fields.StringField(nullable=True),
    }
    def __init__(self, port, node_uuid, portgroup_uuid):
        """Build the payload from a Port plus its node/portgroup UUIDs.

        :param port: the Port object the event is about.
        :param node_uuid: UUID of the node the port belongs to.
        :param portgroup_uuid: UUID of the port's portgroup, or None.
        """
        super(PortCRUDPayload, self).__init__(node_uuid=node_uuid,
                                              portgroup_uuid=portgroup_uuid)
        self.populate_schema(port=port)
| apache-2.0 | -1,894,613,625,406,385,700 | 43.397313 | 79 | 0.610869 | false |
pforret/python-for-android | python-build/python-libs/gdata/build/lib/gdata/apps/service.py | 136 | 16595 | #!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Takashi MATSUO)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import urllib
import gdata
import atom.service
import gdata.service
import gdata.apps
import atom
# Provisioning API protocol version used when building request URLs.
API_VER="2.0"
HTTP_OK=200
# AppsForYourDomain error codes surfaced via
# AppsForYourDomainException.error_code.
# NOTE: the 'UNKOWN' spelling is a long-standing typo that is part of the
# public API of this module; do not rename it.
UNKOWN_ERROR=1000
USER_DELETED_RECENTLY=1100
USER_SUSPENDED=1101
DOMAIN_USER_LIMIT_EXCEEDED=1200
DOMAIN_ALIAS_LIMIT_EXCEEDED=1201
DOMAIN_SUSPENDED=1202
DOMAIN_FEATURE_UNAVAILABLE=1203
ENTITY_EXISTS=1300
ENTITY_DOES_NOT_EXIST=1301
ENTITY_NAME_IS_RESERVED=1302
ENTITY_NAME_NOT_VALID=1303
INVALID_GIVEN_NAME=1400
INVALID_FAMILY_NAME=1401
INVALID_PASSWORD=1402
INVALID_USERNAME=1403
INVALID_HASH_FUNCTION_NAME=1404
INVALID_HASH_DIGGEST_LENGTH=1405
INVALID_EMAIL_ADDRESS=1406
INVALID_QUERY_PARAMETER_VALUE=1407
TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500
# Default mailbox quota (in megabytes) assigned to newly created users.
DEFAULT_QUOTA_LIMIT='2048'
class Error(Exception):
  """Base exception for all errors raised by the gdata.apps.service module."""
  pass
class AppsForYourDomainException(Error):
  """Error raised when the Provisioning API returns a failure response.

  Tries to parse the structured AppsForYourDomain error XML out of the
  HTTP response body.  On success, ``error_code``, ``reason`` and
  ``invalidInput`` describe the failure; if the body cannot be parsed,
  ``error_code`` falls back to ``UNKOWN_ERROR``.
  """

  def __init__(self, response):
    """Args:
      response: dict The error response; its 'body' entry is parsed as XML.
    """
    Error.__init__(self, response)
    try:
      self.element_tree = ElementTree.fromstring(response['body'])
      self.error_code = int(self.element_tree[0].attrib['errorCode'])
      self.reason = self.element_tree[0].attrib['reason']
      self.invalidInput = self.element_tree[0].attrib['invalidInput']
    except Exception:
      # Narrowed from a bare ``except:`` which also swallowed SystemExit and
      # KeyboardInterrupt.  Any parse or attribute-lookup failure still ends
      # up here and degrades gracefully to the generic error code.
      self.error_code = UNKOWN_ERROR
class AppsService(gdata.service.GDataService):
  """Client for the Google Apps Provisioning service.

  NOTE(review): this module uses Python 2 ``except X, e`` syntax throughout;
  it is not importable on Python 3 as-is.
  """
  def __init__(self, email=None, password=None, domain=None, source=None,
               server='apps-apis.google.com', additional_headers=None,
               **kwargs):
    """Creates a client for the Google Apps Provisioning service.
    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      domain: string (optional) The Google Apps domain name.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'apps-apis.google.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='apps', source=source,
        server=server, additional_headers=additional_headers, **kwargs)
    # The Provisioning API is HTTPS-only.
    self.ssl = True
    self.port = 443
    self.domain = domain
  def _baseURL(self):
    """Return the feed URL prefix for this client's domain."""
    return "/a/feeds/%s" % self.domain
  def GetGeneratorFromLinkFinder(self, link_finder, func):
    """returns a generator for pagination"""
    yield link_finder
    next = link_finder.GetNextLink()
    while next is not None:
      next_feed = func(str(self.Get(next.href)))
      yield next_feed
      next = next_feed.GetNextLink()
  def AddAllElementsFromAllPages(self, link_finder, func):
    """retrieve all pages and add all elements"""
    next = link_finder.GetNextLink()
    while next is not None:
      next_feed = self.Get(next.href, converter=func)
      for a_entry in next_feed.entry:
        link_finder.entry.append(a_entry)
      next = next_feed.GetNextLink()
    return link_finder
  def RetrievePageOfEmailLists(self, start_email_list_name=None):
    """Retrieve one page of email list"""
    uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
    if start_email_list_name is not None:
      uri += "?startEmailListName=%s" % start_email_list_name
    try:
      return gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrieveAllEmailLists(self):
    """Retrieve all email list of a domain."""
    ret = self.RetrievePageOfEmailLists()
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.EmailListFeedFromString)
  def RetrieveEmailList(self, list_name):
    """Retreive a single email list by the list's name."""
    uri = "%s/emailList/%s/%s" % (
      self._baseURL(), API_VER, list_name)
    try:
      return self.Get(uri, converter=gdata.apps.EmailListEntryFromString)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrieveEmailLists(self, recipient):
    """Retrieve All Email List Subscriptions for an Email Address."""
    uri = "%s/emailList/%s?recipient=%s" % (
      self._baseURL(), API_VER, recipient)
    try:
      ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.EmailListFeedFromString)
  def RemoveRecipientFromEmailList(self, recipient, list_name):
    """Remove recipient from email list."""
    uri = "%s/emailList/%s/%s/recipient/%s" % (
      self._baseURL(), API_VER, list_name, recipient)
    try:
      self.Delete(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrievePageOfRecipients(self, list_name, start_recipient=None):
    """Retrieve one page of recipient of an email list. """
    uri = "%s/emailList/%s/%s/recipient" % (
      self._baseURL(), API_VER, list_name)
    if start_recipient is not None:
      uri += "?startRecipient=%s" % start_recipient
    try:
      return gdata.apps.EmailListRecipientFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrieveAllRecipients(self, list_name):
    """Retrieve all recipient of an email list."""
    ret = self.RetrievePageOfRecipients(list_name)
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.EmailListRecipientFeedFromString)
  def AddRecipientToEmailList(self, recipient, list_name):
    """Add a recipient to a email list."""
    uri = "%s/emailList/%s/%s/recipient" % (
      self._baseURL(), API_VER, list_name)
    recipient_entry = gdata.apps.EmailListRecipientEntry()
    recipient_entry.who = gdata.apps.Who(email=recipient)
    try:
      return gdata.apps.EmailListRecipientEntryFromString(
        str(self.Post(recipient_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def DeleteEmailList(self, list_name):
    """Delete a email list"""
    uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name)
    try:
      self.Delete(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def CreateEmailList(self, list_name):
    """Create a email list. """
    uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
    email_list_entry = gdata.apps.EmailListEntry()
    email_list_entry.email_list = gdata.apps.EmailList(name=list_name)
    try:
      return gdata.apps.EmailListEntryFromString(
        str(self.Post(email_list_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def DeleteNickname(self, nickname):
    """Delete a nickname"""
    uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
    try:
      self.Delete(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrievePageOfNicknames(self, start_nickname=None):
    """Retrieve one page of nicknames in the domain"""
    uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
    if start_nickname is not None:
      uri += "?startNickname=%s" % start_nickname
    try:
      return gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrieveAllNicknames(self):
    """Retrieve all nicknames in the domain"""
    ret = self.RetrievePageOfNicknames()
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.NicknameFeedFromString)
  def RetrieveNicknames(self, user_name):
    """Retrieve nicknames of the user"""
    uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
    try:
      ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.NicknameFeedFromString)
  def RetrieveNickname(self, nickname):
    """Retrieve a nickname.
    Args:
      nickname: string The nickname to retrieve
    Returns:
      gdata.apps.NicknameEntry
    """
    uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
    try:
      return gdata.apps.NicknameEntryFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def CreateNickname(self, user_name, nickname):
    """Create a nickname"""
    uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
    nickname_entry = gdata.apps.NicknameEntry()
    nickname_entry.login = gdata.apps.Login(user_name=user_name)
    nickname_entry.nickname = gdata.apps.Nickname(name=nickname)
    try:
      return gdata.apps.NicknameEntryFromString(
        str(self.Post(nickname_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def DeleteUser(self, user_name):
    """Delete a user account"""
    uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
    try:
      return self.Delete(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def UpdateUser(self, user_name, user_entry):
    """Update a user account."""
    uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
    try:
      return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def CreateUser(self, user_name, family_name, given_name, password,
                 suspended='false', quota_limit=None,
                 password_hash_function=None):
    """Create a user account. """
    uri = "%s/user/%s" % (self._baseURL(), API_VER)
    user_entry = gdata.apps.UserEntry()
    user_entry.login = gdata.apps.Login(
      user_name=user_name, password=password, suspended=suspended,
      hash_function_name=password_hash_function)
    user_entry.name = gdata.apps.Name(family_name=family_name,
                                      given_name=given_name)
    if quota_limit is not None:
      user_entry.quota = gdata.apps.Quota(limit=str(quota_limit))
    try:
      return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def SuspendUser(self, user_name):
    """Suspend the user account; no-op if already suspended."""
    user_entry = self.RetrieveUser(user_name)
    if user_entry.login.suspended != 'true':
      user_entry.login.suspended = 'true'
      user_entry = self.UpdateUser(user_name, user_entry)
    return user_entry
  def RestoreUser(self, user_name):
    """Un-suspend the user account; no-op if not suspended."""
    user_entry = self.RetrieveUser(user_name)
    if user_entry.login.suspended != 'false':
      user_entry.login.suspended = 'false'
      user_entry = self.UpdateUser(user_name, user_entry)
    return user_entry
  def RetrieveUser(self, user_name):
    """Retrieve an user account.
    Args:
      user_name: string The user name to retrieve
    Returns:
      gdata.apps.UserEntry
    """
    uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
    try:
      return gdata.apps.UserEntryFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrievePageOfUsers(self, start_username=None):
    """Retrieve one page of users in this domain."""
    uri = "%s/user/%s" % (self._baseURL(), API_VER)
    if start_username is not None:
      uri += "?startUsername=%s" % start_username
    try:
      return gdata.apps.UserFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def GetGeneratorForAllUsers(self):
    """Retrieve a generator for all users in this domain."""
    first_page = self.RetrievePageOfUsers()
    return self.GetGeneratorFromLinkFinder(first_page,
                                           gdata.apps.UserFeedFromString)
  def RetrieveAllUsers(self):
    """Retrieve all users in this domain. OBSOLETE"""
    ret = self.RetrievePageOfUsers()
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.UserFeedFromString)
class PropertyService(gdata.service.GDataService):
  """Client for the Google Apps Property service."""
  def __init__(self, email=None, password=None, domain=None, source=None,
               server='apps-apis.google.com', additional_headers=None):
    gdata.service.GDataService.__init__(self, email=email, password=password,
                                        service='apps', source=source,
                                        server=server,
                                        additional_headers=additional_headers)
    # The Property API is HTTPS-only.
    self.ssl = True
    self.port = 443
    self.domain = domain
  def AddAllElementsFromAllPages(self, link_finder, func):
    """retrieve all pages and add all elements"""
    next = link_finder.GetNextLink()
    while next is not None:
      next_feed = self.Get(next.href, converter=func)
      for a_entry in next_feed.entry:
        link_finder.entry.append(a_entry)
      next = next_feed.GetNextLink()
    return link_finder
  def _GetPropertyEntry(self, properties):
    """Build a PropertyEntry from a {name: value} dict (skipping Nones)."""
    property_entry = gdata.apps.PropertyEntry()
    property = []
    for name, value in properties.iteritems():
      if name is not None and value is not None:
        property.append(gdata.apps.Property(name=name, value=value))
    property_entry.property = property
    return property_entry
  def _PropertyEntry2Dict(self, property_entry):
    """Flatten a PropertyEntry back into a {name: value} dict."""
    properties = {}
    for i, property in enumerate(property_entry.property):
      properties[property.name] = property.value
    return properties
  def _GetPropertyFeed(self, uri):
    """GET the uri and parse the body as a PropertyFeed."""
    try:
      return gdata.apps.PropertyFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
  def _GetPropertiesList(self, uri):
    """GET all pages of a property feed and return a list of dicts."""
    property_feed = self._GetPropertyFeed(uri)
    # pagination
    property_feed = self.AddAllElementsFromAllPages(
      property_feed, gdata.apps.PropertyFeedFromString)
    properties_list = []
    for property_entry in property_feed.entry:
      properties_list.append(self._PropertyEntry2Dict(property_entry))
    return properties_list
  def _GetProperties(self, uri):
    """GET a single property entry and return it as a dict."""
    try:
      return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
        str(self.Get(uri))))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
  def _PostProperties(self, uri, properties):
    """POST the given properties dict and return the server's echo as a dict."""
    property_entry = self._GetPropertyEntry(properties)
    try:
      return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
        str(self.Post(property_entry, uri))))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
  def _PutProperties(self, uri, properties):
    """PUT the given properties dict and return the server's echo as a dict."""
    property_entry = self._GetPropertyEntry(properties)
    try:
      return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
        str(self.Put(property_entry, uri))))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
  def _DeleteProperties(self, uri):
    """DELETE the property resource at uri."""
    try:
      self.Delete(uri)
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
| apache-2.0 | 7,247,474,686,710,491,000 | 33.572917 | 78 | 0.68599 | false |
talbarda/kaggle_predict_house_prices | Build Model.py | 1 | 2629 | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import pandas as pd
import sklearn.linear_model as lm
from sklearn.model_selection import learning_curve
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
def get_model(estimator, parameters, X_train, y_train, scoring):
    """Grid-search *parameters* for *estimator* on the training data.

    Returns the refit estimator with the best cross-validated *scoring*.
    """
    search = GridSearchCV(estimator, param_grid=parameters, scoring=scoring)
    search.fit(X_train, y_train)
    return search.best_estimator_
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5), scoring='accuracy'):
    """Plot training and cross-validation scores versus training-set size.

    Returns the matplotlib.pyplot module so the caller can show/save the figure.
    """
    plt.figure(figsize=(10, 6))
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel(scoring)
    sizes, tr_scores, cv_scores = learning_curve(
        estimator, X, y, cv=cv, scoring=scoring,
        n_jobs=n_jobs, train_sizes=train_sizes)
    tr_mean = np.mean(tr_scores, axis=1)
    tr_std = np.std(tr_scores, axis=1)
    cv_mean = np.mean(cv_scores, axis=1)
    cv_std = np.std(cv_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one standard deviation around each mean curve.
    plt.fill_between(sizes, tr_mean - tr_std, tr_mean + tr_std,
                     alpha=0.1, color="r")
    plt.fill_between(sizes, cv_mean - cv_std, cv_mean + cv_std,
                     alpha=0.1, color="g")
    plt.plot(sizes, tr_mean, 'o-', color="r", label="Training score")
    plt.plot(sizes, cv_mean, 'o-', color="g", label="Cross-validation score")
    plt.legend(loc="best")
    return plt
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')

# Encode every column as integer category codes so the estimator gets
# purely numeric input.
for c in train:
    train[c] = pd.Categorical(train[c].values).codes

# Features with a strong relationship to sale price.
X = train[['OverallQual', 'GarageArea', 'GarageCars', 'TotalBsmtSF', 'TotRmsAbvGrd', 'FullBath', 'GrLivArea']]
y = train.SalePrice

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

from sklearn.linear_model import RidgeCV
# Instantiate and fit on the training split.  The original code called
# RidgeCV.fit on the class itself (a TypeError) and then predicted with an
# estimator that had never been fitted.
clf_ridge = RidgeCV()
clf_ridge.fit(X_train, y_train)

# SalePrice is continuous, so report the regressor's R^2 score on the
# held-out split instead of classification accuracy.
print(clf_ridge.score(X_test, y_test))
print(clf_ridge)

plt = plot_learning_curve(clf_ridge, 'RidgeCV', X, y, cv=4, scoring='r2')
plt.show()
XueqingLin/tensorflow | tensorflow/python/training/proximal_gradient_descent.py | 33 | 3580 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ProximalGradientDescent for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
# pylint: disable=unused-import
from tensorflow.python.ops import math_ops
# pylint: enable=unused-import
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class ProximalGradientDescentOptimizer(optimizer.Optimizer):
  # pylint: disable=line-too-long
  """Optimizer that implements the proximal gradient descent algorithm.

  See this [paper](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf).

  @@__init__
  """

  def __init__(self, learning_rate, l1_regularization_strength=0.0,
               l2_regularization_strength=0.0, use_locking=False,
               name="ProximalGradientDescent"):
    """Construct a new proximal gradient descent optimizer.

    Args:
      learning_rate: A Tensor or a floating point value.  The learning
        rate to use.
      l1_regularization_strength: A float value, must be greater than or
        equal to zero.
      l2_regularization_strength: A float value, must be greater than or
        equal to zero.
      use_locking: If True use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "ProximalGradientDescent".
    """
    super(ProximalGradientDescentOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._l1_regularization_strength = l1_regularization_strength
    self._l2_regularization_strength = l2_regularization_strength
    # Tensor versions of the hyperparameters, created in _prepare().
    self._l1_regularization_strength_tensor = None
    self._l2_regularization_strength_tensor = None

  def _apply_dense(self, grad, var):
    """Apply dense gradients through the fused proximal-GD training op."""
    return training_ops.apply_proximal_gradient_descent(
        var,
        self._learning_rate_tensor,
        self._l1_regularization_strength_tensor,
        self._l2_regularization_strength_tensor,
        grad,
        use_locking=self._use_locking).op

  def _apply_sparse(self, grad, var):
    """Apply sparse (IndexedSlices) gradients through the sparse training op."""
    return training_ops.sparse_apply_proximal_gradient_descent(
        var,
        self._learning_rate_tensor,
        self._l1_regularization_strength_tensor,
        self._l2_regularization_strength_tensor,
        grad.values,
        grad.indices,
        use_locking=self._use_locking).op

  def _prepare(self):
    """Convert Python-number hyperparameters to tensors before updates run."""
    self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
                                                       name="learning_rate")
    self._l1_regularization_strength_tensor = ops.convert_to_tensor(
        self._l1_regularization_strength, name="l1_regularization_strength")
    self._l2_regularization_strength_tensor = ops.convert_to_tensor(
        self._l2_regularization_strength, name="l2_regularization_strength")
| apache-2.0 | 4,214,806,769,283,929,600 | 40.627907 | 109 | 0.695531 | false |
tanium/pytan | lib/libs_external/any/urllib3/filepost.py | 292 | 2321 | from __future__ import absolute_import
import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
# Index 3 of a codecs.lookup() CodecInfo is the StreamWriter class; this is
# the UTF-8 stream writer used to encode text fields into the body buffer.
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
    """
    Return a random hex string usable as a multipart boundary.

    Stand-in for the removed ``mimetools.choose_boundary``.
    """
    return uuid4().hex
def iter_field_objects(fields):
    """
    Iterate over fields.

    Supports list of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else iter(fields)

    for item in source:
        if isinstance(item, RequestField):
            yield item
        else:
            # Plain (key, value) pair: promote it to a RequestField.
            yield RequestField.from_tuples(*item)
def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields.

    The addition of :class:`~urllib3.fields.RequestField` makes this function
    obsolete. Instead, use :func:`iter_field_objects`, which returns
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    if isinstance(fields, dict):
        fields = six.iteritems(fields)
    return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.

    :returns:
        Tuple of ``(encoded_body, content_type_string)``.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for field in iter_field_objects(fields):
        # Each part starts with the boundary marker followed by its headers.
        body.write(b('--%s\r\n' % (boundary)))

        writer(body).write(field.render_headers())
        data = field.data

        if isinstance(data, int):
            data = str(data)  # Backwards compatibility

        if isinstance(data, six.text_type):
            # Text payloads are UTF-8 encoded via the module-level writer.
            writer(body).write(data)
        else:
            body.write(data)

        body.write(b'\r\n')

    # Closing boundary marker terminates the multipart body.
    body.write(b('--%s--\r\n' % (boundary)))

    content_type = str('multipart/form-data; boundary=%s' % boundary)

    return body.getvalue(), content_type
| mit | -6,916,278,960,011,971,000 | 23.691489 | 85 | 0.629039 | false |
Danielhiversen/home-assistant | homeassistant/components/switch/anel_pwrctrl.py | 8 | 3373 | """
Support for ANEL PwrCtrl switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.pwrctrl/
"""
import logging
import socket
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_HOST, CONF_PASSWORD, CONF_USERNAME)
from homeassistant.util import Throttle
REQUIREMENTS = ['anel_pwrctrl-homeassistant==0.0.1.dev2']

_LOGGER = logging.getLogger(__name__)

# Config keys for the two UDP ports used to talk to the device.
CONF_PORT_RECV = 'port_recv'
CONF_PORT_SEND = 'port_send'

# Minimum interval between device refreshes (enforced via @Throttle).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_PORT_RECV): cv.port,
    vol.Required(CONF_PORT_SEND): cv.port,
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_HOST): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up PwrCtrl devices/switches.

    Queries the ANEL device master (optionally restricted to a single host)
    and creates one switch entity per relay port of every discovered device.
    Returns False when discovery fails.
    """
    host = config.get(CONF_HOST, None)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    port_recv = config.get(CONF_PORT_RECV)
    port_send = config.get(CONF_PORT_SEND)

    from anel_pwrctrl import DeviceMaster

    try:
        # NOTE(review): the send port is used as read_port and the recv port
        # as write_port — mirrors upstream library naming; confirm intended.
        master = DeviceMaster(
            username=username, password=password, read_port=port_send,
            write_port=port_recv)
        master.query(ip_addr=host)
    except socket.error as ex:
        _LOGGER.error("Unable to discover PwrCtrl device: %s", str(ex))
        return False

    devices = []
    for device in master.devices.values():
        # One wrapper per physical device so state refreshes are throttled
        # across all of its switch entities.
        parent_device = PwrCtrlDevice(device)
        devices.extend(
            PwrCtrlSwitch(switch, parent_device)
            for switch in device.switches.values()
        )
    add_entities(devices)
class PwrCtrlSwitch(SwitchDevice):
    """A single relay port on an ANEL PwrCtrl device."""

    def __init__(self, port, parent_device):
        """Store the relay port and the throttled parent device wrapper."""
        self._port = port
        self._parent_device = parent_device

    @property
    def should_poll(self):
        """Poll the device for state changes."""
        return True

    @property
    def unique_id(self):
        """Unique ID built from the device host and the port index."""
        return '{}-{}'.format(self._port.device.host, self._port.get_index())

    @property
    def name(self):
        """Use the port label configured on the device as the entity name."""
        return self._port.label

    @property
    def is_on(self):
        """Whether the relay is currently switched on."""
        return self._port.get_state()

    def update(self):
        """Refresh state via the parent device (throttled for all ports)."""
        self._parent_device.update()

    def turn_on(self, **kwargs):
        """Switch the relay on."""
        self._port.on()

    def turn_off(self, **kwargs):
        """Switch the relay off."""
        self._port.off()
class PwrCtrlDevice:
    """Device representation for per device throttling."""

    def __init__(self, device):
        """Initialize the PwrCtrl device."""
        self._device = device

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Update the device and all its switches.

        Throttled so the switch entities sharing this device trigger at
        most one refresh per MIN_TIME_BETWEEN_UPDATES.
        """
        self._device.update()
| mit | 2,931,940,410,848,232,400 | 27.108333 | 75 | 0.647791 | false |
oopy/micropython | docs/sphinx_selective_exclude/search_auto_exclude.py | 40 | 1397 | #
# This is a Sphinx documentation tool extension which allows to
# automatically exclude from full-text search index document
# which are not referenced via toctree::. It's intended to be
# used with toctrees conditional on only:: directive, with the
# idea being that if you didn't include it in the ToC, you don't
# want the docs being findable by search either (for example,
# because these docs contain information not pertinent to a
# particular product configuration).
#
# This extension depends on "eager_only" extension and won't work
# without it.
#
# Copyright (c) 2016 Paul Sokolovsky
# Licensed under the terms of BSD license, see LICENSE file.
#
import sphinx
# Saved reference to the original StandaloneHTMLBuilder.index_page,
# populated by setup() before the patched version is installed.
org_StandaloneHTMLBuilder_index_page = None
def StandaloneHTMLBuilder_index_page(self, pagename, doctree, title):
    """Replacement index_page that skips documents not referenced in a ToC.

    A page is excluded from the full-text search index when it is not a
    toctree rebuild target, is not the master document, and is not marked
    orphan; otherwise the original builder method runs unchanged.
    """
    env = self.env
    if pagename in env.files_to_rebuild:
        return org_StandaloneHTMLBuilder_index_page(self, pagename, doctree, title)
    if pagename == env.config.master_doc or 'orphan' in env.metadata[pagename]:
        return org_StandaloneHTMLBuilder_index_page(self, pagename, doctree, title)
    print("Excluding %s from full-text index because it's not referenced in ToC" % pagename)
def setup(app):
    """Sphinx extension entry point: monkey-patch the HTML builder.

    Saves the original ``index_page`` method and installs the filtering
    wrapper defined above.
    """
    global org_StandaloneHTMLBuilder_index_page
    org_StandaloneHTMLBuilder_index_page = sphinx.builders.html.StandaloneHTMLBuilder.index_page
    sphinx.builders.html.StandaloneHTMLBuilder.index_page = StandaloneHTMLBuilder_index_page
| mit | -5,560,494,608,797,927,000 | 40.088235 | 100 | 0.761632 | false |
Workday/OpenFrame | tools/json_schema_compiler/highlighters/pygments_highlighter.py | 179 | 1273 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
try:
  import pygments
  from pygments.lexers import CppLexer
  from pygments.formatters import HtmlFormatter
  PYGMENTS_IMPORTED = True
except ImportError:
  # Pygments is optional: record its absence so PygmentsHighlighter can
  # raise a clear error when instantiated.
  print('It appears that Pygments is not installed. '
        'Can be installed using easy_install Pygments or from http://pygments.org.')
  PYGMENTS_IMPORTED = False
class PygmentsHighlighter(object):
  """Highlighter that uses the python pygments library to highlight code.

  In the original code this docstring appeared *after* __init__, so it was
  a no-op statement and never became the class __doc__.
  """

  def __init__(self):
    if not PYGMENTS_IMPORTED:
      raise ImportError('Pygments not installed')

  def _GetFormatter(self, style):
    """Build an HTML formatter with line numbers for the named style."""
    return HtmlFormatter(linenos=True,
                         style=pygments.styles.get_style_by_name(style))

  def GetCSS(self, style):
    """Return the CSS rules for the '.highlight' container class."""
    return self._GetFormatter(style).get_style_defs('.highlight')

  def GetCodeElement(self, code, style):
    """Return *code* highlighted as C++ HTML markup."""
    return pygments.highlight(code, CppLexer(), self._GetFormatter(style))

  def DisplayName(self):
    return 'pygments' + ('' if PYGMENTS_IMPORTED else ' (not installed)')

  def GetStyles(self):
    return list(pygments.styles.get_all_styles())
| bsd-3-clause | -8,326,520,500,126,395,000 | 33.405405 | 80 | 0.727416 | false |
Poles/Poles | platforms/windows/JsonCpp/scons-local-2.3.0/SCons/Tool/BitKeeper.py | 11 | 2498 | """SCons.Tool.BitKeeper.py
Tool-specific initialization for the BitKeeper source code control
system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/BitKeeper.py 2013/03/03 09:48:35 garyo"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
    """Add a Builder factory function and construction variables for
    BitKeeper to an Environment."""

    def BitKeeperFactory(env=env):
        """Deprecated factory returning a Builder that runs $BITKEEPERCOM."""
        import SCons.Warnings as W
        W.warn(W.DeprecatedSourceCodeWarning, """The BitKeeper() factory is deprecated and there is no replacement.""")
        act = SCons.Action.Action("$BITKEEPERCOM", "$BITKEEPERCOMSTR")
        return SCons.Builder.Builder(action = act, env = env)

    #setattr(env, 'BitKeeper', BitKeeperFactory)
    env.BitKeeper = BitKeeperFactory

    # Construction variables: $BITKEEPERCOM expands to "bk get <flags> <target>".
    env['BITKEEPER'] = 'bk'
    env['BITKEEPERGET'] = '$BITKEEPER get'
    env['BITKEEPERGETFLAGS'] = SCons.Util.CLVar('')
    env['BITKEEPERCOM'] = '$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET'
def exists(env):
    """Return a truthy value if the 'bk' executable can be found."""
    return env.Detect('bk')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | 1,522,447,516,660,714,500 | 36.283582 | 119 | 0.730985 | false |
SaschaMester/delicium | tools/perf/page_sets/mse_cases.py | 1 | 2010 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class MseCasesPage(page_module.Page):
  """Telemetry page that waits for the MSE startup test harness to finish."""

  def __init__(self, url, page_set):
    super(MseCasesPage, self).__init__(url=url, page_set=page_set)

  def RunNavigateSteps(self, action_runner):
    """Navigate, then block until the page sets window.__testDone."""
    super(MseCasesPage, self).RunNavigateSteps(action_runner)
    action_runner.WaitForJavaScriptCondition('window.__testDone == true')
class MseCasesPageSet(story.StorySet):

  """ Media source extensions perf benchmark """

  def __init__(self):
    super(MseCasesPageSet, self).__init__(
        cloud_storage_bucket=story.PUBLIC_BUCKET)

    # Cross product of media type (AV / V / A), append mode
    # (useAppendStream) and whether navigation waits for the document body
    # before the test starts (doNotWaitForBodyOnLoad).
    urls_list = [
      'file://mse_cases/startup_test.html?testType=AV',
      'file://mse_cases/startup_test.html?testType=AV&useAppendStream=true',
      # pylint: disable=C0301
      'file://mse_cases/startup_test.html?testType=AV&doNotWaitForBodyOnLoad=true',
      # pylint: disable=C0301
      'file://mse_cases/startup_test.html?testType=AV&useAppendStream=true&doNotWaitForBodyOnLoad=true',
      'file://mse_cases/startup_test.html?testType=V',
      'file://mse_cases/startup_test.html?testType=V&useAppendStream=true',
      # pylint: disable=C0301
      'file://mse_cases/startup_test.html?testType=V&doNotWaitForBodyOnLoad=true',
      # pylint: disable=C0301
      'file://mse_cases/startup_test.html?testType=V&useAppendStream=true&doNotWaitForBodyOnLoad=true',
      'file://mse_cases/startup_test.html?testType=A',
      'file://mse_cases/startup_test.html?testType=A&useAppendStream=true',
      # pylint: disable=C0301
      'file://mse_cases/startup_test.html?testType=A&doNotWaitForBodyOnLoad=true',
      # pylint: disable=C0301
      'file://mse_cases/startup_test.html?testType=A&useAppendStream=true&doNotWaitForBodyOnLoad=true',
    ]

    for url in urls_list:
      self.AddUserStory(MseCasesPage(url, self))
| bsd-3-clause | -7,264,405,073,782,115,000 | 40.875 | 104 | 0.71592 | false |
RPGOne/scikit-learn | sklearn/externals/funcsigs.py | 118 | 29982 | # Copyright 2001-2013 Python Software Foundation; All Rights Reserved
"""Function signature objects for callables
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.6, 2.7 and 3.2+.
"""
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types
try:
from collections import OrderedDict
except ImportError:
from .odict import OrderedDict
__version__ = "0.4"
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
# Types of C-level callables that never carry user-defined signatures;
# _get_user_defined_method() uses this tuple to skip slot wrappers,
# method wrappers and builtins.
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)

_NonUserDefinedCallables = (_WrapperDescriptor,
                            _MethodWrapper,
                            types.BuiltinFunctionType)
def formatannotation(annotation, base_module=None):
    """Render an annotation for display in a signature string.

    Classes are shown by name, with the module prefix dropped for builtins
    and for classes from *base_module*; everything else uses repr().
    """
    if not isinstance(annotation, type):
        return repr(annotation)
    if annotation.__module__ in ('builtins', '__builtin__', base_module):
        return annotation.__name__
    return '{0}.{1}'.format(annotation.__module__, annotation.__name__)
def _get_user_defined_method(cls, method_name, *nested):
    """Return ``cls.method_name`` (optionally drilling into *nested*
    attribute names) if it is a user-defined callable, else None.

    The ``type`` class itself and C-level callables are skipped.
    """
    if cls is type:
        return None
    try:
        meth = getattr(cls, method_name)
        for attr in nested:
            meth = getattr(meth, attr, meth)
    except AttributeError:
        return None
    # Once '__signature__' is added to 'C'-level callables, this
    # check won't be necessary.
    if isinstance(meth, _NonUserDefinedCallables):
        return None
    return meth
def signature(obj):
    '''Get a signature object for the passed callable.

    Resolution order: bound/unbound methods recurse on __func__; an
    explicit __signature__ wins; decorator-wrapped callables recurse on
    __wrapped__; plain functions use Signature.from_function; partials
    rebuild the underlying signature; classes and callable instances fall
    back to __call__/__new__/__init__.
    '''

    if not callable(obj):
        raise TypeError('{0!r} is not a callable object'.format(obj))

    if isinstance(obj, types.MethodType):
        sig = signature(obj.__func__)
        if obj.__self__ is None:
            # Unbound method: the first parameter becomes positional-only
            if sig.parameters:
                # NOTE(review): values()[0] indexes the mapping view, which
                # only works on Python 2 where values() is a list — confirm
                # the intended interpreter before relying on this branch.
                first = sig.parameters.values()[0].replace(
                    kind=_POSITIONAL_ONLY)
                return sig.replace(
                    parameters=(first,) + tuple(sig.parameters.values())[1:])
            else:
                return sig
        else:
            # In this case we skip the first parameter of the underlying
            # function (usually `self` or `cls`).
            return sig.replace(parameters=tuple(sig.parameters.values())[1:])

    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig

    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)

    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)

    if isinstance(obj, functools.partial):
        sig = signature(obj.func)

        new_params = OrderedDict(sig.parameters.items())

        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {0!r} has incorrect arguments'.format(obj)
            raise ValueError(msg)

        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                #   >>> def foo(a): print(a)
                #   >>> print(partial(partial(foo, a=10), a=20)())
                #   20
                #   >>> print(partial(partial(foo, a=10), a=20)(a=30))
                #   30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag.  Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)
            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                    not param._partial_kwarg):
                # Positionally pre-bound parameters disappear entirely.
                new_params.pop(arg_name)

        return sig.replace(parameters=new_params.values())

    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass

        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__', 'im_func')
        if call is not None:
            sig = signature(call)

    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])

    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {0!r}'.format(obj)
        raise ValueError(msg)

    raise ValueError('callable {0!r} is not supported by signature'.format(obj))
class _void(object):
    '''A private marker - used in Parameter & Signature'''
    # Distinct from _empty so replace() can tell "argument not supplied"
    # apart from "value is the empty sentinel".
class _empty(object):
    # Sentinel for "no default / no annotation", since None is a valid
    # user-supplied value for both.
    pass
class _ParameterKind(int):
def __new__(self, *args, **kwargs):
obj = int.__new__(self, *args)
obj._name = kwargs['name']
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {0!r}>'.format(self._name)
# The five parameter kinds, in canonical order; Signature.__init__ relies
# on this numeric ordering to validate that parameters appear in a legal
# sequence (kind < top_kind is an error).
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter(object):
    '''Represents a parameter in a function signature.

    Has the following public attributes:

    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified.  If the
        parameter has no default value, this attribute is not set.
    * annotation
        The annotation for the parameter if specified.  If the
        parameter has no annotation, this attribute is not set.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''

    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')

    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD

    empty = _empty

    def __init__(self, name, kind, default=_empty, annotation=_empty,
                 _partial_kwarg=False):
        # _partial_kwarg marks parameters whose default was injected by
        # functools.partial (see signature()).

        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind

        if default is not _empty:
            # *args / **kwargs can never carry a default value.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{0} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation

        if name is None:
            # Only positional-only parameters may be nameless.
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            # Non-positional-only names must be valid identifiers.
            if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
                msg = '{0!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name

        self._partial_kwarg = _partial_kwarg

    @property
    def name(self):
        return self._name

    @property
    def default(self):
        return self._default

    @property
    def annotation(self):
        return self._annotation

    @property
    def kind(self):
        return self._kind

    def replace(self, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.'''
        # _void distinguishes "not supplied" from an explicit _empty value.
        if name is _void:
            name = self._name

        if kind is _void:
            kind = self._kind

        if annotation is _void:
            annotation = self._annotation

        if default is _void:
            default = self._default

        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg

        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)

    def __str__(self):
        kind = self.kind

        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            # Positional-only parameters render in angle brackets.
            if formatted is None:
                formatted = ''
            formatted = '<{0}>'.format(formatted)

        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{0}:{1}'.format(formatted,
                                         formatannotation(self._annotation))

        if self._default is not _empty:
            formatted = '{0}={1}'.format(formatted, repr(self._default))

        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted

        return formatted

    def __repr__(self):
        return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
                                              id(self), self.name)

    def __hash__(self):
        # Parameters are mutable value objects and deliberately unhashable.
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)

    def __eq__(self, other):
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)

    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments(object):
    '''Result of `Signature.bind` call.  Holds the mapping of arguments
    to the function's parameters.

    Has the following public attributes:

    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''

    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature

    @property
    def signature(self):
        return self._signature

    @property
    def args(self):
        # Values that can be passed positionally, in parameter order,
        # stopping at the first keyword-only / partial-kwarg / unbound one.
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                    param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break

            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here.  Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)

        return tuple(args)

    @property
    def kwargs(self):
        # Everything not emitted by 'args' above is delivered as keywords.
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                        param._partial_kwarg):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        # First unbound positional parameter: all later
                        # bound values must go by keyword.
                        kwargs_started = True
                        continue

            if not kwargs_started:
                continue

            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg

        return kwargs

    def __hash__(self):
        # Mutable (arguments dict) and therefore deliberately unhashable.
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)

    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)

    def __ne__(self, other):
        return not self.__eq__(other)
class Signature(object):
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.
    A Signature object has the following public attributes and methods:
    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is not set.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    '''
    __slots__ = ('_return_annotation', '_parameters')
    # Subclasses may override these to customize the classes used for
    # parameters and for the result of bind()/bind_partial().
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments
    # Sentinel exposed as a class attribute so callers can write
    # Signature.empty when testing for a missing annotation.
    empty = _empty
    def __init__(self, parameters=None, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'. All arguments are optional.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                top_kind = _POSITIONAL_ONLY
                for idx, param in enumerate(parameters):
                    kind = param.kind
                    if kind < top_kind:
                        # Parameter kinds must be non-decreasing:
                        # positional-only -> positional-or-keyword ->
                        # *args -> keyword-only -> **kwargs.
                        msg = 'wrong parameter order: {0} before {1}'
                        msg = msg.format(top_kind, param.kind)
                        raise ValueError(msg)
                    else:
                        top_kind = kind
                    name = param.name
                    if name is None:
                        # Nameless parameters are keyed by their position
                        # index instead of a name.
                        name = str(idx)
                        param = param.replace(name=name)
                    if name in params:
                        msg = 'duplicate parameter name: {0!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                # Fast path: the caller vouches for ordering/uniqueness
                # (used by from_function below), so skip validation.
                params = OrderedDict(((param.name, param)
                                      for param in parameters))
        self._parameters = params
        self._return_annotation = return_annotation
    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function'''
        if not isinstance(func, types.FunctionType):
            raise TypeError('{0!r} is not a Python function'.format(func))
        Parameter = cls._parameter_cls
        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        # co_kwonlyargcount/__kwdefaults__ do not exist on Python 2
        # functions, hence the getattr() defaults.
        keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = getattr(func, '__annotations__', {})
        defaults = func.__defaults__
        kwdefaults = getattr(func, '__kwdefaults__', None)
        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0
        parameters = []
        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))
        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))
        # *args
        if func_code.co_flags & 0x04:  # CO_VARARGS
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))
        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs
        if func_code.co_flags & 0x08:  # CO_VARKEYWORDS
            index = pos_count + keyword_only_count
            if func_code.co_flags & 0x04:
                # *args sits before **kwargs in co_varnames; skip it.
                index += 1
            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=False)
    @property
    def parameters(self):
        try:
            # Python 3.3+: expose a read-only view of the mapping.
            return types.MappingProxyType(self._parameters)
        except AttributeError:
            # Older interpreters: fall back to a shallow copy.
            return OrderedDict(self._parameters.items())
    @property
    def return_annotation(self):
        return self._return_annotation
    def replace(self, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''
        if parameters is _void:
            parameters = self.parameters.values()
        if return_annotation is _void:
            return_annotation = self._return_annotation
        return type(self)(parameters,
                          return_annotation=return_annotation)
    def __hash__(self):
        # Signatures compare by value; hashing is deliberately disabled.
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)
    def __eq__(self, other):
        if (not issubclass(type(other), Signature) or
                self.return_annotation != other.return_annotation or
                len(self.parameters) != len(other.parameters)):
            return False
        other_positions = dict((param, idx)
                               for idx, param in enumerate(other.parameters.keys()))
        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                # Keyword-only parameters are order-insensitive: compare
                # by name only.
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                # All other kinds must match in both position and value.
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                            param != other.parameters[param_name]):
                        return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _bind(self, args, kwargs, partial=False):
        '''Private method. Don't use directly.'''
        arguments = OrderedDict()
        parameters = iter(self.parameters.values())
        # parameters_ex carries over (at most) one parameter from the
        # positional phase into the keyword phase below.
        parameters_ex = ()
        arg_vals = iter(args)
        if partial:
            # Support for binding arguments to 'functools.partial' objects.
            # See 'functools.partial' case in 'signature()' implementation
            # for details.
            for param_name, param in self.parameters.items():
                if (param._partial_kwarg and param_name not in kwargs):
                    # Simulating 'functools.partial' behavior
                    kwargs[param_name] = param.default
        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg)
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                          param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg)
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments')
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')
                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break
                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))
                    arguments[param.name] = arg_val
        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _POSITIONAL_ONLY:
                # This should never happen in case of a properly built
                # Signature object (but let's have this check here
                # to ensure correct behaviour just in case)
                raise TypeError('{arg!r} parameter is positional only, '
                                'but was passed as a keyword'. \
                                format(arg=param.name))
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue
            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter. It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                        param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name))
            else:
                arguments[param_name] = arg_val
        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')
        return self._bound_arguments_cls(self, arguments)
    def bind(self, *args, **kwargs):
        '''Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature. Raises `TypeError`
        if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs)
    def bind_partial(self, *args, **kwargs):
        '''Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs, partial=True)
    def __str__(self):
        result = []
        render_kw_only_separator = True
        for idx, param in enumerate(self.parameters.values()):
            formatted = str(param)
            kind = param.kind
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False
            result.append(formatted)
        rendered = '({0})'.format(', '.join(result))
        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {0}'.format(anno)
        return rendered
| bsd-3-clause | 8,684,818,752,545,959,000 | 35.652812 | 84 | 0.534421 | false |
RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/cli/rfam.py | 1 | 2267 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import click
from rnacentral_pipeline.databases import rfam
from rnacentral_pipeline.writers import entry_writer
@click.group("rfam")
def cli():
"""
Commands with processing the Rfam metadata.
"""
pass
@cli.command("parse")
@click.argument("rfam_file", type=click.File("r"))
@click.argument("mapping_file", type=click.File("r"))
@click.argument(
"output",
default=".",
type=click.Path(
writable=True,
dir_okay=True,
file_okay=False,
),
)
def process_rfam(rfam_file, mapping_file, output):
"""
Process Rfam's JSON format into the files to import.
"""
entries = rfam.parser.parse(rfam_file, mapping_file)
with entry_writer(Path(output)) as writer:
writer.write(entries)
@cli.command("families")
@click.argument("filename", default="data.tsv", type=click.File("r"))
@click.argument("output", default="rfam-families.csv", type=click.File("w"))
def rfam_group_families(filename, output):
rfam.families.from_file(filename, output)
@cli.command("clans")
@click.argument("filename", default="data.tsv", type=click.File("r"))
@click.argument("output", default="rfam-clans.csv", type=click.File("w"))
def rfam_group_clans(filename, output):
rfam.clans.from_file(filename, output)
@cli.command("ontology-terms")
@click.argument("filename", default="data.tsv", type=click.File("r"))
@click.argument(
"output",
default=".",
type=click.Path(
writable=True,
dir_okay=True,
file_okay=False,
),
)
def ontologies_rfam_terms(filename, output):
rfam.cross_references.from_file(filename, Path(output))
| apache-2.0 | -7,114,810,845,567,693,000 | 27.696203 | 76 | 0.701367 | false |
Maikflow/django_test | lib/python2.7/site-packages/Django-1.7.1-py2.7.egg/django/utils/dates.py | 115 | 2296 | "Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
# Translatable full weekday names, keyed like date.weekday() (Monday == 0).
WEEKDAYS = {
    0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'), 4: _('Friday'),
    5: _('Saturday'), 6: _('Sunday')
}
# Abbreviated weekday names; same keys as WEEKDAYS.
WEEKDAYS_ABBR = {
    0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'), 4: _('Fri'),
    5: _('Sat'), 6: _('Sun')
}
# Reverse map: lowercase English weekday name -> weekday number.
WEEKDAYS_REV = {
    'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4,
    'saturday': 5, 'sunday': 6
}
# Translatable full month names, keyed by month number (January == 1).
MONTHS = {
    1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'), 5: _('May'), 6: _('June'),
    7: _('July'), 8: _('August'), 9: _('September'), 10: _('October'), 11: _('November'),
    12: _('December')
}
# Three-letter lowercase month abbreviations.
MONTHS_3 = {
    1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'), 5: _('may'), 6: _('jun'),
    7: _('jul'), 8: _('aug'), 9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
# Reverse map: three-letter abbreviation -> month number.
MONTHS_3_REV = {
    'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
    'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12
}
MONTHS_AP = { # month names in Associated Press style
    1: pgettext_lazy('abbrev. month', 'Jan.'),
    2: pgettext_lazy('abbrev. month', 'Feb.'),
    3: pgettext_lazy('abbrev. month', 'March'),
    4: pgettext_lazy('abbrev. month', 'April'),
    5: pgettext_lazy('abbrev. month', 'May'),
    6: pgettext_lazy('abbrev. month', 'June'),
    7: pgettext_lazy('abbrev. month', 'July'),
    8: pgettext_lazy('abbrev. month', 'Aug.'),
    9: pgettext_lazy('abbrev. month', 'Sept.'),
    10: pgettext_lazy('abbrev. month', 'Oct.'),
    11: pgettext_lazy('abbrev. month', 'Nov.'),
    12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
    1: pgettext_lazy('alt. month', 'January'),
    2: pgettext_lazy('alt. month', 'February'),
    3: pgettext_lazy('alt. month', 'March'),
    4: pgettext_lazy('alt. month', 'April'),
    5: pgettext_lazy('alt. month', 'May'),
    6: pgettext_lazy('alt. month', 'June'),
    7: pgettext_lazy('alt. month', 'July'),
    8: pgettext_lazy('alt. month', 'August'),
    9: pgettext_lazy('alt. month', 'September'),
    10: pgettext_lazy('alt. month', 'October'),
    11: pgettext_lazy('alt. month', 'November'),
    12: pgettext_lazy('alt. month', 'December')
}
| gpl-2.0 | -5,251,873,346,484,593,000 | 39.280702 | 95 | 0.535279 | false |
Mistobaan/tensorflow | tensorflow/contrib/py2tf/pyct/parser.py | 4 | 1152 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting code to AST.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import gast
from tensorflow.python.util import tf_inspect
def parse_object(obj):
  """Return the AST for the source code of the given object."""
  source = tf_inspect.getsource(obj)
  return parse_str(source)
def parse_str(src):
  """Return the AST parsed from the given piece of code."""
  dedented = textwrap.dedent(src)
  return gast.parse(dedented)
| apache-2.0 | -7,603,796,458,607,800,000 | 29.315789 | 80 | 0.696181 | false |
andreaso/ansible | lib/ansible/plugins/connection/lxd.py | 133 | 4283 | # (c) 2016 Matt Clay <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from distutils.spawn import find_executable
from subprocess import call, Popen, PIPE
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
    """ lxd based connections """
    # Name under which this connection plugin is selected (connection: lxd).
    transport = "lxd"
    # `lxc exec` feeds stdin through, so module pipelining works.
    has_pipelining = True
    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
        self._host = self._play_context.remote_addr
        # Resolve the lxc client binary once; every method below shells
        # out to it.
        self._lxc_cmd = find_executable("lxc")
        if not self._lxc_cmd:
            raise AnsibleError("lxc command not found in PATH")
        # lxd always runs commands as the container default user (root);
        # a requested remote_user cannot be honored, only warned about.
        if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
            self._display.warning('lxd does not support remote_user, using container default: root')
    def _connect(self):
        """connect to lxd (nothing to do here) """
        super(Connection, self)._connect()
        if not self._connected:
            self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
            self._connected = True
    def exec_command(self, cmd, in_data=None, sudoable=True):
        """ execute a command on the lxd host """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
        # Run the command through the remote shell via `lxc exec`.
        local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]
        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
        process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate(in_data)
        stdout = to_text(stdout)
        stderr = to_text(stderr)
        # Map lxc's known stderr messages onto connection failures so
        # callers can tell "command failed" from "container unavailable".
        if stderr == "error: Container is not running.\n":
            raise AnsibleConnectionFailure("container not running: %s" % self._host)
        if stderr == "error: not found\n":
            raise AnsibleConnectionFailure("container not found: %s" % self._host)
        return process.returncode, stdout, stderr
    def put_file(self, in_path, out_path):
        """ put a file from local to lxd """
        super(Connection, self).put_file(in_path, out_path)
        self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host)
        if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
        # `lxc file push` copies the local file into the container.
        local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]
        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        call(local_cmd)
    def fetch_file(self, in_path, out_path):
        """ fetch a file from lxd to local """
        super(Connection, self).fetch_file(in_path, out_path)
        self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
        # `lxc file pull` copies the file out of the container.
        local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        call(local_cmd)
    def close(self):
        """ close the connection (nothing to do here) """
        super(Connection, self).close()
        self._connected = False
| gpl-3.0 | 118,347,690,421,008,590 | 37.241071 | 103 | 0.65258 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/registry/browser/personproduct.py | 1 | 2189 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Views, menus and traversal related to PersonProducts."""
__metaclass__ = type
__all__ = [
'PersonProductBreadcrumb',
'PersonProductFacets',
'PersonProductNavigation',
]
from zope.component import queryAdapter
from zope.traversing.interfaces import IPathAdapter
from lp.app.errors import NotFoundError
from lp.code.interfaces.branchnamespace import get_branch_namespace
from lp.registry.interfaces.personproduct import IPersonProduct
from lp.services.webapp import (
canonical_url,
Link,
Navigation,
StandardLaunchpadFacets,
)
from lp.services.webapp.breadcrumb import Breadcrumb
class PersonProductNavigation(Navigation):
    """Traverse from a person/product pair to one of its branches."""

    usedfor = IPersonProduct

    def traverse(self, branch_name):
        """Return the named branch from the person/product namespace.

        Raises NotFoundError when no such branch exists.
        """
        person_product = self.context
        namespace = get_branch_namespace(
            person=person_product.person,
            product=person_product.product)
        found = namespace.getByName(branch_name)
        if found is None:
            raise NotFoundError
        return found
class PersonProductBreadcrumb(Breadcrumb):
    """Breadcrumb shown for an `IPersonProduct` page."""

    @property
    def text(self):
        """The display name of the product half of the context."""
        return self.context.product.displayname

    @property
    def url(self):
        """An explicitly assigned URL, or the product's canonical one."""
        if self._url is not None:
            return self._url
        return canonical_url(self.context.product, rootsite=self.rootsite)

    @property
    def icon(self):
        """The product's image icon, looked up through the path adapter."""
        adapter = queryAdapter(
            self.context.product, IPathAdapter, name='image')
        return adapter.icon()
class PersonProductFacets(StandardLaunchpadFacets):
    """Facet menu links for an `IPersonProduct`."""

    usedfor = IPersonProduct
    enable_only = ['branches']

    def branches(self):
        """Link to the code facet for this person/product pair."""
        product_name = self.context.product.displayname
        person_name = self.context.person.displayname
        summary = ('Bazaar Branches of %s owned by %s' %
                   (product_name, person_name))
        return Link('', 'Code', summary)
| agpl-3.0 | 5,386,008,516,877,667,000 | 27.802632 | 78 | 0.677478 | false |
Vogeltak/pauselan | lib/python3.4/site-packages/flask/testsuite/basic.py | 406 | 43777 | # -*- coding: utf-8 -*-
"""
flask.testsuite.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import uuid
import flask
import pickle
import unittest
from datetime import datetime
from threading import Thread
from flask.testsuite import FlaskTestCase, emits_module_deprecation_warning
from flask._compat import text_type
from werkzeug.exceptions import BadRequest, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
class BasicFunctionalityTestCase(FlaskTestCase):
def test_options_work(self):
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
self.assert_equal(rv.data, b'')
def test_options_on_multiple_rules(self):
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
@app.route('/', methods=['PUT'])
def index_put():
return 'Aha!'
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT'])
def test_options_handling_disabled(self):
app = flask.Flask(__name__)
def index():
return 'Hello World!'
index.provide_automatic_options = False
app.route('/')(index)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(rv.status_code, 405)
app = flask.Flask(__name__)
def index2():
return 'Hello World!'
index2.provide_automatic_options = True
app.route('/', methods=['OPTIONS'])(index2)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['OPTIONS'])
def test_request_dispatching(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.request.method
@app.route('/more', methods=['GET', 'POST'])
def more():
return flask.request.method
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_false(rv.data) # head truncates
self.assert_equal(c.post('/more').data, b'POST')
self.assert_equal(c.get('/more').data, b'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_url_mapping(self):
app = flask.Flask(__name__)
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule('/', 'index', index)
app.add_url_rule('/more', 'more', more, methods=['GET', 'POST'])
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_false(rv.data) # head truncates
self.assert_equal(c.post('/more').data, b'POST')
self.assert_equal(c.get('/more').data, b'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_werkzeug_routing(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
def bar():
return 'bar'
def index():
return 'index'
app.view_functions['bar'] = bar
app.view_functions['index'] = index
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
def test_endpoint_decorator(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
@app.endpoint('bar')
def bar():
return 'bar'
@app.endpoint('index')
def index():
return 'index'
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
def test_session(self):
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/set', methods=['POST'])
def set():
flask.session['value'] = flask.request.form['value']
return 'value set'
@app.route('/get')
def get():
return flask.session['value']
c = app.test_client()
self.assert_equal(c.post('/set', data={'value': '42'}).data, b'value set')
self.assert_equal(c.get('/get').data, b'42')
def test_session_using_server_name(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com/')
self.assert_in('domain=.example.com', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_server_name_and_port(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_in('domain=.example.com', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_server_name_port_and_path(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080',
APPLICATION_ROOT='/foo'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/foo')
self.assert_in('domain=example.com', rv.headers['set-cookie'].lower())
self.assert_in('path=/foo', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_application_root(self):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
app = flask.Flask(__name__)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, '/bar')
app.config.update(
SECRET_KEY='foo',
APPLICATION_ROOT='/bar'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_in('path=/bar', rv.headers['set-cookie'].lower())
def test_session_using_session_settings(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='www.example.com:8080',
APPLICATION_ROOT='/test',
SESSION_COOKIE_DOMAIN='.example.com',
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_PATH='/'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://www.example.com:8080/test/')
cookie = rv.headers['set-cookie'].lower()
self.assert_in('domain=.example.com', cookie)
self.assert_in('path=/', cookie)
self.assert_in('secure', cookie)
self.assert_not_in('httponly', cookie)
def test_missing_session(self):
app = flask.Flask(__name__)
def expect_exception(f, *args, **kwargs):
try:
f(*args, **kwargs)
except RuntimeError as e:
self.assert_true(e.args and 'session is unavailable' in e.args[0])
else:
self.assert_true(False, 'expected exception')
with app.test_request_context():
self.assert_true(flask.session.get('missing_key') is None)
expect_exception(flask.session.__setitem__, 'foo', 42)
expect_exception(flask.session.pop, 'foo')
def test_session_expiration(self):
permanent = True
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/')
def index():
flask.session['test'] = 42
flask.session.permanent = permanent
return ''
@app.route('/test')
def test():
return text_type(flask.session.permanent)
client = app.test_client()
rv = client.get('/')
self.assert_in('set-cookie', rv.headers)
match = re.search(r'\bexpires=([^;]+)(?i)', rv.headers['set-cookie'])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
self.assert_equal(expires.year, expected.year)
self.assert_equal(expires.month, expected.month)
self.assert_equal(expires.day, expected.day)
rv = client.get('/test')
self.assert_equal(rv.data, b'True')
permanent = False
rv = app.test_client().get('/')
self.assert_in('set-cookie', rv.headers)
match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
self.assert_true(match is None)
def test_session_stored_last(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
@app.after_request
def modify_session(response):
flask.session['foo'] = 42
return response
@app.route('/')
def dump_session_contents():
return repr(flask.session.get('foo'))
c = app.test_client()
self.assert_equal(c.get('/').data, b'None')
self.assert_equal(c.get('/').data, b'42')
def test_session_special_types(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.after_request
def modify_session(response):
flask.session['m'] = flask.Markup('Hello!')
flask.session['u'] = the_uuid
flask.session['dt'] = now
flask.session['b'] = b'\xff'
flask.session['t'] = (1, 2, 3)
return response
@app.route('/')
def dump_session_contents():
return pickle.dumps(dict(flask.session))
c = app.test_client()
c.get('/')
rv = pickle.loads(c.get('/').data)
self.assert_equal(rv['m'], flask.Markup('Hello!'))
self.assert_equal(type(rv['m']), flask.Markup)
self.assert_equal(rv['dt'], now)
self.assert_equal(rv['u'], the_uuid)
self.assert_equal(rv['b'], b'\xff')
self.assert_equal(type(rv['b']), bytes)
self.assert_equal(rv['t'], (1, 2, 3))
def test_flashes(self):
app = flask.Flask(__name__)
app.secret_key = 'testkey'
with app.test_request_context():
self.assert_false(flask.session.modified)
flask.flash('Zap')
flask.session.modified = False
flask.flash('Zip')
self.assert_true(flask.session.modified)
self.assert_equal(list(flask.get_flashed_messages()), ['Zap', 'Zip'])
    def test_extended_flashing(self):
        """Flash messages support categories and category filtering."""
        # Be sure app.testing=True below, else tests can fail silently.
        #
        # Specifically, if app.testing is not set to True, the AssertionErrors
        # in the view functions will cause a 500 response to the test client
        # instead of propagating exceptions.
        app = flask.Flask(__name__)
        app.secret_key = 'testkey'
        app.testing = True
        @app.route('/')
        def index():
            flask.flash(u'Hello World')
            flask.flash(u'Hello World', 'error')
            flask.flash(flask.Markup(u'<em>Testing</em>'), 'warning')
            return ''
        @app.route('/test/')
        def test():
            messages = flask.get_flashed_messages()
            self.assert_equal(len(messages), 3)
            self.assert_equal(messages[0], u'Hello World')
            self.assert_equal(messages[1], u'Hello World')
            self.assert_equal(messages[2], flask.Markup(u'<em>Testing</em>'))
            return ''
        @app.route('/test_with_categories/')
        def test_with_categories():
            messages = flask.get_flashed_messages(with_categories=True)
            self.assert_equal(len(messages), 3)
            self.assert_equal(messages[0], ('message', u'Hello World'))
            self.assert_equal(messages[1], ('error', u'Hello World'))
            self.assert_equal(messages[2], ('warning', flask.Markup(u'<em>Testing</em>')))
            return ''
        @app.route('/test_filter/')
        def test_filter():
            messages = flask.get_flashed_messages(category_filter=['message'], with_categories=True)
            self.assert_equal(len(messages), 1)
            self.assert_equal(messages[0], ('message', u'Hello World'))
            return ''
        @app.route('/test_filters/')
        def test_filters():
            messages = flask.get_flashed_messages(category_filter=['message', 'warning'], with_categories=True)
            self.assert_equal(len(messages), 2)
            self.assert_equal(messages[0], ('message', u'Hello World'))
            self.assert_equal(messages[1], ('warning', flask.Markup(u'<em>Testing</em>')))
            return ''
        @app.route('/test_filters_without_returning_categories/')
        def test_filters2():
            messages = flask.get_flashed_messages(category_filter=['message', 'warning'])
            self.assert_equal(len(messages), 2)
            self.assert_equal(messages[0], u'Hello World')
            self.assert_equal(messages[1], flask.Markup(u'<em>Testing</em>'))
            return ''
        # Create new test client on each test to clean flashed messages.
        c = app.test_client()
        c.get('/')
        c.get('/test/')
        c = app.test_client()
        c.get('/')
        c.get('/test_with_categories/')
        c = app.test_client()
        c.get('/')
        c.get('/test_filter/')
        c = app.test_client()
        c.get('/')
        c.get('/test_filters/')
        c = app.test_client()
        c.get('/')
        c.get('/test_filters_without_returning_categories/')
def test_request_processing(self):
app = flask.Flask(__name__)
evts = []
@app.before_request
def before_request():
evts.append('before')
@app.after_request
def after_request(response):
response.data += b'|after'
evts.append('after')
return response
@app.route('/')
def index():
self.assert_in('before', evts)
self.assert_not_in('after', evts)
return 'request'
self.assert_not_in('after', evts)
rv = app.test_client().get('/').data
self.assert_in('after', evts)
self.assert_equal(rv, b'request|after')
def test_after_request_processing(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.after_this_request
def foo(response):
response.headers['X-Foo'] = 'a header'
return response
return 'Test'
c = app.test_client()
resp = c.get('/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers['X-Foo'], 'a header')
def test_teardown_request_handler(self):
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_in(b'Response', rv.data)
self.assert_equal(len(called), 1)
    def test_teardown_request_handler_debug_mode(self):
        """Teardown handlers also run exactly once in testing mode."""
        # NOTE(review): despite the name, this enables app.testing rather
        # than app.debug -- confirm which flag this test is meant to cover.
        called = []
        app = flask.Flask(__name__)
        app.testing = True
        @app.teardown_request
        def teardown_request(exc):
            called.append(True)
            return "Ignored"
        @app.route('/')
        def root():
            return "Response"
        rv = app.test_client().get('/')
        self.assert_equal(rv.status_code, 200)
        self.assert_in(b'Response', rv.data)
        self.assert_equal(len(called), 1)
    def test_teardown_request_handler_error(self):
        """All teardown handlers receive the original view exception."""
        called = []
        app = flask.Flask(__name__)
        @app.teardown_request
        def teardown_request1(exc):
            self.assert_equal(type(exc), ZeroDivisionError)
            called.append(True)
            # This raises a new error and blows away sys.exc_info(), so we can
            # test that all teardown_requests get passed the same original
            # exception.
            try:
                raise TypeError()
            except:
                pass
        @app.teardown_request
        def teardown_request2(exc):
            self.assert_equal(type(exc), ZeroDivisionError)
            called.append(True)
            # This raises a new error and blows away sys.exc_info(), so we can
            # test that all teardown_requests get passed the same original
            # exception.
            try:
                raise TypeError()
            except:
                pass
        @app.route('/')
        def fails():
            1 // 0
        rv = app.test_client().get('/')
        self.assert_equal(rv.status_code, 500)
        self.assert_in(b'Internal Server Error', rv.data)
        self.assert_equal(len(called), 2)
def test_before_after_request_order(self):
called = []
app = flask.Flask(__name__)
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route('/')
def index():
return '42'
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'42')
self.assert_equal(called, [1, 2, 3, 4, 5, 6])
def test_error_handling(self):
app = flask.Flask(__name__)
@app.errorhandler(404)
def not_found(e):
return 'not found', 404
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.route('/')
def index():
flask.abort(404)
@app.route('/error')
def error():
1 // 0
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'not found')
rv = c.get('/error')
self.assert_equal(rv.status_code, 500)
self.assert_equal(b'internal server error', rv.data)
def test_before_request_and_routing_errors(self):
app = flask.Flask(__name__)
@app.before_request
def attach_something():
flask.g.something = 'value'
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'value')
def test_user_error_handling(self):
class MyException(Exception):
pass
app = flask.Flask(__name__)
@app.errorhandler(MyException)
def handle_my_exception(e):
self.assert_true(isinstance(e, MyException))
return '42'
@app.route('/')
def index():
raise MyException()
c = app.test_client()
self.assert_equal(c.get('/').data, b'42')
    def test_trapping_of_bad_request_key_errors(self):
        """TRAP_BAD_REQUEST_ERRORS turns form KeyErrors into raised exceptions."""
        app = flask.Flask(__name__)
        app.testing = True
        @app.route('/fail')
        def fail():
            # Missing form key normally becomes a 400 BadRequest response.
            flask.request.form['missing_key']
        c = app.test_client()
        self.assert_equal(c.get('/fail').status_code, 400)
        app.config['TRAP_BAD_REQUEST_ERRORS'] = True
        c = app.test_client()
        try:
            c.get('/fail')
        except KeyError as e:
            self.assert_true(isinstance(e, BadRequest))
        else:
            self.fail('Expected exception')
    def test_trapping_of_all_http_exceptions(self):
        """TRAP_HTTP_EXCEPTIONS makes abort() raise instead of responding."""
        app = flask.Flask(__name__)
        app.testing = True
        app.config['TRAP_HTTP_EXCEPTIONS'] = True
        @app.route('/fail')
        def fail():
            flask.abort(404)
        c = app.test_client()
        try:
            c.get('/fail')
        except NotFound as e:
            pass
        else:
            self.fail('Expected exception')
    def test_enctype_debug_helper(self):
        """In debug mode a missing file upload raises DebugFilesKeyError."""
        from flask.debughelpers import DebugFilesKeyError
        app = flask.Flask(__name__)
        app.debug = True
        @app.route('/fail', methods=['POST'])
        def index():
            return flask.request.files['foo'].filename
        # with statement is important because we leave an exception on the
        # stack otherwise and we want to ensure that this is not the case
        # to not negatively affect other tests.
        with app.test_client() as c:
            try:
                c.post('/fail', data={'foo': 'index.txt'})
            except DebugFilesKeyError as e:
                self.assert_in('no file contents were transmitted', str(e))
                self.assert_in('This was submitted: "index.txt"', str(e))
            else:
                self.fail('Expected exception')
    def test_response_creation(self):
        """Views may return unicode, bytes, or a (body, status, headers) tuple."""
        app = flask.Flask(__name__)
        @app.route('/unicode')
        def from_unicode():
            return u'Hällo Wörld'
        @app.route('/string')
        def from_string():
            # Returns a bytes body (pre-encoded UTF-8).
            return u'Hällo Wörld'.encode('utf-8')
        @app.route('/args')
        def from_tuple():
            return 'Meh', 400, {
                'X-Foo': 'Testing',
                'Content-Type': 'text/plain; charset=utf-8'
            }
        c = app.test_client()
        self.assert_equal(c.get('/unicode').data, u'Hällo Wörld'.encode('utf-8'))
        self.assert_equal(c.get('/string').data, u'Hällo Wörld'.encode('utf-8'))
        rv = c.get('/args')
        self.assert_equal(rv.data, b'Meh')
        self.assert_equal(rv.headers['X-Foo'], 'Testing')
        self.assert_equal(rv.status_code, 400)
        self.assert_equal(rv.mimetype, 'text/plain')
def test_make_response(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response()
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('Awesome')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'Awesome')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('W00t', 404)
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'W00t')
self.assert_equal(rv.mimetype, 'text/html')
    def test_make_response_with_response_instance(self):
        """make_response merges status/headers into an existing Response."""
        app = flask.Flask(__name__)
        with app.test_request_context():
            rv = flask.make_response(
                flask.jsonify({'msg': 'W00t'}), 400)
            self.assertEqual(rv.status_code, 400)
            self.assertEqual(rv.data, b'{\n  "msg": "W00t"\n}')
            self.assertEqual(rv.mimetype, 'application/json')
            rv = flask.make_response(
                flask.Response(''), 400)
            self.assertEqual(rv.status_code, 400)
            self.assertEqual(rv.data, b'')
            self.assertEqual(rv.mimetype, 'text/html')
            rv = flask.make_response(
                flask.Response('', headers={'Content-Type': 'text/html'}),
                400, [('X-Foo', 'bar')])
            self.assertEqual(rv.status_code, 400)
            self.assertEqual(rv.headers['Content-Type'], 'text/html')
            self.assertEqual(rv.headers['X-Foo'], 'bar')
def test_url_generation(self):
app = flask.Flask(__name__)
@app.route('/hello/<name>', methods=['POST'])
def hello():
pass
with app.test_request_context():
self.assert_equal(flask.url_for('hello', name='test x'), '/hello/test%20x')
self.assert_equal(flask.url_for('hello', name='test x', _external=True),
'http://localhost/hello/test%20x')
    def test_build_error_handler(self):
        """url_for build errors can be intercepted by custom handlers."""
        app = flask.Flask(__name__)
        # Test base case, a URL which results in a BuildError.
        with app.test_request_context():
            self.assertRaises(BuildError, flask.url_for, 'spam')
        # Verify the error is re-raised if not the current exception.
        try:
            with app.test_request_context():
                flask.url_for('spam')
        except BuildError as err:
            error = err
        try:
            raise RuntimeError('Test case where BuildError is not current.')
        except RuntimeError:
            self.assertRaises(BuildError, app.handle_url_build_error, error, 'spam', {})
        # Test a custom handler.
        def handler(error, endpoint, values):
            # Just a test.
            return '/test_handler/'
        app.url_build_error_handlers.append(handler)
        with app.test_request_context():
            self.assert_equal(flask.url_for('spam'), '/test_handler/')
def test_custom_converters(self):
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(',')
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ','.join(base_to_url(x) for x in value)
app = flask.Flask(__name__)
app.url_map.converters['list'] = ListConverter
@app.route('/<list:args>')
def index(args):
return '|'.join(args)
c = app.test_client()
self.assert_equal(c.get('/1,2,3').data, b'1|2|3')
    def test_static_files(self):
        """The built-in static endpoint serves files and builds URLs."""
        app = flask.Flask(__name__)
        app.testing = True
        rv = app.test_client().get('/static/index.html')
        self.assert_equal(rv.status_code, 200)
        self.assert_equal(rv.data.strip(), b'<h1>Hello World!</h1>')
        with app.test_request_context():
            self.assert_equal(flask.url_for('static', filename='index.html'),
                              '/static/index.html')
        # Close explicitly; static responses hold an open file handle.
        rv.close()
def test_none_response(self):
app = flask.Flask(__name__)
@app.route('/')
def test():
return None
try:
app.test_client().get('/')
except ValueError as e:
self.assert_equal(str(e), 'View function did not return a response')
pass
else:
self.assert_true("Expected ValueError")
    def test_request_locals(self):
        """Unbound request-local proxies repr nicely and evaluate falsy."""
        self.assert_equal(repr(flask.g), '<LocalProxy unbound>')
        self.assertFalse(flask.g)
    def test_test_app_proper_environ(self):
        """SERVER_NAME matching works across schemes, ports and subdomains.

        Handles both Werkzeug 0.7 (raises ValueError) and 0.8 (returns 404)
        behavior for mismatched server names.
        """
        app = flask.Flask(__name__)
        app.config.update(
            SERVER_NAME='localhost.localdomain:5000'
        )
        @app.route('/')
        def index():
            return 'Foo'
        @app.route('/', subdomain='foo')
        def subdomain():
            return 'Foo SubDomain'
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'Foo')
        rv = app.test_client().get('/', 'http://localhost.localdomain:5000')
        self.assert_equal(rv.data, b'Foo')
        rv = app.test_client().get('/', 'https://localhost.localdomain:5000')
        self.assert_equal(rv.data, b'Foo')
        app.config.update(SERVER_NAME='localhost.localdomain')
        rv = app.test_client().get('/', 'https://localhost.localdomain')
        self.assert_equal(rv.data, b'Foo')
        try:
            app.config.update(SERVER_NAME='localhost.localdomain:443')
            rv = app.test_client().get('/', 'https://localhost.localdomain')
            # Werkzeug 0.8
            self.assert_equal(rv.status_code, 404)
        except ValueError as e:
            # Werkzeug 0.7
            self.assert_equal(str(e), "the server name provided " +
                "('localhost.localdomain:443') does not match the " + \
                "server name from the WSGI environment ('localhost.localdomain')")
        try:
            app.config.update(SERVER_NAME='localhost.localdomain')
            rv = app.test_client().get('/', 'http://foo.localhost')
            # Werkzeug 0.8
            self.assert_equal(rv.status_code, 404)
        except ValueError as e:
            # Werkzeug 0.7
            self.assert_equal(str(e), "the server name provided " + \
                "('localhost.localdomain') does not match the " + \
                "server name from the WSGI environment ('foo.localhost')")
        rv = app.test_client().get('/', 'http://foo.localhost.localdomain')
        self.assert_equal(rv.data, b'Foo SubDomain')
def test_exception_propagation(self):
def apprunner(configkey):
app = flask.Flask(__name__)
@app.route('/')
def index():
1 // 0
c = app.test_client()
if config_key is not None:
app.config[config_key] = True
try:
resp = c.get('/')
except Exception:
pass
else:
self.fail('expected exception')
else:
self.assert_equal(c.get('/').status_code, 500)
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in 'TESTING', 'PROPAGATE_EXCEPTIONS', 'DEBUG', None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
    def test_max_content_length(self):
        """Oversized request bodies trigger a 413 before any handler runs."""
        app = flask.Flask(__name__)
        app.config['MAX_CONTENT_LENGTH'] = 64
        @app.before_request
        def always_first():
            # Accessing the form data raises RequestEntityTooLarge (413)
            # before this assertion can ever be reached.
            flask.request.form['myfile']
            self.assert_true(False)
        @app.route('/accept', methods=['POST'])
        def accept_file():
            flask.request.form['myfile']
            self.assert_true(False)
        @app.errorhandler(413)
        def catcher(error):
            return '42'
        c = app.test_client()
        rv = c.post('/accept', data={'myfile': 'foo' * 100})
        self.assert_equal(rv.data, b'42')
    def test_url_processors(self):
        """url_defaults and url_value_preprocessor cooperate on lang_code."""
        app = flask.Flask(__name__)
        @app.url_defaults
        def add_language_code(endpoint, values):
            # Re-inject the language code for endpoints that expect one.
            if flask.g.lang_code is not None and \
               app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
                values.setdefault('lang_code', flask.g.lang_code)
        @app.url_value_preprocessor
        def pull_lang_code(endpoint, values):
            flask.g.lang_code = values.pop('lang_code', None)
        @app.route('/<lang_code>/')
        def index():
            return flask.url_for('about')
        @app.route('/<lang_code>/about')
        def about():
            return flask.url_for('something_else')
        @app.route('/foo')
        def something_else():
            return flask.url_for('about', lang_code='en')
        c = app.test_client()
        self.assert_equal(c.get('/de/').data, b'/de/about')
        self.assert_equal(c.get('/de/about').data, b'/foo')
        self.assert_equal(c.get('/foo').data, b'/en/about')
    def test_inject_blueprint_url_defaults(self):
        """Blueprint url_defaults are applied even for dotted blueprint names."""
        app = flask.Flask(__name__)
        bp = flask.Blueprint('foo.bar.baz', __name__,
                             template_folder='template')
        @bp.url_defaults
        def bp_defaults(endpoint, values):
            values['page'] = 'login'
        @bp.route('/<page>')
        def view(page): pass
        app.register_blueprint(bp)
        values = dict()
        app.inject_url_defaults('foo.bar.baz.view', values)
        expected = dict(page='login')
        self.assert_equal(values, expected)
        with app.test_request_context('/somepage'):
            url = flask.url_for('foo.bar.baz.view')
        expected = '/login'
        self.assert_equal(url, expected)
def test_nonascii_pathinfo(self):
app = flask.Flask(__name__)
app.testing = True
@app.route(u'/киртест')
def index():
return 'Hello World!'
c = app.test_client()
rv = c.get(u'/киртест')
self.assert_equal(rv.data, b'Hello World!')
    def test_debug_mode_complains_after_first_request(self):
        """In debug mode, registering routes after the first request fails."""
        app = flask.Flask(__name__)
        app.debug = True
        @app.route('/')
        def index():
            return 'Awesome'
        self.assert_false(app.got_first_request)
        self.assert_equal(app.test_client().get('/').data, b'Awesome')
        try:
            @app.route('/foo')
            def broken():
                return 'Meh'
        except AssertionError as e:
            self.assert_in('A setup function was called', str(e))
        else:
            self.fail('Expected exception')
        # Outside debug mode late registration is still allowed.
        app.debug = False
        @app.route('/foo')
        def working():
            return 'Meh'
        self.assert_equal(app.test_client().get('/foo').data, b'Meh')
        self.assert_true(app.got_first_request)
def test_before_first_request_functions(self):
got = []
app = flask.Flask(__name__)
@app.before_first_request
def foo():
got.append(42)
c = app.test_client()
c.get('/')
self.assert_equal(got, [42])
c.get('/')
self.assert_equal(got, [42])
self.assert_true(app.got_first_request)
    def test_routing_redirect_debugging(self):
        """In debug mode a POST to a redirecting URL raises a helpful error."""
        app = flask.Flask(__name__)
        app.debug = True
        @app.route('/foo/', methods=['GET', 'POST'])
        def foo():
            return 'success'
        with app.test_client() as c:
            try:
                c.post('/foo', data={})
            except AssertionError as e:
                self.assert_in('http://localhost/foo/', str(e))
                self.assert_in('Make sure to directly send your POST-request '
                               'to this URL', str(e))
            else:
                self.fail('Expected exception')
            rv = c.get('/foo', data={}, follow_redirects=True)
            self.assert_equal(rv.data, b'success')
        # Outside debug mode the redirect is followed silently.
        app.debug = False
        with app.test_client() as c:
            rv = c.post('/foo', data={}, follow_redirects=True)
            self.assert_equal(rv.data, b'success')
    def test_route_decorator_custom_endpoint(self):
        """The route decorator honors an explicit endpoint name."""
        app = flask.Flask(__name__)
        app.debug = True
        @app.route('/foo/')
        def foo():
            return flask.request.endpoint
        @app.route('/bar/', endpoint='bar')
        def for_bar():
            return flask.request.endpoint
        @app.route('/bar/123', endpoint='123')
        def for_bar_foo():
            return flask.request.endpoint
        with app.test_request_context():
            assert flask.url_for('foo') == '/foo/'
            assert flask.url_for('bar') == '/bar/'
            assert flask.url_for('123') == '/bar/123'
        c = app.test_client()
        self.assertEqual(c.get('/foo/').data, b'foo')
        self.assertEqual(c.get('/bar/').data, b'bar')
        self.assertEqual(c.get('/bar/123').data, b'123')
    def test_preserve_only_once(self):
        """Debug mode preserves only one failed request context at a time."""
        app = flask.Flask(__name__)
        app.debug = True
        @app.route('/fail')
        def fail_func():
            1 // 0
        c = app.test_client()
        for x in range(3):
            with self.assert_raises(ZeroDivisionError):
                c.get('/fail')
        self.assert_true(flask._request_ctx_stack.top is not None)
        self.assert_true(flask._app_ctx_stack.top is not None)
        # implicit appctx disappears too
        flask._request_ctx_stack.top.pop()
        self.assert_true(flask._request_ctx_stack.top is None)
        self.assert_true(flask._app_ctx_stack.top is None)
    def test_preserve_remembers_exception(self):
        """A preserved debug context delivers its exception on the next request."""
        app = flask.Flask(__name__)
        app.debug = True
        errors = []
        @app.route('/fail')
        def fail_func():
            1 // 0
        @app.route('/success')
        def success_func():
            return 'Okay'
        @app.teardown_request
        def teardown_handler(exc):
            errors.append(exc)
        c = app.test_client()
        # After this failure we did not yet call the teardown handler
        with self.assert_raises(ZeroDivisionError):
            c.get('/fail')
        self.assert_equal(errors, [])
        # But this request triggers it, and it's an error
        c.get('/success')
        self.assert_equal(len(errors), 2)
        self.assert_true(isinstance(errors[0], ZeroDivisionError))
        # At this point another request does nothing.
        c.get('/success')
        self.assert_equal(len(errors), 3)
        self.assert_equal(errors[1], None)
    def test_get_method_on_g(self):
        """flask.g.get behaves like dict.get (default on missing keys)."""
        app = flask.Flask(__name__)
        app.testing = True
        with app.app_context():
            self.assert_equal(flask.g.get('x'), None)
            self.assert_equal(flask.g.get('x', 11), 11)
            flask.g.x = 42
            self.assert_equal(flask.g.get('x'), 42)
            self.assert_equal(flask.g.x, 42)
    def test_g_iteration_protocol(self):
        """flask.g supports 'in' membership tests and iteration over keys."""
        app = flask.Flask(__name__)
        app.testing = True
        with app.app_context():
            flask.g.foo = 23
            flask.g.bar = 42
            self.assert_equal('foo' in flask.g, True)
            self.assert_equal('foos' in flask.g, False)
            self.assert_equal(sorted(flask.g), ['bar', 'foo'])
class SubdomainTestCase(FlaskTestCase):
    """Tests for subdomain routing (plus a few multi-route rule tests)."""

    def test_basic_support(self):
        """A route with subdomain= only matches requests to that subdomain."""
        app = flask.Flask(__name__)
        app.config['SERVER_NAME'] = 'localhost'
        @app.route('/')
        def normal_index():
            return 'normal index'
        @app.route('/', subdomain='test')
        def test_index():
            return 'test index'
        c = app.test_client()
        rv = c.get('/', 'http://localhost/')
        self.assert_equal(rv.data, b'normal index')
        rv = c.get('/', 'http://test.localhost/')
        self.assert_equal(rv.data, b'test index')

    @emits_module_deprecation_warning
    def test_module_static_path_subdomain(self):
        """Static files of a (deprecated) module are served on its subdomain."""
        app = flask.Flask(__name__)
        app.config['SERVER_NAME'] = 'example.com'
        from subdomaintestmodule import mod
        app.register_module(mod)
        c = app.test_client()
        rv = c.get('/static/hello.txt', 'http://foo.example.com/')
        rv.direct_passthrough = False
        self.assert_equal(rv.data.strip(), b'Hello Subdomain')
        rv.close()

    def test_subdomain_matching(self):
        """A variable subdomain is passed to the view as an argument."""
        app = flask.Flask(__name__)
        app.config['SERVER_NAME'] = 'localhost'
        @app.route('/', subdomain='<user>')
        def index(user):
            return 'index for %s' % user
        c = app.test_client()
        rv = c.get('/', 'http://mitsuhiko.localhost/')
        self.assert_equal(rv.data, b'index for mitsuhiko')

    def test_subdomain_matching_with_ports(self):
        """Subdomain matching also works when SERVER_NAME includes a port."""
        app = flask.Flask(__name__)
        app.config['SERVER_NAME'] = 'localhost:3000'
        @app.route('/', subdomain='<user>')
        def index(user):
            return 'index for %s' % user
        c = app.test_client()
        rv = c.get('/', 'http://mitsuhiko.localhost:3000/')
        self.assert_equal(rv.data, b'index for mitsuhiko')

    @emits_module_deprecation_warning
    def test_module_subdomain_support(self):
        """(Deprecated) modules support default and per-route subdomains."""
        app = flask.Flask(__name__)
        mod = flask.Module(__name__, 'test', subdomain='testing')
        app.config['SERVER_NAME'] = 'localhost'
        @mod.route('/test')
        def test():
            return 'Test'
        @mod.route('/outside', subdomain='xtesting')
        def bar():
            return 'Outside'
        app.register_module(mod)
        c = app.test_client()
        rv = c.get('/test', 'http://testing.localhost/')
        self.assert_equal(rv.data, b'Test')
        rv = c.get('/outside', 'http://xtesting.localhost/')
        self.assert_equal(rv.data, b'Outside')

    # NOTE(review): the two tests below exercise multiple route rules on one
    # view rather than subdomains -- confirm whether they belong in this class.
    def test_multi_route_rules(self):
        """Two route decorators on one view both dispatch correctly."""
        app = flask.Flask(__name__)
        @app.route('/')
        @app.route('/<test>/')
        def index(test='a'):
            return test
        rv = app.test_client().open('/')
        self.assert_equal(rv.data, b'a')
        rv = app.test_client().open('/b/')
        self.assert_equal(rv.data, b'b')

    def test_multi_route_class_views(self):
        """add_url_rule may register one bound method under two rules."""
        class View(object):
            def __init__(self, app):
                app.add_url_rule('/', 'index', self.index)
                app.add_url_rule('/<test>/', 'index', self.index)
            def index(self, test='a'):
                return test
        app = flask.Flask(__name__)
        _ = View(app)
        rv = app.test_client().open('/')
        self.assert_equal(rv.data, b'a')
        rv = app.test_client().open('/b/')
        self.assert_equal(rv.data, b'b')
def suite():
    """Collect the test cases from this module into a single suite."""
    tests = unittest.TestSuite()
    for case in (BasicFunctionalityTestCase, SubdomainTestCase):
        tests.addTest(unittest.makeSuite(case))
    return tests
| gpl-2.0 | -2,334,631,854,668,619,300 | 33.892344 | 111 | 0.542704 | false |
victronenergy/dbus-fronius | test/src/fronius_sim/fronius_sim.py | 1 | 1241 | import random
import time
class PowerInfo:
    """Simulated AC measurements with a running energy integral."""

    def __init__(self):
        self._lastEnergy = 0
        self._prevPower = 0
        # time.perf_counter() replaces time.clock(), which was removed in
        # Python 3.8.
        self._lastTimeStamp = time.perf_counter()

    @property
    def current(self):
        # Amperes, jittering around 14 A.
        return random.gauss(14, 0.5)

    @property
    def voltage(self):
        # Volts, jittering very slightly around nominal 230 V.
        return random.gauss(230, 0.05)

    @property
    def power(self):
        """Return a fresh power sample and advance the energy integral."""
        watts = random.gauss(3000, 100)
        now = time.perf_counter()
        # Trapezoidal integration of power over elapsed time, converted
        # from watt-seconds to watt-hours.
        self._lastEnergy += (self._prevPower + watts) * (now - self._lastTimeStamp) / (2 * 3600)
        self._lastTimeStamp = now
        self._prevPower = watts
        return watts

    @property
    def nominal_power(self):
        return 2000

    @property
    def energy(self):
        """Total energy in Wh; reading it also samples power once."""
        # Reading .power advances the integration before we report the total.
        self.power
        return self._lastEnergy
class FroniusSim:
    """Static description plus live measurements for one simulated inverter."""

    def __init__(self, id, unique_id, device_type, custom_name='', has_3phases=True, modbus_enabled=False,
                 max_power=5000):
        # Identity / static description.
        self.id = id
        self.unique_id = unique_id
        self.custom_name = custom_name
        self.device_type = device_type
        self.modbus_enabled = modbus_enabled
        self.max_power = max_power
        self.power_limit = 100
        # Live measurements: three-phase devices get independent per-phase
        # readings, single-phase devices alias L1 to the main reading.
        self.main = PowerInfo()
        self.has_3phases = has_3phases
        if has_3phases:
            self.l1 = PowerInfo()
            self.l2 = PowerInfo()
            self.l3 = PowerInfo()
        else:
            self.l1 = self.main
| mit | 1,034,318,083,225,044,400 | 21.160714 | 103 | 0.676873 | false |
tsdmgz/ansible | lib/ansible/modules/system/pamd.py | 3 | 23321 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Kenneth D. Evensen <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: pamd
author:
- "Kenneth D. Evensen (@kevensen)"
short_description: Manage PAM Modules
description:
- Edit PAM service's type, control, module path and module arguments.
In order for a PAM rule to be modified, the type, control and
module_path must match an existing rule. See man(5) pam.d for details.
version_added: "2.3"
options:
name:
required: true
description:
- The name generally refers to the PAM service file to
change, for example system-auth.
type:
required: true
description:
- The type of the PAM rule being modified. The type, control
and module_path all must match a rule to be modified.
control:
required: true
description:
- The control of the PAM rule being modified. This may be a
complicated control with brackets. If this is the case, be
sure to put "[bracketed controls]" in quotes. The type,
control and module_path all must match a rule to be modified.
module_path:
required: true
description:
- The module path of the PAM rule being modified. The type,
control and module_path all must match a rule to be modified.
new_type:
description:
- The new type to assign to the new rule.
new_control:
description:
- The new control to assign to the new rule.
new_module_path:
description:
- The new module path to be assigned to the new rule.
module_arguments:
description:
- When state is 'updated', the module_arguments will replace existing
module_arguments. When state is 'args_absent' args matching those
listed in module_arguments will be removed. When state is
'args_present' any args listed in module_arguments are added if
missing from the existing rule. Furthermore, if the module argument
takes a value denoted by '=', the value will be changed to that specified
in module_arguments. Note that module_arguments is a list. Please see
the examples for usage.
state:
default: updated
choices:
- updated
- before
- after
- args_present
- args_absent
- absent
description:
- The default of 'updated' will modify an existing rule if type,
control and module_path all match an existing rule. With 'before',
the new rule will be inserted before a rule matching type, control
and module_path. Similarly, with 'after', the new rule will be inserted
after an existing rule matching type, control and module_path. With
either 'before' or 'after' new_type, new_control, and new_module_path
must all be specified. If state is 'args_absent' or 'args_present',
new_type, new_control, and new_module_path will be ignored. State
'absent' will remove the rule. The 'absent' state was added in version
2.4 and is only available in Ansible versions >= 2.4.
path:
default: /etc/pam.d/
description:
- This is the path to the PAM service files
"""
EXAMPLES = """
- name: Update pamd rule's control in /etc/pam.d/system-auth
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
new_control: sufficient
- name: Update pamd rule's complex control in /etc/pam.d/system-auth
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
new_control: '[success=2 default=ignore]'
- name: Insert a new rule before an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
new_type: auth
new_control: sufficient
new_module_path: pam_faillock.so
state: before
- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \
existing rule pam_rootok.so
pamd:
name: su
type: auth
control: sufficient
module_path: pam_rootok.so
new_type: auth
new_control: required
new_module_path: pam_wheel.so
module_arguments: 'use_uid'
state: after
- name: Remove module arguments from an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: ''
state: updated
- name: Replace all module arguments in an existing rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: 'preauth
silent
deny=3
unlock_time=604800
fail_interval=900'
state: updated
- name: Remove specific arguments from a rule
pamd:
name: system-auth
type: session control='[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments: crond,quiet
state: args_absent
- name: Ensure specific arguments are present in a rule
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments: crond,quiet
state: args_present
- name: Ensure specific arguments are present in a rule (alternative)
pamd:
name: system-auth
type: session
control: '[success=1 default=ignore]'
module_path: pam_succeed_if.so
module_arguments:
- crond
- quiet
state: args_present
- name: Module arguments requiring commas must be listed as a Yaml list
pamd:
name: special-module
type: account
control: required
module_path: pam_access.so
module_arguments:
- listsep=,
state: args_present
- name: Update specific argument value in a rule
pamd:
name: system-auth
type: auth
control: required
module_path: pam_faillock.so
module_arguments: 'fail_interval=300'
state: args_present
"""
RETURN = '''
change_count:
description: How many rules were changed
type: int
sample: 1
returned: success
version_added: 2.4
new_rule:
description: The changes to the rule
type: string
sample: None None None sha512 shadow try_first_pass use_authtok
returned: success
version_added: 2.4
updated_rule_(n):
description: The rule(s) that was/were changed
type: string
sample:
- password sufficient pam_unix.so sha512 shadow try_first_pass
use_authtok
returned: success
version_added: 2.4
action:
description:
- "That action that was taken and is one of: update_rule,
insert_before_rule, insert_after_rule, args_present, args_absent,
absent."
returned: always
type: string
sample: "update_rule"
version_added: 2.4
dest:
description:
- "Path to pam.d service that was changed. This is only available in
Ansible version 2.3 and was removed in 2.4."
returned: success
type: string
sample: "/etc/pam.d/system-auth"
...
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import os
import re
import time
# The PamdRule class encapsulates a rule in a pam.d service
class PamdRule(object):
def __init__(self, rule_type,
rule_control, rule_module_path,
rule_module_args=None):
self.rule_type = rule_type
self.rule_control = rule_control
self.rule_module_path = rule_module_path
try:
if (rule_module_args is not None and
type(rule_module_args) is list):
self.rule_module_args = rule_module_args
elif (rule_module_args is not None and
type(rule_module_args) is str):
self.rule_module_args = rule_module_args.split()
except AttributeError:
self.rule_module_args = []
@classmethod
def rulefromstring(cls, stringline):
pattern = None
rule_type = ''
rule_control = ''
rule_module_path = ''
rule_module_args = ''
complicated = False
if '[' in stringline:
pattern = re.compile(
r"""([\-A-Za-z0-9_]+)\s* # Rule Type
\[([A-Za-z0-9_=\s]+)\]\s* # Rule Control
([A-Za-z0-9_\-\.]+)\s* # Rule Path
([A-Za-z0-9,_=<>\-\s\./]*)""", # Rule Args
re.X)
complicated = True
else:
pattern = re.compile(
r"""([\-A-Za-z0-9_]+)\s* # Rule Type
([A-Za-z0-9_]+)\s* # Rule Control
([A-Za-z0-9_\-\.]+)\s* # Rule Path
([A-Za-z0-9,_=<>\-\s\./]*)""", # Rule Args
re.X)
result = pattern.match(stringline)
rule_type = result.group(1)
if complicated:
rule_control = '[' + result.group(2) + ']'
else:
rule_control = result.group(2)
rule_module_path = result.group(3)
if result.group(4) is not None:
rule_module_args = result.group(4)
return cls(rule_type, rule_control, rule_module_path, rule_module_args)
def get_module_args_as_string(self):
try:
if self.rule_module_args is not None:
return ' '.join(self.rule_module_args)
except AttributeError:
pass
return ''
def __str__(self):
return "%-10s\t%s\t%s %s" % (self.rule_type,
self.rule_control,
self.rule_module_path,
self.get_module_args_as_string())
# PamdService encapsulates an entire service and contains one or more rules
class PamdService(object):

    def __init__(self, ansible=None):
        """Create an empty service, optionally bound to an AnsibleModule."""
        if ansible is not None:
            self.check = ansible.check_mode
        else:
            # BUGFIX: this assignment used to run unconditionally, clobbering
            # the check_mode value just read from the module above.
            self.check = False
        self.ansible = ansible
        self.preamble = []
        self.rules = []
        self.fname = None
        if ansible is not None:
            self.path = self.ansible.params["path"]
            self.name = self.ansible.params["name"]

    def load_rules_from_file(self):
        """Read self.path/self.name and parse it into preamble + rules."""
        self.fname = self.path + "/" + self.name
        stringline = ''
        try:
            with open(self.fname, 'r') as service_file:
                for line in service_file:
                    stringline += line.rstrip()
                    stringline += '\n'
            self.load_rules_from_string(stringline)
        except IOError:
            e = get_exception()
            self.ansible.fail_json(msg='Unable to open/read PAM module \
                                   file %s with error %s. And line %s' %
                                   (self.fname, str(e), stringline))

    def load_rules_from_string(self, stringvalue):
        """Split file content into a comment preamble and parsed rules."""
        for line in stringvalue.splitlines():
            stringline = line.rstrip()
            if line.startswith('#') and not line.isspace():
                self.preamble.append(line.rstrip())
            elif (not line.startswith('#') and
                    not line.isspace() and
                    len(line) != 0):
                self.rules.append(PamdRule.rulefromstring(stringline))

    def write(self):
        """Atomically write the rendered service back to disk."""
        if self.fname is None:
            self.fname = self.path + "/" + self.name
        # If the file is a symbollic link, we'll write to the source.
        pamd_file = os.path.realpath(self.fname)
        temp_file = "/tmp/" + self.name + "_" + time.strftime("%y%m%d%H%M%S")
        try:
            with open(temp_file, 'w') as f:
                f.write(str(self))
        except IOError:
            # BUGFIX: the message used self.temp_file, which is never set and
            # raised AttributeError while reporting the real failure.
            self.ansible.fail_json(msg='Unable to create temporary \
                                   file %s' % temp_file)
        self.ansible.atomic_move(temp_file, pamd_file)

    def __str__(self):
        """Render preamble and rules, blank-line separating rule types."""
        stringvalue = ''
        previous_rule = None
        for amble in self.preamble:
            stringvalue += amble
            stringvalue += '\n'
        for rule in self.rules:
            # Insert a blank line when the rule type changes (ignoring a
            # leading '-' marker on the type).
            if (previous_rule is not None and
                    (previous_rule.rule_type.replace('-', '') !=
                        rule.rule_type.replace('-', ''))):
                stringvalue += '\n'
            stringvalue += str(rule).rstrip()
            stringvalue += '\n'
            previous_rule = rule
        if stringvalue.endswith('\n'):
            stringvalue = stringvalue[:-1]
        return stringvalue
def update_rule(service, old_rule, new_rule):
changed = False
change_count = 0
result = {'action': 'update_rule'}
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
if (new_rule.rule_type is not None and
new_rule.rule_type != rule.rule_type):
rule.rule_type = new_rule.rule_type
changed = True
if (new_rule.rule_control is not None and
new_rule.rule_control != rule.rule_control):
rule.rule_control = new_rule.rule_control
changed = True
if (new_rule.rule_module_path is not None and
new_rule.rule_module_path != rule.rule_module_path):
rule.rule_module_path = new_rule.rule_module_path
changed = True
try:
if (new_rule.rule_module_args is not None and
new_rule.get_module_args_as_string() !=
rule.get_module_args_as_string()):
rule.rule_module_args = new_rule.rule_module_args
changed = True
except AttributeError:
pass
if changed:
result['updated_rule_' + str(change_count)] = str(rule)
result['new_rule'] = str(new_rule)
change_count += 1
result['change_count'] = change_count
return changed, result
def insert_before_rule(service, old_rule, new_rule):
index = 0
change_count = 0
result = {'action':
'insert_before_rule'}
changed = False
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
if index == 0:
service.rules.insert(0, new_rule)
changed = True
elif (new_rule.rule_type != service.rules[index - 1].rule_type or
new_rule.rule_control !=
service.rules[index - 1].rule_control or
new_rule.rule_module_path !=
service.rules[index - 1].rule_module_path):
service.rules.insert(index, new_rule)
changed = True
if changed:
result['new_rule'] = str(new_rule)
result['before_rule_' + str(change_count)] = str(rule)
change_count += 1
index += 1
result['change_count'] = change_count
return changed, result
def insert_after_rule(service, old_rule, new_rule):
index = 0
change_count = 0
result = {'action': 'insert_after_rule'}
changed = False
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
if (new_rule.rule_type != service.rules[index + 1].rule_type or
new_rule.rule_control !=
service.rules[index + 1].rule_control or
new_rule.rule_module_path !=
service.rules[index + 1].rule_module_path):
service.rules.insert(index + 1, new_rule)
changed = True
if changed:
result['new_rule'] = str(new_rule)
result['after_rule_' + str(change_count)] = str(rule)
change_count += 1
index += 1
result['change_count'] = change_count
return changed, result
def remove_module_arguments(service, old_rule, module_args):
result = {'action': 'args_absent'}
changed = False
change_count = 0
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
for arg_to_remove in module_args:
for arg in rule.rule_module_args:
if arg == arg_to_remove:
rule.rule_module_args.remove(arg)
changed = True
result['removed_arg_' + str(change_count)] = arg
result['from_rule_' + str(change_count)] = str(rule)
change_count += 1
result['change_count'] = change_count
return changed, result
def add_module_arguments(service, old_rule, module_args):
result = {'action': 'args_present'}
changed = False
change_count = 0
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
for arg_to_add in module_args:
if "=" in arg_to_add:
pre_string = arg_to_add[:arg_to_add.index('=') + 1]
indicies = [i for i, arg
in enumerate(rule.rule_module_args)
if arg.startswith(pre_string)]
if len(indicies) == 0:
rule.rule_module_args.append(arg_to_add)
changed = True
result['added_arg_' + str(change_count)] = arg_to_add
result['to_rule_' + str(change_count)] = str(rule)
change_count += 1
else:
for i in indicies:
if rule.rule_module_args[i] != arg_to_add:
rule.rule_module_args[i] = arg_to_add
changed = True
result['updated_arg_' +
str(change_count)] = arg_to_add
result['in_rule_' +
str(change_count)] = str(rule)
change_count += 1
elif arg_to_add not in rule.rule_module_args:
rule.rule_module_args.append(arg_to_add)
changed = True
result['added_arg_' + str(change_count)] = arg_to_add
result['to_rule_' + str(change_count)] = str(rule)
change_count += 1
result['change_count'] = change_count
return changed, result
def remove_rule(service, old_rule):
result = {'action': 'absent'}
changed = False
change_count = 0
for rule in service.rules:
if (old_rule.rule_type == rule.rule_type and
old_rule.rule_control == rule.rule_control and
old_rule.rule_module_path == rule.rule_module_path):
service.rules.remove(rule)
changed = True
return changed, result
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str'),
type=dict(required=True,
choices=['account', 'auth',
'password', 'session']),
control=dict(required=True, type='str'),
module_path=dict(required=True, type='str'),
new_type=dict(required=False,
choices=['account', 'auth',
'password', 'session']),
new_control=dict(required=False, type='str'),
new_module_path=dict(required=False, type='str'),
module_arguments=dict(required=False, type='list'),
state=dict(required=False, default="updated",
choices=['before', 'after', 'updated',
'args_absent', 'args_present', 'absent']),
path=dict(required=False, default='/etc/pam.d', type='str')
),
supports_check_mode=True,
required_if=[
("state", "args_present", ["module_arguments"]),
("state", "args_absent", ["module_arguments"]),
("state", "before", ["new_control"]),
("state", "before", ["new_type"]),
("state", "before", ["new_module_path"]),
("state", "after", ["new_control"]),
("state", "after", ["new_type"]),
("state", "after", ["new_module_path"])
]
)
service = module.params['name']
old_type = module.params['type']
old_control = module.params['control']
old_module_path = module.params['module_path']
new_type = module.params['new_type']
new_control = module.params['new_control']
new_module_path = module.params['new_module_path']
module_arguments = module.params['module_arguments']
state = module.params['state']
path = module.params['path']
pamd = PamdService(module)
pamd.load_rules_from_file()
old_rule = PamdRule(old_type,
old_control,
old_module_path)
new_rule = PamdRule(new_type,
new_control,
new_module_path,
module_arguments)
if state == 'updated':
change, result = update_rule(pamd,
old_rule,
new_rule)
elif state == 'before':
change, result = insert_before_rule(pamd,
old_rule,
new_rule)
elif state == 'after':
change, result = insert_after_rule(pamd,
old_rule,
new_rule)
elif state == 'args_absent':
change, result = remove_module_arguments(pamd,
old_rule,
module_arguments)
elif state == 'args_present':
change, result = add_module_arguments(pamd,
old_rule,
module_arguments)
elif state == 'absent':
change, result = remove_rule(pamd,
old_rule)
if not module.check_mode and change:
pamd.write()
facts = {}
facts['pamd'] = {'changed': change, 'result': result}
module.params['dest'] = pamd.fname
module.exit_json(changed=change, ansible_facts=facts)
if __name__ == '__main__':
main()
| gpl-3.0 | 9,031,450,999,728,111,000 | 33.60089 | 92 | 0.54779 | false |
ntruchsess/Arduino-1 | arduino-core/src/processing/app/i18n/python/requests/models.py | 151 | 21105 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
import datetime
from io import BytesIO
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.filepost import encode_multipart_formdata
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
from .utils import (
stream_untransfer, guess_filename, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len)
from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but abritrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but abritrary
if parameters are supplied as a dict.
"""
if (not files) or isinstance(data, str):
return None
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, list):
for v in val:
new_fields.append((field, builtin_str(v)))
else:
new_fields.append((field, builtin_str(val)))
for (k, v) in files:
# support for explicit filename
ft = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
else:
fn, fp, ft = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
if ft:
new_v = (fn, fp.read(), ft)
else:
new_v = (fn, fp.read())
new_fields.append((k, new_v))
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=dict(),
params=dict(),
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
self.hooks = hooks
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare_method(self.method)
p.prepare_url(self.url, self.params)
p.prepare_headers(self.headers)
p.prepare_cookies(self.cookies)
p.prepare_body(self.data, self.files)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
p.prepare_auth(self.auth)
# This MUST go after prepare_auth. Authenticators could add a hook
p.prepare_hooks(self.hooks)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Support for unicode domain names and paths.
scheme, netloc, path, _params, query, fragment = urlparse(url)
if not (scheme and netloc):
raise MissingSchema("Invalid URL %r: No schema supplied" % url)
try:
netloc = netloc.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(_params, str):
_params = _params.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, _params, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
headers = dict((name.encode('ascii'), value) for name, value in headers.items())
self.headers = CaseInsensitiveDict(headers)
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = False
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, basestring),
not isinstance(data, list),
not isinstance(data, dict)
])
try:
length = str(super_len(data))
except (TypeError, AttributeError):
length = False
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = length
else:
self.headers['Transfer-Encoding'] = 'chunked'
# Check if file, fo, generator, iterator.
# If not, run through normal process.
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = str(body.tell())
body.seek(0, 0)
elif body is not None:
self.headers['Content-Length'] = str(len(body))
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth):
"""Prepares the given HTTP auth data."""
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
cookies = cookies
else:
cookies = cookiejar_from_dict(cookies)
if 'cookie' not in self.headers:
cookie_header = get_cookie_header(cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Requires that ``stream=True` on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the lovely Charade library
(Thanks, Ian!)."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. This avoids reading the content
at once into memory for large responses. The chunk size is the number
of bytes it should read into memory. This is not necessarily the
length of each item returned as decoding can take place.
"""
if self._content_consumed:
# simulate reading small chunks of the content
return iter_slices(self._content, chunk_size)
def generate():
while 1:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = stream_untransfer(generate(), self)
if decode_unicode:
gen = stream_decode_response_unicode(gen, self)
return gen
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. This
avoids reading the content at once into memory for large
responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size,
decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code is 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
if Response.encoding is None and chardet module is available, encoding
will be guessed.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
return json.loads(self.content.decode(encoding), **kwargs)
return json.loads(self.text or self.content, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers['link']
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
return self.raw.release_conn()
| lgpl-2.1 | 8,549,009,283,892,214,000 | 30.832579 | 108 | 0.5638 | false |
Dandandan/wikiprogramming | jsrepl/extern/python/reloop-closured/lib/python2.7/ntpath.py | 81 | 18082 | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
# Names re-exported when clients do "from os.path import *".
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime", "islink","exists","lexists","isdir","isfile",
           "ismount","walk","expanduser","expandvars","normpath","abspath",
           "splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
           "extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'        # current-directory component
pardir = '..'       # parent-directory component
extsep = '.'        # separator between filename root and extension
sep = '\\'          # primary path separator on Windows
pathsep = ';'       # separator between entries in PATH-style lists
altsep = '/'        # alternate separator, also accepted by these routines
defpath = '.;C:\\bin'   # fallback executable search path
if 'ce' in sys.builtin_module_names:
    # Windows CE keeps system executables under \Windows.
    defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
    # OS/2 w/ VACPP
    altsep = '/'
devnull = 'nul'     # name of the null device
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
    """Normalize the case of a pathname.

    All characters are lowercased and every forward slash is mapped to
    a backslash."""
    # Unify separators first, then fold case in one pass each.
    backslashed = s.replace("/", "\\")
    return backslashed.lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
    """Test whether a path is absolute"""
    # Drop any leading drive specifier; what remains is absolute
    # exactly when it starts with a slash of either kind.
    rest = splitdrive(s)[1]
    if not rest:
        return False
    return rest[0] in '/\\'
# Join two (or more) paths.
def join(a, *p):
    """Join two or more pathname components, inserting "\\" as needed.
    If any component is an absolute path, all previous path components
    will be discarded."""
    path = a
    for b in p:
        b_wins = 0 # set to 1 iff b makes path irrelevant
        if path == "":
            b_wins = 1
        elif isabs(b):
            # This probably wipes out path so far.  However, it's more
            # complicated if path begins with a drive letter:
            #     1. join('c:', '/a') == 'c:/a'
            #     2. join('c:/', '/a') == 'c:/a'
            # But
            #     3. join('c:/a', '/b') == '/b'
            #     4. join('c:', 'd:/') = 'd:/'
            #     5. join('c:/', 'd:/') = 'd:/'
            if path[1:2] != ":" or b[1:2] == ":":
                # Path doesn't start with a drive letter, or cases 4 and 5.
                b_wins = 1
            # Else path has a drive letter, and b doesn't but is absolute.
            elif len(path) > 3 or (len(path) == 3 and
                path[-1] not in "/\\"):
                # case 3: path already names something under the drive
                # root, so the rooted b replaces it entirely.
                b_wins = 1
        if b_wins:
            path = b
        else:
            # Join, and ensure there's a separator.
            assert len(path) > 0
            if path[-1] in "/\\":
                # path already ends in a separator; avoid doubling it.
                if b and b[0] in "/\\":
                    path += b[1:]
                else:
                    path += b
            elif path[-1] == ":":
                # path is a bare drive letter ("c:"); appending b directly
                # keeps the result drive-relative.
                path += b
            elif b:
                if b[0] in "/\\":
                    path += b
                else:
                    path += "\\" + b
            else:
                # path is not empty and does not end with a backslash,
                # but b is empty; since, e.g., split('a/') produces
                # ('a', ''), it's best if join() adds a backslash in
                # this case.
                path += '\\'
    return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split a pathname into a (drive, path) 2-tuple.

    The drive is a leading "X:" when present, otherwise empty; either
    part may be empty, and drive + path always equals the input."""
    # A drive specifier is present exactly when the second character
    # is a colon.
    has_drive = p[1:2] == ':'
    if has_drive:
        return p[:2], p[2:]
    return '', p
# Parse UNC paths
def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers.

    Return a 2-tuple (unc, rest); either part may be empty.
    If unc is not empty, it has the form '//host/mount' (or similar
    using backslashes).  unc+rest is always the input path.
    Paths containing drive letters never have an UNC part.
    """
    # A drive letter rules out a UNC prefix.
    if p[1:2] == ':':
        return '', p
    if p[0:2] in ('//', '\\\\'):
        # Layout: \\machine\mountpoint\directories...
        # The \\machine\mountpoint prefix plays the role of the drive.
        # Normalize so only backslashes need to be searched for.
        normp = normcase(p)
        host_end = normp.find('\\', 2)
        if host_end == -1:
            # No separator after the host: not a legal UNC path.
            return ("", p)
        mount_end = normp.find('\\', host_end + 1)
        if mount_end == -1:
            mount_end = len(p)
        return p[:mount_end], p[mount_end:]
    return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname.

    Return tuple (head, tail) where tail is everything after the final
    slash.  Either part may be empty."""
    drive, rest = splitdrive(p)
    # Locate the position just past the last slash of either kind.
    cut = len(rest)
    while cut > 0 and rest[cut - 1] not in '/\\':
        cut -= 1
    head, tail = rest[:cut], rest[cut:]   # tail contains no slashes
    # Strip trailing slashes from head, unless head is nothing but
    # slashes (i.e. a bare root), in which case it is kept intact.
    stripped = head.rstrip('/\\')
    if stripped:
        head = stripped
    return drive + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared implementation in genericpath, supplying
    # this module's conventions: '\\' and '/' are both separators and
    # '.' introduces the extension.
    return genericpath._splitext(p, sep, altsep, extsep)
# Expose the shared implementation's docstring on the public wrapper
# so help(os.path.splitext) stays accurate.
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
    """Returns the final component of a pathname"""
    # The tail half of split() is exactly the final component.
    head, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Returns the directory component of a pathname"""
    # The head half of split() is exactly the directory part.
    head, tail = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
    """Test for symbolic link.

    On WindowsNT/95 and OS/2 always returns false
    """
    # This module targets platforms without symlink support, so no
    # path is ever reported as a link; the argument is ignored.
    return False
# alias exists to lexists
# Since islink() is always False here, the non-link-following variant
# is identical to exists() (imported from genericpath).
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
    """Test whether a path is a mount point (defined as root of drive)"""
    # A UNC path is a mount point when nothing (or a lone slash of
    # either kind) follows the //host/mount prefix.
    unc, rest = splitunc(path)
    if unc:
        return rest in ("", "/", "\\")
    # Otherwise: exactly one character after the (optional) drive, and
    # that character is a slash.
    tail = splitdrive(path)[1]
    if len(tail) != 1:
        return False
    return tail[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.
    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..'). func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting. No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func. It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics. Passing None for arg is common."""
    # os.path.walk is removed in Python 3 in favour of os.walk(); emit the
    # py3k deprecation warning attributed to the caller (stacklevel=2).
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable/vanished directory: silently skip it, matching the
        # documented best-effort behaviour.
        return
    func(arg, top, names)
    # Recurse only into entries still present in names after func ran --
    # func may have pruned or reordered the list in place.
    for name in names:
        name = join(top, name)
        if isdir(name):
            walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.
    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    # Scan past the (possibly empty) user name following the tilde;
    # i ends up at the first path separator or at end-of-string.
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    # Determine the current user's home directory.  Precedence matters:
    # HOME first, then USERPROFILE, then HOMEDRIVE+HOMEPATH; if none is
    # set the path is returned unexpanded.
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            # HOMEPATH without HOMEDRIVE: treat the drive as empty.
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1: #~user
        # '~other': substitute 'other' for the last component of our own
        # home directory (assumes sibling home directories).
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.
    Unknown variables are left unchanged."""
    # Fast path: no expansion characters at all.
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    # Character-driven scan.  NOTE: 'path' is repeatedly re-sliced below,
    # so 'index' is always relative to the *current* remainder.
    while index < pathlen:
        c = path[index]
        if c == '\'': # no expansion within single quotes
            # Copy everything up to and including the closing quote
            # verbatim; an unterminated quote copies the remainder.
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                res = res + path
                index = pathlen - 1
        elif c == '%': # variable or '%'
            if path[index + 1:index + 2] == '%':
                # Literal '%%' collapses to a single '%'.
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    # No closing '%': emit the rest unchanged.
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    # Unknown variables are reproduced with their
                    # delimiters so nothing is silently lost.
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '%' + var + '%'
        elif c == '$': # variable or '$$'
            if path[index + 1:index + 2] == '$':
                # Literal '$$' collapses to a single '$'.
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    # No closing '}': emit the rest unchanged.
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                # Bare $name: accumulate characters drawn from varchars.
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    # Step back so the terminator is re-examined by the
                    # main loop on the next iteration.
                    index = index - 1
        else:
            res = res + c
        index = index + 1
    return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    # Preserve unicode (if path is unicode)
    backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
    if path.startswith(('\\\\.\\', '\\\\?\\')):
        # in the case of paths with these prefixes:
        # \\.\ -> device names
        # \\?\ -> literal paths
        # do not do any normalization, but return the path unchanged
        return path
    path = path.replace("/", "\\")
    prefix, path = splitdrive(path)
    # We need to be careful here. If the prefix is empty, and the path starts
    # with a backslash, it could either be an absolute path on the current
    # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
    # is therefore imperative NOT to collapse multiple backslashes blindly in
    # that case.
    # The code below preserves multiple backslashes when there is no drive
    # letter. This means that the invalid filename \\\a\b is preserved
    # unchanged, where a\\\b is normalised to a\b. It's not clear that there
    # is any better behaviour for such edge cases.
    if prefix == '':
        # No drive letter - preserve initial backslashes
        while path[:1] == "\\":
            prefix = prefix + backslash
            path = path[1:]
    else:
        # We have a drive letter - collapse initial backslashes
        if path.startswith("\\"):
            prefix = prefix + backslash
            path = path.lstrip("\\")
    comps = path.split("\\")
    i = 0
    # Squeeze out '.' and empty components; resolve '..' against the
    # preceding real component where possible.
    while i < len(comps):
        if comps[i] in ('.', ''):
            del comps[i]
        elif comps[i] == '..':
            if i > 0 and comps[i-1] != '..':
                # '..' cancels the preceding (non-'..') component.
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith("\\"):
                # '..' at an absolute root has nothing to climb - drop it.
                del comps[i]
            else:
                # Leading '..' on a relative path must be kept.
                i += 1
        else:
            i += 1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append(dot)
    return prefix + backslash.join(comps)
# Return an absolute path.
# Two implementations are installed depending on whether the native
# Windows primitive is available at import time.
try:
    from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
    def abspath(path):
        """Return the absolute version of a path."""
        if not isabs(path):
            # Relative path: anchor it at the current working directory,
            # fetching the unicode CWD when the input is unicode.
            if isinstance(path, unicode):
                cwd = os.getcwdu()
            else:
                cwd = os.getcwd()
            path = join(cwd, path)
        return normpath(path)
else: # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""
        if path: # Empty path must return current working directory.
            try:
                path = _getfullpathname(path)
            except WindowsError:
                pass # Bad path - return unchanged.
        elif isinstance(path, unicode):
            path = os.getcwdu()
        else:
            path = os.getcwd()
        return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
# getwindowsversion()[3] is the platform field; >= 2 means the NT line.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
                              sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
    """Normalize *path* to absolute form and split it into
    (is_unc, prefix, components) for use by relpath().

    prefix is the UNC share (\\\\server\\mount) or the drive letter, and
    components is the list of non-empty path pieces after it.
    """
    normalized = abspath(normpath(path))
    prefix, rest = splitunc(normalized)
    is_unc = bool(prefix)
    if not is_unc:
        # Not a UNC share - fall back to a drive-letter split.
        prefix, rest = splitdrive(normalized)
    parts = [comp for comp in rest.split(sep) if comp]
    return is_unc, prefix, parts
def relpath(path, start=curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    # Both endpoints are compared in absolute, normalized, component form.
    start_is_unc, start_prefix, start_list = _abspath_split(start)
    path_is_unc, path_prefix, path_list = _abspath_split(path)
    # A relative path between a UNC share and a drive path is undefined.
    if path_is_unc ^ start_is_unc:
        raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                            % (path, start))
    # Prefixes (drive letter or UNC root) must match case-insensitively.
    if path_prefix.lower() != start_prefix.lower():
        if path_is_unc:
            raise ValueError("path is on UNC root %s, start on UNC root %s"
                                                % (path_prefix, start_prefix))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                                                % (path_prefix, start_prefix))
    # Work out how much of the filepath is shared by start and path.
    # Comparison is case-insensitive, matching Windows filename semantics.
    i = 0
    for e1, e2 in zip(start_list, path_list):
        if e1.lower() != e2.lower():
            break
        i += 1
    # Climb out of the unshared tail of start, then descend into path.
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
| mit | 5,943,260,352,307,369,000 | 33.573614 | 80 | 0.549276 | false |
gustavo-guimaraes/siga | backend/appengine/lib/pip/_vendor/six.py | 322 | 22857 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.5.2"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Canonical aliases for the types whose names differ between Python 2
# and Python 3 (six's core abstraction).
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe the platform: len() must return a Py_ssize_t, so a fake
        # length of 1 << 31 overflows on 32-bit builds only.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    # Descriptor that resolves its value on first access, caches it on the
    # accessed object, then removes itself from the class so that every
    # later lookup is an ordinary attribute read.
    def __init__(self, name):
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        # This is a bit ugly, but it avoids running this again.
        delattr(obj.__class__, self.name)
        return result
class MovedModule(_LazyDescr):
    # Lazy reference to a module that was renamed between Python 2 and 3.
    # 'old' is the Python 2 name, 'new' the Python 3 name (defaults to the
    # attribute name itself).
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        return _import_module(self.mod)
    def __getattr__(self, attr):
        # Hack around the Django autoreloader. The reloader tries to get
        # __file__ or __name__ of every module in sys.modules. This doesn't work
        # well if this MovedModule is for an module that is unavailable on this
        # machine (like winreg on Unix systems). Thus, we pretend __file__ and
        # __name__ don't exist if the module hasn't been loaded yet. See issues
        # #51 and #53.
        if attr in ("__file__", "__name__") and self.mod not in sys.modules:
            raise AttributeError
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache the attribute so __getattr__ is not consulted again for it.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    # Module subclass whose attributes are _LazyDescr instances listed in
    # _moved_attributes; each resolves itself on first access.
    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__
    def __dir__(self):
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs
    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    # Lazy reference to an object whose home module (and possibly its
    # name) changed between Python 2 and 3.
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # The Python 3 attribute name falls back to the Python 2 name,
            # then to the move's own name.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
# Table of every renamed builtin/module six exposes under six.moves.
# MovedAttribute args: (name, py2 module, py3 module[, py2 attr[, py3 attr]]);
# MovedModule args: (name, py2 module[, py3 module]).
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("winreg", "_winreg"),
]
# Install each move as a class attribute of _MovedItems, and register the
# moved modules in sys.modules so "import six.moves.<name>" works directly.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        sys.modules[__name__ + ".moves." + attr.name] = attr
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
class Module_six_moves_urllib_parse(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_parse"""
# Members of Python 3's urllib.parse and their Python 2 home modules.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
# Register under both the underscore and the dotted submodule names.
sys.modules[__name__ + ".moves.urllib_parse"] = sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
class Module_six_moves_urllib_error(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_error"""
# Members of Python 3's urllib.error and their Python 2 home modules.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
sys.modules[__name__ + ".moves.urllib_error"] = sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""
# Members of Python 3's urllib.request and their Python 2 home modules.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
sys.modules[__name__ + ".moves.urllib_request"] = sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_response"""
# Members of Python 3's urllib.response and their Python 2 home modules.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
sys.modules[__name__ + ".moves.urllib_response"] = sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
# Python 3's urllib.robotparser contents and their Python 2 home module.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
sys.modules[__name__ + ".moves.urllib_robotparser"] = sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    # Bind the five lazily-populated submodules registered above.
    parse = sys.modules[__name__ + ".moves.urllib_parse"]
    error = sys.modules[__name__ + ".moves.urllib_error"]
    request = sys.modules[__name__ + ".moves.urllib_request"]
    response = sys.modules[__name__ + ".moves.urllib_response"]
    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']
# Register the umbrella module so "import six.moves.urllib" works.
sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not (or no longer) a class-level move: it may already have been
        # resolved and cached in the moves module instance's __dict__.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Version-specific attribute names used by the accessor helpers below
# (function/method introspection and dict iteration).
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
    _iterlists = "lists"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
    _iterlists = "iterlists"
try:
    advance_iterator = next
except NameError:
    # Python < 2.6 has no builtin next(); fall back to the .next() method.
    def advance_iterator(it):
        return it.next()
next = advance_iterator
try:
    callable = callable
except NameError:
    # Python 3.0/3.1 removed callable(); emulate it via an MRO scan for
    # __call__ (checking the type, not the instance).
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; the function is the object itself.
        return unbound
    create_bound_method = types.MethodType
    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func
    def create_bound_method(func, obj):
        # Python 2 MethodType takes an explicit class argument.
        return types.MethodType(func, obj, obj.__class__)
    class Iterator(object):
        # Mix-in base mapping Python 2's next() protocol onto __next__().
        def next(self):
            return type(self).__next__(self)
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Introspection helpers built on the version-specific names above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
    """Return an iterator over the keys of a dictionary."""
    return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
    """Return an iterator over the values of a dictionary."""
    return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
    """Return an iterator over the (key, [values]) pairs of a dictionary."""
    return iter(getattr(d, _iterlists)(**kw))
# Byte/text literal helpers and byte-sequence accessors, plus exec_/reraise.
# NOTE: the Python 2 branches below use py2-only syntax (unicode, chr-as-byte,
# statement exec) and must stay inside their version guard.
if PY3:
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    # On Python 3 exec is an ordinary builtin function.
    exec_ = getattr(moves.builtins, "exec")
    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the *caller's* globals/locals via frame inspection.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    # Define reraise via exec_ because the three-argument raise statement
    # is a syntax error on Python 3.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
# Use the real print function where available (Python 2.6+/3); otherwise
# define a compatible emulation for Python 2.4/2.5.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            # Coerce non-strings via str(), mirroring the builtin print.
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                isinstance(data, unicode) and
                fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        # If any of sep/end/args is unicode, the whole output is written
        # as unicode to avoid implicit-coercion UnicodeDecodeErrors.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
    """Return a throwaway base class named 'NewBase' built by *meta*.

    Inheriting from the result gives the subclass the metaclass *meta*
    on both Python 2 and Python 3, with *bases* as its base classes.
    """
    namespace = {}
    return meta("NewBase", bases, namespace)
def add_metaclass(metaclass):
    """Class decorator that re-creates the decorated class with *metaclass*."""
    def decorate(cls):
        body = dict(cls.__dict__)
        # __dict__ and __weakref__ descriptors are re-created automatically
        # by the new class; copying them over would be wrong.
        for implicit in ('__dict__', '__weakref__'):
            body.pop(implicit, None)
        slots = body.get('__slots__')
        if slots is not None:
            # Slot descriptors are likewise regenerated from __slots__;
            # drop the stale ones (a bare string names a single slot).
            if isinstance(slots, str):
                slots = [slots]
            for slot_name in slots:
                body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return decorate
| mit | -5,375,496,060,216,665,000 | 35.166139 | 183 | 0.633416 | false |
tareqalayan/ansible | test/units/modules/network/onyx/test_onyx_lldp_interface.py | 50 | 3151 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.onyx import onyx_lldp_interface
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxLldpInterfaceModule(TestOnyxModule):
    # Unit tests for the onyx_lldp_interface module, run through the shared
    # TestOnyxModule harness with all device I/O mocked out.
    module = onyx_lldp_interface
    def setUp(self):
        super(TestOnyxLldpInterfaceModule, self).setUp()
        # Stub the config fetch and the config push so no real device
        # connection is ever attempted.
        self.mock_get_config = patch.object(
            onyx_lldp_interface.OnyxLldpInterfaceModule,
            "_get_lldp_config")
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch(
            'ansible.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()
    def tearDown(self):
        super(TestOnyxLldpInterfaceModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
    def load_fixtures(self, commands=None, transport='cli'):
        # Canned "show" output serves as the device's current LLDP state:
        # per the tests below, Eth1/1 has LLDP enabled, Eth1/2 does not.
        config_file = 'onyx_lldp_interface_show.cfg'
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None
    def test_lldp_no_change(self):
        # Desired state already present -> no commands, changed=False.
        set_module_args(dict(name='Eth1/1', state='present'))
        self.execute_module(changed=False)
    def test_no_lldp_no_change(self):
        # Desired absence already satisfied -> no commands.
        set_module_args(dict(name='Eth1/2', state='absent'))
        self.execute_module(changed=False)
    def test_no_lldp_change(self):
        # Enabling LLDP emits both receive and transmit commands.
        set_module_args(dict(name='Eth1/2', state='present'))
        commands = ['interface ethernet 1/2 lldp receive',
                    'interface ethernet 1/2 lldp transmit']
        self.execute_module(changed=True, commands=commands)
    def test_lldp_change(self):
        # Disabling LLDP emits the corresponding "no" commands.
        set_module_args(dict(name='Eth1/1', state='absent'))
        commands = ['interface ethernet 1/1 no lldp receive',
                    'interface ethernet 1/1 no lldp transmit']
        self.execute_module(changed=True, commands=commands)
    def test_lldp_aggregate(self):
        # Aggregate without purge only touches interfaces needing change.
        aggregate = [dict(name='Eth1/1'), dict(name='Eth1/2')]
        set_module_args(dict(aggregate=aggregate, state='present'))
        commands = ['interface ethernet 1/2 lldp receive',
                    'interface ethernet 1/2 lldp transmit']
        self.execute_module(changed=True, commands=commands)
    def test_lldp_aggregate_purge(self):
        # With purge=True, interfaces outside the aggregate (Eth1/1) have
        # their LLDP configuration removed as well.
        aggregate = [dict(name='Eth1/3'), dict(name='Eth1/2')]
        set_module_args(dict(aggregate=aggregate, state='present', purge=True))
        commands = ['interface ethernet 1/2 lldp receive',
                    'interface ethernet 1/2 lldp transmit',
                    'interface ethernet 1/3 lldp receive',
                    'interface ethernet 1/3 lldp transmit',
                    'interface ethernet 1/1 no lldp receive',
                    'interface ethernet 1/1 no lldp transmit']
        self.execute_module(changed=True, commands=commands)
| gpl-3.0 | 8,036,928,914,720,705,000 | 40.460526 | 92 | 0.652174 | false |
sgraf812/Celero | test/gtest-1.7.0/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform probes: they decide which flags must (not) appear in --help output.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Deliberately malformed spellings of --gtest_list_tests; gtest is expected
# to print the help message when it encounters them.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
# Probe the binary under test: death-test flags only show up in --help when
# the build actually supports death tests.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex (flags in this exact order).
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  # None means "no extra argument", not an empty-string argument.
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when a help flag is specified.

    The right message must be printed and the tests must be skipped
    when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)
    # --gtest_stream_result_to is only implemented (and so only documented)
    # on Linux.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
    # The death-test style flag only appears when the build supports death
    # tests and the platform is not Windows.
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # The test binary intentionally fails, so a non-zero exit proves the
    # tests actually ran instead of help being printed.
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
  gtest_test_utils.Main()  # shared gtest Python test-runner entry point
| apache-2.0 | 3,875,183,899,412,054,500 | 33.046512 | 75 | 0.702527 | false |
openstack/keystone | keystone/common/sql/migrate_repo/versions/108_add_failed_auth_columns.py | 5 | 1065 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
    """Add failed-auth tracking columns to the ``local_user`` table.

    Adds nullable ``failed_auth_count`` (int) and ``failed_auth_at``
    (datetime) columns via sqlalchemy-migrate's ``create_column``.
    """
    meta = sql.MetaData()
    meta.bind = migrate_engine
    local_user = sql.Table('local_user', meta, autoload=True)
    # Both columns are nullable: existing rows get NULL until a failure
    # is recorded.
    new_columns = (
        sql.Column('failed_auth_count', sql.Integer, nullable=True),
        sql.Column('failed_auth_at', sql.DateTime(), nullable=True),
    )
    for column in new_columns:
        local_user.create_column(column)
| apache-2.0 | -7,074,200,702,754,783,000 | 39.961538 | 75 | 0.693897 | false |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-stats-probplot-1.py | 1 | 1101 | from scipy import stats
import matplotlib.pyplot as plt
nsample = 100
np.random.seed(7654321)
# A t distribution with small degrees of freedom:
ax1 = plt.subplot(221)
x = stats.t.rvs(3, size=nsample)
res = stats.probplot(x, plot=plt)
# A t distribution with larger degrees of freedom:
ax2 = plt.subplot(222)
x = stats.t.rvs(25, size=nsample)
res = stats.probplot(x, plot=plt)
# A mixture of two normal distributions with broadcasting:
ax3 = plt.subplot(223)
x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
size=(nsample/2.,2)).ravel()
res = stats.probplot(x, plot=plt)
# A standard normal distribution:
ax4 = plt.subplot(224)
x = stats.norm.rvs(loc=0, scale=1, size=nsample)
res = stats.probplot(x, plot=plt)
# Produce a new figure with a loggamma distribution, using the ``dist`` and
# ``sparams`` keywords:
fig = plt.figure()
ax = fig.add_subplot(111)
x = stats.loggamma.rvs(c=2.5, size=500)
stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
# Show the results with Matplotlib:
plt.show()
| gpl-2.0 | -1,036,773,950,203,792,900 | 25.214286 | 75 | 0.704814 | false |
joebowen/movement_validation_cloud | djangodev/lib/python2.7/site-packages/boto/beanstalk/__init__.py | 145 | 1680 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
    """
    Get all available regions for the AWS Elastic Beanstalk service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # Imported lazily to avoid a circular import at module load time.
    import boto.beanstalk.layer1
    layer1_cls = boto.beanstalk.layer1.Layer1
    return get_regions('elasticbeanstalk', connection_cls=layer1_cls)
def connect_to_region(region_name, **kw_params):
    """Connect to the Elastic Beanstalk region named *region_name*.

    Returns a connection object, or None when the name is unknown.
    """
    match = next((r for r in regions() if r.name == region_name), None)
    if match is None:
        return None
    return match.connect(**kw_params)
danieluct/ntv2generator | ntv2generator/ntv2writer.py | 1 | 15705 | """
This file is part of ntv2generator.
ntv2generator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ntv2generator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ntv2generator. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import os
import struct
def _format_8bit_str(input_string):
return "{0:<8}".format(input_string[:8])
def _format_ntv2_record(name, value, type_='f', binary_format=True):
if name == "RECORD":
if binary_format:
return struct.pack("<4f",*value)
else:
return " ".join(["{0:6f}".format(x) for x in value]) + "\n"
else:
if type_ == "s":
if binary_format:
return struct.pack("<8s8s",
_format_8bit_str(name),
_format_8bit_str(value))
else:
return (_format_8bit_str(name) + " " +
_format_8bit_str(value) + "\n")
elif type_ == "i":
if binary_format:
return struct.pack("<8si4x",
_format_8bit_str(name),
value)
else:
return _format_8bit_str(name) + " " + str(int(value)) + "\n"
elif type_ == "f":
if binary_format:
return struct.pack("<8sd",
_format_8bit_str(name),
value)
else:
return (_format_8bit_str(name) + " " +
"{0:4f}".format(value) + "\n")
else:
raise Exception("Unknown record format!")
class CRSDef:
    """A coordinate reference system: name plus ellipsoid semi-axes (metres)."""

    def __init__(self, name, major_axis, minor_axis):
        # Stored verbatim; written into the NTv2 SYSTEM_F/T and
        # MAJOR/MINOR header records.
        self.name, self.major_axis, self.minor_axis = (
            name, major_axis, minor_axis)
ETRS89_CRS = CRSDef("ETRS89", 6378137.000, 6356752.314)
class BoundingBox:
    """A rectangular geographic extent (north/south/east/west limits)."""

    def __init__(self, north, south, west, east):
        # Note the constructor order (N, S, W, E) differs from the
        # attribute declaration order below; keep call sites in mind.
        self.north = north
        self.south = south
        self.east = east
        self.west = west
class NTv2File:
    """Builder for an NTv2 grid-shift file (overview header + sub-grids).

    Usage: call set_ref_systems(), register sub-grids via add_subfile()
    or create_subfile(), then write_to_file() to emit the binary ('b')
    or ASCII ('a') NTv2 container.

    Fixes over the previous version: create_subfile() referenced an
    undefined ``overwrite`` name and used ``subFile`` before assignment
    (both NameErrors); add_subfile() formatted the wrong name into its
    "parent not defined" message; write_to_file() leaked the output file
    handle when a write step raised.
    """

    def __init__(self, coord_unit="SECONDS"):
        """coord_unit: NTv2 GS_TYPE value -- "SECONDS", "MINUTES" or
        "DEGREES". Raises Exception for anything else."""
        self.has_overview = False   # True once set_ref_systems() has run
        # NOTE(review): added_sub_files is never updated anywhere in this
        # module; retained for interface compatibility.
        self.added_sub_files = 0
        self.subfiles_dict = {}     # sub-grid name -> NTv2SubFile
        if coord_unit not in ["SECONDS", "MINUTES", "DEGREES"]:
            raise Exception("Unknown unit for coordinates!")
        else:
            self.gridshift_data_type = coord_unit

    def set_ref_systems(self, crs_from, crs_to, overwrite=False):
        """Define source/target CRS (objects exposing name, major_axis,
        minor_axis). Raises unless overwrite=True when already set."""
        if self.has_overview and not overwrite:
            raise Exception("Header was previously set!")
        self.crs_from = crs_from
        self.crs_to = crs_to
        self.has_overview = True

    def add_subfile(self, subFile, overwrite=False):
        """Register an externally created NTv2SubFile.

        The sub-file's parent (when not "NONE") must already be registered.
        """
        if subFile.name in self.subfiles_dict.keys() and not overwrite:
            raise Exception(
                "Subfile with name {0} already exists!".format(subFile.name)
            )
        if (subFile.parent != "NONE"
                and subFile.parent not in self.subfiles_dict.keys()):
            # Fixed: previously formatted subFile.name into this message.
            raise Exception(
                "Parent with name {0} was not defined!".format(subFile.parent)
            )
        self.subfiles_dict[subFile.name] = subFile

    def create_subfile(self, name, parent='NONE', overwrite=False):
        """Create, register and return a new NTv2SubFile.

        overwrite: allow replacing an existing sub-file of the same name
        (new parameter; previous code referenced it without defining it).
        """
        if name in self.subfiles_dict.keys() and not overwrite:
            raise Exception(
                "Subfile with name {0} already exists!".format(name)
            )
        if parent != "NONE" and parent not in self.subfiles_dict.keys():
            raise Exception(
                "Parent with name {0} was not defined!".format(parent)
            )
        subFile = NTv2SubFile(name, parent)
        self.subfiles_dict[name] = subFile
        return subFile

    def write_to_file(self, path, name, f_format='b',
                      overwrite=False):
        """Write the complete NTv2 container to os.path.join(path, name).

        f_format: 'a'/'A' for ASCII, 'b'/'B' for binary.
        Raises when the file exists (unless overwrite), when the header or
        sub-files are missing, or when the format letter is unknown.
        """
        self.file_name = os.path.join(path, name)
        if os.path.exists(self.file_name) and not overwrite:
            raise Exception("File already exists!")
        if f_format == 'a' or f_format == 'A':
            binary_format = False
        elif f_format == 'b' or f_format == 'B':
            binary_format = True
        else:
            raise Exception("Unknown format!")
        if not self.has_overview:
            raise Exception("Header info was not set!")
        if not self.subfiles_dict.keys():
            raise Exception("No subfiles have been defined!")
        if binary_format:
            output_file = open(self.file_name, "wb")
        else:
            output_file = open(self.file_name, "w")
        try:
            # Overview header, then every sub-grid, then the END marker.
            self._write_header(output_file, binary_format)
            for key in self.subfiles_dict.keys():
                self.subfiles_dict[key].write_to_file(output_file,
                                                      binary_format)
            self._write_eof(output_file, binary_format)
        finally:
            # Previously the handle leaked if any write raised.
            output_file.close()

    def _write_eof(self, output_file, binary_format=True):
        """Write the NTv2 end-of-file marker."""
        if binary_format:
            output_file.write(struct.pack("<8s8x", "END "))
        else:
            output_file.write("END")

    def _write_header(self, output_file, binary_format=True):
        """Write the 11-record NTv2 overview header."""
        if not self.has_overview:
            raise Exception("No overview file defined!")
        output_file.write(_format_ntv2_record("NUM_OREC", 11,
                                              'i', binary_format))
        output_file.write(_format_ntv2_record("NUM_SREC", 11,
                                              'i', binary_format))
        output_file.write(_format_ntv2_record("NUM_FILE",
                                              len(self.subfiles_dict.keys()),
                                              'i', binary_format))
        output_file.write(_format_ntv2_record("GS_TYPE",
                                              self.gridshift_data_type,
                                              's', binary_format))
        output_file.write(_format_ntv2_record("VERSION", "NTv2.0",
                                              's', binary_format))
        output_file.write(_format_ntv2_record("SYSTEM_F", self.crs_from.name,
                                              's', binary_format))
        output_file.write(_format_ntv2_record("SYSTEM_T", self.crs_to.name,
                                              's', binary_format))
        output_file.write(_format_ntv2_record("MAJOR_F ",
                                              self.crs_from.major_axis,
                                              'f', binary_format))
        output_file.write(_format_ntv2_record("MINOR_F ",
                                              self.crs_from.minor_axis,
                                              'f', binary_format))
        output_file.write(_format_ntv2_record("MAJOR_T ",
                                              self.crs_to.major_axis,
                                              'f', binary_format))
        output_file.write(_format_ntv2_record("MINOR_T ",
                                              self.crs_to.minor_axis,
                                              'f', binary_format))
        if not binary_format:
            output_file.write("\n")
class NTv2SubFile:
    """One NTv2 sub-grid: extent, increments, dates and grid-shift rows.

    Fixes over the previous version: set_gridshifts(), clear_gridshifts(),
    add_gridshift() and _write_record() were all defined without ``self``
    and therefore uncallable as methods (write_to_file() crashed with a
    TypeError when emitting records); set_gridshifts() also referenced a
    non-existent ``self.gc_count`` in its error message.
    """

    def __init__(self, name, parent='NONE'):
        self.name = name
        self.parent = parent      # parent sub-grid name, or "NONE"
        self.bbox_set = False
        self.inc_set = False
        self.dates_set = False
        self.gs_count = 0         # expected number of grid-shift rows
        self.gs_list = []         # [lat_shift, lon_shift, lat_acc, lon_acc]

    def set_limits(self, bounding_box, overwrite=False):
        """Set the sub-grid extent (an object with north/south/east/west)."""
        if self.bbox_set and not overwrite:
            raise Exception("Subfile limits have already been set!")
        self.bounding_box = bounding_box
        self.bbox_set = True

    def set_coord_increment(self, lat_increment,
                            long_increment, overwrite=False):
        """Set grid spacing and derive the expected grid-shift row count."""
        if not self.bbox_set:
            raise Exception(
                "Subfile limits have to be set before setting increments!"
            )
        if self.inc_set and not overwrite:
            raise Exception(
                "Subfile coordinate increments have already been set!"
            )
        self.lat_increase = lat_increment
        self.long_increase = long_increment
        self.inc_set = True
        # rows = (lat steps + 1) * (lon steps + 1), truncated to ints.
        self.gs_count = int(
            (abs(self.bounding_box.north - self.bounding_box.south) /
             self.lat_increase)
            + 1
        ) * int(
            (abs(self.bounding_box.east - self.bounding_box.west) /
             self.long_increase)
            + 1)

    def set_dates(self, create_date, update_date=None, overwrite=False):
        """Record CREATED/UPDATED dates (datetime-like; formatted DDMMYYYY).

        When update_date is omitted, it defaults to create_date.
        """
        if self.dates_set and not overwrite:
            raise Exception("Subfile date have already been set!")
        self.date_created = create_date
        if update_date is None:
            self.date_updated = self.date_created
        else:
            self.date_updated = update_date
        self.dates_set = True

    def set_gridshifts(self, grid_shift_array, overwrite=False):
        """Replace the whole grid-shift table at once.

        grid_shift_array must hold at least gs_count rows of
        [lat_shift, lon_shift, lat_accuracy, lon_accuracy].
        """
        if not self.bbox_set or not self.inc_set:
            raise Exception(
                "Subfile limits and increments have to be set before "
                "setting grid shifts!"
            )
        if self.gs_list and not overwrite:
            raise Exception("Grid shift have already been set!")
        if len(grid_shift_array) < self.gs_count:
            # Fixed: previously read the non-existent self.gc_count here.
            raise Exception(
                "Input array does not contain enough grid shifts. "
                "Required entries: {0}.".format(self.gs_count)
            )
        self.gs_list = grid_shift_array

    def clear_gridshifts(self):
        """Drop any stored grid-shift rows."""
        self.gs_list = []

    def add_gridshift(self, latitude_shift, longitude_shift,
                      latitude_accuracy, longitude_accuracy):
        """Append one grid-shift row; raises once gs_count rows exist."""
        if len(self.gs_list) + 1 > self.gs_count:
            raise Exception("All grid shifts have already been added!")
        else:
            self.gs_list.append([
                latitude_shift, longitude_shift,
                latitude_accuracy, longitude_accuracy
            ])

    def write_to_file(self, output_file, binary_format=True):
        """Write this sub-grid (header then records) into an open file."""
        if not self.bbox_set:
            raise Exception(
                "Subfile limits have to be set before saving subfile!"
            )
        if not self.inc_set:
            raise Exception(
                "Subfile increments have to be set before saving subfile!"
            )
        if not self.dates_set:
            raise Exception(
                "Subfile dates have to be set before saving subfile!"
            )
        if len(self.gs_list) < self.gs_count:
            raise Exception(
                "All grid shift points have to be added before saving "
                "subfile " + self.name + "! "
                "Current entries: {0}. Expected: {1}".format(len(self.gs_list),
                                                             self.gs_count))
        self._write_header(output_file, binary_format)
        for grid_shift in self.gs_list:
            self._write_record(output_file,
                               grid_shift[0], grid_shift[1],
                               grid_shift[2], grid_shift[3],
                               binary_format)
        if not binary_format:
            output_file.write("\n")

    def _write_header(self, output_file, binary_format=True):
        """Write the 11-record NTv2 sub-grid header."""
        if not self.bbox_set:
            raise Exception(
                "Subfile limits have not been set!"
            )
        if not self.inc_set:
            raise Exception(
                "Subfile coordinate increments have not been set!"
            )
        if not self.dates_set:
            raise Exception(
                "Subfile dates have not been set!"
            )
        if self.gs_count == 0:
            raise Exception(
                "There is something wrong with the limits and/or increments!"
            )
        output_file.write(_format_ntv2_record("SUB_NAME", self.name,
                                              "s", binary_format))
        output_file.write(_format_ntv2_record("PARENT", self.parent,
                                              "s", binary_format))
        output_file.write(_format_ntv2_record("CREATED ",
                                              self.date_created.strftime("%d%m%Y"),
                                              "s", binary_format))
        output_file.write(_format_ntv2_record("UPDATED ",
                                              self.date_updated.strftime("%d%m%Y"),
                                              "s", binary_format))
        output_file.write(_format_ntv2_record("S_LAT", self.bounding_box.south,
                                              "f", binary_format))
        output_file.write(_format_ntv2_record("N_LAT", self.bounding_box.north,
                                              "f", binary_format))
        # NOTE(review): longitudes are negated on output -- presumably the
        # NTv2 positive-west convention; confirm against the spec.
        output_file.write(_format_ntv2_record("E_LONG",
                                              self.bounding_box.east * -1,
                                              "f", binary_format))
        output_file.write(_format_ntv2_record("W_LONG",
                                              self.bounding_box.west * -1,
                                              "f", binary_format))
        output_file.write(_format_ntv2_record("LAT_INC", self.lat_increase,
                                              "f", binary_format))
        output_file.write(_format_ntv2_record("LONG_INC", self.long_increase,
                                              "f", binary_format))
        output_file.write(_format_ntv2_record("GS_COUNT", self.gs_count,
                                              "i", binary_format))
        if not binary_format:
            output_file.write("\n")

    def _write_record(self, output_file,
                      latitude_shift, longitude_shift,
                      latitude_accuracy, longitude_accuracy,
                      binary_format=True):
        """Write one RECORD row (shifts + accuracies)."""
        output_file.write(_format_ntv2_record("RECORD",
                                              [
                                                  latitude_shift,
                                                  longitude_shift,
                                                  latitude_accuracy,
                                                  longitude_accuracy
                                              ],
                                              "f", binary_format))
def _test():
    """Ad-hoc smoke test: build a Stereo70 -> ETRS89 container with one
    sub-grid and write it as an ASCII NTv2 file.

    NOTE(review): output goes to a hard-coded Windows path, and no grid
    shifts are ever added, so write_to_file() raises its "all grid shift
    points have to be added" error on current code; retained as-is.
    """
    f_test = NTv2File()
    # Stereo70 uses the Krasovsky 1940 ellipsoid semi-axes.
    crs_from = CRSDef("Stereo70", 6378245.0, 6356863.019)
    crs_to = ETRS89_CRS
    f_test.set_ref_systems(crs_from, crs_to)
    subFile = f_test.create_subfile("ANCPI+TNS")
    bounding_box = BoundingBox(174422.502, 156677.502, 72415.3775, 107465.3775)
    lat_inc = 35.0
    long_inc = 50.0
    subFile.set_limits(bounding_box)
    subFile.set_coord_increment(lat_inc, long_inc)
    subFile.set_dates(datetime.datetime.now())
    f_test.write_to_file(r"D:\Data\Data\ntv2",
                         "test2.txt", f_format='a',
                         overwrite=True)
| gpl-2.0 | -3,519,281,597,956,345,000 | 40.768617 | 94 | 0.492709 | false |
sunlightlabs/django-meetup | meetup/models.py | 1 | 3123 | from django.conf import settings
from django.db import models
from meetup.api import MeetupClient
import datetime
STATUSES = [(s, s) for s in ('past','pending','upcoming')]
API_KEY = getattr(settings, 'MEETUP_KEY', None)
class Account(models.Model):
    """A Meetup.com account whose events are mirrored locally."""
    # Meetup API key (used by the disabled sync code in Event.save).
    key = models.CharField(max_length=128)
    description = models.CharField(max_length=128)
    slug = models.SlugField()
    # NOTE(review): container_id semantics are not evident from this file --
    # presumably a Meetup container/group identifier; confirm in sync code.
    container_id = models.CharField(max_length=16, blank=True)
    meetup_url = models.URLField(verify_exists=False, blank=True)
    # NOTE(review): presumably gates synchronization runs; not read here.
    sync = models.BooleanField(default=True)
    def __unicode__(self):
        return self.slug
    def past_events(self):
        # Events whose Meetup status is 'past'.
        return self.events.filter(status='past')
    def upcoming_events(self):
        # Everything not yet past ('pending' and 'upcoming').
        return self.events.exclude(status='past')
class EventManager(models.Manager):
    """Manager adding past/upcoming convenience querysets for Event."""
    def past(self):
        return Event.objects.filter(status='past')
    def upcoming(self):
        # Everything not yet past ('pending' and 'upcoming').
        return Event.objects.exclude(status='past')
class Event(models.Model):
    """A Meetup.com event mirrored from the API."""
    # Default manager with past()/upcoming() helpers.
    objects = EventManager()
    account = models.ForeignKey(Account, related_name="events")
    # Meetup.com fields
    # The Meetup event id doubles as this model's primary key.
    id = models.CharField(max_length=255, primary_key=True)
    meetup_url = models.URLField(verify_exists=False)
    title = models.CharField(max_length=255, blank=True)
    description = models.TextField(blank=True)
    start_time = models.DateTimeField(blank=True, null=True)
    location = models.CharField(max_length=255, blank=True)
    address = models.CharField(max_length=128, blank=True)
    city = models.CharField(max_length=64, blank=True)
    state = models.CharField(max_length=64, blank=True)
    zipcode = models.CharField(max_length=10, blank=True)
    # Coordinates stored as strings, as delivered by the API.
    latitude = models.CharField(max_length=16, blank=True)
    longitude = models.CharField(max_length=16, blank=True)
    url = models.URLField(verify_exists=False, max_length=255, blank=True)
    rsvp_count = models.IntegerField(default=0)
    timestamp = models.DateTimeField()
    status = models.CharField(max_length=16, choices=STATUSES)
    organizer_id = models.CharField(max_length=32, blank=True)
    organizer_name = models.CharField(max_length=128, blank=True)
    # user defined fields
    # none for now, add tags later
    class Meta:
        ordering = ('start_time',)
    def __unicode__(self):
        return self.pk
    def save(self, sync=True, **kwargs):
        # NOTE(review): `sync` is currently unused -- the Meetup API push
        # below is commented out; parameter kept for caller compatibility.
        super(Event, self).save(**kwargs)
        # if sync:
        #     api_client = MeetupClient(self.account.key)
        #     api_client.update_event(self.pk, udf_category=self.category)
    def city_state(self):
        """Return "City, State", whichever part is set, or ''."""
        if self.city:
            if self.state:
                return "%s, %s" % (self.city, self.state)
            else:
                return self.city
        elif self.state:
            return self.state
        else:
            return ''
    def short_description(self, length=64):
        """Return description truncated to *length* chars plus '...'.

        A single trailing space is trimmed before the ellipsis is added.
        """
        if len(self.description) > length:
            desc = self.description[:length]
            if desc.endswith(' '):
                desc = desc[:-1]
            return desc + '...'
        return self.description
Creworker/FreeCAD | src/Mod/Path/PathScripts/PathSelection.py | 15 | 8856 | # -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2015 Dan Falck <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
'''Path selection function select a face or faces, two edges, etc to get a dictionary with what was selected in order '''
import FreeCAD,FreeCADGui
import Part
from FreeCAD import Vector
def equals(p1, p2, precision=12):
    '''Return True if two vertexes have the same coordinates within
    *precision* decimal digits.

    p1, p2: Part vertexes (objects exposing X, Y, Z attributes).
    precision: number of decimal digits used when rounding each component
        of the difference vector (default 12, the previously hard-coded
        value, so existing callers behave identically).
    '''
    u = Vector(p1.X, p1.Y, p1.Z)
    v = Vector(p2.X, p2.Y, p2.Z)
    delta = u.sub(v)
    return (round(delta.x, precision) == 0 and
            round(delta.y, precision) == 0 and
            round(delta.z, precision) == 0)
def Sort2Edges(edgelist):
    '''Sort2Edges(edgelist) simple function to reorder the start and end pts of two edges
    based on their selection order. Returns the list, the start point,
    and their common point, => edgelist, vertex, vertex

    Returns None implicitly when fewer than two edges are given.
    '''
    if len(edgelist)>=2:
        vlist = []
        e0 = edgelist[0]
        e1=edgelist[1]
        # End vertexes of the first and second selected edge.
        a0 = e0.Vertexes[0]
        a1 = e0.Vertexes[1]
        b0 = e1.Vertexes[0]
        b1 = e1.Vertexes[1]
        # comparison routine to order two edges:
        # whichever of the four end pairings coincides determines the
        # orientation; (x, y) tuples are appended so the shared vertex
        # ends up in the middle of vlist.
        if equals(a1,b0):
            vlist.append((a0.Point.x,a0.Point.y))
            vlist.append((a1.Point.x,a1.Point.y))
            vlist.append((b1.Point.x,b1.Point.y))
        if equals(a0,b0):
            vlist.append((a1.Point.x,a1.Point.y))
            vlist.append((a0.Point.x,a0.Point.y))
            vlist.append((b1.Point.x,b1.Point.y))
        if equals(a0,b1):
            vlist.append((a1.Point.x,a1.Point.y))
            vlist.append((a0.Point.x,a0.Point.y))
            vlist.append((b0.Point.x,b0.Point.y))
        if equals(a1,b1):
            vlist.append((a0.Point.x,a0.Point.y))
            vlist.append((a1.Point.x,a1.Point.y))
            vlist.append((b0.Point.x,b0.Point.y))
        # Z is taken from the first edge's second vertex -- the edges are
        # assumed coplanar in Z here.
        edgestart = Vector(vlist[0][0],vlist[0][1],e0.Vertexes[1].Z)
        edgecommon = Vector(vlist[1][0],vlist[1][1],e0.Vertexes[1].Z)
        return vlist,edgestart,edgecommon
def segments(poly):
    ''' A sequence of (x,y) numeric coordinates pairs '''
    # Pair each point with its successor, wrapping the last back to the
    # first so the polygon is closed.
    rotated = poly[1:] + [poly[0]]
    return zip(poly, rotated)
def check_clockwise(poly):
    '''
    check_clockwise(poly) a function for returning a boolean if the selected wire is clockwise or counter clockwise
    based on point order. poly = [(x1,y1),(x2,y2),(x3,y3)]
    '''
    # Shoelace formula over the closed point sequence: a negative signed
    # sum means clockwise winding in this convention.
    signed_sum = 0
    for (x0, y0), (x1, y1) in zip(poly, poly[1:] + [poly[0]]):
        signed_sum += x0 * y1 - x1 * y0
    return signed_sum < 0
def multiSelect():
    '''
    multiSelect() A function for selecting elements of an object for CNC path operations.
    Select just a face, an edge,or two edges to indicate direction, a vertex on the object, a point not on the object,
    or some combination. Returns a dictionary.
    '''
    sel = FreeCADGui.Selection.getSelectionEx()
    numobjs = len([selobj.Object for selobj in sel])
    if numobjs == 0:
        FreeCAD.Console.PrintError('Please select some objects and try again.\n')
        return
    # At least one face, edge or vertex must be among the sub-objects.
    goodselect = False
    for s in sel:
        for i in s.SubObjects:
            if i.ShapeType == 'Face':
                goodselect = True
            if i.ShapeType == 'Edge':
                goodselect = True
            if i.ShapeType == 'Vertex':
                goodselect = True
    if not goodselect:
        FreeCAD.Console.PrintError('Please select a face and/or edges along with points (optional) and try again.\n')
        return
    # Result dictionary; every key is pre-seeded so callers can test for
    # None instead of catching KeyError.
    selItems = {}
    selItems['objname']=None #the parent object name - a 3D solid
    selItems['pointlist']=None #start and end points
    selItems['pointnames']=None #names of points for document object
    selItems['facenames']=None # the selected face name
    selItems['facelist']=None #list of faces selected
    selItems['edgelist']=None #some edges that could be selected along with points and faces
    selItems['edgenames']=None
    selItems['pathwire']=None #the whole wire around edges of the face
    selItems['clockwise']=None
    selItems['circles']=None
    facenames = []
    edgelist =[]
    edgenames=[]
    ptlist=[]
    ptnames=[]
    circlelist=[]
    face = False
    edges = False
    points = False
    wireobj = False
    circles = False
    facelist= []
    # First pass: classify everything that was selected.
    for s in sel:
        if s.Object.Shape.ShapeType in ['Solid','Compound','Wire','Vertex']:
            if not (s.Object.Shape.ShapeType =='Vertex'):
                objname = s.ObjectName
                selItems['objname'] =objname
            if s.Object.Shape.ShapeType == 'Wire':
                wireobj = True
            if s.Object.Shape.ShapeType == 'Vertex':
                ptnames.append(s.ObjectName)
                # ptlist.append(s.Object)
                points = True
        for sub in s.SubObjects:
            if sub.ShapeType =='Face':
                facelist.append(sub)
                face = True
            if sub.ShapeType =='Edge':
                edge = sub
                edgelist.append(edge)
                edges = True
                # Circular edges are collected separately for arc handling.
                if isinstance(sub.Curve,Part.Circle):
                    circlelist.append(edge)
                    circles = True
            if sub.ShapeType =='Vertex':
                ptlist.append(sub)
                points = True
        for sub in s.SubElementNames:
            if 'Face' in sub:
                facename = sub
                facenames.append(facename)
            if 'Edge' in sub:
                edgenames.append(sub)
    # now indicate which wire is going to be processed, based on which edges are selected
    if facelist:
        selItems['facelist']=facelist
    if edges:
        if face:
            # Selected edges pick out which wire of the selected face to use.
            selItems['edgelist'] =edgelist
            for fw in facelist[0].Wires:
                for e in fw.Edges:
                    if e.isSame(edge):
                        pathwire = fw
                        selItems['pathwire'] =pathwire
        elif wireobj:
            selItems['pathwire'] =s.Object.Shape
            selItems['edgelist'] =edgelist
        else:
            # No face/wire selected: search the solid's wires for one that
            # contains the selected edge at the same Z level.
            for w in s.Object.Shape.Wires:
                for e in w.Edges:
                    if e.BoundBox.ZMax == e.BoundBox.ZMin: #if they are on same plane in Z as sel edge
                        if e.isSame(edge):
                            pathwire = w
                            selItems['pathwire'] =pathwire
                            selItems['edgelist'] =edgelist
    if not edges:
        if face:
            # Face only: default to its outer boundary wire.
            selItems['pathwire'] =facelist[0].OuterWire
    if edges and (len(edgelist)>=2):
        # Two edges define a direction; record ordered points and winding.
        vlist,edgestart,edgecommon=Sort2Edges(edgelist)
        edgepts ={}
        edgepts['vlist'] = vlist
        edgepts['edgestart']=edgestart # start point of edges selected
        edgepts['edgecommon']=edgecommon # point where two edges join- will be last point in in first gcode line
        selItems['edgepts']=edgepts
        if check_clockwise(vlist):
            selItems['clockwise']=True
        elif check_clockwise(vlist) == False:
            selItems['clockwise']=False
    if points:
        selItems['pointlist'] = ptlist
        selItems['pointnames'] = ptnames
    if edges:
        selItems['edgenames']=edgenames
    if face:
        selItems['facenames'] = facenames
    if circles:
        selItems['circles'] = circlelist
    return selItems
| lgpl-2.1 | -3,113,815,746,999,385,600 | 38.185841 | 121 | 0.541215 | false |
Huskerboy/startbootstrap-freelancer | freelancer_env/Lib/site-packages/pip/utils/__init__.py | 323 | 27187 | from __future__ import absolute_import
from collections import deque
import contextlib
import errno
import io
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, expanduser, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
# Public API of this utils module.
__all__ = ['rmtree', 'display_path', 'backup_dir',
           'ask', 'splitext',
           'format_size', 'is_installable_dir',
           'is_svn_page', 'file_contents',
           'split_leading_dir', 'has_leading_dir',
           'normalize_path',
           'renames', 'get_terminal_size', 'get_prog',
           'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
           'captured_stdout', 'ensure_dir',
           'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
           'get_installed_version']
logger = std_logging.getLogger(__name__)
# Archive filename extensions grouped by compression family.
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
    ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
# Extensions actually unpackable by this interpreter; bz2/lzma are optional
# in some Python builds, so probe for the modules at import time.
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
    import bz2  # noqa
    SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
    logger.debug('bz2 module is not available')
try:
    # Only for Python 3.3+
    import lzma  # noqa
    SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
    logger.debug('lzma module is not available')
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
    """Import and return the named package/module.

    Raises ExceptionType(*args, **kwargs) when the import fails.
    """
    try:
        imported = __import__(pkg_or_module_string)
    except ImportError:
        raise ExceptionType(*args, **kwargs)
    return imported
def ensure_dir(path):
    """os.path.makedirs without EEXIST."""
    try:
        os.makedirs(path)
    except OSError as e:
        # Already existing is fine; anything else propagates.
        if e.errno == errno.EEXIST:
            return
        raise
def get_prog():
    """Return the program name to display: 'pip', or '<python> -m pip'
    when pip was invoked as a module or via -c."""
    try:
        basename = os.path.basename(sys.argv[0])
    except (AttributeError, TypeError, IndexError):
        return 'pip'
    if basename in ('__main__.py', '-c'):
        return "%s -m pip" % sys.executable
    return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
    """Remove a directory tree, retrying on failure.

    The retry loop papers over transient removal failures (presumably
    short-lived Windows file locks); rmtree_errorhandler additionally
    clears the read-only bit on stubborn files.
    """
    shutil.rmtree(dir, ignore_errors=ignore_errors,
                  onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
    """onerror hook for shutil.rmtree.

    On Windows, files under .svn are read-only, which makes rmtree() raise.
    When the failing path is read-only, make it writable and retry the
    failed operation; otherwise re-raise the original exception.
    """
    if not (os.stat(path).st_mode & stat.S_IREAD):
        # Not a read-only problem we know how to fix.
        raise
    os.chmod(path, stat.S_IWRITE)
    func(path)
def display_path(path):
    """Gives the display value for a given path, making it relative to cwd
    if possible."""
    path = os.path.normcase(os.path.abspath(path))
    if sys.version_info[0] == 2:
        # Python 2: round-trip through the filesystem/default encodings.
        path = path.decode(sys.getfilesystemencoding(), 'replace')
        path = path.encode(sys.getdefaultencoding(), 'replace')
    cwd_prefix = os.getcwd() + os.path.sep
    if path.startswith(cwd_prefix):
        return '.' + path[len(os.getcwd()):]
    return path
def backup_dir(dir, ext='.bak'):
    """Figure out the name of a directory to back up the given dir to
    (adding .bak, .bak2, etc)"""
    n = 1
    candidate = dir + ext
    # Keep bumping the numeric suffix until a free name is found.
    while os.path.exists(candidate):
        n += 1
        candidate = dir + ext + str(n)
    return candidate
def ask_path_exists(message, options):
    """Like ask(), but a valid choice pre-seeded in $PIP_EXISTS_ACTION
    wins without prompting."""
    preset = os.environ.get('PIP_EXISTS_ACTION', '')
    for action in preset.split():
        if action in options:
            return action
    return ask(message, options)
def ask(message, options):
    """Prompt interactively until the reply is one of *options*.

    Raises when $PIP_NO_INPUT is set, since no interaction is possible.
    """
    while True:
        if os.environ.get('PIP_NO_INPUT'):
            raise Exception(
                'No input was expected ($PIP_NO_INPUT set); question: %s' %
                message
            )
        response = input(message).strip().lower()
        if response in options:
            return response
        print(
            'Your response (%r) was not one of the expected responses: '
            '%s' % (response, ', '.join(options))
        )
def format_size(bytes):
    """Human-readable size string for a byte count (decimal units)."""
    if bytes > 1000 * 1000:
        return '%.1fMB' % (bytes / 1000.0 / 1000)
    if bytes > 10 * 1000:
        return '%ikB' % (bytes / 1000)
    if bytes > 1000:
        return '%.1fkB' % (bytes / 1000.0)
    return '%ibytes' % bytes
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    return (os.path.isdir(path) and
            os.path.isfile(os.path.join(path, 'setup.py')))
def is_svn_page(html):
    """
    Returns true (truthy) if the page appears to be the index page of an
    svn repository: a '<title>... Revision N:' header plus the
    'Powered by Subversion' footer.
    """
    revision = re.search(r'<title>[^<]*Revision \d+:', html)
    powered = re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
    return revision and powered
def file_contents(filename):
    """Read *filename* in binary mode and return its UTF-8-decoded text."""
    with open(filename, 'rb') as fp:
        raw = fp.read()
    return raw.decode('utf-8')
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
    """Yield pieces of data from a file-like object until EOF."""
    chunk = file.read(size)
    while chunk:
        yield chunk
        chunk = file.read(size)
def split_leading_dir(path):
    """Split off the first path component.

    Returns a ``(leading, rest)`` tuple; ``rest`` is '' when there is no
    separator.  Both '/' and '\\' are recognised, splitting on whichever
    separator occurs first.

    Fix: the original returned a *list* from ``str.split`` in two of the
    three branches and a *tuple* in the third — now a tuple is returned
    consistently (unpacking callers are unaffected; equality comparisons
    now behave uniformly).
    """
    path = path.lstrip('/').lstrip('\\')
    if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
                        '\\' not in path):
        return tuple(path.split('/', 1))
    elif '\\' in path:
        return tuple(path.split('\\', 1))
    else:
        return path, ''
def has_leading_dir(paths):
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    common = None
    for path in paths:
        prefix, _ = split_leading_dir(path)
        if not prefix:
            # A path with no leading component means there is no single
            # top-level folder.
            return False
        if common is None:
            common = prefix
        elif prefix != common:
            return False
    return True
def normalize_path(path, resolve_symlinks=True):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    Symlinks are resolved unless *resolve_symlinks* is False.
    """
    expanded = expanduser(path)
    if resolve_symlinks:
        expanded = os.path.realpath(expanded)
    else:
        expanded = os.path.abspath(expanded)
    return os.path.normcase(expanded)
def splitext(path):
    """Like os.path.splitext, but take off .tar too"""
    base, ext = posixpath.splitext(path)
    if base.lower().endswith('.tar'):
        # Fold '.tar' back into the extension: foo.tar.gz -> ('.tar.gz').
        return base[:-4], base[-4:] + ext
    return base, ext
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Create the destination's parent directories first (mirrors the
    # stdlib os.renames implementation).
    parent, leaf = os.path.split(new)
    if parent and leaf and not os.path.exists(parent):
        os.makedirs(parent)
    # shutil.move copies across filesystems where os.rename would fail.
    shutil.move(old, new)
    # Prune now-empty directories left behind at the source.
    parent, leaf = os.path.split(old)
    if parent and leaf:
        try:
            os.removedirs(parent)
        except OSError:
            pass
def is_local(path):
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.
    If we're not in a virtualenv, all paths are considered "local."
    """
    # running_under_virtualenv() is defined elsewhere in this module.
    if not running_under_virtualenv():
        return True
    # Compare canonicalised paths so symlinks / case differences don't
    # produce false negatives.
    return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).
    Always True if we're not in a virtualenv.
    """
    # dist_location() is egg-link aware, so develop installs are judged
    # by where their .egg-link file lives.
    return is_local(dist_location(dist))
def dist_in_usersite(dist):
    """
    Return True if given Distribution is installed in user site.
    """
    norm_path = normalize_path(dist_location(dist))
    # `user_site` is a module-level constant (not visible in this chunk).
    return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
    """
    Return True if given Distribution is installed in
    distutils.sysconfig.get_python_lib().
    """
    # `site_packages` is a module-level constant (not visible here).
    return normalize_path(
        dist_location(dist)
    ).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
    """Is distribution an editable install?

    True when a ``<project_name>.egg-link`` file exists on any sys.path
    entry.
    """
    egg_link_name = dist.project_name + '.egg-link'
    return any(
        os.path.isfile(os.path.join(path_item, egg_link_name))
        for path_item in sys.path
    )
def get_installed_distributions(local_only=True,
                                skip=stdlib_pkgs,
                                include_editables=True,
                                editables_only=False,
                                user_only=False):
    """
    Return a list of installed Distribution objects.
    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.
    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to stdlib_pkgs
    If ``include_editables`` is False, don't report editables.
    If ``editables_only`` is True , only report editables.
    If ``user_only`` is True , only report installations in the user
    site directory.
    """
    # Build one predicate per filter option, then apply them all in a
    # single pass over pkg_resources.working_set.
    if local_only:
        local_test = dist_is_local
    else:
        def local_test(d):
            return True
    if include_editables:
        def editable_test(d):
            return True
    else:
        def editable_test(d):
            return not dist_is_editable(d)
    if editables_only:
        def editables_only_test(d):
            return dist_is_editable(d)
    else:
        def editables_only_test(d):
            return True
    if user_only:
        user_test = dist_in_usersite
    else:
        def user_test(d):
            return True
    return [d for d in pkg_resources.working_set
            if local_test(d) and
            d.key not in skip and
            editable_test(d) and
            editables_only_test(d) and
            user_test(d)
            ]
def egg_link_path(dist):
    """
    Return the path for the .egg-link file if it exists, otherwise, None.
    There's 3 scenarios:
    1) not in a virtualenv
       try to find in site.USER_SITE, then site_packages
    2) in a no-global virtualenv
       try to find in site_packages
    3) in a yes-global virtualenv
       try to find in site_packages, then site.USER_SITE
       (don't look in global location)
    For #1 and #3, there could be odd cases, where there's an egg-link in 2
    locations.
    This method will just return the first one found.
    """
    # Search order encodes the scenarios from the docstring; user_site /
    # site_packages are module-level constants (not visible in this chunk).
    sites = []
    if running_under_virtualenv():
        if virtualenv_no_global():
            sites.append(site_packages)
        else:
            sites.append(site_packages)
            if user_site:
                sites.append(user_site)
    else:
        if user_site:
            sites.append(user_site)
        sites.append(site_packages)
    for site in sites:
        egglink = os.path.join(site, dist.project_name) + '.egg-link'
        if os.path.isfile(egglink):
            return egglink
    # Falls through with an implicit None when no egg-link was found.
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    # egg_link_path() returns a non-empty path or None, so `or` picks the
    # egg-link when present and dist.location otherwise.
    return egg_link_path(dist) or dist.location
def get_terminal_size():
    """Returns a tuple (x, y) representing the width(x) and the height(y)
    in characters of the terminal window.

    Falls back to the LINES/COLUMNS environment variables and finally to
    80x25 when no terminal can be queried.

    Fix: the bare ``except:`` clauses also swallowed SystemExit and
    KeyboardInterrupt; they now catch only Exception, and the queried fd
    is closed in a ``finally`` block.
    """
    def ioctl_GWINSZ(fd):
        # Query the kernel for the window size of the tty on `fd`;
        # returns (rows, cols) or None when fd is not a usable tty.
        try:
            import fcntl
            import termios
            import struct
            cr = struct.unpack(
                'hh',
                fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
            )
        except Exception:
            return None
        if cr == (0, 0):
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # stdin/stdout/stderr are all redirected: try the controlling tty.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_GWINSZ(fd)
            finally:
                os.close(fd)
        except Exception:
            pass
    if not cr:
        # Last resort: environment variables, then the 80x25 default.
        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    return int(cr[1]), int(cr[0])
def current_umask():
    """Get the current umask which involves having to set it temporarily."""
    # os.umask() both sets and returns the previous mask; there is no
    # read-only query, so set 0 and immediately restore.
    previous = os.umask(0)
    os.umask(previous)
    return previous
def unzip_file(filename, location, flatten=True):
    """
    Unzip the file (with path `filename`) to the destination `location`. All
    files are written based on system defaults and umask (i.e. permissions are
    not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.

    Fix: replaced the manual try/finally open/close pairs with ``with``
    blocks so the archive and each member file are closed even on error.
    """
    ensure_dir(location)
    with open(filename, 'rb') as zipfp:
        zip = zipfile.ZipFile(zipfp, allowZip64=True)
        # When the whole archive lives in one top-level folder, optionally
        # strip that folder from every member name.
        leading = has_leading_dir(zip.namelist()) and flatten
        for info in zip.infolist():
            name = info.filename
            data = zip.read(name)
            fn = name
            if leading:
                fn = split_leading_dir(name)[1]
            fn = os.path.join(location, fn)
            dir = os.path.dirname(fn)
            if fn.endswith('/') or fn.endswith('\\'):
                # A directory entry: just make sure it exists.
                ensure_dir(fn)
            else:
                ensure_dir(dir)
                with open(fn, 'wb') as fp:
                    fp.write(data)
                mode = info.external_attr >> 16
                # if mode and regular file and any execute permissions for
                # user/group/world?
                if mode and stat.S_ISREG(mode) and mode & 0o111:
                    # make dest file have execute for user/group/world
                    # (chmod +x) no-op on windows per python docs
                    os.chmod(fn, (0o777 - current_umask() | 0o111))
def untar_file(filename, location):
    """
    Untar the file (with path `filename`) to the destination `location`.
    All files are written based on system defaults and umask (i.e. permissions
    are not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written. Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    ensure_dir(location)
    # Pick the decompression mode from the file extension; 'r:*' lets
    # tarfile auto-detect when the extension is unrecognised.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith(BZ2_EXTENSIONS):
        mode = 'r:bz2'
    elif filename.lower().endswith(XZ_EXTENSIONS):
        mode = 'r:xz'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warning(
            'Cannot determine compression type for file %s', filename,
        )
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            if leading:
                # Archive has a single top-level folder: strip it.
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                ensure_dir(path)
            elif member.issym():
                # NOTE: uses tarfile's private _extract_member to place the
                # symlink; failures here are tolerated as corrupt entries.
                try:
                    tar._extract_member(member, path)
                except Exception as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename, member.name, exc,
                    )
                    continue
                ensure_dir(os.path.dirname(path))
                with open(path, 'wb') as destfp:
                    shutil.copyfileobj(fp, destfp)
                fp.close()
                # Update the timestamp (useful for cython compiled files)
                tar.utime(member, path)
                # member have any execute permissions for user/group/world?
                if member.mode & 0o111:
                    # make dest file have execute for user/group/world
                    # no-op on windows per python docs
                    os.chmod(path, (0o777 - current_umask() | 0o111))
    finally:
        tar.close()
def unpack_file(filename, location, content_type, link):
    # Dispatch to the right extractor based on content type, extension,
    # and file sniffing (zipfile/tarfile magic-number checks).
    filename = os.path.realpath(filename)
    if (content_type == 'application/zip' or
            filename.lower().endswith(ZIP_EXTENSIONS) or
            zipfile.is_zipfile(filename)):
        # Wheels must keep their internal directory layout, hence no
        # flattening of a single leading directory for .whl files.
        unzip_file(
            filename,
            location,
            flatten=not filename.endswith('.whl')
        )
    elif (content_type == 'application/x-gzip' or
            tarfile.is_tarfile(filename) or
            filename.lower().endswith(
                TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html') and
            is_svn_page(file_contents(filename))):
        # We don't really care about this
        from pip.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        # FIXME: handle?
        # FIXME: magic signatures?
        logger.critical(
            'Cannot unpack file %s (downloaded from %s, content-type: %s); '
            'cannot detect archive format',
            filename, location, content_type,
        )
        raise InstallationError(
            'Cannot determine archive format of %s' % location
        )
def call_subprocess(cmd, show_stdout=True, cwd=None,
                    on_returncode='raise',
                    command_desc=None,
                    extra_environ=None, spinner=None):
    """Run *cmd* (a list of arguments), optionally capturing its output.

    ``on_returncode`` controls what a non-zero exit does: 'raise'
    (InstallationError), 'warn' (log), or 'ignore'.  Returns the captured
    output when ``show_stdout`` is False, otherwise None.
    """
    # This function's handling of subprocess output is confusing and I
    # previously broke it terribly, so as penance I will write a long comment
    # explaining things.
    #
    # The obvious thing that affects output is the show_stdout=
    # kwarg. show_stdout=True means, let the subprocess write directly to our
    # stdout. Even though it is nominally the default, it is almost never used
    # inside pip (and should not be used in new code without a very good
    # reason); as of 2016-02-22 it is only used in a few places inside the VCS
    # wrapper code. Ideally we should get rid of it entirely, because it
    # creates a lot of complexity here for a rarely used feature.
    #
    # Most places in pip set show_stdout=False. What this means is:
    # - We connect the child stdout to a pipe, which we read.
    # - By default, we hide the output but show a spinner -- unless the
    #   subprocess exits with an error, in which case we show the output.
    # - If the --verbose option was passed (= loglevel is DEBUG), then we show
    #   the output unconditionally. (But in this case we don't want to show
    #   the output a second time if it turns out that there was an error.)
    #
    # stderr is always merged with stdout (even if show_stdout=True).
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    if command_desc is None:
        # Build a shell-like description, quoting args with spaces/quotes.
        cmd_parts = []
        for part in cmd:
            if ' ' in part or '\n' in part or '"' in part or "'" in part:
                part = '"%s"' % part.replace('"', '\\"')
            cmd_parts.append(part)
        command_desc = ' '.join(cmd_parts)
    logger.debug("Running command %s", command_desc)
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception as exc:
        logger.critical(
            "Error %s while executing command %s", exc, command_desc,
        )
        raise
    if stdout is not None:
        # Capturing: drain the pipe line by line so the spinner can tick.
        all_output = []
        while True:
            line = console_to_str(proc.stdout.readline())
            if not line:
                break
            line = line.rstrip()
            all_output.append(line + '\n')
            if logger.getEffectiveLevel() <= std_logging.DEBUG:
                # Show the line immediately
                logger.debug(line)
            else:
                # Update the spinner
                if spinner is not None:
                    spinner.spin()
    proc.wait()
    if spinner is not None:
        if proc.returncode:
            spinner.finish("error")
        else:
            spinner.finish("done")
    if proc.returncode:
        if on_returncode == 'raise':
            if (logger.getEffectiveLevel() > std_logging.DEBUG and
                    not show_stdout):
                # Output was hidden so far: dump it before raising.
                logger.info(
                    'Complete output from command %s:', command_desc,
                )
                logger.info(
                    ''.join(all_output) +
                    '\n----------------------------------------'
                )
            raise InstallationError(
                'Command "%s" failed with error code %s in %s'
                % (command_desc, proc.returncode, cwd))
        elif on_returncode == 'warn':
            logger.warning(
                'Command "%s" had error code %s in %s',
                command_desc, proc.returncode, cwd,
            )
        elif on_returncode == 'ignore':
            pass
        else:
            raise ValueError('Invalid value: on_returncode=%s' %
                             repr(on_returncode))
    if not show_stdout:
        return ''.join(all_output)
def read_text_file(filename):
    """Return the contents of *filename*.
    Try to decode the file contents with utf-8, the preferred system encoding
    (e.g., cp1252 on some Windows machines), and latin1, in that order.
    Decoding a byte string with latin1 will never raise an error. In the worst
    case, the returned string will contain some garbage characters.
    """
    with open(filename, 'rb') as fp:
        raw = fp.read()
    for enc in ('utf-8', locale.getpreferredencoding(False), 'latin1'):
        try:
            return raw.decode(enc)
        except UnicodeDecodeError:
            continue
    # Unreachable: latin1 maps every byte value, so the loop always returns.
def _make_build_dir(build_dir):
    # Create the build directory and drop pip's delete-marker file into it
    # so the directory can later be recognised as safe to remove.
    # (write_delete_marker_file is defined elsewhere in this module.)
    os.makedirs(build_dir)
    write_delete_marker_file(build_dir)
class FakeFile(object):
    """Wrap a list of lines in an object with readline() to make
    ConfigParser happy.

    Fix: readline() contained a dead Python-2 fallback that caught
    NameError around ``next()`` in order to call ``gen.next()``; in
    Python 3 exhaustion raises StopIteration, not NameError, so the
    branch never ran.  ``next(it, '')`` expresses the intent directly.
    """
    def __init__(self, lines):
        # iter() over the list is all that is needed; no generator wrapper.
        self._gen = iter(lines)
    def readline(self):
        """Return the next line, or '' once the lines are exhausted."""
        return next(self._gen, '')
    def __iter__(self):
        return self._gen
class StreamWrapper(StringIO):
    """A StringIO that remembers the stream it replaced and exposes that
    stream's ``encoding`` (compileall.compile_dir() needs stdout.encoding
    to print to stdout)."""

    @classmethod
    def from_stream(cls, orig_stream):
        inst = cls()
        # Fix: the original assigned ``cls.orig_stream``, a CLASS attribute
        # shared (and clobbered) across every wrapper instance; store it on
        # the instance instead.
        inst.orig_stream = orig_stream
        return inst

    # compileall.compile_dir() needs stdout.encoding to print to stdout
    @property
    def encoding(self):
        return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr
    that temporarily replaces the sys stream *stream_name* with a StringIO.
    Taken from Lib/support/__init__.py in the CPython repo.
    """
    orig_stdout = getattr(sys, stream_name)
    # Swap in a StreamWrapper that still reports the original encoding.
    setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
    try:
        yield getattr(sys, stream_name)
    finally:
        # Always restore the real stream, even if the body raised.
        setattr(sys, stream_name, orig_stdout)
def captured_stdout():
    """Capture the output of sys.stdout:
       with captured_stdout() as stdout:
           print('hello')
       self.assertEqual(stdout.getvalue(), 'hello\n')
    Taken from Lib/support/__init__.py in the CPython repo.
    """
    # Thin convenience wrapper over captured_output().
    return captured_output('stdout')
class cached_property(object):
    """Non-data descriptor that computes a value once per instance, then
    stores it in the instance ``__dict__`` so later lookups bypass the
    descriptor entirely.  Deleting the attribute resets the property.
    Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
    """
    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func
    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: hand back the descriptor.
            return self
        result = self.func(obj)
        # Shadow the descriptor with the computed value on the instance.
        obj.__dict__[self.func.__name__] = result
        return result
def get_installed_version(dist_name, lookup_dirs=None):
    """Get the installed version of dist_name avoiding pkg_resources cache"""
    # Create a requirement that we'll look for inside of setuptools.
    req = pkg_resources.Requirement.parse(dist_name)
    # We want to avoid having this cached, so we need to construct a new
    # working set each time.
    if lookup_dirs is None:
        working_set = pkg_resources.WorkingSet()
    else:
        working_set = pkg_resources.WorkingSet(lookup_dirs)
    # Get the installed distribution from our working set
    dist = working_set.find(req)
    # Check to see if we got an installed distribution or not, if we did
    # we want to return it's version.
    return dist.version if dist else None
def consume(iterator):
    """Consume an iterable at C speed."""
    # A zero-length deque discards every item while still draining the
    # iterator entirely in C.
    deque(iterator, maxlen=0)
| mit | -3,087,673,230,438,494,000 | 30.909624 | 79 | 0.586788 | false |
djhenderson/ctypesgen | test/testsuite.py | 12 | 9617 | #!/usr/bin/env python
# -*- coding: ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
"""Simple test suite using unittest.
By clach04 (Chris Clark).
Calling:
python test/testsuite.py
or
cd test
./testsuite.py
Could use any unitest compatible test runner (nose, etc.)
Aims to test for regressions. Where possible use stdlib to
avoid the need to compile C code.
Known to run clean with:
* 32bit Linux (python 2.5.2, 2.6)
* 32bit Windows XP (python 2.4, 2.5, 2.6.1)
"""
import sys
import os
import ctypes
import math
import unittest
import logging
test_directory = os.path.abspath(os.path.dirname(__file__))
sys.path.append(test_directory)
sys.path.append(os.path.join(test_directory, '..'))
import ctypesgentest # TODO consider moving test() from ctypesgentest into this module
class StdlibTest(unittest.TestCase):
    """Regression tests against a ctypesgen-wrapped C stdlib (getenv)."""
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = '#include <stdlib.h>\n'
        if sys.platform == "win32":
            # pick something from %windir%\system32\msvc*dll that include stdlib
            libraries = ["msvcrt.dll"]
            # NOTE(review): the line above is immediately overwritten here —
            # looks like leftover; confirm which spelling is intended.
            libraries = ["msvcrt"]
        elif sys.platform.startswith("linux"):
            libraries = ["libc.so.6"]
        else:
            libraries = ["libc"]
        self.module, output = ctypesgentest.test(header_str, libraries=libraries, all_headers=True)
    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()
    def test_getenv_returns_string(self):
        """Issue 8 - Regression for crash with 64 bit and bad strings on 32 bit.
        See http://code.google.com/p/ctypesgen/issues/detail?id=8
        Test that we get a valid (non-NULL, non-empty) string back
        """
        module = self.module
        if sys.platform == "win32":
            # Check a variable that is already set
            env_var_name = 'USERNAME'  # this is always set (as is windir, ProgramFiles, USERPROFILE, etc.)
            expect_result = os.environ[env_var_name]
            self.assert_(expect_result, 'this should not be None or empty')
            # reason for using an existing OS variable is that unless the
            # MSVCRT dll imported is the exact same one that Python was
            # built with you can't share structures, see
            # http://msdn.microsoft.com/en-us/library/ms235460.aspx
            # "Potential Errors Passing CRT Objects Across DLL Boundaries"
        else:
            env_var_name = 'HELLO'
            os.environ[env_var_name] = 'WORLD'  # This doesn't work under win32
            expect_result = 'WORLD'
        result = module.getenv(env_var_name)
        self.failUnlessEqual(expect_result, result)
    def test_getenv_returns_null(self):
        """Related to issue 8. Test getenv of unset variable.
        """
        module = self.module
        env_var_name = 'NOT SET'
        expect_result = None
        try:
            # ensure variable is not set, ignoring not set errors
            del os.environ[env_var_name]
        except KeyError:
            pass
        result = module.getenv(env_var_name)
        self.failUnlessEqual(expect_result, result)
class StdBoolTest(unittest.TestCase):
    "Test correct parsing and generation of bool type"
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = '''
#include <stdbool.h>
struct foo
{
    bool is_bar;
    int a;
};
'''
        self.module, _ = ctypesgentest.test(header_str)#, all_headers=True)
    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()
    def test_stdbool_type(self):
        """Test if bool is correctly parsed into a ctypes.c_bool field."""
        module = self.module
        struct_foo = module.struct_foo
        self.failUnlessEqual(struct_foo._fields_, [("is_bar", ctypes.c_bool), ("a", ctypes.c_int)])
class SimpleMacrosTest(unittest.TestCase):
    """Based on simple_macros.py

    Checks that ctypesgen translates C preprocessor macros (constants,
    arithmetic, ternary, stringification) into callable Python equivalents.
    """
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = '''
#define A 1
#define B(x,y) x+y
#define C(a,b,c) a?b:c
#define funny(x) "funny" #x
#define multipler_macro(x,y) x*y
#define minus_macro(x,y) x-y
#define divide_macro(x,y) x/y
#define mod_macro(x,y) x%y
'''
        libraries = None
        self.module, output = ctypesgentest.test(header_str)
    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()
    def test_macro_constant_int(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.failUnlessEqual(module.A, 1)
    def test_macro_addition(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.failUnlessEqual(module.B(2, 2), 4)
    def test_macro_ternary_true(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.failUnlessEqual(module.C(True, 1, 2), 1)
    def test_macro_ternary_false(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.failUnlessEqual(module.C(False, 1, 2), 2)
    def test_macro_ternary_true_complex(self):
        """Test ?: with true, using values that can not be confused between True and 1
        """
        module = self.module
        self.failUnlessEqual(module.C(True, 99, 100), 99)
    def test_macro_ternary_false_complex(self):
        """Test ?: with false, using values that can not be confused between True and 1
        """
        module = self.module
        self.failUnlessEqual(module.C(False, 99, 100), 100)
    def test_macro_string_compose(self):
        """Tests from simple_macros.py
        """
        module = self.module
        self.failUnlessEqual(module.funny("bunny"), "funnybunny")
    def test_macro_math_multipler(self):
        module = self.module
        x, y = 2, 5
        self.failUnlessEqual(module.multipler_macro(x, y), x * y)
    def test_macro_math_minus(self):
        module = self.module
        x, y = 2, 5
        self.failUnlessEqual(module.minus_macro(x, y), x - y)
    def test_macro_math_divide(self):
        module = self.module
        x, y = 2, 5
        self.failUnlessEqual(module.divide_macro(x, y), x / y)
    def test_macro_math_mod(self):
        module = self.module
        x, y = 2, 5
        self.failUnlessEqual(module.mod_macro(x, y), x % y)
class StructuresTest(unittest.TestCase):
    """Based on structures.py

    Checks that a plain C struct is translated into a ctypes.Structure
    with matching field names and types.
    """
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = '''
struct foo
{
    int a;
    int b;
    int c;
};
'''
        libraries = None
        self.module, output = ctypesgentest.test(header_str)
    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()
    def test_structures(self):
        """Tests from structures.py
        """
        module = self.module
        struct_foo = module.struct_foo
        self.failUnlessEqual(struct_foo._fields_, [("a", ctypes.c_int), ("b", ctypes.c_int), ("c", ctypes.c_int)])
class MathTest(unittest.TestCase):
    """Based on math_functions.py

    Fixes: test_sqrt contained a copy-pasted duplicate of the bad-argument
    check (the nested local_test + failUnlessRaises) that belongs only in
    test_bad_args_string_not_number; a dead, immediately-overwritten
    ``libraries`` assignment in setUp was also removed.
    """
    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = '#include <math.h>\n'
        if sys.platform == "win32":
            # pick something from %windir%\system32\msvc*dll that include stdlib
            libraries = ["msvcrt"]
        elif sys.platform.startswith("linux"):
            libraries = ["libm.so.6"]
        else:
            libraries = ["libc"]
        self.module, output = ctypesgentest.test(header_str, libraries=libraries, all_headers=True)
    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()
    def test_sin(self):
        """Based on math_functions.py"""
        module = self.module
        self.failUnlessEqual(module.sin(2), math.sin(2))
    def test_sqrt(self):
        """Based on math_functions.py"""
        module = self.module
        self.failUnlessEqual(module.sqrt(4), 2)
    def test_bad_args_string_not_number(self):
        """Based on math_functions.py"""
        module = self.module
        def local_test():
            module.sin("foobar")
        self.failUnlessRaises(ctypes.ArgumentError, local_test)
def main(argv=None):
    """Run the ctypesgen regression suite with ctypesgen logging silenced."""
    if argv is None:
        argv = sys.argv
    ctypesgentest.ctypesgencore.messages.log.setLevel(logging.CRITICAL) # do not log anything
    # NOTE(review): unittest.main() normally exits the process itself, so
    # the `return 0` below is likely unreachable — confirm.
    unittest.main()
    return 0
if __name__ == "__main__":
    sys.exit(main())
| bsd-3-clause | -790,871,012,187,811,000 | 28.5 | 114 | 0.595092 | false |
lnielsen/zenodo | zenodo/factory.py | 2 | 4550 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo application factories."""
from __future__ import absolute_import
import os
import sys
from invenio_base.app import create_app_factory
from invenio_base.wsgi import create_wsgi_factory, wsgi_proxyfix
from invenio_config import create_conf_loader
from invenio_files_rest.app import Flask
from statsd import StatsClient
from werkzeug.contrib.fixers import HeaderRewriterFix
from wsgi_statsd import StatsdTimingMiddleware
from zenodo.modules.cache.bccache import RedisBytecodeCache
from . import config
env_prefix = 'APP'
invenio_conf_loader = create_conf_loader(config=config, env_prefix=env_prefix)
instance_path = os.getenv(env_prefix + '_INSTANCE_PATH') or \
os.path.join(sys.prefix, 'var', 'instance')
"""Path to instance folder.
Defaults to ``<virtualenv>/var/instance/``. Can be overwritten using the
environment variable ``APP_INSTANCE_PATH``.
"""
static_folder = os.getenv(env_prefix + '_STATIC_FOLDER') or \
os.path.join(instance_path, 'static')
"""Path to static folder.
Defaults to ``<virtualenv>/var/instance/static/``. Can be overwritten
using the environment variable ``APP_STATIC_FOLDER``
"""
def conf_loader(app, **kwargs_config):
    """Zenodo conf loader: tweak URL/Jinja settings, then delegate to the
    Invenio configuration loader built at module level."""
    app.url_map.strict_slashes = False  # Legacy support
    # Enlarge the template cache and back Jinja's bytecode cache with
    # Redis (RedisBytecodeCache is imported from zenodo.modules.cache).
    app.jinja_options = dict(
        app.jinja_options,
        cache_size=1000,
        bytecode_cache=RedisBytecodeCache(app)
    )
    invenio_conf_loader(app, **kwargs_config)
def create_wsgi_statsd_factory(mounts_factories):
    """Create WSGI statsd factory.

    Wraps the standard Invenio WSGI factory so the resulting application
    strips X-Forwarded-For headers (when proxies are configured) and,
    when STATSD_* settings are present, reports request timings to statsd.
    """
    wsgi_factory = create_wsgi_factory(mounts_factories)
    def create_wsgi(app, **kwargs):
        application = wsgi_factory(app, **kwargs)
        # Remove X-Forwarded-For headers because Flask-Security doesn't know
        # how to deal with them properly. Note REMOTE_ADDR has already been
        # set correctly at this point by the ``wsgi_proxyfix`` factory.
        if app.config.get('WSGI_PROXIES'):
            application = HeaderRewriterFix(
                application,
                remove_headers=['X-Forwarded-For']
            )
        host = app.config.get('STATSD_HOST')
        port = app.config.get('STATSD_PORT', 8125)
        prefix = app.config.get('STATSD_PREFIX')
        # Only enable timing middleware when statsd is fully configured.
        if host and port and prefix:
            client = StatsClient(prefix=prefix, host=host, port=port)
            return StatsdTimingMiddleware(application, client)
        return application
    return create_wsgi
# Application factory used for CLI commands and Celery workers (UI entry
# points, no API blueprints).
create_celery = create_app_factory(
    'zenodo',
    config_loader=conf_loader,
    extension_entry_points=['invenio_base.apps'],
    blueprint_entry_points=['invenio_base.blueprints'],
    converter_entry_points=['invenio_base.converters'],
    instance_path=instance_path,
    static_folder=static_folder,
)
"""Create CLI/Celery application."""
# REST API application; uses the files-rest Flask subclass and the
# api-specific entry-point groups.
create_api = create_app_factory(
    'zenodo',
    config_loader=conf_loader,
    extension_entry_points=['invenio_base.api_apps'],
    blueprint_entry_points=['invenio_base.api_blueprints'],
    converter_entry_points=['invenio_base.api_converters'],
    instance_path=instance_path,
    app_class=Flask,
)
"""Create Flask API application."""
# UI application; mounts the API app under /api and adds proxy fixing
# plus statsd timing via the wsgi factory defined above.
create_app = create_app_factory(
    'zenodo',
    config_loader=conf_loader,
    extension_entry_points=['invenio_base.apps'],
    blueprint_entry_points=['invenio_base.blueprints'],
    converter_entry_points=['invenio_base.converters'],
    wsgi_factory=wsgi_proxyfix(
        create_wsgi_statsd_factory({'/api': create_api})),
    instance_path=instance_path,
    static_folder=static_folder,
)
"""Create Flask UI application."""
| gpl-2.0 | -4,008,428,632,355,924,000 | 32.455882 | 78 | 0.707473 | false |
dproc/trex_odp_porting_integration | scripts/automation/trex_control_plane/client_utils/general_utils.py | 2 | 2206 | #!/router/bin/python
import sys
import site
import string
import random
import os
try:
import pwd
except ImportError:
import getpass
pwd = None
using_python_3 = True if sys.version_info.major == 3 else False
def user_input():
    # Version-agnostic prompt: Python 3's input() is Python 2's raw_input()
    # (using_python_3 is computed at module level from sys.version_info).
    if using_python_3:
        return input()
    else:
        # using python version 2
        return raw_input()
def get_current_user():
    # pwd is importable on POSIX only (see the conditional import at the
    # top of this module); fall back to getpass elsewhere (e.g. Windows).
    if pwd:
        return pwd.getpwuid(os.geteuid()).pw_name
    else:
        return getpass.getuser()
def import_module_list_by_path(modules_list):
    """Register every path in *modules_list* as a site directory, making
    its modules importable (raises AssertionError for a non-list input)."""
    assert isinstance(modules_list, list)
    for full_path in modules_list:
        site.addsitedir(full_path)
def find_path_to_pardir (pardir, base_path = os.getcwd() ):
    """
    Finds the absolute path for some parent dir `pardir`, starting from base_path

    :parameters:
        pardir : str
            name of an upper-level directory to which we want an absolute path

        base_path : str
            a full (usually nested) path whose `pardir` ancestor we want.
            default value : **current working dir** (captured at import time)

    :return:
        string representation of the full path up to and including `pardir`
    """
    components = base_path.split(os.sep)
    depth = components.index(pardir) + 1
    return os.sep.join(components[:depth])
def random_id_gen(length=8):
    """
    A generator yielding random ids built from lowercase letters and digits.

    :parameters:
        length : int
            the desired length of the generated id
            default: 8

    :return:
        a random id with each next() request.
    """
    alphabet = string.ascii_lowercase + string.digits
    while True:
        yield ''.join(random.choice(alphabet) for _ in range(length))
def id_count_gen():
"""
A generator for creating an increasing id for objects, starting from 0
:parameters:
None
:return:
an id (unsigned int) with each next() request.
"""
return_id = 0
while True:
yield return_id
return_id += 1
if __name__ == "__main__":
pass
| apache-2.0 | -1,639,415,128,092,664,000 | 21.979167 | 97 | 0.616047 | false |
david-ragazzi/nupic | tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/a/description.py | 42 | 1569 |
# ----------------------------------------------------------------------
# Copyright (C) 2011 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tpParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 250,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| gpl-3.0 | -8,460,187,602,168,246,000 | 48.03125 | 411 | 0.573614 | false |
yfried/ansible | lib/ansible/modules/cloud/vmware/vcenter_license.py | 27 | 5353 | #!/usr/bin/python
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
module: vcenter_license
short_description: Manage VMware vCenter license keys
description:
- Add and delete vCenter license keys.
version_added: '2.4'
author:
- Dag Wieers (@dagwieers)
requirements:
- pyVmomi
options:
labels:
description:
- The optional labels of the license key to manage in vSphere vCenter.
- This is dictionary with key/value pair.
default: {
'source': 'ansible'
}
license:
description:
- The license key to manage in vSphere vCenter.
required: yes
state:
description:
- Whether to add (C(present)) or remove (C(absent)) the license key.
choices: [absent, present]
default: present
notes:
- This module will also auto-assign the current vCenter to the license key
if the product matches the license key, and vCenter us currently assigned
an evaluation license only.
- The evaluation license (00000-00000-00000-00000-00000) is not listed
when unused.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Add a new vCenter license
vcenter_license:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
license: f600d-21ae3-5592b-249e0-cc341
state: present
delegate_to: localhost
- name: Remove an (unused) vCenter license
vcenter_license:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
license: f600d-21ae3-5592b-249e0-cc341
state: absent
delegate_to: localhost
'''
RETURN = r'''
licenses:
description: list of license keys after module executed
returned: always
type: list
sample:
- f600d-21ae3-5592b-249e0-cc341
- 143cc-0e942-b2955-3ea12-d006f
'''
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import connect_to_api, vmware_argument_spec
def find_key(licenses, license):
for item in licenses:
if item.licenseKey == license:
return item
return None
def list_keys(licenses):
keys = []
for item in licenses:
# Filter out evaluation license key
if item.used is None:
continue
keys.append(item.licenseKey)
return keys
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
labels=dict(type='dict', default=dict(source='ansible')),
license=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
license = module.params['license']
state = module.params['state']
# FIXME: This does not seem to work on vCenter v6.0
labels = []
for k in module.params['labels']:
kv = vim.KeyValue()
kv.key = k
kv.value = module.params['labels'][k]
labels.append(kv)
result = dict(
changed=False,
diff=dict(),
)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
content = connect_to_api(module)
lm = content.licenseManager
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['before'] = '\n'.join(result['licenses']) + '\n'
if state == 'present' and license not in result['licenses']:
result['changed'] = True
if module.check_mode:
result['licenses'].append(license)
else:
lm.AddLicense(license, labels)
# Automatically assign to current vCenter, if needed
key = find_key(lm.licenses, license)
if content.about.name in key.name:
try:
lam = lm.licenseAssignmentManager
lam.UpdateAssignedLicense(entity=content.about.instanceUuid, licenseKey=license)
except:
module.warn('Could not assign "%s" (%s) to vCenter.' % (license, key.name))
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['after'] = '\n'.join(result['licenses']) + '\n'
elif state == 'absent' and license in result['licenses']:
# Check if key is in use
key = find_key(lm.licenses, license)
if key.used > 0:
module.fail_json(msg='Cannot remove key "%s", still in use %s time(s).' % (license, key.used))
result['changed'] = True
if module.check_mode:
result['licenses'].remove(license)
else:
lm.RemoveLicense(license)
result['licenses'] = list_keys(lm.licenses)
if module._diff:
result['diff']['after'] = '\n'.join(result['licenses']) + '\n'
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,008,593,862,721,674,000 | 27.77957 | 106 | 0.624323 | false |
y-asano/primecloud-controller | iaas-gw/src/iaasgw/controller/cloudStack/cloudStackInstanceController.py | 5 | 14487 | # coding: UTF-8
#
# Copyright 2014 by SCSK Corporation.
#
# This file is part of PrimeCloud Controller(TM).
#
# PrimeCloud Controller(TM) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# PrimeCloud Controller(TM) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PrimeCloud Controller(TM). If not, see <http://www.gnu.org/licenses/>.
#
from iaasgw.exception.iaasException import IaasException
from iaasgw.log.log import IaasLogger
from iaasgw.utils.propertyUtil import getImage, getScriptProperty, getDnsProperty, getPuppetProperty, getVpnProperty
from iaasgw.utils.stringUtils import isEmpty, isNotEmpty, startsWithIgnoreCase
class CloudStackInstanceController(object):
client = None
conn = None
logger = IaasLogger()
platforminfo = None
def __init__(self, platforminfo, ec2iaasclient, conn):
self.client = ec2iaasclient
self.conn = conn
self.platforminfo = platforminfo
def startInstance(self, instanceNo):
#AWS_INSTANCE 取得
tableCSINS = self.conn.getTable("CLOUDSTACK_INSTANCE")
csInstance = self.conn.selectOne(tableCSINS.select(tableCSINS.c.INSTANCE_NO==instanceNo))
#PCC_INSTANCE 取得
tableINS = self.conn.getTable("INSTANCE")
pccInstance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
#イメージの取得 再考の余地あり
image = getImage(pccInstance["IMAGE_NO"])
#
if isEmpty(csInstance["INSTANCE_ID"]):
#インスタンスの作成
self.run(instanceNo, csInstance, pccInstance, image)
#winodowsなら
if (startsWithIgnoreCase(image["os"], "windows")):
#INSTANCE_ID取得の為、CLOUDSTACK_INSTANCE 再取得
csInstance = self.conn.selectOne(tableCSINS.select(tableCSINS.c.INSTANCE_NO==instanceNo))
self.client.getPasswordData(csInstance["INSTANCE_ID"])
else:
# インスタンスが停止中でない場合はスキップ
if (csInstance["STATE"] != "Stopped"):
return;
# インスタンスの起動
self.start(instanceNo, csInstance, pccInstance)
def stopInstance(self, instanceNo):
#AWS_INSTANCE 取得
tableCSINS = self.conn.getTable("CLOUDSTACK_INSTANCE")
csInstance = self.conn.selectOne(tableCSINS.select(tableCSINS.c.INSTANCE_NO==instanceNo))
#PCC_INSTANCE 取得
tableINS = self.conn.getTable("INSTANCE")
pccInstance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))
# インスタンスIDがない場合は確認する
if (isEmpty(csInstance["INSTANCE_ID"])):
#起動ミス対策
nodes = self.client.describeInstances(name = pccInstance["INSTANCE_NAME"])
if not nodes or len(nodes) == 0:
#インスタンスが存在しない場合
return;
if len(nodes) >= 1:
#FQDNを比較する
for node in nodes:
if pccInstance["FQDN"] == node.extra["displayname"]:
#起動をミスったインスタンスを発見した場合
#ID情報を更新
csInstance["INSTANCE_ID"] = node.id
sql = tableCSINS.update(tableCSINS.c.INSTANCE_NO ==csInstance["INSTANCE_NO"], values=csInstance)
self.conn.execute(sql)
# インスタンスの停止
self.stop(instanceNo, csInstance, pccInstance)
####################################################################################
#---------------------ローカル------------------------------------------------------
####################################################################################
def run(self, instanceNo, csInstance, pccInstance, image):
#serviceoffering名称をIDへ変換
serviceofferings = self.client.describeServiceOfferings()
#デフォルトは最初にHitするID
serviceofferingid = serviceofferings[0]["id"]
for serviceoffering in serviceofferings:
if csInstance["INSTANCE_TYPE"] == serviceoffering["name"]:
serviceofferingid = serviceoffering["id"]
availabilityZone = None
if (isNotEmpty(csInstance["ZONEID"])):
availabilityZone = csInstance["ZONEID"]
#任意設定はここから 必要な分増やす
extra_args = {}
if (isNotEmpty(csInstance["NETWORKID"])):
extra_args["network_id"] = csInstance["NETWORKID"]
#SecurityGroup
securityGroups = []
if (isNotEmpty(csInstance["SECURITYGROUP"])):
securityGroups.append(csInstance["SECURITYGROUP"].split(","))
extra_args["securitygroupnames"] = securityGroups
if (isNotEmpty(csInstance["KEY_NAME"])):
extra_args["keypair"] = csInstance["KEY_NAME"]
#UserDataを作成
userData = self.createUserData(instanceNo, pccInstance, csInstance)
userData = self.makeUserData(userData)
extra_args["userdata"] = userData
self.logger.info("userData:"+userData)
#イベントログ出力
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceCreate",["CLOUDSTACK"])
#インスタンスの作成
node = self.client.runInstances(pccInstance["INSTANCE_NAME"],
pccInstance["FQDN"],
serviceofferingid,
image["templateId"],
availabilityZone,
**extra_args)
if node["state"] != "Running":
# インスタンス作成失敗時
raise IaasException("EPROCESS-000716", [node["id"], node["state"]])
# ログ出力
self.logger.info(None, "IPROCESS-100603", [node["id"],])
# イベントログ出力
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceCreateFinish",["CLOUDSTACK", node["id"]])
# データベース更新
table = self.conn.getTable("CLOUDSTACK_INSTANCE")
updateDict = self.conn.selectOne(table.select(table.c.INSTANCE_NO==instanceNo))
updateDict["INSTANCE_ID"] = node["id"]
updateDict["ZONEID"] = node["zoneid"]
updateDict["STATE"] = node["state"]
updateDict["DISPLAYNAME"] = node["displayname"]
updateDict["IPADDRESS"] = node["nic"][0]["ipaddress"]
sql = table.update(table.c.INSTANCE_NO ==updateDict["INSTANCE_NO"], values=updateDict)
self.conn.execute(sql)
def start(self, instanceNo, csInstance, pccInstance):
instanceId = csInstance["INSTANCE_ID"]
# イベントログ出力
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceStart",["CLOUDSTACK", instanceId])
#serviceoffering名称をIDへ変換
serviceofferings = self.client.describeServiceOfferings()
#デフォルトは最初にHitするID
serviceofferingid = serviceofferings[0]["id"]
for serviceoffering in serviceofferings:
if csInstance["INSTANCE_TYPE"] == serviceoffering["name"]:
serviceofferingid = serviceoffering["id"]
#serviceofferingの変更有無を確認
node = self.client.describeInstance(instanceId)
if node.extra["serviceofferingid"] != serviceofferingid:
# serviceofferingの変更
node = self.client.changeInstance(instanceId, serviceofferingid);
# インスタンスの起動
node = self.client.startInstance(instanceId);
if node["state"] != "Running":
# インスタンス作成失敗時
raise IaasException("EPROCESS-000716", [instanceId, node["state"]])
# ログ出力
self.logger.info(None, "IPROCESS-100601", [instanceId,])
# イベントログ出力
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceStartFinish",["CLOUDSTACK", instanceId])
# データベース更新
table = self.conn.getTable("CLOUDSTACK_INSTANCE")
updateDict = self.conn.selectOne(table.select(table.c.INSTANCE_NO==instanceNo))
updateDict["ZONEID"] = node["zoneid"]
updateDict["STATE"] = node["state"]
updateDict["DISPLAYNAME"] = node["displayname"]
updateDict["IPADDRESS"] = node["nic"][0]["ipaddress"]
sql = table.update(table.c.INSTANCE_NO ==updateDict["INSTANCE_NO"], values=updateDict)
self.conn.execute(sql)
def stop(self, instanceNo, csInstance, pccInstance):
instanceId = csInstance["INSTANCE_ID"]
# イベントログ出力
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceStop",["CLOUDSTACK", instanceId])
# インスタンスの停止
node = self.client.stopInstance(instanceId);
if node["state"] != "Stopped":
# インスタンス作成失敗時
raise IaasException("EPROCESS-000718", [instanceId, node["state"]])
# ログ出力
self.logger.info(None, "IPROCESS-100602", [instanceId,])
# イベントログ出力
self.conn.debug(pccInstance["FARM_NO"], None, None, instanceNo, pccInstance["INSTANCE_NAME"], "CloudStackInstanceStopFinish",["CLOUDSTACK", instanceId])
# データベース更新
table = self.conn.getTable("CLOUDSTACK_INSTANCE")
updateDict = self.conn.selectOne(table.select(table.c.INSTANCE_NO==instanceNo))
updateDict["ZONEID"] = node["zoneid"]
updateDict["STATE"] = node["state"]
updateDict["DISPLAYNAME"] = node["displayname"]
updateDict["IPADDRESS"] = node["nic"][0]["ipaddress"]
sql = table.update(table.c.INSTANCE_NO ==updateDict["INSTANCE_NO"], values=updateDict)
self.conn.execute(sql)
def terminate(self, instanceId):
#CLOUDSTACK_INSTANCE 取得
tableCSINS = self.conn.getTable("CLOUDSTACK_INSTANCE")
csInstance = self.conn.selectOne(tableCSINS.select(tableCSINS.c.INSTANCE_ID==instanceId))
if isEmpty(instanceId):
#IDが指定されていない場合はそのまま返す
return
# インスタンスの停止
node = self.client.terminateInstance(instanceId)
# ログ出力
self.logger.info(None, "IPROCESS-100604", [instanceId,])
# データベース更新
csInstance["ZONEID"] = None
csInstance["STATE"] = node["state"]
csInstance["DISPLAYNAME"] = None
csInstance["IPADDRESS"] = None
sql = tableCSINS.update(tableCSINS.c.INSTANCE_NO ==csInstance["INSTANCE_NO"], values=csInstance)
self.conn.execute(sql)
def createUserData(self, instanceNo, pccInstance, csInstance):
table = self.conn.getTable("FARM")
fram = self.conn.selectOne(table.select(table.c.FARM_NO==pccInstance["FARM_NO"]))
#UserDataを作成
userData = {}
#DB情報
userData.update({"instanceName": pccInstance["INSTANCE_NAME"]})
userData.update({"farmName": fram["FARM_NAME"]})
# FQDN
userData.update({"hostname": pccInstance["FQDN"]})
#初期スクリプト情報
userData.update({"scriptserver": getScriptProperty("script.server")})
#DNS情報
userData.update(self.createDnsUserData(instanceNo))
# Puppet情報
userData.update(self.createPuppetUserData())
# VPN情報
internal = self.platforminfo["internal"]
if (internal == 0):
userData.update(self.createVpnUserData(pccInstance))
return userData;
def createDnsUserData(self,instanceNo):
# UserDataを作成
userData = {}
# Primary DNSサーバ
userData.update({"dns": getDnsProperty("dns.server")})
# Secondry DNSサーバ
dns2 = getDnsProperty("dns.server2")
if (isNotEmpty(dns2)):
userData.update({"dns2": dns2})
# DNSドメイン
userData.update({"dnsdomain": getDnsProperty("dns.domain")})
return userData;
def createPuppetUserData(self):
# UserDataを作成
userData = {}
# PuppetMaster情報
userData.update({"puppetmaster": getPuppetProperty("puppet.masterHost")})
return userData;
def createVpnUserData(self, pccInstance):
# UserDataを作成
userData = {}
#VPN情報のユーザとパスワードをセットする
userData.update({"vpnuser": pccInstance["FQDN"]})
userData.update({"vpnuserpass": pccInstance["INSTANCE_CODE"]})
# VPNサーバ情報
userData.update({"vpnserver": getVpnProperty("vpn.server")})
userData.update({"vpnport": getVpnProperty("vpn.port")})
# userData.update({"vpnuser": getVpnProperty("vpn.user")})
# userData.update({"vpnuserpass": getVpnProperty("vpn.userpass")})
# ZIPパスワード
userData.update({"vpnzippass": getVpnProperty("vpn.zippass")})
# クライアント証明書ダウンロードURL
userData.update({"vpnclienturl": getVpnProperty("vpn.clienturl")})
return userData;
def makeUserData(self, map):
if not map or len(map) == 0:
return None
userdata = ''
for key in map.keys():
value = map[key]
if isNotEmpty(value):
if userdata != '':
userdata = userdata + ';'
userdata = userdata + key + "=" + value
return userdata
| gpl-2.0 | -8,598,702,698,478,667,000 | 36.459834 | 162 | 0.608667 | false |
rasa/scoops | makeindex.py | 1 | 13169 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" @todo add docstring """
# ### imports ###
from __future__ import (
absolute_import,
division,
print_function # ,
# unicode_literals
)
import fnmatch
import io
import json
import re
import os
# import pprint
import subprocess
import sys
OSIS = [
'0BSD',
'AAL',
'Abstyles',
'Adobe-2006',
'Adobe-Glyph',
'ADSL',
'AFL-1.1',
'AFL-1.2',
'AFL-2.0',
'AFL-2.1',
'AFL-3.0',
'Afmparse',
'AGPL-1.0',
'AGPL-1.0-only',
'AGPL-1.0-or-later',
'AGPL-3.0',
'AGPL-3.0-only',
'AGPL-3.0-or-later',
'Aladdin',
'AMDPLPA',
'AML',
'AMPAS',
'ANTLR-PD',
'Apache-1.0',
'Apache-1.1',
'Apache-2.0',
'APAFML',
'APL-1.0',
'APSL-1.0',
'APSL-1.1',
'APSL-1.2',
'APSL-2.0',
'Artistic-1.0',
'Artistic-1.0-cl8',
'Artistic-1.0-Perl',
'Artistic-2.0',
'Bahyph',
'Barr',
'Beerware',
'BitTorrent-1.0',
'BitTorrent-1.1',
'Borceux',
'BSD-1-Clause',
'BSD-2-Clause',
'BSD-2-Clause-FreeBSD',
'BSD-2-Clause-NetBSD',
'BSD-2-Clause-Patent',
'BSD-3-Clause',
'BSD-3-Clause-Attribution',
'BSD-3-Clause-Clear',
'BSD-3-Clause-LBNL',
'BSD-3-Clause-No-Nuclear-License',
'BSD-3-Clause-No-Nuclear-License-2014',
'BSD-3-Clause-No-Nuclear-Warranty',
'BSD-4-Clause',
'BSD-4-Clause-UC',
'BSD-Protection',
'BSD-Source-Code',
'BSL-1.0',
'bzip2-1.0.5',
'bzip2-1.0.6',
'Caldera',
'CATOSL-1.1',
'CC-BY-1.0',
'CC-BY-2.0',
'CC-BY-2.5',
'CC-BY-3.0',
'CC-BY-4.0',
'CC-BY-NC-1.0',
'CC-BY-NC-2.0',
'CC-BY-NC-2.5',
'CC-BY-NC-3.0',
'CC-BY-NC-4.0',
'CC-BY-NC-ND-1.0',
'CC-BY-NC-ND-2.0',
'CC-BY-NC-ND-2.5',
'CC-BY-NC-ND-3.0',
'CC-BY-NC-ND-4.0',
'CC-BY-NC-SA-1.0',
'CC-BY-NC-SA-2.0',
'CC-BY-NC-SA-2.5',
'CC-BY-NC-SA-3.0',
'CC-BY-NC-SA-4.0',
'CC-BY-ND-1.0',
'CC-BY-ND-2.0',
'CC-BY-ND-2.5',
'CC-BY-ND-3.0',
'CC-BY-ND-4.0',
'CC-BY-SA-1.0',
'CC-BY-SA-2.0',
'CC-BY-SA-2.5',
'CC-BY-SA-3.0',
'CC-BY-SA-4.0',
'CC0-1.0',
'CDDL-1.0',
'CDDL-1.1',
'CDLA-Permissive-1.0',
'CDLA-Sharing-1.0',
'CECILL-1.0',
'CECILL-1.1',
'CECILL-2.0',
'CECILL-2.1',
'CECILL-B',
'CECILL-C',
'ClArtistic',
'CNRI-Jython',
'CNRI-Python',
'CNRI-Python-GPL-Compatible',
'Condor-1.1',
'CPAL-1.0',
'CPL-1.0',
'CPOL-1.02',
'Crossword',
'CrystalStacker',
'CUA-OPL-1.0',
'Cube',
'curl',
'D-FSL-1.0',
'diffmark',
'DOC',
'Dotseqn',
'DSDP',
'dvipdfm',
'ECL-1.0',
'ECL-2.0',
'eCos-2.0',
'EFL-1.0',
'EFL-2.0',
'eGenix',
'Entessa',
'EPL-1.0',
'EPL-2.0',
'ErlPL-1.1',
'EUDatagrid',
'EUPL-1.0',
'EUPL-1.1',
'EUPL-1.2',
'Eurosym',
'Fair',
'Frameworx-1.0',
'FreeImage',
'FSFAP',
'FSFUL',
'FSFULLR',
'FTL',
'GFDL-1.1',
'GFDL-1.1-only',
'GFDL-1.1-or-later',
'GFDL-1.2',
'GFDL-1.2-only',
'GFDL-1.2-or-later',
'GFDL-1.3',
'GFDL-1.3-only',
'GFDL-1.3-or-later',
'Giftware',
'GL2PS',
'Glide',
'Glulxe',
'gnuplot',
'GPL-1.0',
'GPL-1.0+',
'GPL-1.0-only',
'GPL-1.0-or-later',
'GPL-2.0',
'GPL-2.0+',
'GPL-2.0-only',
'GPL-2.0-or-later',
'GPL-2.0-with-autoconf-exception',
'GPL-2.0-with-bison-exception',
'GPL-2.0-with-classpath-exception',
'GPL-2.0-with-font-exception',
'GPL-2.0-with-GCC-exception',
'GPL-3.0',
'GPL-3.0+',
'GPL-3.0-only',
'GPL-3.0-or-later',
'GPL-3.0-with-autoconf-exception',
'GPL-3.0-with-GCC-exception',
'gSOAP-1.3b',
'HaskellReport',
'HPND',
'IBM-pibs',
'ICU',
'IJG',
'ImageMagick',
'iMatix',
'Imlib2',
'Info-ZIP',
'Intel',
'Intel-ACPI',
'Interbase-1.0',
'IPA',
'IPL-1.0',
'ISC',
'JasPer-2.0',
'JSON',
'LAL-1.2',
'LAL-1.3',
'Latex2e',
'Leptonica',
'LGPL-2.0',
'LGPL-2.0+',
'LGPL-2.0-only',
'LGPL-2.0-or-later',
'LGPL-2.1',
'LGPL-2.1+',
'LGPL-2.1-only',
'LGPL-2.1-or-later',
'LGPL-3.0',
'LGPL-3.0+',
'LGPL-3.0-only',
'LGPL-3.0-or-later',
'LGPLLR',
'Libpng',
'libtiff',
'LiLiQ-P-1.1',
'LiLiQ-R-1.1',
'LiLiQ-Rplus-1.1',
'Linux-OpenIB',
'LPL-1.0',
'LPL-1.02',
'LPPL-1.0',
'LPPL-1.1',
'LPPL-1.2',
'LPPL-1.3a',
'LPPL-1.3c',
'MakeIndex',
'MirOS',
'MIT',
'MIT-0',
'MIT-advertising',
'MIT-CMU',
'MIT-enna',
'MIT-feh',
'MITNFA',
'Motosoto',
'mpich2',
'MPL-1.0',
'MPL-1.1',
'MPL-2.0',
'MPL-2.0-no-copyleft-exception',
'MS-PL',
'MS-RL',
'MTLL',
'Multics',
'Mup',
'NASA-1.3',
'Naumen',
'NBPL-1.0',
'NCSA',
'Net-SNMP',
'NetCDF',
'Newsletr',
'NGPL',
'NLOD-1.0',
'NLPL',
'Nokia',
'NOSL',
'Noweb',
'NPL-1.0',
'NPL-1.1',
'NPOSL-3.0',
'NRL',
'NTP',
'Nunit',
'OCCT-PL',
'OCLC-2.0',
'ODbL-1.0',
'OFL-1.0',
'OFL-1.1',
'OGTSL',
'OLDAP-1.1',
'OLDAP-1.2',
'OLDAP-1.3',
'OLDAP-1.4',
'OLDAP-2.0',
'OLDAP-2.0.1',
'OLDAP-2.1',
'OLDAP-2.2',
'OLDAP-2.2.1',
'OLDAP-2.2.2',
'OLDAP-2.3',
'OLDAP-2.4',
'OLDAP-2.5',
'OLDAP-2.6',
'OLDAP-2.7',
'OLDAP-2.8',
'OML',
'OpenSSL',
'OPL-1.0',
'OSET-PL-2.1',
'OSL-1.0',
'OSL-1.1',
'OSL-2.0',
'OSL-2.1',
'OSL-3.0',
'PDDL-1.0',
'PHP-3.0',
'PHP-3.01',
'Plexus',
'PostgreSQL',
'psfrag',
'psutils',
'Python-2.0',
'Qhull',
'QPL-1.0',
'Rdisc',
'RHeCos-1.1',
'RPL-1.1',
'RPL-1.5',
'RPSL-1.0',
'RSA-MD',
'RSCPL',
'Ruby',
'SAX-PD',
'Saxpath',
'SCEA',
'Sendmail',
'SGI-B-1.0',
'SGI-B-1.1',
'SGI-B-2.0',
'SimPL-2.0',
'SISSL',
'SISSL-1.2',
'Sleepycat',
'SMLNJ',
'SMPPL',
'SNIA',
'Spencer-86',
'Spencer-94',
'Spencer-99',
'SPL-1.0',
'StandardML-NJ',
'SugarCRM-1.1.3',
'SWL',
'TCL',
'TCP-wrappers',
'TMate',
'TORQUE-1.1',
'TOSL',
'Unicode-DFS-2015',
'Unicode-DFS-2016',
'Unicode-TOU',
'Unlicense',
'UPL-1.0',
'Vim',
'VOSTROM',
'VSL-1.0',
'W3C',
'W3C-19980720',
'W3C-20150513',
'Watcom-1.0',
'Wsuipa',
'WTFPL',
'wxWindows',
'X11',
'Xerox',
'XFree86-1.1',
'xinetd',
'Xnet',
'xpp',
'XSkat',
'YPL-1.0',
'YPL-1.1',
'Zed',
'Zend-2.0',
'Zimbra-1.3',
'Zimbra-1.4',
'Zlib',
'zlib-acknowledgement',
'ZPL-1.1',
'ZPL-2.0',
'ZPL-2.1',
'389-exception',
'Autoconf-exception-2.0',
'Autoconf-exception-3.0',
'Bison-exception-2.2',
'Bootloader-exception',
'Classpath-exception-2.0',
'CLISP-exception-2.0',
'DigiRule-FOSS-exception',
'eCos-exception-2.0',
'Fawkes-Runtime-exception',
'FLTK-exception',
'Font-exception-2.0',
'freertos-exception-2.0',
'GCC-exception-2.0',
'GCC-exception-3.1',
'gnu-javamail-exception',
'i2p-gpl-java-exception',
'Libtool-exception',
'Linux-syscall-note',
'LLVM-exception',
'LZMA-exception',
'mif-exception',
'Nokia-Qt-exception-1.1',
'OCCT-exception-1.0',
'OpenJDK-assembly-exception-1.0',
'openvpn-openssl-exception',
'Qt-GPL-exception-1.0',
'Qt-LGPL-exception-1.1',
'Qwt-exception-1.0',
'u-boot-exception-2.0',
'sWxWindows-exception-3.1'
]
OSImap = {}
for osi in OSIS:
OSImap[osi.lower()] = 'https://opensource.org/licenses/%s' % osi
lmap = {
'commercial': 'https://en.m.wikipedia.org/wiki/Software_license#Proprietary_software_licenses',
'freeware': 'https://en.wikipedia.org/wiki/Freeware',
'proprietary': 'https://en.m.wikipedia.org/wiki/Software_license#Proprietary_software_licenses',
'public_domain': 'https://wiki.creativecommons.org/wiki/Public_domain',
'public domain': 'https://wiki.creativecommons.org/wiki/Public_domain',
'public-domain': 'https://wiki.creativecommons.org/wiki/Public_domain',
'publicdomain': 'https://wiki.creativecommons.org/wiki/Public_domain',
'shareware': 'https://en.wikipedia.org/wiki/Shareware',
}
def do_license(v):
""" doc me """
url = v
if 'identifier' in v:
identifier = v['identifier']
else:
identifier = ''
if 'url' in v:
url = v['url']
if re.search('^(http|ftp)', url):
if not identifier:
identifier = 'Link'
v = '[%s](%s "%s")' % (identifier, url, url)
return v
if not identifier:
identifier = url
parts = re.split(r'[,|\s]+', identifier)
v = ''
for part in parts:
if v > '':
v += '/'
url = ''
k = part.lower()
if k in OSImap:
url = OSImap[k]
elif lmap.get(k):
url = lmap[k]
if url > '':
v += '[%s](%s "%s")' % (part, url, url)
else:
v += part
return v
def get_url(js):
""" doc me """
if 'checkver' in js:
if 'url' in js['checkver']:
return js['checkver']['url']
if 'homepage' in js:
return js['homepage']
return ''
def do_version(js):
""" doc me """
version = js['version']
url = get_url(js)
if 'checkver' not in js:
version = '<i>%s</i>' % version
if url == '':
return version
return '[%s](%s "%s")' % (version, url, url)
# pylint: disable=R0912 # Too many branches (22/12) (too-many-branches)
# pylint: disable=R0915 # Too many statements (71/50) (too-many-statements)
def main():
""" doc me """
markdown = 'README.md'
print("Reading %s" % markdown)
with io.open(markdown, 'r', encoding='utf-8') as f:
lines = f.readlines()
for i, line in enumerate(lines):
lines[i] = str(line)
specs = sys.argv
specs.pop(0)
if len(specs) == 0:
specs = ['bucket/*.json']
keys = [
'checkver',
'description',
'homepage',
'license',
'version',
]
rows = {}
cmdline = ["git", "ls-files"]
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, shell=True)
(out, _) = proc.communicate()
files = out.splitlines()
for file in files:
file = file.decode("utf-8")
if re.search('wip/', file):
# print("skipping %s: wip" % file)
continue
accept = False
print("file=%s" % file)
for spec in specs:
# print("spec=%s" % spec)
if fnmatch.fnmatch(file, spec):
accept = True
break
if not accept:
# print("skipping %s: not matched" % file)
continue
with open(file, 'r') as f:
j = json.load(f)
row = {}
(name, _) = os.path.splitext(os.path.basename(file))
if re.search('^_', name):
# print("skipping %s: starts with _" % name)
continue
if re.search('^schema', name):
# print("skipping %s: starts with schema" % name)
continue
for key in keys:
if key in j:
val = j[key]
if type(val).__name__ == 'unicode':
val = val.strip()
if key == 'license':
val = do_license(val)
if key == 'version':
val = do_version(j)
row[key] = val
else:
row[key] = ''
rows[name] = row
table = [
'<!-- The following table was inserted by makeindex.py -->',
'<!-- Your edits will be lost the next time makeindex.py is run -->',
'|Name|Version|Description|License|',
'|----|-------|-----------|-------|'
]
newlist = [(key, rows[key]) for key in sorted(rows.keys())]
for (name, row) in newlist:
table.append('|[%s](%s "%s")|%s|%s|%s|' % (
name, row['homepage'], row['homepage'], row['version'], row['description'], row['license']))
out = []
found = False
for line in lines:
line = str(line.strip())
if found:
if re.match(r'^\s*<!--\s+</apps>\s+-->', line):
found = False
else:
continue
if re.match(r'^\s*<!--\s+<apps>\s+-->', line):
found = True
out.append(line)
out.extend(table)
continue
out.append(line)
print("Writing %s" % markdown)
with io.open(markdown + '.tmp', 'w', encoding='utf-8', newline='\n') as f:
data = "\n".join(out) + "\n"
f.write(data)
if os.path.exists(markdown + '.bak'):
os.remove(markdown + '.bak')
os.rename(markdown, markdown + '.bak')
os.rename(markdown + '.tmp', markdown)
main()
sys.exit(0)
| mit | -3,599,315,138,253,931,500 | 20.206119 | 104 | 0.4787 | false |
deepakbane28/nixysa | nixysa/pod_binding.py | 9 | 31775 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""pod binding model module.
This module implements the glue functions for the pod binding model, that is
used by POD types, and strings (which are POD in JavaScript). 'void' is also
included here, although it is only used for return values (and raises an
exception otherwise).
In C++, objects using this binding model are passed and returned by value (or
by pointer when mutable), except strings which are passed by const reference
(and returned by copy).
For example:
void SetValues(int value, const string &name);
float GetValue();
string GetString();
For JS bindings, they are directly represented by variants.
"""
import string
import sys
import cpp_utils
import java_utils
# Map from C++ POD type names (as used in nixysa type definitions) to the
# JSDoc type names emitted in generated JS documentation.
# 'void' is a special case: it is only meaningful for callback return values.
CPP_POD_TO_JSDOC_TYPES = {
    'int': 'number',
    'std.string': 'string',
    'bool': 'boolean',
    'float': 'number',
    'double': 'number',
    'unsigned int': 'number',
    'size_t': 'number',
    'void': 'void',
}
class InvalidPODUsage(Exception):
  """Raised when a POD type is used in an unsupported way."""
class BadVoidUsage(Exception):
  """Raised when 'void' appears anywhere other than a return value."""
class UnknownPODType(Exception):
  """Raised when an unknown POD type is used.

  Attributes:
    name: the name of the unknown POD type.
  """

  def __init__(self, name):
    # Pass the name to the base class so that str(e) reports the offending
    # type instead of an empty message.
    Exception.__init__(self, name)
    self.name = name
def JavaMemberString(scope, type_defn):
  """Gets the representation of a member name in Java.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition for the type.

  Returns:
    a string representing the type.
  """
  # NOTE(review): unlike the C++ helpers below, no BadVoidUsage check is
  # performed here; whether Java bindings need one for 'void' is still an
  # open TODO.
  return java_utils.GetScopedName(scope, type_defn)
def CppTypedefString(scope, type_defn):
  """Gets the representation of a type when used in a C++ typedef.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition for the type.

  Returns:
    a (string, boolean) pair: the type's representation, and whether the
    type's definition is needed for the expression to be valid.

  Raises:
    BadVoidUsage: type_defn is a 'void' POD type.
  """
  final_type = type_defn.GetFinalType()
  if final_type.podtype == 'void':
    raise BadVoidUsage
  return cpp_utils.GetScopedName(scope, type_defn), True
def CppMemberString(scope, type_defn):
  """Gets the representation of a type when used as a C++ class member.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition for the type.

  Returns:
    a (string, boolean) pair: the type's representation, and whether the
    type's definition is needed for the expression to be valid.

  Raises:
    BadVoidUsage: type_defn is a 'void' POD type.
  """
  # 'void' cannot be a data member; everything else is stored by value.
  if type_defn.GetFinalType().podtype != 'void':
    return cpp_utils.GetScopedName(scope, type_defn), True
  raise BadVoidUsage
def CppReturnValueString(scope, type_defn):
  """Gets the representation of a type used as a C++ function return value.

  All POD types, including 'void', are valid as return values.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition for the type.

  Returns:
    a (string, boolean) pair: the type representation, and whether the full
    definition of the type is needed for the expression to be valid.
  """
  scoped_name = cpp_utils.GetScopedName(scope, type_defn)
  return scoped_name, True
def CppParameterString(scope, type_defn):
  """Gets the representation of a type used for a function parameter.

  Strings are passed by const reference; other PODs are passed by value.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition for the type.

  Returns:
    a (string, boolean) pair: the type representation, and whether the full
    definition of the type is needed for the expression to be valid.

  Raises:
    BadVoidUsage: type_defn is a 'void' POD type.
  """
  podtype = type_defn.GetFinalType().podtype
  if podtype == 'void':
    raise BadVoidUsage
  scoped_name = cpp_utils.GetScopedName(scope, type_defn)
  if podtype in ('string', 'wstring'):
    return 'const %s&' % scoped_name, True
  return scoped_name, True
def CppMutableParameterString(scope, type_defn):
  """Gets the representation of a type for a mutable function parameter.

  Mutable parameters are passed by pointer.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition for the type.

  Returns:
    a (string, boolean) pair: the type representation, and whether the full
    definition of the type is needed for the expression to be valid.

  Raises:
    BadVoidUsage: type_defn is a 'void' POD type.
  """
  final_type = type_defn.GetFinalType()
  if final_type.podtype == 'void':
    raise BadVoidUsage
  scoped_name = cpp_utils.GetScopedName(scope, type_defn)
  return scoped_name + '*', True
def CppMutableToNonMutable(scope, type_defn, expr):
  """Gets the string converting a mutable expression to a non-mutable one.

  Since mutable PODs are passed by pointer, dereferencing yields the value.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition for the type.
    expr: a string for the mutable expression.

  Returns:
    a string, which is the non-mutable expression.
  """
  del scope, type_defn  # unused; only the expression text matters.
  return '*(%s)' % expr
def CppBaseClassString(scope, type_defn):
  """Gets the representation of a type for a base class.

  POD types cannot be inherited from, so this always fails.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition for the type.

  Raises:
    InvalidPODUsage: always; a POD type cannot be used as a base class.
  """
  raise InvalidPODUsage
def CppCallMethod(scope, type_defn, object_expr, mutable, method, param_exprs):
  """Gets the representation of a member function call.

  POD types have no member functions, so this always fails.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition, representing the type of the object being called.
    object_expr: a string, the expression for the object being called.
    mutable: a boolean, whether 'object_expr' is mutable.
    method: a Function, representing the function to call.
    param_exprs: a list of strings, one expression per parameter value.

  Raises:
    InvalidPODUsage: always; POD types have no methods.
  """
  raise InvalidPODUsage
def CppCallStaticMethod(scope, type_defn, method, param_exprs):
  """Gets the representation of a static function call.

  POD types have no static functions, so this always fails.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition, representing the type of the object being called.
    method: a Function, representing the function to call.
    param_exprs: a list of strings, one expression per parameter value.

  Raises:
    InvalidPODUsage: always; POD types have no static methods.
  """
  raise InvalidPODUsage
def CppCallConstructor(scope, type_defn, method, param_exprs):
  """Gets the representation of a constructor call.

  POD values are not created through constructors, so this always fails.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition, representing the type of the object being called.
    method: a Function, representing the constructor to call.
    param_exprs: a list of strings, one expression per parameter value.

  Raises:
    InvalidPODUsage: always; POD types have no constructors.
  """
  raise InvalidPODUsage
def CppSetField(scope, type_defn, object_expr, field, param_expr):
  """Gets the representation of an expression setting a field in an object.

  POD types have no fields, so this always fails.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition, representing the type of the object containing
      the field being set.
    object_expr: a string, the expression for the containing object.
    field: a string, the name of the field to be set.
    param_expr: a string, the expression for the value to be set.

  Raises:
    InvalidPODUsage: always; POD types have no fields.
  """
  raise InvalidPODUsage
def CppGetField(scope, type_defn, object_expr, field):
  """Gets the representation of an expression getting a field in an object.

  POD types have no fields, so this always fails.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition, representing the type of the object containing
      the field being retrieved.
    object_expr: a string, the expression for the containing object.
    field: a string, the name of the field to be retrieved.

  Raises:
    InvalidPODUsage: always; POD types have no fields.
  """
  raise InvalidPODUsage
def CppSetStatic(scope, type_defn, field, param_expr):
  """Gets the representation of an expression setting a static field.

  POD types have no static fields, so this always fails.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition, representing the type containing the field.
    field: a string, the name of the field to be set.
    param_expr: a string, the expression for the value to be set.

  Raises:
    InvalidPODUsage: always; POD types have no static fields.
  """
  raise InvalidPODUsage
def CppGetStatic(scope, type_defn, field):
  """Gets the representation of an expression getting a static field.

  POD types have no static fields, so this always fails.

  Args:
    scope: a Definition for the scope in which the expression will be written.
    type_defn: a Definition, representing the type containing the field.
    field: a string, the name of the field to be retrieved.

  Raises:
    InvalidPODUsage: always; POD types have no static fields.
  """
  raise InvalidPODUsage
def JSDocTypeString(type_defn):
  """Gets the representation of a type in JSDoc notation.

  Args:
    type_defn: a Definition for the type.

  Returns:
    a string that is the JSDoc notation of type_defn.
  """
  # Resolve typedefs to the underlying type, then build its fully qualified
  # dotted name, skipping the outermost (global) scope entry.
  type_defn = type_defn.GetFinalType()
  type_stack = type_defn.GetParentScopeStack()
  name = type_defn.name
  type_string = '.'.join([s.name for s in type_stack[1:]] + [name])
  if type_string in CPP_POD_TO_JSDOC_TYPES:
    return CPP_POD_TO_JSDOC_TYPES[type_string]
  # No known mapping: report it and fall back to the JSDoc wildcard type.
  # NOTE: Python 2 print syntax; this module predates Python 3.
  print >> sys.stderr, (
      'ERROR: %s : Unknown C++ Pod to JSDoc type conversion for C++ type: %s' %
      (type_defn.source, type_string))
  return '*'
def NpapiBindingGlueHeader(scope, type_defn):
  """Gets the NPAPI glue header for a given type.

  POD types need no NPAPI binding glue, so this always fails.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type.

  Raises:
    InvalidPODUsage: always; POD types have no binding glue.
  """
  raise InvalidPODUsage
def NpapiBindingGlueCpp(scope, type_defn):
  """Gets the NPAPI glue implementation for a given type.

  POD types need no NPAPI binding glue, so this always fails.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type.

  Raises:
    InvalidPODUsage: always; POD types have no binding glue.
  """
  raise InvalidPODUsage
def NpapiDispatchFunctionHeader(scope, type_defn, variable, npp, success):
  """Gets a header for NPAPI glue dispatch functions.

  POD values are never dispatched through NPAPI objects, so this always
  fails.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type.
    variable: a string naming a variable that can hold a reference to the
      object.
    npp: a string naming the variable holding the NPP instance pointer.
    success: the name of a bool variable holding the current success status.

  Raises:
    InvalidPODUsage: always; POD types have no dispatch glue.
  """
  raise InvalidPODUsage
# C++ snippet: decode a UTF-16 string from a NPVariant (UTF-8 -> UTF-16),
# setting ${success}=false with an error message on a type mismatch or a
# failed Unicode conversion.
_wstring_from_npvariant_template = string.Template("""
${type} ${variable};
if (!NPVARIANT_IS_STRING(${input})) {
${success} = false;
*error_handle = "Error in " ${context}
": was expecting a string.";
} else if (!UTF8ToString16(NPVARIANT_TO_STRING(${input}).UTF8Characters,
NPVARIANT_TO_STRING(${input}).UTF8Length,
&${variable})) {
${success} = false;
*error_handle = "Error in " ${context}
": hit an unexpected unicode conversion problem.";
}
""")
# C++ snippet: extract a UTF-8 string from a NPVariant, flagging a type
# mismatch through ${success} and *error_handle.
_string_from_npvariant_template = string.Template("""
${type} ${variable};
if (NPVARIANT_IS_STRING(${input})) {
${variable} = ${type}(NPVARIANT_TO_STRING(${input}).UTF8Characters,
NPVARIANT_TO_STRING(${input}).UTF8Length);
} else {
${success} = false;
*error_handle = "Error in " ${context}
": was expecting a string.";
}
""")
# C++ snippet: extract a floating-point number from a NPVariant.
_float_from_npvariant_template = string.Template("""
${type} ${variable} = 0.f;
if (NPVARIANT_IS_NUMBER(${input})) {
${variable} = static_cast<${type}>(NPVARIANT_TO_NUMBER(${input}));
} else {
*error_handle = "Error in " ${context}
": was expecting a number.";
${success} = false;
}
""")
# C++ snippet: extract an integer from a NPVariant.
_int_from_npvariant_template = string.Template("""
${type} ${variable} = 0;
if (NPVARIANT_IS_NUMBER(${input})) {
${variable} = static_cast<${type}>(NPVARIANT_TO_NUMBER(${input}));
} else {
*error_handle = "Error in " ${context}
": was expecting an int.";
${success} = false;
}
""")
# C++ snippet: extract a boolean from a NPVariant.
_bool_from_npvariant_template = string.Template("""
${type} ${variable} = false;
if (NPVARIANT_IS_BOOLEAN(${input})) {
${variable} = NPVARIANT_TO_BOOLEAN(${input});
} else {
*error_handle = "Error in " ${context}
": was expecting a boolean.";
${success} = false;
}
""")
def NpapiFromNPVariant(scope, type_defn, input_expr, variable, success,
                       exception_context, npp):
  """Gets the string to get a value from a NPVariant.

  This function creates a string containing a C++ code snippet that is used to
  retrieve a value from a NPVariant. If an error occurs, like if the NPVariant
  is not of the correct type, the snippet will set the success status variable
  to false.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type of the value.
    input_expr: an expression representing the NPVariant to get the value from.
    variable: a string, representing a name of a variable that can be used to
      store a reference to the value.
    success: the name of a bool variable containing the current success status.
    exception_context: the name of a string containing context information, for
      use in exception reporting.
    npp: a string, representing the name of the variable that holds the pointer
      to the NPP instance.

  Returns:
    a (string, string) pair, the first string being the code snippet and the
    second one being the expression to access that value.

  Raises:
    BadVoidUsage: type_defn is a 'void' POD type.
    UnknownPODType: type_defn is not a known POD type.
  """
  npp = npp  # silence gpylint.
  type_name = cpp_utils.GetScopedName(scope, type_defn)
  final_type = type_defn.GetFinalType()
  if final_type.podtype == 'void':
    # 'void' has no storage; 'void(0)' is a valid do-nothing C++ expression.
    return '', 'void(0)'
  elif final_type.podtype == 'int':
    text = _int_from_npvariant_template.substitute(type=type_name,
                                                   input=input_expr,
                                                   variable=variable,
                                                   success=success,
                                                   context=exception_context)
    return text, variable
  elif final_type.podtype == 'bool':
    text = _bool_from_npvariant_template.substitute(type=type_name,
                                                    input=input_expr,
                                                    variable=variable,
                                                    success=success,
                                                    context=exception_context)
    return text, variable
  elif final_type.podtype == 'float':
    text = _float_from_npvariant_template.substitute(type=type_name,
                                                     input=input_expr,
                                                     variable=variable,
                                                     success=success,
                                                     context=exception_context)
    return text, variable
  elif final_type.podtype == 'variant':
    # Variants wrap the raw NPVariant directly; construction needs the NPP
    # instance pointer.
    return '%s %s(npp, %s);' % (type_name, variable, input_expr), variable
  elif final_type.podtype == 'string':
    text = _string_from_npvariant_template.substitute(type=type_name,
                                                      input=input_expr,
                                                      variable=variable,
                                                      success=success,
                                                      context=exception_context)
    return text, variable
  elif final_type.podtype == 'wstring':
    text = _wstring_from_npvariant_template.substitute(type=type_name,
                                                       input=input_expr,
                                                       variable=variable,
                                                       success=success,
                                                       context=exception_context)
    return text, variable
  else:
    raise UnknownPODType(final_type.podtype)
def NpapiExprToNPVariant(scope, type_defn, variable, expression, output,
                         success, npp):
  """Gets the string to store a value into a NPVariant.

  This function creates a string containing a C++ code snippet that is used to
  store a value into a NPVariant. That operation takes two phases, one that
  allocates necessary NPAPI resources, and that can fail, and one that actually
  sets the NPVariant (that can't fail). If an error occurs, the snippet will
  set the success status variable to false.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type of the value.
    variable: a string, representing a name of a variable that can be used to
      store a reference to the value.
    expression: a string representing the expression that yields the value to
      be stored.
    output: an expression representing a pointer to the NPVariant to store the
      value into.
    success: the name of a bool variable containing the current success status.
    npp: a string, representing the name of the variable that holds the pointer
      to the NPP instance.

  Returns:
    a (string, string) pair, the first string being the code snippet for the
    first phase, and the second one being the code snippet for the second phase.

  Raises:
    UnknownPODType: type_defn is not a known POD type.
  """
  npp = npp  # silence gpylint.
  type_name = cpp_utils.GetScopedName(scope, type_defn)
  final_type = type_defn.GetFinalType()
  if final_type.podtype == 'void':
    return ('%s;' % expression,
            'VOID_TO_NPVARIANT(*%s);' % output)
  elif final_type.podtype == 'int':
    return ('%s %s = %s;' % (type_name, variable, expression),
            'INT32_TO_NPVARIANT(%s, *%s);' % (variable, output))
  elif final_type.podtype == 'bool':
    return ('%s %s = %s;' % (type_name, variable, expression),
            'BOOLEAN_TO_NPVARIANT(%s, *%s);' % (variable, output))
  elif final_type.podtype == 'float':
    return ('%s %s = %s;' % (type_name, variable, expression),
            'DOUBLE_TO_NPVARIANT(static_cast<double>(%s), *%s);' %
            (variable, output))
  elif final_type.podtype == 'variant':
    # BUG FIX: the generated declaration was missing its terminating
    # semicolon ('%s %s = %s' instead of '%s %s = %s;'), producing invalid
    # C++ glue code; every sibling branch emits a complete statement.
    return ('%s %s = %s;' % (type_name, variable, expression),
            '*%s = %s.NPVariant(npp);' % (output, variable))
  elif final_type.podtype == 'string':
    return ('GLUE_PROFILE_START(npp, "StringToNPVariant");\n'
            '%s = StringToNPVariant(%s, %s);\n'
            'GLUE_PROFILE_STOP(npp, "StringToNPVariant");'
            % (success, expression, output),
            '')
  elif final_type.podtype == 'wstring':
    return ('GLUE_PROFILE_START(npp, "String16ToNPVariant");\n'
            '%s = String16ToNPVariant(%s, %s);\n'
            'GLUE_PROFILE_STOP(npp, "String16ToNPVariant");'
            % (success, expression, output),
            '')
  else:
    raise UnknownPODType(final_type.podtype)
def PpapiBindingGlueHeader(scope, type_defn):
  """Gets the PPAPI glue header for a given type.

  POD types need no PPAPI binding glue, so this always fails.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type.

  Raises:
    InvalidPODUsage: always; POD types have no binding glue.
  """
  raise InvalidPODUsage
def PpapiBindingGlueCpp(scope, type_defn):
  """Gets the PPAPI glue implementation for a given type.

  POD types need no PPAPI binding glue, so this always fails.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type.

  Raises:
    InvalidPODUsage: always; POD types have no binding glue.
  """
  raise InvalidPODUsage
def PpapiDispatchFunctionHeader(scope, type_defn, variable, npp, success):
  """Gets a header for PPAPI glue dispatch functions.

  POD values are never dispatched through PPAPI objects, so this always
  fails.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type.
    variable: a string naming a variable that can hold a reference to the
      object.
    npp: a string naming the variable holding the pp::Instance pointer.
    success: the name of a bool variable holding the current success status.

  Raises:
    InvalidPODUsage: always; POD types have no dispatch glue.
  """
  raise InvalidPODUsage
# C++ snippet: extract a string from a pp::Var, reporting a type mismatch
# through ${success} and *exception.
_string_from_ppvar_template = string.Template("""
${type} ${variable};
if (${input}.is_string()) {
${variable} = ${input}.AsString();
} else {
${success} = false;
*exception = pp::Var("Error in " ${context}
": was expecting a string.");
}
""")
# C++ snippet: extract a floating-point number from a pp::Var.
_float_from_ppvar_template = string.Template("""
${type} ${variable} = 0.f;
if (${input}.is_number()) {
${variable} = static_cast<${type}>(${input}.AsDouble());
} else {
*exception = pp::Var("Error in " ${context}
": was expecting a number.");
${success} = false;
}
""")
# C++ snippet: extract an integer from a pp::Var.
_int_from_ppvar_template = string.Template("""
${type} ${variable} = 0;
if (${input}.is_number()) {
${variable} = static_cast<${type}>(${input}.AsInt());
} else {
*exception = pp::Var("Error in " ${context}
": was expecting an int.");
${success} = false;
}
""")
# C++ snippet: extract a boolean from a pp::Var.
_bool_from_ppvar_template = string.Template("""
${type} ${variable} = false;
if (${input}.is_bool()) {
${variable} = ${input}.AsBool();
} else {
*exception = pp::Var("Error in " ${context}
": was expecting a boolean.");
${success} = false;
}
""")
def PpapiFromPPVar(scope, type_defn, input_expr, variable, success,
                   exception_context, npp):
  """Gets the string to get a value from a pp::Var.

  This function creates a string containing a C++ code snippet that is used to
  retrieve a value from a pp::Var. If an error occurs, like if the pp::Var
  is not of the correct type, the snippet will set the success status variable
  to false.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type of the value.
    input_expr: an expression representing the pp::Var to get the value from.
    variable: a string, representing a name of a variable that can be used to
      store a reference to the value.
    success: the name of a bool variable containing the current success status.
    exception_context: the name of a string containing context information, for
      use in exception reporting.
    npp: a string, representing the name of the variable that holds the pointer
      to the pp::Instance.

  Returns:
    a (string, string) pair, the first string being the code snippet and the
    second one being the expression to access that value.

  Raises:
    BadVoidUsage: type_defn is a 'void' POD type.
    UnknownPODType: type_defn is not a known POD type.
  """
  npp = npp  # silence gpylint.
  type_name = cpp_utils.GetScopedName(scope, type_defn)
  final_type = type_defn.GetFinalType()
  if final_type.podtype == 'void':
    # 'void' has no storage; 'void(0)' is a valid do-nothing C++ expression.
    return '', 'void(0)'
  elif final_type.podtype == 'int':
    text = _int_from_ppvar_template.substitute(type=type_name,
                                               input=input_expr,
                                               variable=variable,
                                               success=success,
                                               context=exception_context)
    return text, variable
  elif final_type.podtype == 'bool':
    text = _bool_from_ppvar_template.substitute(type=type_name,
                                                input=input_expr,
                                                variable=variable,
                                                success=success,
                                                context=exception_context)
    return text, variable
  elif final_type.podtype == 'float':
    text = _float_from_ppvar_template.substitute(type=type_name,
                                                 input=input_expr,
                                                 variable=variable,
                                                 success=success,
                                                 context=exception_context)
    return text, variable
  elif final_type.podtype == 'string':
    text = _string_from_ppvar_template.substitute(type=type_name,
                                                  input=input_expr,
                                                  variable=variable,
                                                  success=success,
                                                  context=exception_context)
    return text, variable
  else:
    # NOTE: unlike the NPAPI path, 'variant' and 'wstring' are not handled
    # here and fall through to the unknown-type error.
    raise UnknownPODType(final_type.podtype)
def PpapiExprToPPVar(scope, type_defn, variable, expression, output,
                     success, npp):
  """Gets the string to store a value into a pp::Var.

  This function creates a string containing a C++ code snippet that is used to
  store a value into a pp::Var. That operation takes two phases, one that
  allocates necessary PPAPI resources, and that can fail, and one that actually
  sets the pp::Var (that can't fail). If an error occurs, the snippet will
  set the success status variable to false.

  Args:
    scope: a Definition for the scope in which the glue will be written.
    type_defn: a Definition, representing the type of the value.
    variable: a string, representing a name of a variable that can be used to
      store a reference to the value.
    expression: a string representing the expression that yields the value to
      be stored.
    output: an expression representing a pointer to the pp::Var to store the
      value into.
    success: the name of a bool variable containing the current success status.
    npp: a string, representing the name of the variable that holds the pointer
      to the pp::Instance.

  Returns:
    a (string, string) pair, the first string being the code snippet for the
    first phase, and the second one being the code snippet for the second phase.

  Raises:
    NotImplementedError: type_defn is the 'variant' POD type, which has no
      PPAPI implementation.
    UnknownPODType: type_defn is not a known POD type.
  """
  (npp, success) = (npp, success)  # silence gpylint.
  type_name = cpp_utils.GetScopedName(scope, type_defn)
  final_type = type_defn.GetFinalType()
  if final_type.podtype == 'void':
    return ('%s;' % expression,
            '*%s = pp::Var();' % output)
  elif final_type.podtype == 'int':
    return ('%s %s = %s;' % (type_name, variable, expression),
            '*%s = pp::Var((int32_t)%s);' % (output, variable))
  elif final_type.podtype == 'bool':
    return ('%s %s = %s;' % (type_name, variable, expression),
            '*%s = pp::Var(%s);' % (output, variable))
  elif final_type.podtype == 'float':
    return ('%s %s = %s;' % (type_name, variable, expression),
            '*%s = pp::Var(static_cast<double>(%s));' %
            (output, variable))
  elif final_type.podtype == 'variant':
    # BUG FIX: this used to raise UnimplementedPODType, a name that is not
    # defined anywhere in this module, so this path died with a NameError
    # instead of a meaningful error.  Raise a well-defined exception with
    # the same meaning.
    raise NotImplementedError(
        "POD type 'variant' is not implemented for PPAPI")
  elif final_type.podtype == 'string':
    return ('*%s = pp::Var(%s);' % (output, expression),
            '')
  else:
    raise UnknownPODType(final_type.podtype)
def main(unused_argv):
  """No-op entry point; this module is intended to be imported, not run."""
  pass
# Script entry point: forward argv to main() (which currently ignores it).
if __name__ == '__main__':
  main(sys.argv)
| apache-2.0 | 8,047,745,173,312,595,000 | 35.607143 | 81 | 0.652746 | false |
kittiu/odoo | addons/mrp/wizard/mrp_price.py | 381 | 2132 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_price(osv.osv_memory):
    """Wizard that asks for a quantity, then prints the 'Product Cost
    Structure' report for the selected products."""
    _name = 'mrp.product_price'
    _description = 'Product Price'

    # Quantity entered by the user; the report bases its cost breakdown on it.
    _columns = {
        'number': fields.integer('Quantity', required=True, help="Specify quantity of products to produce or buy. Report of Cost structure will be displayed base on this quantity."),
    }
    _defaults = {
        'number': 1,
    }

    def print_report(self, cr, uid, ids, context=None):
        """Build the report action for the product cost structure.

        :param cr: database cursor
        :param uid: id of the user currently logged in
        :param ids: ids of the wizard records being processed
        :param context: standard context dictionary (may carry 'active_ids')
        :return: an ir.actions.report.xml action dictionary
        """
        if context is None:
            context = {}
        records = self.read(cr, uid, ids, ['number'])
        form_data = records and records[0] or {}
        datas = {
            'ids': context.get('active_ids', []),
            'form': form_data,
        }
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'product.price',
            'datas': datas,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -658,135,798,220,431,000 | 37.071429 | 182 | 0.588649 | false |
tjgillies/distributed-draw | entangled/kademlia/kbucket.py | 2 | 4999 | #!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
import constants
class BucketFull(Exception):
    """Raised when adding a contact to a k-bucket that is already full."""
class KBucket(object):
    """A Kademlia k-bucket: an ordered list of up to ``constants.k``
    contacts covering one contiguous range of the 160-bit node-ID space.

    Contacts are kept least-recently-seen first; re-adding an existing
    contact moves it to the end of the list.
    """

    def __init__(self, rangeMin, rangeMax):
        """
        @param rangeMin: The lower boundary for the range in the 160-bit ID
                         space covered by this k-bucket
        @param rangeMax: The upper boundary for the range in the ID space
                         covered by this k-bucket
        """
        self.rangeMin = rangeMin
        self.rangeMax = rangeMax
        self.lastAccessed = 0
        self._contacts = list()

    def addContact(self, contact):
        """Add a contact, moving it to the end if it is already present.

        @param contact: The contact to add
        @type contact: kademlia.contact.Contact

        @raise kademlia.kbucket.BucketFull: the bucket is full and the
               contact is not already in it
        """
        try:
            # If the contact is already known, drop the old entry so the
            # (possibly updated) one ends up at the most-recently-seen end.
            self._contacts.remove(contact)
        except ValueError:
            if len(self._contacts) >= constants.k:
                raise BucketFull("No space in bucket to insert contact")
        self._contacts.append(contact)

    def getContact(self, contactID):
        """Return the contact with the specified node ID.

        @raise ValueError: no contact with that ID is in this bucket
        """
        return self._contacts[self._contacts.index(contactID)]

    def getContacts(self, count=-1, excludeContact=None):
        """Return up to C{count} contacts from this bucket.

        @param count: The amount of contacts to return (if 0 or less, return
                      all contacts)
        @type count: int
        @param excludeContact: A contact (or its node ID as a C{str}) to
                               drop from the returned list, if present.
        @type excludeContact: kademlia.contact.Contact or str

        @return: A list with at most C{count} contacts (never more than
                 C{constants.k}); empty if the bucket holds none.
        @rtype: list
        """
        if count <= 0:
            count = len(self._contacts)
        # Never hand out more than k contacts.
        count = min(count, constants.k)
        # Slicing clamps at the list length and copies, so the empty and
        # short-bucket cases are covered, and the removal below never
        # touches the bucket itself.
        contactList = self._contacts[:count]
        if excludeContact in contactList:
            contactList.remove(excludeContact)
        return contactList

    def removeContact(self, contact):
        """Remove the given contact (or node-ID string) from this bucket.

        @raise ValueError: The specified contact is not in this bucket
        """
        self._contacts.remove(contact)

    def keyInRange(self, key):
        """Report whether C{key} falls in this bucket's ID range.

        @param key: The key to test
        @type key: str or int
        @rtype: bool
        """
        if isinstance(key, str):
            # Python 2 idiom: interpret the raw key bytes as a big-endian
            # hexadecimal number.
            key = long(key.encode('hex'), 16)
        return self.rangeMin <= key < self.rangeMax

    def __len__(self):
        return len(self._contacts)
| lgpl-3.0 | 8,384,391,428,333,225,000 | 36.30597 | 115 | 0.585317 | false |
mogoweb/chromium-crosswalk | native_client_sdk/src/tools/httpd.py | 41 | 10125 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import imp
import logging
import multiprocessing
import optparse
import os
import SimpleHTTPServer # pylint: disable=W0611
import socket
import sys
import time
import urlparse
# Bail out early on interpreters too old to run this script.
if sys.version_info < (2, 6, 0):
  # BUG FIX: the message read "required run this script" (missing "to").
  sys.stderr.write("python 2.6 or later is required to run this script\n")
  sys.exit(1)
# Absolute directory containing this script, and the SDK root one level up.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_SDK_ROOT = os.path.dirname(SCRIPT_DIR)
# We only run from the examples directory so that not too much is exposed
# via this HTTP server. Everything in the directory is served, so there should
# never be anything potentially sensitive in the serving directory, especially
# if the machine might be a multi-user machine and not all users are trusted.
# We only serve via the loopback interface.
def SanityCheckDirectory(dirname):
  """Exit the process unless |dirname| lies inside the NaCl SDK tree.

  Serving arbitrary directories would expose their contents over HTTP, so we
  refuse to serve from anywhere outside NACL_SDK_ROOT.

  Args:
    dirname: the directory the server intends to serve from.
  """
  abs_serve_dir = os.path.abspath(dirname)
  # Verify we don't serve anywhere above NACL_SDK_ROOT.
  # BUG FIX: the previous raw prefix comparison also accepted sibling
  # directories such as '<root>_other'; compare on path-component
  # boundaries instead.
  if (abs_serve_dir == NACL_SDK_ROOT or
      abs_serve_dir.startswith(NACL_SDK_ROOT + os.sep)):
    return
  logging.error('For security, httpd.py should only be run from within the')
  logging.error('example directory tree.')
  logging.error('Attempting to serve from %s.' % abs_serve_dir)
  logging.error('Run with --no_dir_check to bypass this check.')
  sys.exit(1)
class PluggableHTTPServer(BaseHTTPServer.HTTPServer):
  """HTTP server that can be stopped from a request handler.

  Extra keyword arguments (consumed here, not forwarded to HTTPServer):
    serve_dir: directory to serve files from (default '.').
    test_mode: whether the server runs in test mode (default False).
  """

  def __init__(self, *args, **kwargs):
    # Positional arguments go straight to HTTPServer; keyword arguments are
    # extensions understood only by this subclass.
    BaseHTTPServer.HTTPServer.__init__(self, *args)
    self.serve_dir = kwargs.get('serve_dir', '.')
    self.test_mode = kwargs.get('test_mode', False)
    self.delegate_map = {}  # directory name -> handler delegate (or None).
    self.running = True
    self.result = 0

  def Shutdown(self, result=0):
    """Ask the serve loop to stop, recording |result| as the exit status."""
    self.result = result
    self.running = False
class PluggableHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """Request handler that can defer to per-directory delegates.

  A served directory may contain a "handler.py" module defining a class named
  HTTPRequestHandlerDelegate.  When present, the delegate's send_head /
  do_GET / do_POST take over for that directory (and, via the recursive
  lookup, its subdirectories); otherwise default SimpleHTTPRequestHandler
  behavior is used.
  """

  def _FindDelegateAtPath(self, dirname):
    """Load (and cache) the delegate defined in |dirname|, if any."""
    # First check the cache...
    logging.debug('Looking for cached delegate in %s...' % dirname)
    handler_script = os.path.join(dirname, 'handler.py')
    if dirname in self.server.delegate_map:
      result = self.server.delegate_map[dirname]
      if result is None:
        logging.debug('Found None.')
      else:
        logging.debug('Found delegate.')
      return result

    # Don't have one yet, look for one.
    delegate = None
    logging.debug('Testing file %s for existence...' % handler_script)
    if os.path.exists(handler_script):
      logging.debug(
          'File %s exists, looking for HTTPRequestHandlerDelegate.' %
          handler_script)
      module = imp.load_source('handler', handler_script)
      delegate_class = getattr(module, 'HTTPRequestHandlerDelegate', None)
      # BUG FIX: only instantiate when the symbol actually exists.  The old
      # code called delegate_class() unconditionally, so a handler.py without
      # the expected class raised TypeError (None is not callable) instead of
      # logging a warning and falling back to the default handler.
      if delegate_class is not None:
        delegate = delegate_class()
      if not delegate:
        logging.warn(
            'Unable to find symbol HTTPRequestHandlerDelegate in module %s.' %
            handler_script)
    return delegate

  def _FindDelegateForURLRecurse(self, cur_dir, abs_root):
    """Walk upward from |cur_dir| to |abs_root| looking for a delegate."""
    delegate = self._FindDelegateAtPath(cur_dir)
    if not delegate:
      # Didn't find it, try the parent directory, but stop if this is the
      # server root.
      if cur_dir != abs_root:
        parent_dir = os.path.dirname(cur_dir)
        delegate = self._FindDelegateForURLRecurse(parent_dir, abs_root)
    logging.debug('Adding delegate to cache for %s.' % cur_dir)
    self.server.delegate_map[cur_dir] = delegate
    return delegate

  def _FindDelegateForURL(self, url_path):
    """Return the delegate responsible for |url_path|, or None."""
    path = self.translate_path(url_path)
    if os.path.isdir(path):
      dirname = path
    else:
      dirname = os.path.dirname(path)
    abs_serve_dir = os.path.abspath(self.server.serve_dir)
    delegate = self._FindDelegateForURLRecurse(dirname, abs_serve_dir)
    if not delegate:
      logging.info('No handler found for path %s. Using default.' % url_path)
    return delegate

  def _SendNothingAndDie(self, result=0):
    """Send an empty 200 response, then ask the server to shut down."""
    self.send_response(200, 'OK')
    self.send_header('Content-type', 'text/html')
    self.send_header('Content-length', '0')
    self.end_headers()
    self.server.Shutdown(result)

  def send_head(self):
    delegate = self._FindDelegateForURL(self.path)
    if delegate:
      return delegate.send_head(self)
    return self.base_send_head()

  def base_send_head(self):
    # Default behavior, exposed so delegates can chain to it.
    return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)

  def do_GET(self):
    # TODO(binji): pyauto tests use the ?quit=1 method to kill the server.
    # Remove this when we kill the pyauto tests.
    _, _, _, query, _ = urlparse.urlsplit(self.path)
    if query:
      params = urlparse.parse_qs(query)
      if '1' in params.get('quit', []):
        self._SendNothingAndDie()
        return

    delegate = self._FindDelegateForURL(self.path)
    if delegate:
      return delegate.do_GET(self)
    return self.base_do_GET()

  def base_do_GET(self):
    # Default behavior, exposed so delegates can chain to it.
    return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

  def do_POST(self):
    delegate = self._FindDelegateForURL(self.path)
    if delegate:
      return delegate.do_POST(self)
    return self.base_do_POST()

  def base_do_POST(self):
    # In test mode, posting to /ok or /fail stops the server with the
    # corresponding exit code.
    if self.server.test_mode:
      if self.path == '/ok':
        self._SendNothingAndDie(0)
      elif self.path == '/fail':
        self._SendNothingAndDie(1)
class LocalHTTPServer(object):
  """Class to start a local HTTP server as a child process."""

  def __init__(self, dirname, port, test_mode):
    # The child serves |dirname| and reports its chosen port back over this
    # pipe; we keep the parent end for later shutdown/result messages.
    parent_conn, child_conn = multiprocessing.Pipe()
    self.process = multiprocessing.Process(
        target=_HTTPServerProcess,
        args=(child_conn, dirname, port, {
          'serve_dir': dirname,
          'test_mode': test_mode,
        }))
    self.process.start()
    if parent_conn.poll(10):  # wait 10 seconds
      self.port = parent_conn.recv()
    else:
      raise Exception('Unable to launch HTTP server.')
    self.conn = parent_conn

  def ServeForever(self):
    """Serve until the child HTTP process tells us to stop.

    Returns:
      The result from the child (as an errorcode), or 0 if the server was
      killed not by the child (by KeyboardInterrupt for example).
    """
    child_result = 0
    try:
      # Block on this pipe, waiting for a response from the child process.
      child_result = self.conn.recv()
    except KeyboardInterrupt:
      pass
    finally:
      self.Shutdown()
    return child_result

  def ServeUntilSubprocessDies(self, process):
    """Serve until the child HTTP process tells us to stop or |subprocess| dies.

    Returns:
      The result from the child (as an errorcode), or 0 if |subprocess| died,
      or the server was killed some other way (by KeyboardInterrupt for
      example).
    """
    child_result = 0
    try:
      while True:
        # |process| exiting ends serving with a neutral (0) result.
        if process.poll() is not None:
          child_result = 0
          break
        if self.conn.poll():
          child_result = self.conn.recv()
          break
        time.sleep(0)
    except KeyboardInterrupt:
      pass
    finally:
      self.Shutdown()
    return child_result

  def Shutdown(self):
    """Send a message to the child HTTP server process and wait for it to
    finish."""
    self.conn.send(False)
    self.process.join()

  def GetURL(self, rel_url):
    """Get the full url for a file on the local HTTP server.

    Args:
      rel_url: A URL fragment to convert to a full URL. For example,
          GetURL('foobar.baz') -> 'http://localhost:1234/foobar.baz'
    """
    return 'http://localhost:%d/%s' % (self.port, rel_url)
def _HTTPServerProcess(conn, dirname, port, server_kwargs):
  """Run a local httpserver with the given port or an ephemeral port.

  This function assumes it is run as a child process using multiprocessing.

  Args:
    conn: A connection to the parent process. The child process sends
        the local port, and waits for a message from the parent to
        stop serving. It also sends a "result" back to the parent -- this can
        be used to allow a client-side test to notify the server of results.
    dirname: The directory to serve. All files are accessible through
        http://localhost:<port>/path/to/filename.
    port: The port to serve on. If 0, an ephemeral port will be chosen.
    server_kwargs: A dict that will be passed as kwargs to the server.
  """
  try:
    os.chdir(dirname)
    httpd = PluggableHTTPServer(('', port), PluggableHTTPRequestHandler,
                                **server_kwargs)
  except socket.error as e:
    sys.stderr.write('Error creating HTTPServer: %s\n' % e)
    sys.exit(1)

  try:
    conn.send(httpd.server_address[1])  # the chosen port number
    httpd.timeout = 0.5  # seconds
    while httpd.running:
      # Flush output for MSVS Add-In.
      sys.stdout.flush()
      sys.stderr.flush()
      httpd.handle_request()
      # The parent may send False through the pipe to stop the loop.
      if conn.poll():
        httpd.running = conn.recv()
  except KeyboardInterrupt:
    pass
  finally:
    # Always report the final result and close the pipe so the parent's
    # blocking recv() in ServeForever() returns.
    conn.send(httpd.result)
    conn.close()
def main(args):
  """Parse command-line options, sanity-check the directory, and serve."""
  parser = optparse.OptionParser()
  parser.add_option('-C', '--serve-dir',
      help='Serve files out of this directory.',
      dest='serve_dir', default=os.path.abspath('.'))
  parser.add_option('-p', '--port',
      help='Run server on this port.',
      dest='port', default=5103)
  parser.add_option('--no_dir_check',
      help='No check to ensure serving from safe directory.',
      dest='do_safe_check', action='store_false', default=True)
  parser.add_option('--test-mode',
      help='Listen for posts to /ok or /fail and shut down the server with '
      ' errorcodes 0 and 1 respectively.',
      dest='test_mode', action='store_true')
  options, args = parser.parse_args(args)
  if options.do_safe_check:
    SanityCheckDirectory(options.serve_dir)

  server = LocalHTTPServer(options.serve_dir, int(options.port),
                           options.test_mode)

  # Serve until the client tells us to stop. When it does, it will give us an
  # errorcode.
  print 'Serving %s on %s...' % (options.serve_dir, server.GetURL(''))
  return server.ServeForever()


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 5,751,467,446,014,391,000 | 31.76699 | 80 | 0.664198 | false |
TeamExodus/external_chromium_org | tools/telemetry/telemetry/util/path.py | 45 | 1284 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
# TODO(dtu): Move these functions from core.util to here.
# Re-exported aliases: callers can use telemetry.util.path while the
# implementations still live in telemetry.core.util.
GetBaseDir = util.GetBaseDir
GetTelemetryDir = util.GetTelemetryDir
GetUnittestDataDir = util.GetUnittestDataDir
GetChromiumSrcDir = util.GetChromiumSrcDir
AddDirToPythonPath = util.AddDirToPythonPath
GetBuildDirectories = util.GetBuildDirectories
def IsExecutable(path):
  """Return True if |path| is an existing file with the execute bit set."""
  if not os.path.isfile(path):
    return False
  return os.access(path, os.X_OK)


def FindInstalledWindowsApplication(application_path):
  """Search common Windows installation directories for an application.

  Args:
    application_path: Path to application relative from installation location.

  Returns:
    A string representing the full path, or None if not found.
  """
  candidate_roots = [os.getenv('PROGRAMFILES(X86)'),
                     os.getenv('PROGRAMFILES'),
                     os.getenv('LOCALAPPDATA')]
  candidate_roots.extend(os.getenv('PATH', '').split(os.pathsep))

  for root in candidate_roots:
    # Unset environment variables yield None; PATH may contain empty entries.
    if not root:
      continue
    candidate = os.path.join(root, application_path)
    if IsExecutable(candidate):
      return candidate
  return None
| bsd-3-clause | 8,411,957,422,125,236,000 | 28.860465 | 78 | 0.737539 | false |
kzampog/sisyphus | docs/conf.py | 2 | 4917 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'cilantro'
copyright = u'2018, Konstantinos Zampogiannis'
author = u'Konstantinos Zampogiannis'

# The short X.Y version (left empty: the docs are unversioned).
version = u''
# The full version, including alpha/beta/rc tags (left empty on purpose).
release = u''


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.  Note: sphinx_rtd_theme is a separate package
# that must be installed alongside Sphinx.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'cilantrodoc'


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'cilantro.tex', u'cilantro Documentation',
     u'Konstantinos Zampogiannis', 'manual'),
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cilantro', u'cilantro Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'cilantro', u'cilantro Documentation',
     author, 'cilantro', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration ------------------------------------------------- | mit | -7,410,876,533,731,000,000 | 29.7375 | 79 | 0.644905 | false |
ikoula/cloudstack | plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py | 8 | 22802 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on May 17, 2011
'''
from OvmCommonModule import *
from OvmDiskModule import *
from OvmVifModule import *
from OvmHostModule import OvmHost
from string import Template
from OVSXXenVMConfig import *
from OVSSiteVM import start_vm, stop_vm, reset_vm
from OVSSiteCluster import *
from OvmStoragePoolModule import OvmStoragePool
from OVSXXenStore import xen_get_vm_path, xen_get_vnc_port
from OVSDB import db_get_vm
from OVSXMonitor import xen_get_vm_perf_metrics, xen_get_xm_info
from OVSXXenVM import xen_migrate_vm
from OVSSiteRMVM import unregister_vm, register_vm, set_vm_status
from OVSSiteVMInstall import install_vm_hvm
from OVSSiteRMServer import get_master_ip
from OVSXXenVMInstall import xen_change_vm_cdrom
from OVSXAPIUtil import XenAPIObject, session_login, session_logout
logger = OvmLogger("OvmVm")
class OvmVmDecoder(json.JSONDecoder):
    """JSON decoder that turns a GSON VM spec string into an OvmVm."""
    def decode(self, jStr):
        spec = asciiLoads(jStr)
        vm = OvmVm()
        # Scalar fields; cpuNum/memory arrive as strings and need conversion.
        setAttrFromDict(vm, 'cpuNum', spec, int)
        setAttrFromDict(vm, 'memory', spec, long)
        for field in ('name', 'uuid', 'bootDev', 'type'):
            setAttrFromDict(vm, field, spec)
        # Nested structures go through their dedicated converters.
        setattr(vm, 'rootDisk', toOvmDisk(spec['rootDisk']))
        setattr(vm, 'vifs', toOvmVifList(spec['vifs']))
        setattr(vm, 'disks', toOvmDiskList(spec['disks']))
        return vm
class OvmVmEncoder(json.JSONEncoder):
    """JSON encoder that serializes an OvmVm into a plain dict."""
    def default(self, obj):
        if not isinstance(obj, OvmVm): raise Exception("%s is not instance of OvmVm"%type(obj))
        dct = {}
        # Copy simple attributes straight across.
        for attr in ('cpuNum', 'memory', 'powerState', 'name', 'type'):
            safeDictSet(obj, dct, attr)
        # Nested objects are flattened by their dedicated converters.
        dct['vifs'] = fromOvmVifList(obj.vifs)
        dct['rootDisk'] = fromOvmDisk(obj.rootDisk)
        dct['disks'] = fromOvmDiskList(obj.disks)
        return dct
def toOvmVm(jStr):
    # Deserialize a GSON string from the management server into an OvmVm.
    return json.loads(jStr, cls=OvmVmDecoder)

def fromOvmVm(vm):
    # Serialize an OvmVm back to the GSON form the management server expects.
    return normalizeToGson(json.dumps(vm, cls=OvmVmEncoder))
class OvmVm(OvmObject):
    """Agent-side representation of an Oracle VM guest plus its XML-RPC ops."""
    # NOTE(review): these are class-level attributes; vifs/disks are mutable
    # lists shared by every instance that does not reassign them (getDetails
    # extends them in place) -- confirm OvmObject gives each instance its own
    # copies before relying on isolation between OvmVm objects.
    cpuNum = 0
    memory = 0
    rootDisk = None
    vifs = []
    disks = []
    powerState = ''
    name = ''
    bootDev = ''
    type = ''

    def _getVifs(self, vmName):
        # Build OvmVif objects for a RUNNING vm: the runtime vif name embeds
        # the Xen domain id (e.g. "vif<domid>.<n>").
        vmPath = OvmHost()._vmNameToPath(vmName)
        domId = OvmHost()._getDomainIdByName(vmName)
        vifs = successToMap(xen_get_vifs(vmPath))
        lst = []
        for k in vifs:
            v = vifs[k]
            vifName = 'vif' + domId + '.' + k[len('vif'):]
            vif = OvmVif()
            (mac, bridge, type) = v.split(',')
            safeSetAttr(vif, 'name', vifName)
            safeSetAttr(vif, 'mac', mac)
            safeSetAttr(vif, 'bridge', bridge)
            safeSetAttr(vif, 'type', type)
            lst.append(vif)
        return lst

    def _getVifsFromConfig(self, vmPath):
        # Same as _getVifs but reads the vm.cfg names directly; used for
        # stopped vms where no domain id exists.
        vifs = successToMap(xen_get_vifs(vmPath))
        lst = []
        for k in vifs:
            v = vifs[k]
            vif = OvmVif()
            (mac, bridge, type) = v.split(',')
            safeSetAttr(vif, 'name', k)
            safeSetAttr(vif, 'mac', mac)
            safeSetAttr(vif, 'bridge', bridge)
            safeSetAttr(vif, 'type', type)
            lst.append(vif)
        return lst

    def _getIsoMountPath(self, vmPath):
        # Per-vm mount point for ISO images under the primary storage's
        # iso_pool directory.
        vmName = basename(vmPath)
        priStoragePath = vmPath.rstrip(join('running_pool', vmName))
        return join(priStoragePath, 'iso_pool', vmName)

    def _getVmTypeFromConfigFile(self, vmPath):
        # Map Xen's 'hvm'/'para' to the 'HVM'/'PV' labels used elsewhere.
        vmType = successToMap(xen_get_vm_type(vmPath))['type']
        return vmType.replace('hvm', 'HVM').replace('para', 'PV')

    def _tapAOwnerFile(self, vmPath):
        # Create a file with name convention 'host_ip_address' in vmPath.
        # Because xm list doesn't return vm that has been stopped, we scan
        # primary storage for stopped vm. This file tells us which host it
        # belongs to. The file is used in OvmHost.getAllVms().
        self._cleanUpOwnerFile(vmPath)
        ownerFileName = makeOwnerFileName()
        fd = open(join(vmPath, ownerFileName), 'w')
        fd.write(ownerFileName)
        fd.close()

    def _cleanUpOwnerFile(self, vmPath):
        # Remove any previous owner marker files (see _tapAOwnerFile).
        for f in os.listdir(vmPath):
            fp = join(vmPath, f)
            if isfile(fp) and f.startswith(OWNER_FILE_PREFIX):
                os.remove(fp)

    @staticmethod
    def create(jsonString):
        """Create and start a vm from a GSON spec; boots from HDD or CD."""
        def dumpCfg(vmName, cfgPath):
            # Log the generated vm.cfg for debugging.
            cfgFd = open(cfgPath, 'r')
            cfg = cfgFd.readlines()
            cfgFd.close()
            logger.info(OvmVm.create, "Start %s with configure:\n\n%s\n"%(vmName, "".join(cfg)))

        def setVifsType(vifs, type):
            # HVM guests need 'ioemu' vifs, PV guests 'netfront'.
            for vif in vifs:
                vif.type = type

        def hddBoot(vm, vmPath):
            # Write a vm.cfg by hand, register disks/vifs, then start the vm.
            vmType = vm.type
            if vmType == "FROMCONFIGFILE":
                vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)

            cfgDict = {}
            if vmType == "HVM":
                cfgDict['builder'] = "'hvm'"
                cfgDict['acpi'] = "1"
                cfgDict['apic'] = "1"
                cfgDict['device_model'] = "'/usr/lib/xen/bin/qemu-dm'"
                cfgDict['kernel'] = "'/usr/lib/xen/boot/hvmloader'"
                vifType = 'ioemu'
            else:
                cfgDict['bootloader'] = "'/usr/bin/pygrub'"
                vifType = 'netfront'

            cfgDict['name'] = "'%s'"%vm.name
            cfgDict['disk'] = "[]"
            cfgDict['vcpus'] = "''"
            cfgDict['memory'] = "''"
            cfgDict['on_crash'] = "'destroy'"
            cfgDict['on_reboot'] = "'restart'"
            cfgDict['vif'] = "[]"

            items = []
            for k in cfgDict.keys():
                item = " = ".join([k, cfgDict[k]])
                items.append(item)
            vmSpec = "\n".join(items)

            vmCfg = open(join(vmPath, 'vm.cfg'), 'w')
            vmCfg.write(vmSpec)
            vmCfg.close()

            setVifsType(vm.vifs, vifType)
            raiseExceptionIfFail(xen_set_vcpus(vmPath, vm.cpuNum))
            raiseExceptionIfFail(xen_set_memory(vmPath, BytesToM(vm.memory)))
            raiseExceptionIfFail(xen_add_disk(vmPath, vm.rootDisk.path, mode=vm.rootDisk.type))
            vifs = [OvmVif.toXenString(v) for v in vm.vifs]
            for vif in vifs:
                raiseExceptionIfFail(xen_set_vifs(vmPath, vif))

            for disk in vm.disks:
                raiseExceptionIfFail(xen_add_disk(vmPath, disk.path, mode=disk.type))

            raiseExceptionIfFail(xen_set_vm_vnc_password(vmPath, ""))
            cfgFile = join(vmPath, 'vm.cfg')
            # only HVM supports attaching cdrom
            if vmType == 'HVM':
                # Add an empty "hdc:cdrom" entry in config. First we set boot
                # order to 'd' (cdrom boot) so the 'hdc:cdrom' entry appears in
                # the disk list; second, change boot order back to 'c' (hard
                # disk boot). A VM cannot start with an empty 'hdc:cdrom' when
                # boot order is 'd'. It's tricky!
                raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'd'))
                raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'c'))

            raiseExceptionIfFail(xen_correct_cfg(cfgFile, vmPath))
            xen_correct_qos_cfg(cfgFile)
            dumpCfg(vm.name, cfgFile)
            server = successToMap(get_master_ip())['ip']
            raiseExceptionIfFail(start_vm(vmPath, server))
            rs = SUCC()
            return rs

        def cdBoot(vm, vmPath):
            # Mount the bootable ISO from secondary storage, then install the
            # guest via install_vm_hvm. Unmounts on failure.
            isoMountPath = None
            try:
                cdrom = None
                for disk in vm.disks:
                    if disk.isIso == True:
                        cdrom = disk
                        break
                if not cdrom: raise Exception("Cannot find Iso in disks")

                isoOnSecStorage = dirname(cdrom.path)
                isoName = basename(cdrom.path)
                isoMountPath = OvmVm()._getIsoMountPath(vmPath)
                OvmStoragePool()._mount(isoOnSecStorage, isoMountPath)

                isoPath = join(isoMountPath, isoName)
                if not exists(isoPath):
                    raise Exception("Cannot found iso %s at %s which mounts to %s"%(isoName, isoOnSecStorage, isoMountPath))

                stdout = run_cmd(args=['file', isoPath])
                if not stdout.strip().endswith("(bootable)"): raise Exception("ISO %s is not bootable"%cdrom.path)

                #now alter cdrom to correct path
                cdrom.path = isoPath
                if len(vm.vifs) != 0:
                    vif = vm.vifs[0]
                    #ISO boot must be HVM
                    vifCfg = ','.join([vif.mac, vif.bridge, 'ioemu'])
                else:
                    vifCfg = ''

                rootDiskSize = os.path.getsize(vm.rootDisk.path)
                rooDiskCfg = ':'.join([join(vmPath, basename(vm.rootDisk.path)), str(BytesToG(rootDiskSize)), 'True'])
                disks = [rooDiskCfg]
                for d in vm.disks:
                    if d.isIso: continue
                    size = os.path.getsize(d.path)
                    cfg = ':'.join([d.path, str(BytesToG(size)), 'True'])
                    disks.append(cfg)
                disksCfg = ','.join(disks)
                server = successToMap(get_master_ip())['ip']

                raiseExceptionIfFail(install_vm_hvm(vmPath, BytesToM(vm.memory), vm.cpuNum, vifCfg, disksCfg, cdrom.path, vncpassword='', dedicated_server=server))
                rs = SUCC()
                return rs
            except Exception, e:
                if isoMountPath and OvmStoragePool()._isMounted(isoMountPath):
                    doCmd(['umount', '-f', isoMountPath])
                errmsg = fmt_err_msg(e)
                raise Exception(errmsg)

        try:
            vm = toOvmVm(jsonString)
            logger.debug(OvmVm.create, "creating vm, spec:%s"%jsonString)
            rootDiskPath = vm.rootDisk.path
            if not exists(rootDiskPath): raise Exception("Cannot find root disk %s"%rootDiskPath)

            rootDiskDir = dirname(rootDiskPath)
            vmPath = join(dirname(rootDiskDir), vm.name)
            # Expose the vm directory as a symlink named after the vm.
            if not exists(vmPath):
                doCmd(['ln', '-s', rootDiskDir, vmPath])
            vmNameFile = open(join(rootDiskDir, 'vmName'), 'w')
            vmNameFile.write(vm.name)
            vmNameFile.close()

            OvmVm()._tapAOwnerFile(rootDiskDir)
            # set the VM to DOWN before starting, OVS agent will check this status
            set_vm_status(vmPath, 'DOWN')
            if vm.bootDev == "HDD":
                return hddBoot(vm, vmPath)
            elif vm.bootDev == "CD":
                return cdBoot(vm, vmPath)
            else:
                raise Exception("Unkown bootdev %s for %s"%(vm.bootDev, vm.name))
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.create, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.create), errmsg)

    @staticmethod
    def stop(vmName):
        """Stop a vm; treats an already-stopped or unlinked vm as success."""
        try:
            try:
                OvmHost()._getDomainIdByName(vmName)
            except NoVmFoundException, e:
                logger.info(OvmVm.stop, "vm %s is already stopped"%vmName)
                return SUCC()

            logger.info(OvmVm.stop, "Stop vm %s"%vmName)
            try:
                vmPath = OvmHost()._vmNameToPath(vmName)
            except Exception, e:
                errmsg = fmt_err_msg(e)
                logger.info(OvmVm.stop, "Cannot find link for vm %s on primary storage, treating it as stopped\n %s"%(vmName, errmsg))
                return SUCC()

            # set the VM to RUNNING before stopping, OVS agent will check this status
            set_vm_status(vmPath, 'RUNNING')
            raiseExceptionIfFail(stop_vm(vmPath))
            return SUCC()
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.stop, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.stop), errmsg)

    @staticmethod
    def reboot(vmName):
        """Reboot by stop+start (see note below) and return the new VNC port."""
        try:
            #===================================================================
            # Xend has a bug of reboot. If reboot vm too quick, xend return success
            # but actually it refused reboot (seen from log)
            # vmPath = successToMap(xen_get_vm_path(vmName))['path']
            # raiseExceptionIfFail(reset_vm(vmPath))
            #===================================================================
            vmPath = OvmHost()._vmNameToPath(vmName)
            OvmVm.stop(vmName)
            raiseExceptionIfFail(start_vm(vmPath))
            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
            logger.info(OvmVm.stop, "reboot vm %s, new vncPort is %s"%(vmName, vncPort))
            return toGson({"vncPort":str(vncPort)})
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.reboot, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.reboot), errmsg)

    @staticmethod
    def getDetails(vmName):
        """Return a GSON description (cpu, memory, disks, vifs, state) of a vm."""
        try:
            vm = OvmVm()

            try:
                OvmHost()._getDomainIdByName(vmName)
                vmPath = OvmHost()._vmNameToPath(vmName)
                vifsFromConfig = False
            except NoVmFoundException, e:
                # Stopped vms are located by scanning primary storage.
                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
                vifsFromConfig = True

            if not isdir(vmPath):
                # The case is, when vm starting was not completed at
                # primaryStroageDownload or createVolume (e.g. mgmt server
                # stop), the mgmt server will keep vm state in starting, then a
                # stop command will be sent. The stop command will delete
                # bridges that vm attaches, by retrieving bridge info via
                # OvmVm.getDetails(). In this case, the vm doesn't exist, so
                # return a fake object here.
                fakeDisk = OvmDisk()
                vm.rootDisk = fakeDisk
            else:
                if vifsFromConfig:
                    vm.vifs.extend(vm._getVifsFromConfig(vmPath))
                else:
                    vm.vifs.extend(vm._getVifs(vmName))

                safeSetAttr(vm, 'name', vmName)
                disks = successToMap(xen_get_vdisks(vmPath))['vdisks'].split(',')
                rootDisk = None
                #BUG: there is no way to get type of disk, assume all are "w"
                for d in disks:
                    if vmName in d:
                        rootDisk = OvmDisk()
                        safeSetAttr(rootDisk, 'path', d)
                        safeSetAttr(rootDisk, 'type', "w")
                        continue
                    disk = OvmDisk()
                    safeSetAttr(disk, 'path', d)
                    safeSetAttr(disk, 'type', "w")
                    vm.disks.append(disk)
                if not rootDisk: raise Exception("Cannot find root disk for vm %s"%vmName)
                safeSetAttr(vm, 'rootDisk', rootDisk)

                vcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
                safeSetAttr(vm, 'cpuNum', vcpus)
                memory = MtoBytes(int(successToMap(xen_get_memory(vmPath))['memory']))
                safeSetAttr(vm, 'memory', memory)
                vmStatus = db_get_vm(vmPath)
                safeSetAttr(vm, 'powerState', vmStatus['status'])
                vmType = successToMap(xen_get_vm_type(vmPath))['type'].replace('hvm', 'HVM').replace('para', 'PV')
                safeSetAttr(vm, 'type', vmType)

            rs = fromOvmVm(vm)
            logger.info(OvmVm.getDetails, rs)
            return rs
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.getDetails, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getDetails), errmsg)

    @staticmethod
    def getVmStats(vmName):
        """Return cpu count/utilization and network rx/tx counters for a vm."""
        def getVcpuNumAndUtils():
            # Query XenAPI for per-vcpu utilization and average it over the
            # number of physical cpus.
            try:
                session = session_login()
                refs = session.xenapi.VM.get_by_name_label(vmName)
                if len(refs) == 0:
                    raise Exception("No ref for %s found in xenapi VM objects"%vmName)
                vm = XenAPIObject('VM', session, refs[0])
                VM_metrics = XenAPIObject("VM_metrics", session, vm.get_metrics())
                items = VM_metrics.get_VCPUs_utilisation().items()
                nvCpus = len(items)
                if nvCpus == 0:
                    raise Exception("vm %s has 0 vcpus !!!"%vmName)

                xmInfo = successToMap(xen_get_xm_info())
                nCpus = int(xmInfo['nr_cpus'])
                totalUtils = 0.0
                # CPU utilization of VM = (total cpu utilization of each vcpu) / number of physical cpu
                for num, util in items:
                    totalUtils += float(util)
                avgUtils = float(totalUtils/nCpus) * 100
                return (nvCpus, avgUtils)
            finally:
                session_logout()

        try:
            try:
                OvmHost()._getDomainIdByName(vmName)
                vmPath = OvmHost()._vmNameToPath(vmName)
                (nvcpus, avgUtils) = getVcpuNumAndUtils()
                vifs = successToMap(xen_get_vifs(vmPath))
                rxBytes = 0
                txBytes = 0
                vifs = OvmVm()._getVifs(vmName)
                for vif in vifs:
                    # Counters are read from sysfs and reported in KB.
                    rxp = join('/sys/class/net', vif.name, 'statistics/rx_bytes')
                    txp = join("/sys/class/net/", vif.name, "statistics/tx_bytes")
                    if not exists(rxp): raise Exception('can not find %s'%rxp)
                    if not exists(txp): raise Exception('can not find %s'%txp)
                    rxBytes += long(doCmd(['cat', rxp])) / 1000
                    txBytes += long(doCmd(['cat', txp])) / 1000
            except NoVmFoundException, e:
                # Stopped vm: report configured vcpus and zeroed counters.
                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
                nvcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
                avgUtils = 0
                rxBytes = 0
                txBytes = 0

            rs = toGson({"cpuNum":nvcpus, "cpuUtil":avgUtils, "rxBytes":rxBytes, "txBytes":txBytes})
            logger.debug(OvmVm.getVmStats, rs)
            return rs
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.getVmStats, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVmStats), errmsg)

    @staticmethod
    def migrate(vmName, targetHost):
        """Live-migrate a vm to targetHost and release local ownership."""
        try:
            vmPath = OvmHost()._vmNameToPath(vmName)
            raiseExceptionIfFail(xen_migrate_vm(vmPath, targetHost))
            unregister_vm(vmPath)
            OvmVm()._cleanUpOwnerFile(vmPath)
            return SUCC()
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.migrate, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.migrate), errmsg)

    @staticmethod
    def register(vmName):
        """Register a vm on this host, take ownership, return its VNC port."""
        try:
            vmPath = OvmHost()._vmNameToPath(vmName)
            raiseExceptionIfFail(register_vm(vmPath))
            OvmVm()._tapAOwnerFile(vmPath)
            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
            rs = toGson({"vncPort":str(vncPort)})
            logger.debug(OvmVm.register, rs)
            return rs
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.register, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.register), errmsg)

    @staticmethod
    def getVncPort(vmName):
        """Return the VNC console port of a running vm as GSON."""
        try:
            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
            rs = toGson({"vncPort":vncPort})
            logger.debug(OvmVm.getVncPort, rs)
            return rs
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.getVncPort, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVncPort), errmsg)

    @staticmethod
    def detachOrAttachIso(vmName, iso, isAttach):
        """Attach an ISO (mounting it from secondary storage) or detach it."""
        try:
            # Running vms change both live config and vm.cfg; stopped vms only
            # the config file.
            if vmName in OvmHost.getAllVms():
                scope = 'both'
                vmPath = OvmHost()._vmNameToPath(vmName)
            else:
                scope = 'cfg'
                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)

            vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
            if vmType != 'HVM':
                raise Exception("Only HVM supports attaching/detaching ISO")

            if not isAttach:
                iso = ''
            else:
                isoName = basename(iso)
                isoMountPoint = OvmVm()._getIsoMountPath(vmPath)
                isoOnSecStorage = dirname(iso)
                OvmStoragePool()._mount(isoOnSecStorage, isoMountPoint)
                iso = join(isoMountPoint, isoName)

            exceptionIfNoSuccess(xen_change_vm_cdrom(vmPath, iso, scope))
            return SUCC()
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmVm.detachOrAttachIso, errmsg)
            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.detachOrAttachIso), errmsg)
# Manual smoke test: print the details of the vm named on the command line.
if __name__ == "__main__":
    import sys
    print OvmVm.getDetails(sys.argv[1])
#print OvmVm.getVmStats(sys.argv[1]) | gpl-2.0 | -7,053,745,002,862,057,000 | 41.149723 | 163 | 0.540917 | false |
tayebzaidi/PPLL_Spr_16 | finalPractica/2_2_ciclos_mejor.py | 1 | 4345 | from mrjob.job import MRJob
from mrjob.step import MRStep
import string
import sys
class MRGrados(MRJob):
    """MapReduce job that annotates each undirected edge with the degrees of
    both of its endpoints."""
    SORT_VALUES = True

    def mapper(self, _, line):
        # Strip quote characters, then split the CSV edge into its two nodes.
        line_stripped = line.translate(string.maketrans("",""), '"')
        line_split = line_stripped.split(',') #split by the comma
        sorted_line = sorted(line_split)
        node0 = sorted_line[0]
        node1 = sorted_line[1]
        if node0 != node1: #eliminate edges with the same vertice
            yield (node0, node1), None #eliminate duplicate nodes

    def reducer(self, key, values):
        # Emit the (deduplicated) edge once per endpoint so each node can
        # count its incident edges in the next step.
        yield key[0], key
        yield key[1], key

    def sift(self, key, values):
        # The number of values is the degree of node |key|; re-emit each
        # incident edge tagged with that degree and which endpoint |key| is.
        degree = 0
        send_edges = []
        for val in values:
            degree += 1
            if val not in send_edges:
                send_edges.append(val)
        for edge in sorted(send_edges):
            if key == edge[0]:
                location = 0
            elif key == edge[1]:
                location = 1
            yield edge, (edge, degree, location)

    def grado_calc(self, key, values):
        # Combine the two per-endpoint records of an edge into a single
        # (degree0, degree1) pair.
        for edge, degree, location in values:
            if location == 0:
                degree0 = degree
            if location == 1:
                degree1 = degree
        # NOTE(review): relies on both endpoints being present for every edge;
        # degree0/degree1 would be unbound otherwise -- confirm upstream
        # guarantees this.
        yield edge, (degree0, degree1)

    def steps(self):
        return [
            MRStep(mapper = self.mapper,
                   reducer = self.reducer),
            MRStep(reducer = self.sift),
            MRStep(reducer = self.grado_calc)
        ]
class MRCiclos(MRJob):
    """MapReduce job that finds 3-cycles (triangles) in the degree-annotated
    edge list produced by MRGrados."""

    def mapper(self, _, line):
        # Input line: node0 node1 degree0 degree1 (whitespace separated).
        line_split = line.split() #split by the comma)
        node0 = line_split[0]
        node1 = line_split[1]
        degree0 = line_split[2]
        degree1 = line_split[3]
        # Key each edge by its lower-degree endpoint (string comparison).
        if degree0 <= degree1:
            yield node0, (node0, node1)
        else:
            yield node1, (node0, node1)
        # Also forward every original edge for the join in the next step.
        yield '.pass_through.', (node0, node1, degree0, degree1)

    def reducer(self, key, values):
        if key != '.pass_through.':
            # Emit candidate "closing" edges for every pair of edges that
            # share the pivot node |key|.
            edges = list(values)
            if len(edges) > 1:
                for i in range(len(edges)):
                    for j in range(i, len(edges)):
                        if i != j:
                            # NOTE(review): `edges[i][0] and edges[j][1] != key`
                            # only compares edges[j][1] against key (edges[i][0]
                            # is just truthiness) -- looks suspicious; confirm
                            # intended semantics before relying on it.
                            if edges[i][0] and edges[j][1] != key:
                                yield (edges[i][0], edges[j][1]), edges[i]
        else:
            # Mark real edges so candidates can be matched against them.
            for node0, node1, degree0, degree1 in values:
                yield (node0, node1), 'original'

    def reducer2(self, key, values):
        # A candidate edge that also appears as 'original' closes a triangle.
        vals = list(values)
        if len(vals) > 1:
            for val in vals:
                node0 = key[0]
                node1 = key[1]
                if val != 'original':
                    # The third vertex is whichever endpoint of the pivot edge
                    # is not part of the closing edge.
                    if val[0] in [node0, node1]:
                        node2 = val[1]
                    else:
                        node2 = val[0]
                    ciclo = [node0,node1,node2]
                    ciclo_sorted = sorted(ciclo)
                    yield ciclo_sorted, None

    def steps(self):
        return [
            MRStep(mapper = self.mapper,
                   reducer = self.reducer),
            MRStep(reducer = self.reducer2)
        ]
if __name__=="__main__":
    # Stage 1: run the degree-annotation job and collect its key/value output.
    print 'Starting grado_calcjob'
    job_grado_calc = MRGrados(args=sys.argv[1:])
    runner_grado_calc= job_grado_calc.make_runner()
    runner_grado_calc.run()
    grado_calc_output = []
    for line in runner_grado_calc.stream_output():
        grado_calc_output = grado_calc_output + [job_grado_calc.parse_output_line(line)]
    #print 'Results grado_calc:', grado_calc_output
    # Persist the annotated edge list as tab-separated text; this file is the
    # input of the second job.
    f = open('results_grado_calc.txt','w')
    for (node1, node2), (degree0, degree1) in grado_calc_output:
        f.write(str(node1)+'\t'+str(node2)+'\t'+str(degree0)+'\t'+str(degree1)+'\n')
    f.close()
    #print 'Starting ciclos_count job'
    # Stage 2: run the triangle-enumeration job over the intermediate file.
    job_ciclos_count = MRCiclos(args=['results_grado_calc.txt'])
    runner_ciclos_count = job_ciclos_count.make_runner()
    runner_ciclos_count.run()
    ciclos_count_output = []
    for line in runner_ciclos_count.stream_output():
        ciclos_count_output = ciclos_count_output + [job_ciclos_count.parse_output_line(line)]
    # Print one sorted vertex triple per detected triangle.
    for result in ciclos_count_output:
        print result
| gpl-3.0 | -3,768,544,177,289,435,000 | 32.689922 | 94 | 0.509551 | false |
rbdavid/DNA_stacking_analysis | angles_binary.py | 1 | 9052 | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# PREAMBLE:
import numpy as np
import MDAnalysis
import sys
import os
import matplotlib.pyplot as plt
# Trajectory filename is the first command-line argument.
traj_file ='%s' %(sys.argv[1])
# ----------------------------------------
# VARIABLE DECLARATION
base1 = 1
nbases = 15
#nbases = 3
#Nsteps = 150000 # check length of the energy file; if not 150000 lines, then need to alter Nsteps value so that angle values will match up
#Nsteps = 149996
#equilib_step = 37500 # we have chosen 75 ns to be the equilib time; 75ns = 37500 frames; if energy values do not match with angle values, then equilib_step needs to be altered as well...
#equilib_step = 37496
#production = Nsteps - equilib_step
# SUBROUTINES/DEFINITIONS:
# Local aliases for numpy routines used in the per-frame inner loops.
arccosine = np.arccos
dotproduct = np.dot
pi = np.pi
ldtxt = np.loadtxt
zeros = np.zeros
# ----------------------------------------
# DICTIONARY DECLARATION
normals = {} # create the normals dictionary for future use
total_binaries = {} # create the total_binaries dictionary for future use
# Bound-method aliases used like setdefault lookups further below.
get_norm = normals.get
get_tb = total_binaries.get
# ----------------------------------------
# PLOTTING SUBROUTINES
def plotting(xdata, ydata, base):
    """Plot the per-frame stacking metric for one base and save it as a PNG.

    xdata -- simulation times in ns; ydata -- stacking metric per frame;
    base  -- base index used in the title and output filename.
    """
    fig_title = 'Stacking behavior of base %s over the trajectory' % (base,)
    outfile = 'stacking_binary.%s.png' % (base,)
    plt.plot(xdata, ydata, 'rx')
    plt.title(fig_title)
    plt.xlabel('Simulation time (ns)')
    plt.ylabel('Stacking metric')
    plt.xlim((0, 300))
    plt.grid(b=True, which='major', axis='both', color='k', linestyle='-')
    plt.savefig(outfile)
    plt.close()
def vdw_hist(data, base_a, base_b):
    """Histogram the vdW energies for one base pair.

    Saves the histogram as energy.<a>.<b>.png and writes one
    "count  left-bin-edge" row per bin to energy.<a>.<b>.dat.
    """
    events, edges, patches = plt.hist(data, bins = 100, histtype = 'bar')
    plt.title('Distribution of vdW Energies - Base Pair %s-%s' %(base_a, base_b))
    plt.xlabel('vdW Energy ($kcal\ mol^{-1}$)')
    plt.xlim((-8,0))
    plt.ylabel('Frequency')
    plt.savefig('energy.%s.%s.png' %(base_a, base_b))
    # 'with' guarantees the data file is closed even if a write fails.
    # zip() pairs each bin count with its left edge (edges has one extra
    # entry, which zip discards), matching the original index loop.
    with open('energy.%s.%s.dat' %(base_a, base_b), 'w') as nf:
        for count, edge in zip(events, edges):
            nf.write(' %10.1f %10.4f\n' %(count, edge))
    plt.close()
def angle_hist(data, base_a, base_b):
    """Histogram the base-normal angles for one base pair.

    Saves the histogram as angle.<a>.<b>.png and writes one
    "count  left-bin-edge" row per bin to angle.<a>.<b>.dat.
    """
    events, edges, patches = plt.hist(data, bins = 100, histtype = 'bar')
    plt.title('Distribution of Angles btw Base Pair %s-%s' %(base_a, base_b))
    plt.xlabel('Angle (Degrees)')
    plt.ylabel('Frequency')
    plt.savefig('angle.%s.%s.png' %(base_a, base_b))
    # 'with' guarantees the data file is closed even if a write fails.
    with open('angle.%s.%s.dat' %(base_a, base_b), 'w') as nf:
        for count, edge in zip(events, edges):
            nf.write(' %10.1f %10.4f\n' %(count, edge))
    plt.close()
def energy_angle_hist(xdata, ydata, base_a, base_b):
    """2D histogram of angle (x) vs vdW energy (y) for one base pair.

    Saves the figure as vdw_angle.<a>.<b>.png. The previous trailing
    reassignments of the hist2d return values were dead code (locals about
    to be discarded) and have been removed.
    """
    counts, xedges, yedges, image = plt.hist2d(xdata, ydata, bins = 100)
    cb1 = plt.colorbar()
    cb1.set_label('Frequency')
    plt.title('Distribution of Base Pair interactions - %s-%s' %(base_a, base_b))
    plt.xlabel('Angle (Degrees)')
    plt.ylabel('vdW Energy ($kcal\ mol^{-1}$)')
    plt.ylim((-6,0.5))
    plt.savefig('vdw_angle.%s.%s.png' %(base_a, base_b))
    plt.close()
# MAIN PROGRAM:
# ----------------------------------------
# ATOM SELECTION - load the trajectory and select the desired nucleotide atoms to be analyzed later on
u = MDAnalysis.Universe('../nucleic_ions.pdb', traj_file, delta=2.0) # load in trajectory file
Nsteps = len(u.trajectory)
equilib_step = 37500 # first 75 ns are not to be included in total stacking metric
production = Nsteps - equilib_step
nucleic = u.selectAtoms('resid 1:15') # atom selections for nucleic chain
a1 = nucleic.selectAtoms('resid 1') # residue 1 has different atom IDs for the base atoms
# NOTE(review): slice bounds 10:24 / 12:26 are assumed to cover exactly the
# base (ring) atoms of each residue -- verify against the PDB numbering.
a1_base = a1.atoms[10:24] # atom selections
bases = [] # make a list of the 15 bases filled with atoms
bases.append(a1_base) # add base 1 into list
for residue in nucleic.residues[1:15]: # collect the other bases into list
    residue_base = []
    residue_base = residue.atoms[12:26]
    bases.append(residue_base)
# ----------------------------------------
# DICTIONARY DEVELOPMENT - Develop the normals and total binary dictionary which contain the data for each base
# Pre-allocate one (Nsteps, 3) normal array and one Nsteps binary array per base.
while base1 <= nbases:
    normals['normal.%s' %(base1)] = get_norm('normal.%s' %(base1), np.zeros((Nsteps, 3)))
    total_binaries['base.%s' %(base1)] = get_tb('base.%s' %(base1), np.zeros(Nsteps))
    base1 += 1
# ----------------------------------------
# SIMULATION TIME - calculate the array that contains the simulation time in ns units
time = np.zeros(Nsteps)
for i in range(Nsteps):
    time[i] = i*0.002 # time units: ns
# ----------------------------------------
# NORMAL ANALYSIS for each base - loops through all bases and all timesteps of the trajectory; calculates the normal vector of the base atoms
# base1 was left at nbases+1 by the allocation loop above, so reset it.
base1 = 1
while (base1 <= nbases):
    for ts in u.trajectory:
        Princ_axes = []
        # Third principal axis of the base atoms is taken as the base-plane normal.
        Princ_axes = bases[base1 - 1].principalAxes()
        normals['normal.%s' %(base1)][ts.frame - 1] = Princ_axes[2] # ts.frame index starts at 1; add normal to dictionary with index starting at 0
    base1 += 1
# ----------------------------------------
# BASE PAIR ANALYSIS - loops through all base pairs (w/out duplicates) and performs the angle analysis as well as the binary analysis
base1 = 1 # reset the base index to start at 1
while (base1 <= nbases): # while loops to perform the base-pair analysis while avoiding performing the same analysis twice
    base2 = base1 + 1
    while (base2 <= nbases):
        os.mkdir('base%s_base%s' %(base1, base2)) # makes and moves into a directory for the base pair
        os.chdir('base%s_base%s' %(base1, base2))
        energyfile = '../../nonbond_energy/base%s_base%s/base%s_base%s.energies.dat' %(base1, base2, base1, base2)
        energies = ldtxt(energyfile) # load in the energy file to a numpy array
        vdw_energies = energies[:,2]
        binary = zeros(Nsteps)
        nf = open('binary.%s.%s.dat' %(base1, base2), 'w') # write the base pair data to a file; make sure to be writing this in a base pair directory
        # angle and binary analysis for base pair;
        for i in range(Nsteps):
            angle = 0.
            # Angle between the two base-plane normals, folded into [0, 90] deg.
            angle = arccosine(dotproduct(normals['normal.%s' %(base1)][i], normals['normal.%s' %(base2)][i]))
            angle = angle*(180./pi)
            if angle > 90.:
                angle = 180. - angle
            if vdw_energies[i] <= -3.5 and angle <= 30.: # cutoff: -3.5 kcal mol^-1 and 30 degrees
                binary[i] = 1. # assumed else binary[i] = 0.
            nf.write(' %10.3f %10.5f %10.5f %10.1f\n' %(time[i], vdw_energies[i], angle, binary[i])) # check time values
            # Each stacked frame counts toward both bases of the pair.
            total_binaries['base.%s' %(base1)][i] = total_binaries['base.%s' %(base1)][i] + binary[i]
            total_binaries['base.%s' %(base2)][i] = total_binaries['base.%s' %(base2)][i] + binary[i]
        nf.close()
        angles = []
        energies = []
        vdw_energies = []
        os.chdir('..')
        base2 += 1
    base1 += 1
# ----------------------------------------
# TOTAL BINARY METRIC ANALYSIS - writing to file and plotting
# print out (also plot) the total binary data to an indivual file for each individual base
base1 = 1 # reset the base index to start at 1
os.mkdir('total_binaries')
os.chdir('total_binaries')
while (base1 <= nbases):
    os.mkdir('base%s' %(base1))
    os.chdir('base%s' %(base1))
    nf = open('binary.%s.dat' %(base1), 'w')
    for i in range(Nsteps):
        nf.write(' %10.3f %10.1f\n' %(time[i], total_binaries['base.%s' %(base1)][i])) # check time values
    nf.close()
    # Stacking probability = fraction of post-equilibration frames in which
    # this base is stacked with at least one partner.
    counts = 0
    for i in range(equilib_step, Nsteps):
        if total_binaries['base.%s' %(base1)][i] > 0.:
            counts +=1
    prob = 0.
    prob = (float(counts)/production)*100.
    nf = open('stacking.%s.dat' %(base1), 'w')
    nf.write('counts: %10.1f out of %10.1f time steps \n Probability of stacking = %10.4f ' %(counts, production, prob))
    nf.close()
    plotting(time[:], total_binaries['base.%s' %(base1)][:], base1)
    os.chdir('..')
    base1 += 1
# ----------------------------------------
# BASE PAIR PLOTTING - making histogram plots for vdW energy distributions, angle distributions, and 2d hist of vdw vs angle distributions
# Also printint out a file that contains the count of timesteps where the base pair are stacked
os.chdir('..')
base1 = 1
while (base1 <= nbases): # while loops to perform the base-pair analysis while avoiding performing the same analysis twice
    base2 = base1 + 1
    while (base2 <= nbases):
        os.chdir('base%s_base%s' %(base1, base2))
        infile = 'binary.%s.%s.dat' %(base1, base2)
        # Columns: 0 = time, 1 = vdW energy, 2 = angle, 3 = binary metric.
        data = ldtxt(infile) # data[0] = time, data[1] = vdW energies, data[2] = angle, data[3] = base pair binary metric
        # Only post-equilibration rows are histogrammed.
        vdw_hist(data[equilib_step:,1], base1, base2)
        angle_hist(data[equilib_step:,2], base1, base2)
        energy_angle_hist(data[equilib_step:,2], data[equilib_step:,1], base1, base2)
        nf = open('stacking.%s.%s.dat' %(base1, base2), 'w')
        bp_counts = sum(data[equilib_step:,3])
        nf.write('counts for base pair %s-%s: %10.1f' %(base1, base2, bp_counts))
        nf.close()
        data = []
        os.chdir('..')
        base2 += 1
    base1 += 1
# ----------------------------------------
# END
| mit | 704,029,633,018,578,400 | 30.761404 | 188 | 0.619532 | false |
TribeMedia/sky_engine | testing/scripts/get_compile_targets.py | 76 | 1285 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
import common
def main(argv):
  """Run every sibling script's 'compile_targets' action and merge results.

  Each *.py under common.SCRIPT_DIR (except common.py and this file) is
  invoked with the passthrough args plus 'compile_targets'; its JSON output
  is collected into one {script_name: targets} dict written to --output.
  Returns 0 on success, or the first non-zero child exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--output', required=True)
  parser.add_argument('args', nargs=argparse.REMAINDER)
  args = parser.parse_args(argv)

  passthrough_args = args.args
  # argparse.REMAINDER keeps a leading '--' separator in the list; drop it.
  # Guard against an empty list so no IndexError is raised when the caller
  # supplies no extra arguments at all.
  if passthrough_args and passthrough_args[0] == '--':
    passthrough_args = passthrough_args[1:]

  results = {}

  for filename in os.listdir(common.SCRIPT_DIR):
    if not filename.endswith('.py'):
      continue
    if filename in ('common.py', 'get_compile_targets.py'):
      continue

    with common.temporary_file() as tempfile_path:
      rc = common.run_command(
          [sys.executable, os.path.join(common.SCRIPT_DIR, filename)] +
          passthrough_args +
          [
            'compile_targets',
            '--output', tempfile_path
          ]
      )
      if rc != 0:
        return rc
      with open(tempfile_path) as f:
        results[filename] = json.load(f)

  with open(args.output, 'w') as f:
    json.dump(results, f)

  return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 5,233,897,076,529,113,000 | 22.363636 | 72 | 0.629572 | false |
JeffAMcGee/friendloc | friendloc/explore/sprawl.py | 1 | 13724 | #!/usr/bin/env python
import numpy
import random
import logging
import itertools
import collections
from friendloc.base import gob
from friendloc.base.models import Edges, User, Tweets
from friendloc.base import gisgraphy, twitter, utils
NEBR_KEYS = ['rfriends','just_followers','just_friends','just_mentioned']
@gob.mapper(all_items=True)
def parse_geotweets(tweets):
    """
    read tweets from Twitter's streaming API and save users and their tweets
    USAGE: gunzip -c ~/may/*/*.gz | ./gb.py -s parse_geotweets
    """
    # Profiles and coordinates are emitted interleaved: the stream is too
    # large to hold in memory and we only want to make one pass over it.
    seen_uids = set()
    for count, tweet in enumerate(tweets):
        if count % 10000 == 0:
            logging.info("read %d tweets", count)
        if 'id' not in tweet:
            continue  # this is not a tweet
        uid = tweet['user']['id']
        if not tweet.get('coordinates'):
            continue
        if uid not in seen_uids:
            seen_uids.add(uid)
            yield User.mod_id(uid), tweet['user']
        yield User.mod_id(uid), (uid, tweet['coordinates']['coordinates'])
    logging.info("sending up to %d users", len(seen_uids))
def _untangle_users_and_coords(users_and_coords):
    """Split a mixed stream of user dicts and (uid, coord) pairs into a
    uid->profile dict and a uid->[coords] defaultdict."""
    profiles = {}
    coords_by_uid = collections.defaultdict(list)
    for item in users_and_coords:
        if isinstance(item, dict):
            profiles[item['id']] = item
        else:
            uid, coord = item
            coords_by_uid[uid].append(coord)
    return profiles, coords_by_uid
@gob.mapper(all_items=True)
def mloc_users(users_and_coords):
    """
    pick users with good home locations from geotweets
    """
    profiles, coords = _untangle_users_and_coords(users_and_coords)
    keepers = []
    for uid, profile in profiles.iteritems():
        points = coords[uid]
        # need more than two geotweets before trusting a median position
        if len(points) <= 2:
            continue
        # skip accounts with no social graph at all
        if profile['followers_count'] == 0 and profile['friends_count'] == 0:
            continue
        home = utils.median_2d(points)
        spread = [utils.coord_in_miles(home, point) for point in points]
        if numpy.median(spread) > 50:
            continue  # user moves too much
        profile['mloc'] = home
        keepers.append(profile)
    random.shuffle(keepers)
    return keepers
@gob.mapper(all_items=True)
def mloc_reject_count(users_and_coords):
    """
    count the number of users we ignored in mloc_users. (This job was done to
    calculate a number for the paper, and is almost trash.)
    """
    tally = collections.defaultdict(int)
    profiles, coords = _untangle_users_and_coords(users_and_coords)
    for uid, profile in profiles.iteritems():
        points = coords[uid]
        # Same rejection criteria as mloc_users, but tallied by reason.
        if len(points) <= 2:
            tally['spots'] += 1
            continue
        home = utils.median_2d(points)
        spread = [utils.coord_in_miles(home, point) for point in points]
        if numpy.median(spread) > 50:
            tally['moves'] += 1
        elif profile['followers_count'] == 0 and profile['friends_count'] == 0:
            tally['counts'] += 1
        else:
            tally['good'] += 1
    return tally.iteritems()
def _fetch_edges(twit, uid):
    """Return the Edges record for uid, fetching from Twitter and caching
    in the database on a miss."""
    cached = Edges.get_id(uid)
    if cached:
        return cached
    fresh = twit.get_edges(uid)
    fresh.save()
    return fresh
def _fetch_tweets(twit, uid):
    """Return the Tweets record for uid, fetching the user timeline from
    Twitter and caching it in the database on a miss."""
    cached = Tweets.get_id(uid)
    if cached:
        return cached
    timeline = twit.user_timeline(uid)
    fresh = Tweets(_id=uid, tweets=timeline)
    fresh.save()
    return fresh
def _contact_sets(tweets, edges):
    """Partition a user's contacts into four disjoint relationship sets:
    reciprocal friends, friends-only, followers-only, and mentioned-only."""
    mentioned = set(tweets.ats or [])
    friends = set(edges.friends)
    followers = set(edges.followers)
    reciprocal = friends & followers
    return {
        'rfriends': reciprocal,
        'just_friends': friends - followers,
        'just_followers': followers - friends,
        'just_mentioned': mentioned - (friends | followers),
    }
def _pick_best_contacts(user, sets, limit=100):
    """Fill the user's four nebr attributes with at most `limit` contact ids
    total, walking NEBR_KEYS in order and ranking ids by digit sum
    (descending) within each bucket."""
    def digit_sum(uid):
        return sum(int(ch) for ch in str(uid))
    remaining = limit
    for key in NEBR_KEYS:
        if remaining > 0:
            chosen = sorted(sets[key], key=digit_sum, reverse=True)[:remaining]
        else:
            chosen = []
        remaining -= len(chosen)
        setattr(user, key, chosen)
def _pick_random_contacts(user, sets, limit=100):
    """Attach up to limit//4 randomly chosen ids from each contact set
    to `user` as a list attribute named after the set's key.

    Uses dict.items() instead of the Python-2-only iteritems() so the
    helper also runs under Python 3; on Python 2 the behavior is identical.
    """
    for key, ids in sets.items():
        pool = list(ids)
        random.shuffle(pool)
        setattr(user, key, pool[:limit//4])
def _save_user_contacts(twit,user,contact_picker,limit):
    """Fetch a user's edges and tweets, run contact_picker to populate the
    user's nebr fields, and persist the user.

    Returns (edges, tweets); both are None when the account is protected or
    the fetch failed. On failure the HTTP status code is recorded on the
    user as error_status before merging.
    """
    logging.info("visit %s - %d",user.screen_name,user._id)
    if user.protected:
        # Protected accounts cannot be crawled; record 401 and bail out.
        user.error_status=401
        user.merge()
        return None, None
    edges, tweets = None, None
    try:
        edges = _fetch_edges(twit,user._id)
        tweets = _fetch_tweets(twit,user._id)
        sets = _contact_sets(tweets,edges)
        contact_picker(user,sets,limit)
    except twitter.TwitterFailure as e:
        logging.warn("%d for %d",e.status_code,user._id)
        user.error_status = e.status_code
    # merge() runs on both the success and the failure path.
    user.merge()
    return edges, tweets
def _my_contacts(user):
    """Yield a (mod_id, contact_id) pair for every contact of `user`."""
    for contact_id in user.contacts:
        yield User.mod_id(contact_id), contact_id
@gob.mapper(all_items=True)
def find_contacts(user_ds):
    """
    for each target user, fetch edges and tweets, pick 100 located contact ids

    Processes at most 2600 candidate user dicts. Users already stored in
    the database are not re-crawled; new users get a geonames place guess
    and 100 randomly picked contacts saved. Yields (mod_id, contact_id)
    pairs for every contact of every processed user.
    """
    gis = gisgraphy.GisgraphyResource()
    twit = twitter.TwitterResource()
    for user_d in itertools.islice(user_ds,2600):
        user = User.get_id(user_d['id'])
        if user:
            logging.warn("not revisiting %d",user._id)
        else:
            user = User(user_d)
            user.geonames_place = gis.twitter_loc(user.location)
            # Saves edges/tweets and populates the user's nebr fields.
            _save_user_contacts(twit, user, _pick_random_contacts, limit=100)
        for mod_nebr in _my_contacts(user):
            yield mod_nebr
@gob.mapper()
def find_leafs(uid):
    """
    for each contact, fetch edges and tweets, pick 100 leaf ids
    """
    api = twitter.TwitterResource()
    contact = User.get_id(uid)
    # Persist the contact's edges/tweets and pick its random leaf ids,
    # then emit (mod_id, leaf_id) pairs for the picked leafs.
    _save_user_contacts(api, contact, _pick_random_contacts, limit=100)
    return _my_contacts(contact)
@gob.mapper(all_items=True)
def total_contacts(user_ds):
    """
    count the total number of contacts (to include in the paper)
    """
    for user_d in itertools.islice(user_ds, 2600):
        target = User.get_id(user_d['id'])
        # Guard-style dispatch: yield a reason string for unusable accounts,
        # otherwise the four per-bucket contact counts.
        if not target:
            yield "no user"
            continue
        if target.error_status:
            yield str(target.error_status)
            continue
        edges = Edges.get_id(target._id)
        tweets = Tweets.get_id(target._id)
        if not edges or not tweets:
            yield "no contacts"
            continue
        sets = _contact_sets(tweets, edges)
        yield [len(sets[key]) for key in User.NEBR_KEYS]
@gob.mapper(all_items=True)
def mloc_uids(user_ds):
    """
    pick 2500 target users who have locations and good contacts
    """
    candidate_ids = [d['id'] for d in itertools.islice(user_ds, 2600)]
    found = User.find(User._id.is_in(candidate_ids))
    # an account is usable when at least one neighbor bucket is non-empty
    usable = set()
    for user in found:
        if any(getattr(user, key) for key in NEBR_KEYS):
            usable.add(user._id)
    kept = [uid for uid in candidate_ids if uid in usable]
    logging.info("found %d of %d", len(kept), len(candidate_ids))
    # throw away accounts that didn't work to get down to the 2500 good users
    return kept[:2500]
@gob.mapper(all_items=True)
def trash_extra_mloc(mloc_uids):
    "remove the mloc_users that mloc_uids skipped over"
    # This scares me a bit, but it's too late to go back and fix find_contacts.
    # I really wish I had limited find_contacts to stop after 2500 good users.
    db = User.database
    mloc_uids = set(mloc_uids)
    # All uids in one input shard share the same uid%100 group; the assert
    # guards the assumption before anything destructive happens.
    group_ = set(uid%100 for uid in mloc_uids)
    assert len(group_)==1
    group = next(iter(group_))
    stored = User.mod_id_set(group)
    # Everything stored for this group but absent from mloc_uids is deleted
    # from all three collections. Irreversible.
    trash = list(stored - mloc_uids)
    logging.info("trashing %d users",len(trash))
    logging.debug("full list: %r",trash)
    db.Edges.remove({'_id':{'$in':trash}})
    db.Tweets.remove({'_id':{'$in':trash}})
    db.User.remove({'_id':{'$in':trash}})
@gob.mapper()
def saved_users():
    """
    Create the set of ids already in the database so that lookup_contacts
    can skip those users. Checking the database from inside lookup_contacts
    per user is too slow.
    """
    cursor = User.database.User.find({}, fields=[], timeout=False)
    return ((User.mod_id(doc['_id']), doc['_id']) for doc in cursor)
@gob.mapper(all_items=True,slurp={'mdists':next})
def lookup_contacts(contact_uids,mdists,env):
    """
    lookup user profiles for contacts or leafs

    Skips uids already stored (per the saved_users snapshot when present,
    otherwise a live mod_id_set query), fetches the rest from Twitter in
    batches of 100, attaches a geonames place guess, and merges each
    profile into the database. Yields the size of each fetched batch.
    """
    twit = twitter.TwitterResource()
    gis = gisgraphy.GisgraphyResource()
    gis.set_mdists(mdists)
    # FIXME: we need a better way to know which file we are on.
    # FIXME: use the new input_paths thing
    # All uids in one input shard share a mod-100 group; peek at the first
    # uid to learn which group this shard is.
    first, contact_uids = utils.peek(contact_uids)
    group = User.mod_id(first)
    logging.info('lookup old uids for %s',group)
    save_name = 'saved_users.%s'%group
    if env.name_exists(save_name):
        stored = set(env.load(save_name))
    else:
        stored = User.mod_id_set(int(group))
    logging.info('loaded mod_group %s of %d users',group,len(stored))
    missing = (id for id in contact_uids if id not in stored)
    chunks = utils.grouper(100, missing, dontfill=True)
    for chunk in chunks:
        users = twit.user_lookup(user_ids=list(chunk))
        for amigo in filter(None,users):
            # Every fetched profile must belong to this shard's group.
            assert User.mod_id(amigo._id)==group
            amigo.geonames_place = gis.twitter_loc(amigo.location)
            amigo.merge()
        yield len(users)
def _pick_neighbors(user):
    """Choose up to 25 located contacts for `user`, round-robining across
    the four nebr buckets so no single relationship type dominates."""
    located = {}
    for key in NEBR_KEYS:
        bucket_ids = getattr(user, key)
        if not bucket_ids:
            continue
        # this is slowish
        found = User.find(User._id.is_in(bucket_ids), fields=['gnp'])
        located[key] = set(u._id for u in found if u.has_place())
    # interleave the buckets and drop the None padding from izip_longest
    interleaved = itertools.chain.from_iterable(
        itertools.izip_longest(*located.values()))
    picked = filter(None, interleaved)[:25]
    logging.info('picked %d of %d contacts', len(picked), len(user.contacts))
    return picked
@gob.mapper()
def pick_nebrs(mloc_uid):
    """
    For each target user, pick the 25 located contacts.
    """
    # reads predict.prep.mloc_uids, requires lookup_contacts, but don't read it.
    target = User.get_id(mloc_uid)
    target.neighbors = _pick_neighbors(target)
    target.save()
    return ((User.mod_id(nebr_id), nebr_id) for nebr_id in target.neighbors)
@gob.mapper(all_items=True, slurp={'mdists': next})
def fix_mloc_mdists(mloc_uids, mdists):
    """
    Add the median location error to profiles of contacts and target users.
    """
    gis = gisgraphy.GisgraphyResource()
    gis.set_mdists(mdists)
    # We didn't have mdists at the time the mloc users were saved. This
    # function could be avoided by running the mdist calculation before
    # running find_contacts.
    repaired = 0
    for user in User.find(User._id.is_in(tuple(mloc_uids))):
        user.geonames_place = gis.twitter_loc(user.location)
        user.save()
        if user.geonames_place:
            repaired += 1
    logging.info("fixed %d mdists", repaired)
    return [repaired]
@gob.mapper(all_items=True)
def uid_split(groups):
    """
    after a set reduce, split up the user ids into seperate files
    """
    # This method should really be built into gob somehow.
    for group, ids in groups:
        for uid in ids:
            yield group, uid
def _fetch_profiles(uids, twit, gis):
    """Return User objects for uids, pulling any missing from Twitter.

    Profiles already in the database are loaded directly; the rest are
    looked up in batches of 100, get a geonames place guess, and are
    merged into the database before being returned.
    """
    profiles = list(User.find(User._id.is_in(uids)))
    known = {p._id for p in profiles}
    to_fetch = [uid for uid in uids if uid not in known]
    for batch in utils.grouper(100, to_fetch, dontfill=True):
        for amigo in filter(None, twit.user_lookup(user_ids=list(batch))):
            amigo.geonames_place = gis.twitter_loc(amigo.location)
            amigo.merge()
            profiles.append(amigo)
    return profiles
def _calc_lorat(nebrs, twit, gis):
    """Set each nebr's local_ratio: the fraction of its first ten contacts
    located within 25 miles of it (NaN when none of them have a place)."""
    wanted = set()
    for nebr in nebrs:
        wanted.update(nebr.contacts[:10])
    leafs = {leaf._id: leaf
             for leaf in _fetch_profiles(list(wanted), twit, gis)}
    for nebr in nebrs:
        # Does this break if the contact does not exist?
        origin = nebr.geonames_place.to_d()
        dists = [
            utils.coord_in_miles(origin, leafs[lid].geonames_place.to_d())
            for lid in nebr.contacts[:10]
            if leafs.get(lid) and leafs[lid].has_place()
        ]
        if dists:
            nebr.local_ratio = sum(1.0 for d in dists if d < 25) / len(dists)
        else:
            nebr.local_ratio = float('nan')
# nebrs: picked neighbor User objects; ats: uids the target @-mentioned;
# ated: neighbor uids that @-mentioned the target back.
CrawlResults = collections.namedtuple("CrawlResults",['nebrs','ats','ated'])

def crawl_single(user, twit, gis, fast):
    """
    save a single user, contacts, and leafs to the database

    crawl a user object who has not been visited before
    twit is a TwitterResource
    gis is a GisgraphyResource with mdists

    When fast is true, only the target's own edges/tweets and located
    neighbors are saved; the per-neighbor crawl and local_ratio
    computation are skipped.
    """
    edges,tweets=_save_user_contacts(twit, user, _pick_best_contacts, limit=100)
    contact_ids = user.contacts
    profiles = {p._id:p for p in _fetch_profiles(contact_ids,twit,gis)}
    def has_place(uid):
        # keep only contacts whose profile was found and has a location
        return uid in profiles and profiles[uid].has_place()
    user.neighbors = filter(has_place, contact_ids)[:25]
    nebrs = [profiles[nid] for nid in user.neighbors]
    ated = set()
    if not fast:
        for nebr in nebrs:
            # crawl each neighbor; record the ones that @-mentioned the target
            ne,nt = _save_user_contacts(twit, nebr, _pick_best_contacts, limit=100)
            if nt and nt.ats and user._id in nt.ats:
                ated.add(nebr._id)
        need_lorat = [nebr for nebr in nebrs if nebr.local_ratio is None]
        _calc_lorat(need_lorat,twit,gis)
        for nebr in need_lorat:
            nebr.merge()
    user.merge()
    return CrawlResults(nebrs,tweets.ats if tweets else [],ated)
| bsd-2-clause | -7,170,094,799,280,597,000 | 30.477064 | 83 | 0.616293 | false |
runiq/modeling-clustering | find-correct-cluster-number/plot_clustering_metrics.py | 1 | 10092 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Performs a clustering run with a number of clusters and a given mask,
and creates graphs of the corresponding DBI, pSF, SSR/SST, and RMSD
values.
These faciliate the choice of cluster numbers and improve the clustering
process by allowing to pick the number of clusters with the highest
information content.
"""
# TODO
# - Fix plot_tree()
# - Do some logging
# - remove clustering_run from plot_metrics() and plot_tree() as it
# basically represents world state. Use explicit metrics/nodes instead
# - Implement ylabel alignment as soon as PGF backend has its act together
import cStringIO as csio
from glob import glob, iglob
import os
import os.path as op
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tic
import matplotlib.transforms as tfs
import clustering_run as cr
import newick as cn
def align_yaxis_labels(axes, sortfunc):
    """Move the y-axis labels of `axes` to a common x position chosen by
    sortfunc (e.g. min for left-hand columns, max for right-hand ones)."""
    positions = [ax.yaxis.get_label().get_position()[0] for ax in axes]
    shared_x = sortfunc(positions)
    for ax in axes:
        blend = tfs.blended_transform_factory(tfs.IdentityTransform(), ax.transAxes)
        ax.yaxis.set_label_coords(shared_x, 0.5, transform=blend)
def plot_metrics(clustering_run, output_file, xmin=None, xmax=None,
        use_tex=False, figsize=(12,8), square=False):
    """Plot RMSD/DBI/pSF (and optionally SSR/SST) against cluster count.

    Stacks three or four subplots sharing the x axis (two columns when
    `square` and SSR/SST data are available) and saves the figure to
    output_file. `use_tex` switches labels to siunitx/TeX markup.
    """
    metrics = clustering_run.gather_metrics()
    # The ±0.5 are so that all chosen points are well within the
    # plots
    if xmin is None:
        xmin = min(metrics['n'])
    if xmax is None:
        xmax = max(metrics['n'])
    xlim = (xmin-0.5, xmax+0.5)
    fig = plt.figure(figsize=figsize)
    # Subplot grid code: 310 = 3 rows, 410 = 4 rows, 220 = 2x2.
    if clustering_run.no_ssr_sst:
        gridindex = 310
    else:
        if square:
            gridindex = 220
        else:
            gridindex = 410
    if use_tex:
        rmsd_ylabel = r'Critical distance/\si{\angstrom}'
        xlabel = r'$n_{\text{Clusters}}$'
    else:
        rmsd_ylabel = u'Critical distance/Å'
        xlabel = r'Number of clusters'
    ax1 = fig.add_subplot(gridindex+1, ylabel=rmsd_ylabel)
    ax2 = fig.add_subplot(gridindex+2, ylabel='DBI', sharex=ax1)
    ax3 = fig.add_subplot(gridindex+3, ylabel='pSF', sharex=ax1)
    ax1.plot(metrics['n'], metrics['rmsd'], marker='.')
    ax2.plot(metrics['n'], metrics['dbi'], marker='.')
    ax3.plot(metrics['n'], metrics['psf'], marker='.')
    if not clustering_run.no_ssr_sst:
        ax4 = fig.add_subplot(gridindex+4,
                ylabel='SSR/SST', xlim=xlim, sharex=ax1)
        ax4.plot(metrics['n'], metrics['ssr_sst'], marker='.')
    # Work out which axes carry the x labels and which sit on the right.
    if square and not clustering_run.no_ssr_sst:
        nonxaxes = fig.axes[:-2]
        xaxes = fig.axes[-2:]
        lefthandplots = fig.axes[0::2]
        righthandplots = fig.axes[1::2]
        # Put yticklabels of right-hand plots to the right
        for ax in righthandplots:
            ax.yaxis.tick_right()
            ax.yaxis.set_label_position('right')
    else:
        nonxaxes = fig.axes[:-1]
        xaxes = [fig.axes[-1]]
        # NOTE(review): lefthandplots is only consumed by the commented-out
        # align_yaxis_labels calls below.
        lefthandplots = fig.axes
    # xaxes limits and tick locations are propagated across sharex plots
    for ax in xaxes:
        ax.set_xlabel(xlabel)
        ax.xaxis.set_major_locator(tic.MultipleLocator(10))
        ax.xaxis.set_minor_locator(tic.AutoMinorLocator(2))
    for ax in nonxaxes:
        plt.setp(ax.get_xticklabels(), visible=False)
    # 5 yticklabels are enough for everybody
    for ax in fig.axes:
        ax.yaxis.set_major_locator(tic.MaxNLocator(nbins=5))
        ax.yaxis.set_minor_locator(tic.MaxNLocator(nbins=5))
    # Draw first to get proper ylabel coordinates
    # fig.canvas.draw()
    # align_yaxis_labels(lefthandplots, sortfunc=min)
    # if square and not clustering_run.no_ssr_sst:
    #     align_yaxis_labels(righthandplots, sortfunc=max)
    fig.savefig(output_file)
def plot_tree(clustering_run, node_info, steps, dist, output, graphical=None, no_length=False):
    """Write the cluster tree as Newick text and optionally render it.

    output may be sys.stdout or a filename; graphical, when given, is the
    filename for a rendered PNG of the tree.

    Fix: the previous version called fh.close() unconditionally, closing
    sys.stdout when output was the default -- any later print would then
    fail. The stdout case now writes without closing, and the file case
    uses a context manager so the handle is closed even on write errors.
    """
    tree = cn.parse_clustermerging(clustering_run)
    newick = tree.create_newick(node_info=node_info, no_length=no_length, steps=steps, dist=dist)
    if output is sys.stdout:
        output.write(newick)
    else:
        with open(output, 'w') as fh:
            fh.write(newick)
    fig = plt.figure()
    ax1 = fig.add_subplot(111, ylabel='Cluster tree')
    if graphical is not None:
        cn.draw(csio.StringIO(newick), do_show=False, axes=ax1)
        fig.savefig(graphical)
def parse_args():
    """Build and evaluate the CLI: global options plus the three
    subcommands 'cluster' (run ptraj/cpptraj clustering), 'tree' (emit a
    Newick tree), and 'plot' (plot the clustering metrics)."""
    import argparse as ap
    parser = ap.ArgumentParser()
    # Global options shared by all subcommands.
    parser.add_argument('-c', '--cm-file', metavar='FILE',
        default='./ClusterMerging.txt', dest='cm_fn',
        help="File to parse (default: ./ClusterMerging.txt)")
    parser.add_argument('-C', '--matplotlibrc', metavar='FILE', default=None,
        help="Matplotlibrc file to use")
    parser.add_argument('-p', '--prefix', default='c',
        help="Prefix for clustering result files (default: \"c\")")
    parser.add_argument('-N', '--no-ssr-sst', action='store_true', default=False,
        help="Don't gather SSR_SST values (default: False)")
    subs = parser.add_subparsers(dest='subcommand', help="Sub-command help")
    # 'cluster': perform the clustering run that gathers the metrics.
    c = subs.add_parser('cluster', help="Do clustering run to gather metrics")
    c.add_argument('prmtop', help="prmtop file")
    c.add_argument('-m', '--mask', metavar='MASKSTR', default='@CA,C,O,N',
        help=("Mask string (default: '@CA,C,O,N')"))
    c.add_argument('-P', '--ptraj-trajin-file', metavar='FILE',
        default='ptraj_trajin', dest='ptraj_trajin_fn',
        help=("Filename for ptraj trajin file (default: ptraj_trajin)"))
    c.add_argument('-n', '--num-clusters', dest='n_clusters', type=int,
        metavar='CLUSTERS', default=50,
        help="Number of clusters to examine (default (also maximum): 50)")
    c.add_argument('-s', '--start-num-clusters', dest='start_n_clusters',
        type=int, metavar='CLUSTERS', default=2,
        help="Number of clusters to start from (default: 2)")
    c.add_argument('-l', '--logfile', metavar='FILE', default=None,
        dest='log_fn',
        help=("Logfile for ptraj run (default: Print to stdout)"))
    c.add_argument('--use-cpptraj', action='store_true', default=False,
        help="Use cpptraj instead of ptraj")
    # 'tree': write (and optionally render) the Newick representation.
    t = subs.add_parser('tree', help="Create Newick tree representation")
    t.add_argument('-o', '--output', metavar='FILE', default=sys.stdout,
        help="Output file for Newick tree (default: print to terminal)")
    t.add_argument('-g', '--graphical', default=None,
        help="Save tree as png (default: Don't)")
    t.add_argument('-s', '--steps', type=int, default=None,
        help="Number of steps to print (default: all)")
    t.add_argument('-d', '--dist', type=float, default=None,
        help="Minimum distance to print (default: all)")
    t.add_argument('-i', '--node-info', choices=['num', 'dist', 'id'],
        default='num', help="Node data to print")
    t.add_argument('-l', '--no-length', default=False, action='store_true',
        help="Don't print branch length information")
    # 'plot': plot the gathered clustering metrics.
    p = subs.add_parser('plot', help="Plot clustering metrics")
    p.add_argument('-o', '--output', metavar='FILE',
        default='clustering_metrics.png',
        help="Filename for output file (default: show using matplotlib)")
    p.add_argument('-n', '--num-clusters', dest='n_clusters', type=int,
        metavar='CLUSTERS', default=50,
        help="Number of clusters to examine (default (also maximum): 50)")
    p.add_argument('-s', '--start-num-clusters', dest='start_n_clusters',
        type=int, metavar='CLUSTERS', default=2,
        help="Number of clusters to start from (default: 2)")
    p.add_argument('-T', '--use-tex', default=False, action='store_true',
        help="Use LaTeX output (default: use plaintext output)")
    p.add_argument('-S', '--fig-size', nargs=2, type=float, metavar='X Y', default=[12, 8],
        help=("Figure size in inches (default: 12x8)"))
    p.add_argument('--square', default=False, action='store_true',
        help="Plot in two columns")
    return parser.parse_args()
def main():
args = parse_args()
if args.matplotlibrc is not None:
matplotlib.rc_file(args.matplotlibrc)
if args.subcommand == 'cluster':
if args.n_clusters < 1 or args.n_clusters > 50:
print "Error: Maximum cluster number must be between 1 and 50."
sys.exit(1)
cn_fns = None
clustering_run = cr.ClusteringRun(prmtop=args.prmtop,
start_n_clusters=args.start_n_clusters, n_clusters=args.n_clusters,
cm_fn=args.cm_fn, mask=args.mask,
ptraj_trajin_fn=args.ptraj_trajin_fn, cn_fns=cn_fns,
prefix=args.prefix, log_fn=args.log_fn,
no_ssr_sst=args.no_ssr_sst)
else:
if not op.exists(args.cm_fn):
print ("{cm_fn} doesn't exist. Please perform a clustering run",
"first.".format(cm_fn=args.cm_fn))
sys.exit(1)
# We assume that the number of clusters starts at 1
n_clusters = len(glob('{prefix}*.txt'.format(prefix=args.prefix)))
cn_fns = {i: '{prefix}{n}.txt'.format(prefix=args.prefix, n=i) for
i in xrange(1, n_clusters+1)}
# Only cm_fn and cn_fns are necessary for plotting the tree and
# metrics
clustering_run = cr.ClusteringRun(prmtop=None, cm_fn=args.cm_fn,
cn_fns=cn_fns, no_ssr_sst=args.no_ssr_sst)
if args.subcommand == 'plot':
plot_metrics(clustering_run, output_file=args.output,
xmin=args.start_n_clusters, xmax=args.n_clusters,
use_tex=args.use_tex, figsize=args.fig_size,
square=args.square)
elif args.subcommand == 'tree':
plot_tree(clustering_run=clustering_run, node_info=args.node_info,
steps=args.steps, dist=args.dist, no_length=args.no_length,
graphical=args.graphical, output=args.output)
if __name__ == '__main__':
main()
| bsd-2-clause | -8,508,664,488,436,508,000 | 39.850202 | 97 | 0.623389 | false |
trishnaguha/ansible | lib/ansible/modules/cloud/openstack/os_ironic_node.py | 41 | 12663 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_ironic_node
short_description: Activate/Deactivate Bare Metal Resources from OpenStack
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Deploy to nodes controlled by Ironic.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
deploy:
description:
- Indicates if the resource should be deployed. Allows for deployment
logic to be disengaged and control of the node power or maintenance
state to be changed.
type: bool
default: 'yes'
uuid:
description:
- globally unique identifier (UUID) to be given to the resource.
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_type"
settings set to None.
config_drive:
description:
- A configdrive file or HTTP(S) URL that will be passed along to the
node.
instance_info:
description:
- Definition of the instance information which is used to deploy
the node. This information is only required when an instance is
set to present.
suboptions:
image_source:
description:
- An HTTP(S) URL where the image can be retrieved from.
image_checksum:
description:
- The checksum of image_source.
image_disk_format:
description:
- The type of image that has been requested to be deployed.
power:
description:
- A setting to allow power state to be asserted allowing nodes
that are not yet deployed to be powered on, and nodes that
are deployed to be powered off.
choices: ['present', 'absent']
default: present
maintenance:
description:
- A setting to allow the direct control if a node is in
maintenance mode.
type: bool
default: 'no'
maintenance_reason:
description:
- A string expression regarding the reason a node is in a
maintenance mode.
wait:
description:
- A boolean value instructing the module to wait for node
activation or deactivation to complete before returning.
type: bool
default: 'no'
version_added: "2.1"
timeout:
description:
- An integer value representing the number of seconds to
wait for the node activation or deactivation to complete.
version_added: "2.1"
availability_zone:
description:
- Ignored. Present for backwards compatibility
'''
EXAMPLES = '''
# Activate a node by booting an image with a configdrive attached
os_ironic_node:
cloud: "openstack"
uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
state: present
power: present
deploy: True
maintenance: False
config_drive: "http://192.168.1.1/host-configdrive.iso"
instance_info:
image_source: "http://192.168.1.1/deploy_image.img"
image_checksum: "356a6b55ecc511a20c33c946c4e678af"
image_disk_format: "qcow"
delegate_to: localhost
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
# TODO(TheJulia): Change this over to use the machine patch method
# in shade once it is available.
def _prepare_instance_info_patch(instance_info):
patch = []
patch.append({
'op': 'replace',
'path': '/instance_info',
'value': instance_info
})
return patch
def _is_true(value):
true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']
if value in true_values:
return True
return False
def _is_false(value):
false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off']
if value in false_values:
return True
return False
def _check_set_maintenance(module, cloud, node):
    """Reconcile the node's maintenance state with the requested parameters.

    Returns True when the node was taken *out* of maintenance mode, and
    False when no maintenance change was required.  When maintenance mode
    is newly entered, or the reason is updated, the module run is
    terminated via exit_json (which does not return).
    """
    if _is_true(module.params['maintenance']):
        if _is_false(node['maintenance']):
            cloud.set_machine_maintenance_state(
                node['uuid'],
                True,
                reason=module.params['maintenance_reason'])
            module.exit_json(changed=True, msg="Node has been set into "
                                               "maintenance mode")
        else:
            # User has requested maintenance state, node is already in the
            # desired state, checking to see if the reason has changed.
            # NOTE(review): this is a substring test (`not in`), not an
            # inequality -- a stored reason that is a substring of the
            # requested one counts as unchanged.  Confirm intentional.
            if (str(node['maintenance_reason']) not in
                    str(module.params['maintenance_reason'])):
                cloud.set_machine_maintenance_state(
                    node['uuid'],
                    True,
                    reason=module.params['maintenance_reason'])
                module.exit_json(changed=True, msg="Node maintenance reason "
                                                   "updated, cannot take any "
                                                   "additional action.")
    elif _is_false(module.params['maintenance']):
        if node['maintenance'] is True:
            cloud.remove_machine_from_maintenance(node['uuid'])
            return True
    else:
        # Parameter was set but matched neither the truthy nor falsy spellings.
        module.fail_json(msg="maintenance parameter was set but a valid "
                             "the value was not recognized.")
    return False
def _check_set_power_state(module, cloud, node):
    """Reconcile the node's power state with the requested parameters.

    Returns True when the node was powered on, False when no power change
    was made.  Powering a node *off* terminates the module run via
    exit_json (which does not return).
    """
    if 'power on' in str(node['power_state']):
        if _is_false(module.params['power']):
            # User has requested the node be powered off.
            cloud.set_machine_power_off(node['uuid'])
            module.exit_json(changed=True, msg="Power requested off")
    if 'power off' in str(node['power_state']):
        if (_is_false(module.params['power']) and
                _is_false(module.params['state'])):
            return False
        # NOTE(review): this condition is identical to the one above, so this
        # branch is unreachable (the first branch already returned).  Also,
        # the "%s" in the message is never interpolated.  Confirm intent.
        if (_is_false(module.params['power']) and
                _is_false(module.params['state'])):
            module.exit_json(
                changed=False,
                msg="Power for node is %s, node must be reactivated "
                    "OR set to state absent"
            )
        # In the event the power has been toggled on and
        # deployment has been requested, we need to skip this
        # step.
        if (_is_true(module.params['power']) and
                _is_false(module.params['deploy'])):
            # Node is powered down when it is not awaiting to be provisioned
            cloud.set_machine_power_on(node['uuid'])
            return True
    # Default False if no action has been taken.
    return False
def main():
    """Ansible entry point: deploy/undeploy an Ironic bare-metal node.

    Reconciles maintenance mode and power state, then either activates the
    node (state=present, deploy=yes, requires instance_info) or deactivates
    it (state=absent).  All exits go through module.exit_json/fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        uuid=dict(required=False),
        name=dict(required=False),
        instance_info=dict(type='dict', required=False),
        config_drive=dict(required=False),
        ironic_url=dict(required=False),
        state=dict(required=False, default='present'),
        maintenance=dict(required=False),
        maintenance_reason=dict(required=False),
        power=dict(required=False, default='present'),
        deploy=dict(required=False, default=True),
        wait=dict(type='bool', required=False, default=False),
        timeout=dict(required=False, type='int', default=1800),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    # In noauth mode an explicit Ironic endpoint is mandatory.
    if (module.params['auth_type'] in [None, 'None'] and
            module.params['ironic_url'] is None):
        module.fail_json(msg="Authentication appears disabled, Please "
                             "define an ironic_url parameter")

    if (module.params['ironic_url'] and
            module.params['auth_type'] in [None, 'None']):
        module.params['auth'] = dict(
            endpoint=module.params['ironic_url']
        )

    node_id = _choose_id_value(module)

    if not node_id:
        module.fail_json(msg="A uuid or name value must be defined "
                             "to use this module.")
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        node = cloud.get_machine(node_id)

        if node is None:
            module.fail_json(msg="node not found")

        uuid = node['uuid']
        instance_info = module.params['instance_info']
        changed = False
        wait = module.params['wait']
        timeout = module.params['timeout']

        # User has requested desired state to be in maintenance state.
        # BUG FIX: this previously used `is 'maintenance'`, an identity
        # comparison against a string literal, which is not guaranteed to
        # be true even for equal strings; use equality instead.
        if module.params['state'] == 'maintenance':
            module.params['maintenance'] = True

        # Refuse to act while the node is mid-transition.
        if node['provision_state'] in [
                'cleaning',
                'deleting',
                'wait call-back']:
            module.fail_json(msg="Node is in %s state, cannot act upon the "
                                 "request as the node is in a transition "
                                 "state" % node['provision_state'])

        # TODO(TheJulia) This is in-development code, that requires
        # code in the shade library that is still in development.
        if _check_set_maintenance(module, cloud, node):
            if node['provision_state'] in 'active':
                module.exit_json(changed=True,
                                 result="Maintenance state changed")
            changed = True
            node = cloud.get_machine(node_id)

        if _check_set_power_state(module, cloud, node):
            changed = True
            node = cloud.get_machine(node_id)

        if _is_true(module.params['state']):
            if _is_false(module.params['deploy']):
                module.exit_json(
                    changed=changed,
                    result="User request has explicitly disabled "
                           "deployment logic"
                )

            if 'active' in node['provision_state']:
                module.exit_json(
                    changed=changed,
                    result="Node already in an active state."
                )

            if instance_info is None:
                module.fail_json(
                    changed=changed,
                    msg="When setting an instance to present, "
                        "instance_info is a required variable.")

            # TODO(TheJulia): Update instance info, however info is
            # deployment specific. Perhaps consider adding rebuild
            # support, although there is a known desire to remove
            # rebuild support from Ironic at some point in the future.
            patch = _prepare_instance_info_patch(instance_info)
            cloud.set_node_instance_info(uuid, patch)
            cloud.validate_node(uuid)
            if not wait:
                cloud.activate_node(uuid, module.params['config_drive'])
            else:
                cloud.activate_node(
                    uuid,
                    configdrive=module.params['config_drive'],
                    wait=wait,
                    timeout=timeout)
            # TODO(TheJulia): Add more error checking..
            module.exit_json(changed=changed, result="node activated")

        elif _is_false(module.params['state']):
            # NOTE(review): substring check against "deleted" -- matches the
            # historical behavior; any state not contained in that string
            # triggers deactivation.
            if node['provision_state'] not in "deleted":
                cloud.purge_node_instance_info(uuid)
                if not wait:
                    cloud.deactivate_node(uuid)
                else:
                    cloud.deactivate_node(
                        uuid,
                        wait=wait,
                        timeout=timeout)

                module.exit_json(changed=True, result="deleted")
            else:
                module.exit_json(changed=False, result="node not found")
        else:
            module.fail_json(msg="State must be present, absent, "
                                 "maintenance, off")
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))


if __name__ == "__main__":
    main()
| gpl-3.0 | 967,439,140,081,208,700 | 35.598266 | 125 | 0.577667 | false |
PaulEnglert/ML | deep_architectures/deep_belief_network.py | 1 | 2528 | # -*- coding: utf-8 -*-
from neural_networks.boltzmann_machines.generative_rbm import GenRBM
from neural_networks.perceptrons.mlp import MLP
from utilities.data_utils import make_batches
class DBN():
    ''' Deep belief network aka stacked boltzmann machines'''
    def __init__(self, layer_definitions):
        # layer_definitions: list of layer sizes, input layer first,
        # output layer last.
        self.num_layers = len(layer_definitions)
        # build stack of RBMs for pretraining: one RBM per pair of
        # consecutive layers
        self.rbm_stack = []
        for l in range(self.num_layers - 1):
            self.rbm_stack.append(
                GenRBM(layer_definitions[l], layer_definitions[l + 1]))
        # build MLP used for fine tuning
        # NOTE(review): the printed hidden sizes are the raw layer sizes,
        # while the MLP is actually constructed with l + 1 units per hidden
        # layer (presumably an extra bias unit) -- confirm this mismatch is
        # intentional.
        print 'Initializing MLP with a configuration of {0}, {1}, {2}'.format(
            layer_definitions[0],
            [l for l in layer_definitions[1:-1]], layer_definitions[-1])
        self.mlp = MLP(
            layer_definitions[0],
            [l + 1 for l in layer_definitions[1:-1]],
            layer_definitions[-1])

    def pre_train(
            self, trainX, epochs_per_layer=5, learning_rate=0.01,
            learning_rate_decay=1, lambda_1=0, lambda_2=0):
        # Greedy layer-wise pretraining: each RBM is trained on the hidden
        # activations sampled from the previously trained RBM.
        X = trainX.copy()
        for l in range(self.num_layers - 1):
            print 'Training GenRBM {0}'.format(l)
            batches = make_batches(X.copy(), 100, keep_last=True)
            self.rbm_stack[l].train(
                batches, epochs=epochs_per_layer,
                learning_rate=learning_rate,
                learning_rate_decay=learning_rate_decay,
                lambda_1=lambda_1,
                lambda_2=lambda_2)  # train layer with X
            # Propagate the data upward for the next layer's training input.
            X = self.rbm_stack[l].sample_hidden(X)

    def fine_tune(
            self, trainX, trainY, epochs=10,
            learning_rate=0.01, learning_rate_decay=1):
        # Initialize the MLP with the pretrained RBM weights, then run
        # supervised backpropagation.
        print 'Fine Tuning GenRB as MLP'
        self.mlp.set_weights(self.__convert_weights(self.rbm_stack))
        self.mlp.train(make_batches(trainX.copy(), 10, keep_last=False),
                       make_batches(trainY.copy(), 10, keep_last=False),
                       epochs=epochs,
                       learning_rate=learning_rate,
                       learning_rate_decay=learning_rate_decay)

    def __convert_weights(self, stack, use_best=False):
        # Convert each RBM weight matrix to the MLP layout by moving the
        # first row to the end and dropping the first column.
        weights = []
        for s in stack:
            # get weights of botzmann machine
            w = (s.W_best if use_best else s.W)
            # move first row to last and cut first column
            weights.append(
                w[[i for i in range(1, w.shape[0])] + [0], 1:])
        return weights
| bsd-2-clause | 7,862,436,048,049,449,000 | 37.30303 | 78 | 0.566456 | false |
ApolloAuto/apollo | modules/tools/mobileye_viewer/planning_data.py | 3 | 2223 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
class PlanningData:
    """Holds a planning protobuf and lock-protected data derived from it.

    compute_path() projects trajectory points into plot coordinates
    (plot x = -path y, plot y = path x); compute_path_param() extracts
    per-point time, speed, arc length and heading.
    """

    def __init__(self, planning_pb=None):
        self.path_lock = threading.Lock()
        self.path_param_lock = threading.Lock()
        self.planning_pb = planning_pb
        self.path_x = []
        self.path_y = []
        self.relative_time = []
        self.speed = []
        self.s = []
        self.theta = []

    def update(self, planning_pb):
        """Swap in a newer planning message."""
        self.planning_pb = planning_pb

    def compute_path(self):
        """Recompute the (path_x, path_y) plot coordinates under path_lock."""
        pb = self.planning_pb
        if pb is None:
            return
        xs = [-1 * tp.path_point.y for tp in pb.trajectory_point]
        ys = [tp.path_point.x for tp in pb.trajectory_point]
        with self.path_lock:
            self.path_x = xs
            self.path_y = ys

    def compute_path_param(self):
        """Recompute time/speed/s/theta series under path_param_lock."""
        pb = self.planning_pb
        if pb is None:
            return
        times = [tp.relative_time for tp in pb.trajectory_point]
        speeds = [tp.v for tp in pb.trajectory_point]
        arc_lengths = [tp.path_point.s for tp in pb.trajectory_point]
        headings = [tp.path_point.theta for tp in pb.trajectory_point]
        with self.path_param_lock:
            self.relative_time = times
            self.speed = speeds
            self.s = arc_lengths
            self.theta = headings
| apache-2.0 | 8,863,216,880,750,548,000 | 31.217391 | 79 | 0.566352 | false |
suncycheng/intellij-community | python/lib/Lib/site-packages/django/contrib/admin/widgets.py | 73 | 11754 | """
Form Widget classes specific to the Django admin site.
"""
import django.utils.copycompat as copy
from django import forms
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page
    """
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/core.js",
              settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
              settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js")

    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        # verbose_name is echoed into the generated JavaScript; is_stacked
        # switches between the side-by-side and stacked filter layouts.
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super(FilteredSelectMultiple, self).__init__(attrs, choices)

    def render(self, name, value, attrs=None, choices=()):
        if attrs is None: attrs = {}
        # The 'selectfilter'/'selectfilterstacked' CSS class is the hook the
        # SelectFilter2.js script looks for.
        attrs['class'] = 'selectfilter'
        if self.is_stacked: attrs['class'] += 'stacked'
        output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
        output.append(u'<script type="text/javascript">addEvent(window, "load", function(e) {')
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append(u'SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % \
            (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), settings.ADMIN_MEDIA_PREFIX))
        return mark_safe(u''.join(output))
class AdminDateWidget(forms.DateInput):
    """DateInput wired up with the admin calendar widget JavaScript."""
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")

    def __init__(self, attrs={}, format=None):
        # NOTE(review): the `attrs` argument is accepted but ignored; a fixed
        # attrs dict is always passed through -- confirm callers rely on this.
        super(AdminDateWidget, self).__init__(attrs={'class': 'vDateField', 'size': '10'}, format=format)
class AdminTimeWidget(forms.TimeInput):
    """TimeInput wired up with the admin clock-shortcut JavaScript."""
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")

    def __init__(self, attrs={}, format=None):
        # NOTE(review): the `attrs` argument is accepted but ignored; a fixed
        # attrs dict is always passed through -- confirm callers rely on this.
        super(AdminTimeWidget, self).__init__(attrs={'class': 'vTimeField', 'size': '8'}, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)

    def format_output(self, rendered_widgets):
        # Wrap the two sub-widgets in a labeled paragraph for the admin form.
        return mark_safe(u'<p class="datetime">%s %s<br />%s %s</p>' % \
            (_('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1]))
class AdminRadioFieldRenderer(RadioFieldRenderer):
    """RadioFieldRenderer that wraps the radio inputs in a styled <ul>."""
    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        return mark_safe(u'<ul%s>\n%s\n</ul>' % (
            flatatt(self.attrs),
            u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self]))
        )
class AdminRadioSelect(forms.RadioSelect):
    # Use the admin-specific renderer, which emits a <ul> of choices.
    renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
    """ClearableFileInput whose templates add admin-specific wrapper markup."""
    template_with_initial = (u'<p class="file-upload">%s</p>'
                            % forms.ClearableFileInput.template_with_initial)
    template_with_clear = (u'<span class="clearable-file-input">%s</span>'
                           % forms.ClearableFileInput.template_with_clear)
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    def __init__(self, rel, attrs=None, using=None):
        # rel: the relation descriptor for the foreign key; using: DB alias.
        self.rel = rel
        self.db = using
        super(ForeignKeyRawIdWidget, self).__init__(attrs)

    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        related_url = '../../../%s/%s/' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name.lower())
        params = self.url_parameters()
        if params:
            url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
        else:
            url = ''
        if "class" not in attrs:
            attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript looks for this hook.
        output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)]
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % \
            (related_url, url, name))
        output.append('<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
        if value:
            # Show a human-readable label next to the raw id.
            output.append(self.label_for_value(value))
        return mark_safe(u''.join(output))

    def base_url_parameters(self):
        # Turn the relation's limit_choices_to mapping into querystring
        # parameters for the popup changelist (lists become comma-joined).
        params = {}
        if self.rel.limit_choices_to and hasattr(self.rel.limit_choices_to, 'items'):
            items = []
            for k, v in self.rel.limit_choices_to.items():
                if isinstance(v, list):
                    v = ','.join([str(x) for x in v])
                else:
                    v = str(v)
                items.append((k, v))
            params.update(dict(items))
        return params

    def url_parameters(self):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params

    def label_for_value(self, value):
        # Look up the related object on the configured database and render a
        # truncated text label; silently fall back to '' on bad values.
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
            return ' <strong>%s</strong>' % escape(truncate_words(obj, 14))
        except (ValueError, self.rel.to.DoesNotExist):
            return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        attrs['class'] = 'vManyToManyRawIdAdminField'
        if value:
            # The m2m value is a list of pks; render as a comma-separated string.
            value = ','.join([force_unicode(v) for v in value])
        else:
            value = ''
        return super(ManyToManyRawIdWidget, self).render(name, value, attrs)

    def url_parameters(self):
        # Unlike the FK widget, no TO_FIELD_VAR is added for m2m lookups.
        return self.base_url_parameters()

    def label_for_value(self, value):
        # No inline label is shown for multi-valued fields.
        return ''

    def value_from_datadict(self, data, files, name):
        # Split the comma-separated ids back into a list; returns None when
        # the field is absent or empty.
        value = data.get(name)
        if value:
            return value.split(',')

    def _has_changed(self, initial, data):
        # Compare the two pk lists element-wise after unicode coercion.
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        for pk1, pk2 in zip(initial, data):
            if force_unicode(pk1) != force_unicode(pk2):
                return True
        return False
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.
    """
    def __init__(self, widget, rel, admin_site, can_add_related=None):
        # Mirror the wrapped widget's public attributes so this wrapper is
        # interchangeable with it.
        self.is_hidden = widget.is_hidden
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.to in admin_site._registry
        self.can_add_related = can_add_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site

    def __deepcopy__(self, memo):
        # Deep-copy only the wrapped widget; share the rest of the state.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj

    def _media(self):
        return self.widget.media
    media = property(_media)

    def render(self, name, value, *args, **kwargs):
        # Render the wrapped widget, then append the "add another" popup link
        # when the related model is editable in this admin site.
        rel_to = self.rel.to
        info = (rel_to._meta.app_label, rel_to._meta.object_name.lower())
        try:
            related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
        except NoReverseMatch:
            info = (self.admin_site.root_path, rel_to._meta.app_label, rel_to._meta.object_name.lower())
            related_url = '%s%s/%s/add/' % info
        self.widget.choices = self.choices
        output = [self.widget.render(name, value, *args, **kwargs)]
        if self.can_add_related:
            # TODO: "id_" is hard-coded here. This should instead use the correct
            # API to determine the ID dynamically.
            output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
                (related_url, name))
            output.append(u'<img src="%simg/admin/icon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Add Another')))
        return mark_safe(u''.join(output))

    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs)
        return self.attrs

    def value_from_datadict(self, data, files, name):
        # Delegate straight to the wrapped widget.
        return self.widget.value_from_datadict(data, files, name)

    def _has_changed(self, initial, data):
        return self.widget._has_changed(initial, data)

    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea carrying the admin's ``vLargeTextField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vLargeTextField'}
        merged.update(attrs or {})
        super(AdminTextareaWidget, self).__init__(attrs=merged)
class AdminTextInputWidget(forms.TextInput):
    """TextInput carrying the admin's ``vTextField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vTextField'}
        merged.update(attrs or {})
        super(AdminTextInputWidget, self).__init__(attrs=merged)
class AdminURLFieldWidget(forms.TextInput):
    """TextInput carrying the admin's ``vURLField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vURLField'}
        merged.update(attrs or {})
        super(AdminURLFieldWidget, self).__init__(attrs=merged)
class AdminIntegerFieldWidget(forms.TextInput):
    """TextInput carrying the admin's ``vIntegerField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vIntegerField'}
        merged.update(attrs or {})
        super(AdminIntegerFieldWidget, self).__init__(attrs=merged)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """TextInput carrying the admin's ``vCommaSeparatedIntegerField`` CSS class."""
    def __init__(self, attrs=None):
        merged = {'class': 'vCommaSeparatedIntegerField'}
        merged.update(attrs or {})
        super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=merged)
| apache-2.0 | -1,535,620,658,020,655,000 | 39.954704 | 157 | 0.618428 | false |
raywill/crawl_smth | rent_crawl.py | 1 | 3770 | #!/usr/bin/python
# coding:utf-8
from bs4 import BeautifulSoup
import re
import os
import sys
import urllib
import time
import random
import time
# #################### configuration start #################
# Board configuration.
# Crawling several boards is supported; uncomment the lines below to add more.
# Second-hand housing board:
# board = 'OurHouse'
# Main second-hand market board:
# board = 'SecondMarket'
# Rental / real-estate boards:
boards = ['OurEstate', 'PolicyEstate', 'SchoolEstate', 'RealEstate_review', 'ShangHaiEstate', 'RealEstate', 'Picture']
# File listing the keywords to watch for.
keywordsFile = '/home/wwwroot/www.reactshare.cn/rent/keywords.txt'
# File listing blacklisted keywords.
blacklistFile = '/home/wwwroot/www.reactshare.cn/rent/blacklist.txt'
# Crawl result file; simply place it under an HTTP-served directory.
outputFile = '/home/wwwroot/www.reactshare.cn/rent/index.html'
# Comparison file; when nothing changed since the last run, no notification is sent.
lastCopy = '/home/wwwroot/www.reactshare.cn/rent/last.html'
# Notification URL, used to announce that the crawl finished and results are viewable.
notifyUrl = "http://m.reactshare.cn/rent"
# Maximum number of pages to crawl per board.
maxCrawlPage = 3
# Minimum seconds to wait after each page fetch, to avoid being blacklisted.
# An extra random factor is added: total wait is baseSleepSec + [0..randWaitSec] seconds.
baseSleepSec = 1
randWaitSec = 2
# random wait factor
# ################### configuration end #################

reload(sys)
sys.setdefaultencoding("utf-8")

keywords = []
blacklist = []
matched = []
final = []

def notInBlackList(item) :
    # Return True when *item* contains none of the blacklisted keywords.
    for kw in blacklist:
        if item.find(kw) >= 0:
            return False
    return True

for kw in open(keywordsFile).readlines():
    keywords.append(kw.strip())

for kw in open(blacklistFile).readlines():
    blacklist.append(kw.strip())

for board in boards:
    # continue
    for page in range(1, maxCrawlPage):
        url = 'http://m.newsmth.net/board/%s?p=%s' % (board, page)
        data = urllib.urlopen(url).read()
        # print data
        soup = BeautifulSoup(data, "html5lib")
        # Collect links to articles on this board whose text matches a keyword.
        for a in soup.find_all(href=re.compile("\/article\/" + board)):
            item = a.encode('utf-8')
            for kw in keywords:
                if item.find(kw) >= 0 and notInBlackList(item):
                    matched.append(item)
        time.sleep(baseSleepSec + randWaitSec * random.random())

for item in matched:
    if item not in final:
        final.append(item)

# Sort once so that repeated bumping of the same post does not reshuffle results.
final.sort()
union=final

# Check whether this crawl produced anything new compared with the previous run.
if os.path.exists(lastCopy):
    last=[]
    for item in open(lastCopy).readlines():
        last.append(item.strip())
    union=list(set(last).union(set(final)))
    diff=list(set(union) ^ set(last))
    if len(diff) == 0 :
        sys.exit(0)

# Persist the merged data for the next run's comparison.
tmp = open(lastCopy, 'w')
tmp.write('\n'.join(union))
tmp.close()

# Render the output web page.
# Re-render the page grouped by keyword.
html = "<html><head><meta charset='UTF-8' /><meta name='viewport' content='width=device-width,user-scalable=yes'><meta name='apple-mobile-web-app-capable' content='yes'><title>水木爬爬</title><base href='http://m.newsmth.net/' /></head><body>"
html += "<style> a:visited {color:gray;} a:active {color:red;} a {color:blue;}</style>"
for kw in keywords:
    html += "<h2> %s </h2>" % (kw)
    for item in union:
        if item.find(kw) >= 0:
            html += "%s<br/>" % (item)
html += "<hr />"
for board in boards:
    html += "<p><a href='http://m.newsmth.net/board/%s'>%s</a></p>" % (board, board)
html += "<hr />"
html += "<p>%d items updated at %s </p><p><a href='http://m.newsmth.net/'>水木社区</a></p>" % (len(union), time.strftime('%Y-%m-%d %X', time.localtime()))
html += "</body></html>"

output = open(outputFile, 'w')
output.write(html)
output.close()

# notify
data = urllib.urlopen(notifyUrl).read()
| mit | -2,995,266,385,997,871,000 | 22.842857 | 239 | 0.62133 | false |
cobalys/django | django/contrib/localflavor/es/es_provinces.py | 110 | 1482 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
# (code, localized name) pairs for the Spanish provinces, keyed by the
# official two-digit INE province code, for use as form-field choices.
PROVINCE_CHOICES = (
    ('01', _('Araba')),
    ('02', _('Albacete')),
    ('03', _('Alacant')),
    ('04', _('Almeria')),
    ('05', _('Avila')),
    ('06', _('Badajoz')),
    ('07', _('Illes Balears')),
    ('08', _('Barcelona')),
    ('09', _('Burgos')),
    ('10', _('Caceres')),
    ('11', _('Cadiz')),
    ('12', _('Castello')),
    ('13', _('Ciudad Real')),
    ('14', _('Cordoba')),
    ('15', _('A Coruna')),
    ('16', _('Cuenca')),
    ('17', _('Girona')),
    ('18', _('Granada')),
    ('19', _('Guadalajara')),
    ('20', _('Guipuzkoa')),
    ('21', _('Huelva')),
    ('22', _('Huesca')),
    ('23', _('Jaen')),
    ('24', _('Leon')),
    ('25', _('Lleida')),
    ('26', _('La Rioja')),
    ('27', _('Lugo')),
    ('28', _('Madrid')),
    ('29', _('Malaga')),
    ('30', _('Murcia')),
    ('31', _('Navarre')),
    ('32', _('Ourense')),
    ('33', _('Asturias')),
    ('34', _('Palencia')),
    ('35', _('Las Palmas')),
    ('36', _('Pontevedra')),
    ('37', _('Salamanca')),
    ('38', _('Santa Cruz de Tenerife')),
    ('39', _('Cantabria')),
    ('40', _('Segovia')),
    ('41', _('Seville')),
    ('42', _('Soria')),
    ('43', _('Tarragona')),
    ('44', _('Teruel')),
    ('45', _('Toledo')),
    ('46', _('Valencia')),
    ('47', _('Valladolid')),
    ('48', _('Bizkaia')),
    ('49', _('Zamora')),
    ('50', _('Zaragoza')),
    ('51', _('Ceuta')),
    ('52', _('Melilla')),
)
| bsd-3-clause | -9,222,741,432,160,857,000 | 24.551724 | 55 | 0.375169 | false |
openaire/iis | iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/lib/dateutil/easter.py | 291 | 2633 | """
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3

def easter(year, method=EASTER_WESTERN):
    """
    Return the date of Easter for *year* as a ``datetime.date``.

    This method was ported from the work done by GM Arts,
    on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Ouding (1940), as
    quoted in "Explanatory Supplement to the Astronomical
    Almanac", P. Kenneth Seidelmann, editor.

    This algorithm implements three different easter
    calculation methods:

    1 - Original calculation in Julian calendar, valid in
        dates after 326 AD
    2 - Original method, with date converted to Gregorian
        calendar, valid in years 1583 to 4099
    3 - Revised method, in Gregorian calendar, valid in
        years 1583 to 4099 as well

    These methods are represented by the constants:

    EASTER_JULIAN   = 1
    EASTER_ORTHODOX = 2
    EASTER_WESTERN  = 3

    The default method is method 3.

    Raises ValueError when *method* is not 1, 2, or 3.

    More about the algorithm may be found at:

    http://users.chariot.net.au/~gmarts/eastalg.htm

    and

    http://www.tondering.dk/claus/calendar.html
    """
    if not (1 <= method <= 3):
        # BUG FIX: was the Python 2-only `raise ValueError, "invalid method"`
        # statement form; the call form below is valid in Python 2 and 3.
        raise ValueError("invalid method")

    # g - Golden year - 1
    # c - Century
    # h - (23 - Epact) mod 30
    # i - Number of days from March 21 to Paschal Full Moon
    # j - Weekday for PFM (0=Sunday, etc)
    # p - Number of days from March 21 to Sunday on or before PFM
    #     (-6 to 28 methods 1 & 3, to 56 for method 2)
    # e - Extra days to add for method 2 (converting Julian
    #     date to Gregorian date)

    y = year
    g = y % 19
    e = 0
    if method < 3:
        # Old method
        i = (19*g+15)%30
        j = (y+y//4+i)%7
        if method == 2:
            # Extra dates to convert Julian to Gregorian date
            e = 10
            if y > 1600:
                e = e+y//100-16-(y//100-16)//4
    else:
        # New method
        c = y//100
        h = (c-c//4-(8*c+13)//25+19*g+15)%30
        i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
        j = (y+y//4+i+2-c+c//4)%7

    # p can be from -6 to 56 corresponding to dates 22 March to 23 May
    # (later dates apply to method 2, although 23 May never actually occurs)
    p = i-j+e
    d = 1+(p+27+(p+6)//40)%31
    m = 3+(p+26)//30
    return datetime.date(int(y),int(m),int(d))
| apache-2.0 | 3,271,246,482,586,912,300 | 27.619565 | 76 | 0.600836 | false |
svn2github/vbox | src/VBox/HostServices/SharedOpenGL/crserverlib/server_dispatch.py | 12 | 4596 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys, string, re
import apiutil
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE AUTOMATICALLY GENERATED BY server_dispatch.py SCRIPT */
#include "cr_spu.h"
#include "chromium.h"
#include "cr_error.h"
#include "server_dispatch.h"
#include "server.h"
#include "cr_unpack.h"
CRCurrentStatePointers crServerCurrent;
"""
# Emit a pass-through dispatcher for every state-tracked GL function:
# each generated C function updates the server-side state tracker and then
# forwards the call to the head SPU's dispatch table.
for func_name in apiutil.AllSpecials( sys.argv[1]+"/../state_tracker/state" ):
    params = apiutil.Parameters(func_name)
    # Skip functions with hand-written server code and all getters.
    if (apiutil.FindSpecial( "server", func_name ) or
        "get" in apiutil.Properties(func_name)):
        continue

    # Guard extension-category functions with the matching #if define.
    wrap = apiutil.GetCategoryWrapper(func_name)
    if wrap:
        print '#if defined(CR_%s)' % wrap
    print 'void SERVER_DISPATCH_APIENTRY crServerDispatch%s( %s )' % ( func_name, apiutil.MakeDeclarationString( params ) )
    print '{'
    print '\tcrState%s( %s );' % (func_name, apiutil.MakeCallString( params ) )
    print '\tcr_server.head_spu->dispatch_table.%s( %s );' % (func_name, apiutil.MakeCallString( params ) )
    print '}'
    if wrap:
        print '#endif'
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")

# For each "current vertex state" style function (glColor*, glNormal*,
# glTexCoord*, glVertexAttrib*, ...), derive the CRCurrentStatePointers
# member name, the type suffix (e.g. "f3"), an optional array index
# expression, and an optional bounds-check condition; then emit a
# dispatcher that forwards the call and records the unpack pointer.
for func_name in keys:
    current = 0
    array = ""
    condition = ""
    m = re.search( r"^(Color|Normal)([1234])(ub|b|us|s|ui|i|f|d)$", func_name )
    if m :
        current = 1
        name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
        type = m.group(3) + m.group(2)
    m = re.search( r"^(SecondaryColor)(3)(ub|b|us|s|ui|i|f|d)(EXT)$", func_name )
    if m :
        current = 1
        name = string.lower(m.group(1)[:1] ) + m.group(1)[1:]
        type = m.group(3) + m.group(2)
    m = re.search( r"^(TexCoord)([1234])(ub|b|us|s|ui|i|f|d)$", func_name )
    if m :
        current = 1
        name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
        type = m.group(3) + m.group(2)
        array = "[0]"
    m = re.search( r"^(MultiTexCoord)([1234])(ub|b|us|s|ui|i|f|d)ARB$", func_name )
    if m :
        current = 1
        name = "texCoord"
        type = m.group(3) + m.group(2)
        array = "[texture-GL_TEXTURE0_ARB]"
        condition = "if (texture >= GL_TEXTURE0_ARB && texture < GL_TEXTURE0_ARB + CR_MAX_TEXTURE_UNITS)"
    m = re.match( r"^(Index)(ub|b|us|s|ui|i|f|d)$", func_name )
    if m :
        current = 1
        name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
        type = m.group(2) + "1"
    m = re.match( r"^(EdgeFlag)$", func_name )
    if m :
        current = 1
        name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
        type = "l1"
    m = re.match( r"^(FogCoord)(f|d)(EXT)$", func_name)
    if m :
        current = 1
        name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
        type = m.group(2) + "1"

    # Vertex attribute commands w/ some special cases
    m = re.search( r"^(VertexAttrib)([1234])(s|i|f|d)ARB$", func_name )
    if m :
        current = 1
        name = string.lower( m.group(1)[:1] ) + m.group(1)[1:]
        type = m.group(3) + m.group(2)
        array = "[index]"
        condition = "if (index < CR_MAX_VERTEX_ATTRIBS)"

    if func_name == "VertexAttrib4NubARB":
        current = 1
        name = "vertexAttrib"
        type = "ub4"
        array = "[index]"
        condition = "if (index < CR_MAX_VERTEX_ATTRIBS)"

    if current:
        params = apiutil.Parameters(func_name)
        print 'void SERVER_DISPATCH_APIENTRY crServerDispatch%s( %s )' % ( func_name, apiutil.MakeDeclarationString(params) )
        print '{'
        print '\t%s' % (condition)
        print '\t{'
        print '\t\tcr_server.head_spu->dispatch_table.%s( %s );' % (func_name, apiutil.MakeCallString(params) )
        print "\t\tcr_server.current.c.%s.%s%s = cr_unpackData;" % (name,type,array)
        print '\t}'
        print '}\n'
# Epilogue: emit crServerInitDispatch(), which seeds cr_server.dispatch from
# the head SPU's table and then overrides every entry that needs server-side
# handling ("get" functions, "server" specials, and the state-tracked
# functions generated in pass 1) with its crServerDispatch* wrapper.
print """
void crServerInitDispatch(void)
{
	crSPUInitDispatchTable( &(cr_server.dispatch) );
	crSPUCopyDispatchTable( &(cr_server.dispatch), &(cr_server.head_spu->dispatch_table ) );
"""
for func_name in keys:
    if ("get" in apiutil.Properties(func_name) or
        apiutil.FindSpecial( "server", func_name ) or
        apiutil.FindSpecial( sys.argv[1]+"/../state_tracker/state", func_name )):
        wrap = apiutil.GetCategoryWrapper(func_name)
        if wrap:
            print '#if defined(CR_%s)' % wrap
        print '\tcr_server.dispatch.%s = crServerDispatch%s;' % (func_name, func_name)
        if wrap:
            print '#endif'
print '}'
| gpl-2.0 | -7,610,757,505,432,118,000 | 32.794118 | 125 | 0.577241 | false |
geoffkilpin/pombola | pombola/feedback/forms.py | 4 | 1293 | import time
import datetime
from django import forms
from django.forms.util import ErrorDict
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.crypto import salted_hmac, constant_time_compare
from django.utils.encoding import force_unicode
from django.utils.text import get_text_list
from django.utils.translation import ungettext, ugettext_lazy as _
from pombola.feedback.models import Feedback
class FeedbackForm(forms.Form):
    """Form used to collect user feedback about a page.

    Field declaration order matters: Django renders the fields in the
    order they are defined here.
    """

    # Page the feedback was submitted from; populated by the template.
    url = forms.URLField(widget=forms.HiddenInput, required=False)

    # The feedback text itself - the only mandatory field.
    comment = forms.CharField(
        label=_('Your feedback'),
        widget=forms.Textarea,
        max_length=2000)

    # Optional reply address for follow-up.
    email = forms.EmailField(
        label=_('Your email'),
        required=False,
        help_text="optional - but lets us get back to you...")

    # Honeypot to catch spam bots: hidden via CSS so real users never see it
    # (and hopefully browsers never autofill it).  Any content in this field
    # causes the feedback status to be set to 'spammy'.
    website = forms.CharField(label=_('Leave this blank'), required=False)
| agpl-3.0 | -3,014,483,108,086,517,000 | 27.733333 | 79 | 0.672854 | false |
heplesser/nest-simulator | pynest/nest/tests/test_connect_arrays.py | 12 | 13340 | # -*- coding: utf-8 -*-
#
# test_connect_arrays.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import nest
# Quiet the kernel so test output only shows warnings and errors.
nest.set_verbosity('M_WARNING')
# True when NEST was built with threading support; threaded tests below are
# skipped otherwise.
HAVE_OPENMP = nest.ll_api.sli_func("is_threaded")
class TestConnectArrays(unittest.TestCase):
    """Tests for ``nest.Connect()`` called directly with NumPy arrays of node IDs."""

    # Target node IDs containing duplicates; one_to_one connections must
    # accept repeated IDs on either side.
    non_unique = np.array([1, 1, 3, 5, 4, 5, 9, 7, 2, 8], dtype=np.uint64)

    def setUp(self):
        # Every test starts from a pristine kernel.
        nest.ResetKernel()

    def test_connect_arrays_unique(self):
        """Connecting NumPy arrays of unique node IDs"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = np.arange(1, n+1, dtype=np.uint64)
        weights = 1.5
        delays = 1.4
        # No conn_spec given, so the default all_to_all rule applies,
        # hence n*n connections are expected below.
        nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays})
        conns = nest.GetConnections()
        self.assertEqual(len(conns), n*n)
        for c in conns:
            np.testing.assert_approx_equal(c.weight, weights)
            np.testing.assert_approx_equal(c.delay, delays)

    def test_connect_arrays_nonunique(self):
        """Connecting NumPy arrays with non-unique node IDs"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = np.ones(n)
        delays = np.ones(n)
        nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
                     conn_spec='one_to_one')
        conns = nest.GetConnections()
        for s, t, w, d, c in zip(sources, targets, weights, delays, conns):
            self.assertEqual(c.source, s)
            self.assertEqual(c.target, t)
            self.assertEqual(c.weight, w)
            self.assertEqual(c.delay, d)

    def test_connect_arrays_nonunique_dict_conn_spec(self):
        """Connecting NumPy arrays with non-unique node IDs and conn_spec as a dict"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = 2 * np.ones(n)
        delays = 1.5 * np.ones(n)
        nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
                     conn_spec={'rule': 'one_to_one'})
        conns = nest.GetConnections()
        for s, t, w, d, c in zip(sources, targets, weights, delays, conns):
            self.assertEqual(c.source, s)
            self.assertEqual(c.target, t)
            self.assertEqual(c.weight, w)
            self.assertEqual(c.delay, d)

    def test_connect_arrays_no_conn_spec(self):
        """Connecting NumPy arrays of node IDs without specifying conn_spec"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        # Connecting raw arrays without an explicit conn_spec is rejected.
        with self.assertRaises(ValueError):
            nest.Connect(sources, targets)

    def test_connect_arrays_different_weights_delays(self):
        """Connecting NumPy arrays with different weights and delays"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = np.linspace(0.6, 1.5, n)
        delays = np.linspace(0.4, 1.3, n)
        nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
                     conn_spec={'rule': 'one_to_one'})
        conns = nest.GetConnections()
        np.testing.assert_array_equal(conns.source, sources)
        np.testing.assert_array_equal(conns.target, targets)
        np.testing.assert_array_almost_equal(conns.weight, weights)
        np.testing.assert_array_almost_equal(conns.delay, delays)

    def test_connect_arrays_threaded(self):
        """Connecting NumPy arrays, threaded"""
        nest.SetKernelStatus({'local_num_threads': 2})
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = np.ones(len(sources))
        delays = np.ones(len(sources))
        syn_model = 'static_synapse'
        nest.Connect(sources, targets, conn_spec='one_to_one',
                     syn_spec={'weight': weights, 'delay': delays, 'synapse_model': syn_model})
        conns = nest.GetConnections()
        # Sorting connection information by source to make it equivalent to the
        # reference (threads may return connections in arbitrary order).
        conn_info = [(c.source, c.target, c.weight, c.delay) for c in conns]
        conn_info.sort(key=lambda conn: conn[0])
        for s, t, w, d, c in zip(sources, targets, weights, delays, conn_info):
            conn_s, conn_t, conn_w, conn_d = c
            self.assertEqual(conn_s, s)
            self.assertEqual(conn_t, t)
            self.assertEqual(conn_w, w)
            self.assertEqual(conn_d, d)

    def test_connect_arrays_no_delays(self):
        """Connecting NumPy arrays without specifying delays"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = np.ones(n)
        nest.Connect(sources, targets, conn_spec='one_to_one', syn_spec={'weight': weights})
        conns = nest.GetConnections()
        for s, t, w, c in zip(sources, targets, weights, conns):
            self.assertEqual(c.source, s)
            self.assertEqual(c.target, t)
            self.assertEqual(c.weight, w)

    def test_connect_array_list(self):
        """Connecting NumPy array and list"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = list(range(1, n + 1))
        targets = self.non_unique
        nest.Connect(sources, targets, conn_spec='one_to_one')
        conns = nest.GetConnections()
        for s, t, c in zip(sources, targets, conns):
            self.assertEqual(c.source, s)
            self.assertEqual(c.target, t)

    def test_connect_arrays_no_weights(self):
        """Connecting NumPy arrays without specifying weights"""
        n = 10
        neurons = nest.Create('iaf_psc_alpha', n)
        targets = self.non_unique
        delays = np.ones(n)
        nest.Connect(neurons, targets, conn_spec='one_to_one', syn_spec={'delay': delays})
        conns = nest.GetConnections()
        for s, t, d, c in zip(neurons.tolist(), targets, delays, conns):
            self.assertEqual(c.source, s)
            self.assertEqual(c.target, t)
            self.assertEqual(c.delay, d)

    def test_connect_arrays_rtype(self):
        """Connecting NumPy arrays with specified receptor_type"""
        n = 10
        nest.Create('iaf_psc_exp_multisynapse', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = np.ones(len(sources))
        delays = np.ones(len(sources))
        receptor_type = np.ones(len(sources), dtype=np.uint64)
        # (Removed an unused 'syn_model' local; the default synapse is used.)
        nest.Connect(sources, targets, conn_spec='one_to_one',
                     syn_spec={'weight': weights, 'delay': delays, 'receptor_type': receptor_type})
        conns = nest.GetConnections()
        for s, t, w, d, r, c in zip(sources, targets, weights, delays, receptor_type, conns):
            self.assertEqual(c.source, s)
            self.assertEqual(c.target, t)
            self.assertEqual(c.weight, w)
            self.assertEqual(c.delay, d)
            self.assertEqual(c.receptor, r)

    def test_connect_arrays_additional_synspec_params(self):
        """Connecting NumPy arrays with additional syn_spec params"""
        n = 10
        nest.Create('iaf_psc_exp_multisynapse', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = np.ones(len(sources))
        delays = np.ones(len(sources))
        syn_model = 'vogels_sprekeler_synapse'
        receptor_type = np.ones(len(sources), dtype=np.uint64)
        alpha = 0.1*np.ones(len(sources))
        tau = 20.*np.ones(len(sources))
        nest.Connect(sources, targets, conn_spec='one_to_one',
                     syn_spec={'weight': weights, 'delay': delays, 'synapse_model': syn_model,
                               'receptor_type': receptor_type, 'alpha': alpha, 'tau': tau})
        conns = nest.GetConnections()
        for s, t, w, d, r, a, tau, c in zip(sources, targets, weights, delays, receptor_type, alpha, tau, conns):
            self.assertEqual(c.source, s)
            self.assertEqual(c.target, t)
            self.assertEqual(c.weight, w)
            self.assertEqual(c.delay, d)
            self.assertEqual(c.receptor, r)
            self.assertEqual(c.alpha, a)
            self.assertEqual(c.tau, tau)

    def test_connect_arrays_float_rtype(self):
        """Raises exception when not using integer value for receptor_type"""
        n = 10
        nest.Create('iaf_psc_exp_multisynapse', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = np.ones(n)
        delays = np.ones(n)
        syn_model = 'vogels_sprekeler_synapse'
        receptor_type = 1.5*np.ones(len(sources))
        with self.assertRaises(nest.kernel.NESTErrors.BadParameter):
            nest.Connect(sources, targets, conn_spec='one_to_one',
                         syn_spec={'weight': weights, 'delay': delays, 'synapse_model': syn_model,
                                   'receptor_type': receptor_type})

    def test_connect_arrays_wrong_dtype(self):
        """Raises exception when connecting NumPy arrays with wrong dtype"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        # Node IDs must be unsigned integers; doubles must be rejected.
        sources = np.arange(1, n+1, dtype=np.double)
        targets = np.array(self.non_unique, dtype=np.double)
        weights = np.ones(n)
        delays = np.ones(n)
        # (Removed an unused 'syn_model' local.)
        with self.assertRaises(nest.kernel.NESTErrors.ArgumentType):
            nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
                         conn_spec='one_to_one')

    def test_connect_arrays_unknown_nodes(self):
        """Raises exception when connecting NumPy arrays with unknown nodes"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        # IDs run one past the number of created nodes.
        sources = np.arange(1, n+2, dtype=np.uint64)
        targets = np.arange(1, n+2, dtype=np.uint64)
        weights = np.ones(len(sources))
        delays = np.ones(len(sources))
        syn_model = 'static_synapse'
        with self.assertRaises(nest.kernel.NESTErrors.UnknownNode):
            nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays,
                                                     'synapse_model': syn_model})

    @unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
    def test_connect_arrays_receptor_type(self):
        """Connecting NumPy arrays with receptor type specified, threaded"""
        nest.SetKernelStatus({'local_num_threads': 2})
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = len(sources) * [2.]
        nest.Connect(sources, targets, conn_spec='one_to_one', syn_spec={'weight': weights, 'receptor_type': 0})
        self.assertEqual(len(sources) * [0], nest.GetConnections().receptor)

    @unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
    def test_connect_arrays_differnt_alpha(self):
        """Connecting NumPy arrays with different alpha values in a threaded environment"""
        # NOTE(review): "differnt" typo kept so the public test ID stays stable.
        nest.SetKernelStatus({'local_num_threads': 4})
        neurons = nest.Create("iaf_psc_exp", 10)
        # syn_spec parameters are dependent on source, so we test with source id's not starting with 1
        source = np.array([2, 5, 3, 10, 1, 9, 4, 6, 8, 7])
        target = 1 + np.random.choice(10, 10, replace=True)
        weights = len(source) * [2.]
        alpha = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.11])
        # Need to make sure the correct alpha value is used with the correct source
        src_alpha_ref = {key: val for key, val in zip(source, alpha)}
        nest.Connect(source, target, conn_spec='one_to_one',
                     syn_spec={'alpha': alpha, 'receptor_type': 0,
                               'weight': weights, 'synapse_model': "stdp_synapse"})
        conns = nest.GetConnections()
        src = conns.source
        alp = conns.alpha
        src_alpha = {key: val for key, val in zip(src, alp)}
        self.assertEqual(src_alpha_ref, src_alpha)
def suite():
    """Build the unittest suite for this module."""
    # Avoid a local named 'suite' (the original shadowed the function name).
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestConnectArrays)
if __name__ == '__main__':
    # Run the suite with verbose per-test output when executed directly.
    unittest.TextTestRunner(verbosity=2).run(suite())
| gpl-2.0 | 1,055,305,746,826,928,100 | 37.223496 | 113 | 0.60075 | false |
malkavi/lutris | lutris/runners/o2em.py | 1 | 3325 | # -*- coding: utf-8 -*-
import os
from lutris import settings
from lutris.runners.runner import Runner
class o2em(Runner):
    """Magnavox Odyssey² Emulator"""  # fixed docstring typo ("Oyssey")
    human_name = "O2EM"
    package = "o2em"
    executable = "o2em"
    platform = "Magnavox Odyssey 2, Phillips Videopac+"
    tarballs = {
        'i386': None,
        'x64': "o2em-1.18-x86_64.tar.gz",
    }
    # MD5 checksums of the supported BIOS images.
    # NOTE(review): 'g7400' is only 31 hex digits (MD5 has 32) and equals
    # 'jopac' minus its leading digit -- verify against a real G7400 BIOS.
    checksums = {
        'o2rom': "562d5ebf9e030a40d6fabfc2f33139fd",
        'c52': "f1071cdb0b6b10dde94d3bc8a6146387",
        'jopac': "279008e4a0db2dc5f1c048853b033828",
        'g7400': "79008e4a0db2dc5f1c048853b033828",
    }
    # (label, value) pairs shown in the runner configuration dialog.
    bios_choices = [
        ("Magnavox Odyssey2", "o2rom"),
        ("Phillips C52", "c52"),
        ("Phillips Videopac+", "g7400"),
        ("Brandt Jopac", "jopac")
    ]
    controller_choices = [
        ("Disable", "0"),
        ("Arrows keys and right shift", "1"),
        ("W,S,A,D,SPACE", "2"),
        ("Joystick", "3")
    ]
    game_options = [{
        "option": "main_file",
        "type": "file",
        "label": "ROM file",
        "default_path": 'game_path',
        'help': ("The game data, commonly called a ROM image.")
    }]
    runner_options = [
        {
            "option": "bios",
            "type": "choice",
            "choices": bios_choices,
            "label": "Bios"
        },
        {
            "option": "controller1",
            "type": "choice",
            "choices": controller_choices,
            "label": "First controller"
        },
        {
            "option": "controller2",
            "type": "choice",
            "choices": controller_choices,
            "label": "Second controller"
        },
        {
            "option": "fullscreen",
            "type": "bool",
            "label": "Fullscreen"
        },
        {
            "option": "scanlines",
            "type": "bool",
            "label": "Scanlines",
            # Fixed user-facing typo: "matieral" -> "material".
            'help': ("Activates a display filter adding scanlines to look "
                     "more like yesteryear material.")
        }
    ]

    def install(self):
        """Install the runner and make sure the BIOS directory exists."""
        super(o2em, self).install()
        bios_path = os.path.expanduser("~/.o2em/bios")
        if not os.path.exists(bios_path):
            os.makedirs(bios_path)

    def get_executable(self):
        """Return the full path of the o2em binary."""
        return os.path.join(settings.RUNNER_DIR, 'o2em/o2em')

    def play(self):
        """Build the o2em command line for the configured game.

        Returns a dict with either the 'command' list to run or an
        'error' entry when the ROM file is missing.
        """
        # Same directory created by install(); o2em expects the trailing slash.
        bios_path = os.path.join(os.path.expanduser("~"), ".o2em/bios/")
        arguments = ["-biosdir=\"%s\"" % bios_path]

        if self.runner_config.get("fullscreen"):
            arguments.append("-fullscreen")
        if self.runner_config.get("scanlines"):
            arguments.append("-scanlines")
        if "controller1" in self.runner_config:
            arguments.append("-s1=%s" % self.runner_config["controller1"])
        if "controller2" in self.runner_config:
            arguments.append("-s2=%s" % self.runner_config["controller2"])

        rom_path = self.game_config.get('main_file') or ''
        if not os.path.exists(rom_path):
            return {'error': 'FILE_NOT_FOUND', 'file': rom_path}
        romdir = os.path.dirname(rom_path)
        romfile = os.path.basename(rom_path)
        arguments.append("-romdir=\"%s\"/" % romdir)
        arguments.append("\"%s\"" % romfile)
        return {'command': [self.get_executable()] + arguments}
| gpl-3.0 | -75,744,864,725,442,660 | 30.065421 | 75 | 0.521961 | false |
mbodenhamer/syn | syn/type/a/tests/test_type.py | 1 | 9511 | import six
from syn.five import xrange
from nose.tools import assert_raises
from syn.type.a import (Type, ValuesType, MultiType, TypeType, AnyType,
TypeExtension, Set, Schema)
from syn.base_utils import is_hashable, feq
from syn.base_utils import ngzwarn, on_error, elog
from syn.globals import TEST_SAMPLES as SAMPLES
# Use half the global sample budget for this module, but always at least one.
SAMPLES //= 2
SAMPLES = max(SAMPLES, 1)
# Presumably warns when the sample count is not greater than zero -- TODO
# confirm ngzwarn semantics against syn.base_utils.
ngzwarn(SAMPLES, 'SAMPLES')
#-------------------------------------------------------------------------------
# Type
def test_type():
    """The abstract Type base: equality, hashability, unimplemented API."""
    t = Type()
    assert t == Type()
    assert t != 1
    assert is_hashable(t)
    # Every operation on the abstract base raises NotImplementedError.
    for unary_op in (t.check, t.coerce, t.enumeration_value, t.validate):
        assert_raises(NotImplementedError, unary_op, 1)
    for nullary_op in (t.display, t.generate, t.rst):
        assert_raises(NotImplementedError, nullary_op)
#-------------------------------------------------------------------------------
# AnyType
def test_anytype():
    """AnyType accepts every value and renders as 'any'."""
    t = AnyType()
    assert t == AnyType()
    t.check(1)
    assert t.coerce(1) == 1
    assert t.display() == 'any'
    assert t.rst() == 'any'
    t.validate(1)
#-------------------------------------------------------------------------------
# TypeType
class Foo(object):
    """Value wrapper whose validate() only accepts values greater than 5."""

    def __init__(self, value):
        self.value = value

    def validate(self):
        # Fails (AssertionError) for values <= 5; the tests use this to
        # exercise TypeType's call_validate handling.
        assert self.value > 5
class Bar(Foo):
    """Foo subclass providing a coerce() hook."""

    @classmethod
    def coerce(cls, value):
        # Wrapping value + 1 lets tests detect that coerce() was called.
        return Bar(value + 1)
def test_typetype():
    """TypeType: wrapping a plain class, with optional coerce/validate hooks."""
    # int has neither a coerce() classmethod nor a validate() method.
    t = TypeType(int)
    assert t.type is int
    assert not t.call_coerce
    assert not t.call_validate
    assert t == TypeType(int)
    assert t != TypeType(float)
    t.check(1)
    assert_raises(TypeError, t.check, 1.2)
    assert t.query(1)
    assert not t.query(1.2)
    res, e = t.query_exception(1)
    assert res
    assert e is None
    res, e = t.query_exception(1.2)
    assert not res
    assert isinstance(e, TypeError)
    assert t.coerce(1.2) == 1
    assert_raises(TypeError, t.coerce, 'abc')
    assert t.display() == 'int'
    assert t.rst() == '*int*'
    t.validate(1)
    assert_raises(TypeError, t.validate, 1.2)
    # Foo defines validate() (value must be > 5) but no coerce() hook.
    f = TypeType(Foo)
    assert f.type is Foo
    assert not f.call_coerce
    assert f.call_validate
    f.check(Foo(2))
    assert_raises(TypeError, f.check, 2)
    f1 = f.coerce(1)
    assert isinstance(f1, Foo)
    assert f1.value == 1
    assert_raises(TypeError, f.validate, 6)
    assert_raises(AssertionError, f.validate, Foo(5))
    assert f.display() == 'Foo'
    assert f.rst() == '*Foo*'
    f.validate(Foo(6))
    # Bar defines both hooks; its coerce() adds one to the input.
    b = TypeType(Bar)
    assert b.type is Bar
    assert b.call_coerce
    assert b.call_validate
    b.check(Bar(2))
    assert_raises(TypeError, b.check, Foo(2))
    b1 = b.coerce(1)
    assert isinstance(b1, Bar)
    assert b1.value == 2
    assert_raises(TypeError, b.validate, 6)
    assert_raises(AssertionError, b.validate, Bar(5))
    b.validate(Bar(6))
#-------------------------------------------------------------------------------
# ValuesType
def test_valuestype():
    """ValuesType: membership in an explicit collection of allowed values."""
    t = ValuesType({1, 1.2, u'b'})
    assert t == ValuesType([1, 1.2, u'b'])
    assert t != ValuesType([1, 1.3, u'b'])
    for member in (1, 1.2, u'b'):
        t.check(member)
    assert_raises(TypeError, t.check, 2)
    assert t.coerce(1) == 1
    assert_raises(TypeError, t.coerce, 2)
    t.validate(1)
    assert_raises(TypeError, t.validate, 2)
    # display()/rst() render the value list; set iteration order varies.
    t = ValuesType({1, 1.2})
    renderings = ('[1, 1.2]', '[1.2, 1]')
    assert t.display() in renderings
    assert t.rst() in renderings
    assert t.display() == t.rst()
#-------------------------------------------------------------------------------
# MultiType
def test_multitype():
    """MultiType: a union of several types, tried in order."""
    import math
    # A pure list of classes is flagged as a typelist.
    t = MultiType((int, float))
    assert t == MultiType((int, float))
    assert t != MultiType((int, str))
    assert t.is_typelist
    assert t.query(1)
    assert t.query(1.2)
    assert not t.query(u'a')
    # Coercion tries the member types left to right: int first, then float.
    assert t.coerce(1.2) == 1
    assert t.coerce(u'inf') == float(u'inf')
    assert_raises(TypeError, t.coerce, u'abc')
    assert t.display() == 'int | float'
    assert t.rst() == '*int* | *float*'
    t.validate(1)
    assert_raises(TypeError, t.validate, u'abc')
    # Mixing classes with a non-class Type is no longer a plain typelist.
    t = MultiType((int, Foo, ValuesType([math.pi, math.e])))
    assert not t.is_typelist
    assert t.query(1)
    assert t.query(Foo(2))
    assert t.query(math.pi)
    assert not t.query(3.4)
    assert t.coerce(1) == 1
    f = t.coerce(u'abc')
    assert isinstance(f, Foo)
    assert f.value == u'abc'
    t.validate(1)
    t.validate(Foo(6))
    assert_raises(TypeError, t.validate, 3.4)
    assert_raises(AssertionError, t.validate, Foo(5))
    # six.string_types covers both str and unicode on Python 2.
    t = MultiType(six.string_types)
    t.validate('abc')
    t.validate('abc')
    t.validate(u'abc')
    assert_raises(TypeError, t.validate, 3.4)
#-------------------------------------------------------------------------------
# Set
def test_set():
    """Set: type membership defined by a syn set object (here a Range)."""
    from syn.sets.b import Range
    t = Set(Range(1, 5))
    assert t == Set(Range(1, 5))
    assert t != Set(Range(0, 5))
    # A Set instance dispatches to itself.
    assert Type.dispatch(t) is t
    assert t.query(1)
    assert not t.query(0)
    t.validate(1)
    assert_raises(TypeError, t.validate, 0)
    assert t.coerce(1) == 1
    assert_raises(TypeError, t.coerce, 0)
    # Range(1, 5) appears inclusive of both bounds -- generated values land
    # in {1, ..., 5}.
    s = set(xrange(1, 6))
    for k in xrange(SAMPLES):
        val = t.generate()
        with on_error(elog, s.__contains__, (val,)):
            assert val in s
    assert t.display() == t.rst() == '<Set>'
#-------------------------------------------------------------------------------
# Schema
def test_schema():
    """Schema: list values validated against a syn schema Sequence."""
    from syn.schema.b.sequence import Sequence
    from syn.type.a import List
    # A sequence of literal values matches only that exact list.
    t = Schema(Sequence(1, 2, 3))
    assert t == Schema(Sequence(1, 2, 3))
    assert t != Schema(Sequence(1, 3, 2))
    assert Type.dispatch(t) is t
    assert t.query([1, 2, 3])
    assert not t.query([1, 3, 2])
    t.validate([1, 2, 3])
    assert_raises(TypeError, t.validate, [1, 3, 2])
    assert t.generate() == [1, 2, 3]
    assert t.display() == t.rst() == '<Schema>'
    # Schema coercion is a pass-through.
    assert t.coerce(1) == 1
    # A sequence of types matches by element type.
    t = Schema(Sequence(int, float))
    assert t.query([1, 2.3])
    assert not t.query([1, 2])
    val = t.generate()
    assert t.query(val)
    # Nested: an int followed by a (possibly empty) list of floats.
    t = Schema(Sequence(int, List(float)))
    assert not t.query([1, 1.2])
    assert not t.query([1, [1, 2]])
    assert t.query([1, [1.2, 3.4]])
    assert t.query([1, []])
    val = t.generate()
    assert t.query(val)
#-------------------------------------------------------------------------------
# dispatch_type
def test_dispatch_type():
    """Type.dispatch: mapping raw specifications onto Type instances."""
    # None -> AnyType; a class -> TypeType; a tuple of classes -> MultiType;
    # a list of values -> ValuesType.
    t = Type.dispatch(None)
    assert isinstance(t, AnyType)
    t = Type.dispatch(int)
    assert isinstance(t, TypeType)
    assert t.type is int
    t = Type.dispatch((int, float))
    assert isinstance(t, MultiType)
    assert t.typelist == (int, float)
    t = Type.dispatch([1, 2])
    assert isinstance(t, ValuesType)
    assert t.values == [1, 2]
    t = Type.dispatch(six.string_types)
    assert isinstance(t, TypeType)
    t.validate('abc')
    t.validate(u'abc')
    assert_raises(TypeError, t.validate, 1)
    # A TypeExtension instance passes through; the class is instantiated.
    te = TypeExtension()
    assert Type.dispatch(te) is te
    assert Type.dispatch(TypeExtension) is not TypeExtension
    assert isinstance(Type.dispatch(TypeExtension), TypeExtension)
    # Bare values and strings are not valid specifications.
    assert_raises(TypeError, Type.dispatch, 1)
    assert_raises(TypeError, Type.dispatch, b'abc')
    assert_raises(TypeError, Type.dispatch, u'abc')
#-------------------------------------------------------------------------------
# Test generation
def test_generation():
    """generate(): producing sample values from Type instances."""
    from syn.base_utils.rand import PRIMITIVE_TYPES
    from syn.types import Type as Type_
    # AnyType should produce varied values, not just None.
    anys = [AnyType().generate() for k in xrange(SAMPLES)]
    if len(anys) > 2:
        assert any(x is not None for x in anys)
    class Foo(object): pass
    # An ungeneratable class in types= falls back to the primitive types.
    assert isinstance(AnyType().generate(types=[Foo]), tuple(PRIMITIVE_TYPES))
    # Registering a syn.types Type with a _generate hook makes the class
    # generatable through TypeType.
    class Bar(object):
        @classmethod
        def _generate(cls, **kwargs):
            return cls()
    class BarType(Type_): type = Bar
    assert isinstance(TypeType(int).generate(), int)
    assert isinstance(TypeType(Bar).generate(), Bar)
    assert_raises(NotImplementedError, TypeType(Foo).generate)
    assert ValuesType([1, 2, 3]).generate() in {1, 2, 3}
    # MultiType picks one of its member types; exclude_types narrows the choice.
    t = MultiType([int, float])
    assert isinstance(t.generate(), (int, float))
    assert isinstance(t.generate(exclude_types=[float]), int)
#-------------------------------------------------------------------------------
# Test enumeration values
def test_enumeration_values():
    """enumeration_value(): deterministic value for an enumeration index."""
    assert TypeType(int).enumeration_value(0) == 0
    # ValuesType cycles through its values by index (modulo length).
    v = ValuesType([1, 2, 3])
    assert v.enumeration_value(0) == 1
    assert v.enumeration_value(1) == 2
    assert v.enumeration_value(2) == 3
    assert v.enumeration_value(3) == 1
    # MultiType alternates between its member types as the index grows.
    m = MultiType([int, float])
    assert m.enumeration_value(0) == 0
    assert feq(m.enumeration_value(1), 0.1)
    assert m.enumeration_value(2) == 2
    assert feq(m.enumeration_value(3), 0.3)
    anys = [AnyType().enumeration_value(k) for k in xrange(SAMPLES)]
    if len(anys) > 2:
        assert any(x is not None for x in anys)
    class Foo(object): pass
    assert AnyType().enumeration_value(0, types=[Foo]) == 0
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
    # Run every test_* function in this module when executed directly.
    from syn.base_utils import run_all_tests
    run_all_tests(globals(), verbose=True, print_errors=False)
| mit | 461,501,653,899,558,140 | 26.568116 | 80 | 0.561981 | false |
dermoth/gramps | data/man/pt_BR/conf.py | 12 | 7714 | # -*- coding: utf-8 -*-
#
# Gramps documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 1 14:38:29 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'pt_BR'
# General information about the project.
project = u'Gramps'
copyright = u'2015, Gramps project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.2'
# The full version, including alpha/beta/rc tags.
release = '4.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
locale_dirs = './locale'
gettext_compact = True
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# Base name for files produced by the HTML help builder (e.g. Grampsdoc.hhp).
htmlhelp_basename = 'Grampsdoc'
# -- Options for LaTeX output --------------------------------------------------
# Options forwarded verbatim to the Sphinx LaTeX builder; every key is left
# at its default here (all entries commented out).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# Tuple fields: (source start file, target name, title, author, documentclass).
# NOTE(review): the author field is u'.' — presumably a deliberate placeholder;
# confirm before release builds.
latex_documents = [
  ('index', 'Gramps.tex', u'Gramps Documentation',
   u'.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Tuple fields: (source start file, name, description, authors, manual section).
# NOTE(review): source start file is 'pt_BR' rather than the usual 'index' —
# looks intentional for the Brazilian-Portuguese manual; confirm.
man_pages = [
    ('pt_BR', 'gramps', u'Gramps Documentation',
     [u'.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Tuple fields: (source start file, target name, title, author,
# dir menu entry, description, category).
# NOTE(review): the description is still the sphinx-quickstart placeholder
# text ('One line description of project.').
texinfo_documents = [
  ('index', 'Gramps', u'Gramps Documentation',
   u'.', 'Gramps', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-2.0 | 6,465,518,265,717,107,000 | 30.744856 | 80 | 0.701582 | false |
belmiromoreira/nova | nova/image/download/__init__.py | 61 | 1636 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import stevedore.driver
import stevedore.extension
from nova.i18n import _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def load_transfer_modules():
    """Discover image-download drivers, keyed by the URL scheme they serve.

    Every entry point registered under the ``nova.image.download.modules``
    namespace is loaded (without instantiation) and each scheme it
    advertises via ``get_schemes()`` is mapped to its driver class.  When
    two modules claim the same scheme, the first registration wins and an
    error is logged for the duplicate.

    :returns: dict mapping scheme name -> driver class
    """
    modules_by_scheme = {}
    extensions = stevedore.extension.ExtensionManager(
        'nova.image.download.modules')
    for module_name in extensions.names():
        manager = stevedore.driver.DriverManager(
            namespace='nova.image.download.modules',
            name=module_name,
            invoke_on_load=False)
        for scheme in manager.driver.get_schemes():
            if scheme not in modules_by_scheme:
                modules_by_scheme[scheme] = manager.driver
            else:
                LOG.error(_LE('%(scheme)s is registered as a module twice. '
                              '%(module_name)s is not being used.'),
                          {'scheme': scheme,
                           'module_name': module_name})
    return modules_by_scheme
| apache-2.0 | -3,804,272,018,555,422,000 | 33.083333 | 78 | 0.645477 | false |
benthomasson/cumulus-linux-ansible-modules | tests/test_cl_license.py | 6 | 2493 | import mock
from mock import MagicMock
from nose.tools import set_trace
from library import cl_license
from asserts import assert_equals
from datetime import date, datetime
def mod_args_generator(values, *args):
    """Build a side-effect callable that looks its argument up in *values*.

    Extra positional *args* are accepted for signature compatibility with
    mock's ``side_effect`` hooks but are ignored.
    """
    def mod_args(args):
        return values[args]
    return mod_args
@mock.patch('library.cl_license.AnsibleModule')
def test_install_license_failed(mock_ansible_module):
    """install_license must call fail_json with stderr when cl-license fails."""
    instance = mock_ansible_module.return_value
    instance.params = {'src': 'blah'}
    run_command = MagicMock()
    # Non-zero return code simulates a failed license installation.
    run_command.return_value = (1, 'output', 'err')
    instance.run_command = run_command
    cl_license.install_license(instance)
    run_command.assert_called_with('/usr/cumulus/bin/cl-license -i blah')
    # stderr of the failed command is surfaced as the failure message.
    instance.fail_json.assert_called_with(msg='err')
@mock.patch('library.cl_license.AnsibleModule')
def test_install_license_passed(mock_ansible_module):
    """install_license must not call fail_json when cl-license succeeds."""
    instance = mock_ansible_module.return_value
    instance.params = {'src': 'blah'}
    run_command = MagicMock()
    # Return code 0 simulates a successful license installation.
    run_command.return_value = (0, 'output', None)
    instance.run_command = run_command
    cl_license.install_license(instance)
    run_command.assert_called_with('/usr/cumulus/bin/cl-license -i blah')
    assert_equals(instance.fail_json.call_count, 0)
@mock.patch('library.cl_license.install_license')
@mock.patch('library.cl_license.AnsibleModule')
def test_license_not_installed(mock_ansible_module,
                               mock_install_license):
    """main() installs the license when cl-license reports none present."""
    instance = mock_ansible_module.return_value
    instance.params = {'src': 'blah'}
    run_command = MagicMock()
    # Return code 20 is how cl-license signals "no license installed".
    run_command.return_value = (20, 'No license', None)
    instance.run_command = run_command
    cl_license.main()
    assert_equals(mock_install_license.call_count, 1)
    instance.exit_json.assert_called_with(msg='License installation completed',
                                          changed=True)
@mock.patch('library.cl_license.install_license')
@mock.patch('library.cl_license.AnsibleModule')
def test_license_already_installed(mock_ansible_module,
                                   mock_install_license):
    """main() is a no-op (changed=False) when a license is already present."""
    instance = mock_ansible_module.return_value
    instance.params = {'src': 'blah'}
    run_command = MagicMock()
    # Return code 0 means cl-license found an existing license.
    run_command.return_value = (0, 'license is there', None)
    instance.run_command = run_command
    cl_license.main()
    assert_equals(mock_install_license.call_count, 0)
    instance.exit_json.assert_called_with(
        msg='No change. License already installed',
        changed=False)
| gpl-3.0 | -7,002,144,874,640,396,000 | 35.661765 | 79 | 0.708383 | false |
popazerty/gui-test | lib/python/Components/Renderer/DMCHDMaxTemp.py | 11 | 2083 | # -*- coding: utf-8 -*-
#
# Maximum Temperature Renderer for Dreambox/Enigma-2
# Coded by Vali (c)2010
# Support: www.dreambox-tools.info
#
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
#
#######################################################################
from Components.VariableText import VariableText
from Components.Sensors import sensors
from Tools.HardwareInfo import HardwareInfo
from enigma import eLabel
from Renderer import Renderer
from os import popen
class DMCHDMaxTemp(Renderer, VariableText):
	"""Skin renderer showing either the maximum sensor temperature or the
	1-minute load average, depending on the receiver model.

	On DM8000/DM800se/DM500-family boxes (``ZeigeTemp`` — German for
	"show temperature" — is True) the hottest temperature sensor is shown;
	on other models the first load-average figure is shown instead.
	"""
	def __init__(self):
		Renderer.__init__(self)
		VariableText.__init__(self)
		# Temperature display only on models whose name contains one of
		# these substrings; everything else falls back to load average.
		if "8000" in HardwareInfo().get_device_name() or "800se" in HardwareInfo().get_device_name() or "500" in HardwareInfo().get_device_name():
			self.ZeigeTemp = True
		else:
			self.ZeigeTemp = False

	GUI_WIDGET = eLabel

	def changed(self, what):
		# Refresh the label text; skipped entirely while the widget is hidden.
		if not self.suspended:
			if self.ZeigeTemp:
				maxtemp = 0
				try:
					# Take the maximum over all temperature sensors.
					templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE)
					tempcount = len(templist)
					for count in range(tempcount):
						id = templist[count]
						tt = sensors.getSensorValue(id)
						if tt > maxtemp:
							maxtemp = tt
				except:
					# Best-effort: on any sensor error keep the last maximum (0).
					pass
				self.text = str(maxtemp) + "°C"
			else:
				loada = 0
				try:
					# First field of /proc/loadavg is the 1-minute average
					# (four characters, e.g. "0.42").
					out_line = popen("cat /proc/loadavg").readline()
					loada = out_line[:4]
				except:
					pass
				self.text = loada

	def onShow(self):
		self.suspended = False
		self.changed(None)

	def onHide(self):
		self.suspended = True
| gpl-2.0 | -1,574,369,708,248,655,000 | 28.757143 | 140 | 0.675948 | false |
openstack/sqlalchemy-migrate | migrate/changeset/constraint.py | 140 | 7318 | """
This module defines standalone schema constraint classes.
"""
from sqlalchemy import schema
from migrate.exceptions import *
class ConstraintChangeset(object):
    """Base class for Constraint classes.

    Provides column normalization and the create/drop operations shared by
    all migrate constraint types; the actual SQL is emitted by per-database
    visitor classes looked up at call time.
    """

    def _normalize_columns(self, cols, table_name=False):
        """Given: column objects or names; return col names and
        (maybe) a table.

        The table is taken from the first Column object encountered; plain
        string entries are passed through unchanged.  With ``table_name``
        True, Column objects are rendered as ``table.column``.
        """
        colnames = []
        table = None
        for col in cols:
            if isinstance(col, schema.Column):
                if col.table is not None and table is None:
                    table = col.table
                if table_name:
                    col = '.'.join((col.table.name, col.name))
                else:
                    col = col.name
            colnames.append(col)
        return colnames, table

    def __do_imports(self, visitor_name, *a, **kw):
        # Resolve the database-specific visitor lazily to avoid circular
        # imports between changeset modules.
        engine = kw.pop('engine', self.table.bind)
        from migrate.changeset.databases.visitor import (get_engine_visitor,
                                                         run_single_visitor)
        visitorcallable = get_engine_visitor(engine, visitor_name)
        run_single_visitor(engine, visitorcallable, self, *a, **kw)

    def create(self, *a, **kw):
        """Create the constraint in the database.

        :param engine: the database engine to use. If this is \
:keyword:`None` the instance's engine will be used
        :type engine: :class:`sqlalchemy.engine.base.Engine`
        :param connection: reuse connection instead of creating new one.
        :type connection: :class:`sqlalchemy.engine.base.Connection` instance
        """
        # TODO: set the parent here instead of in __init__
        self.__do_imports('constraintgenerator', *a, **kw)

    def drop(self, *a, **kw):
        """Drop the constraint from the database.

        :param engine: the database engine to use. If this is
          :keyword:`None` the instance's engine will be used
        :param cascade: Issue CASCADE drop if database supports it
        :type engine: :class:`sqlalchemy.engine.base.Engine`
        :type cascade: bool
        :param connection: reuse connection instead of creating new one.
        :type connection: :class:`sqlalchemy.engine.base.Connection` instance
        :returns: Instance with cleared columns
        """
        self.cascade = kw.pop('cascade', False)
        self.__do_imports('constraintdropper', *a, **kw)
        # the spirit of Constraint objects is that they
        # are immutable (just like in a DB. they're only ADDed
        # or DROPped).
        #self.columns.clear()
        return self
class PrimaryKeyConstraint(ConstraintChangeset, schema.PrimaryKeyConstraint):
    """Construct PrimaryKeyConstraint

    Migrate's additional parameters:

    :param cols: Columns in constraint.
    :param table: If columns are passed as strings, this kw is required
    :type table: Table instance
    :type cols: strings or Column instances
    """

    __migrate_visit_name__ = 'migrate_primary_key_constraint'

    def __init__(self, *cols, **kwargs):
        # Accept Column objects or plain names; a table discovered from a
        # Column can be overridden by the explicit ``table`` keyword.
        colnames, table = self._normalize_columns(cols)
        table = kwargs.pop('table', table)
        super(PrimaryKeyConstraint, self).__init__(*colnames, **kwargs)
        if table is not None:
            self._set_parent(table)

    def autoname(self):
        """Mimic the database's automatic constraint names"""
        return "%s_pkey" % self.table.name
class ForeignKeyConstraint(ConstraintChangeset, schema.ForeignKeyConstraint):
    """Construct ForeignKeyConstraint

    Migrate's additional parameters:

    :param columns: Columns in constraint
    :param refcolumns: Columns that this FK reffers to in another table.
    :param table: If columns are passed as strings, this kw is required
    :type table: Table instance
    :type columns: list of strings or Column instances
    :type refcolumns: list of strings or Column instances
    """

    __migrate_visit_name__ = 'migrate_foreign_key_constraint'

    def __init__(self, columns, refcolumns, *args, **kwargs):
        colnames, table = self._normalize_columns(columns)
        table = kwargs.pop('table', table)
        # Referenced columns are rendered as "table.column" strings, as
        # required by SQLAlchemy's ForeignKeyConstraint.
        refcolnames, reftable = self._normalize_columns(refcolumns,
                                                        table_name=True)
        super(ForeignKeyConstraint, self).__init__(colnames, refcolnames, *args,
                                                   **kwargs)
        if table is not None:
            self._set_parent(table)

    @property
    def referenced(self):
        # The target Column objects on the referred table.
        return [e.column for e in self.elements]

    @property
    def reftable(self):
        return self.referenced[0].table

    def autoname(self):
        """Mimic the database's automatic constraint names"""
        if hasattr(self.columns, 'keys'):
            # SA <= 0.5: ``columns`` is an ordered mapping of Column objects.
            firstcol = self.columns[self.columns.keys()[0]]
            ret = "%(table)s_%(firstcolumn)s_fkey" % dict(
                table=firstcol.table.name,
                firstcolumn=firstcol.name,)
        else:
            # SA >= 0.6: ``columns`` is a plain list of column names.
            ret = "%(table)s_%(firstcolumn)s_fkey" % dict(
                table=self.table.name,
                firstcolumn=self.columns[0],)
        return ret
class CheckConstraint(ConstraintChangeset, schema.CheckConstraint):
    """Construct CheckConstraint

    Migrate's additional parameters:

    :param sqltext: Plain SQL text to check condition
    :param columns: If no name is supplied, you must supply this kw \
to autoname the constraint
    :param table: If columns are passed as strings, this kw is required
    :type table: Table instance
    :type columns: list of Columns instances
    :type sqltext: string
    """

    __migrate_visit_name__ = 'migrate_check_constraint'

    def __init__(self, sqltext, *args, **kwargs):
        cols = kwargs.pop('columns', [])
        # Either an explicit name or at least one column (from which
        # autoname() can derive a name) is required.
        if not cols and not kwargs.get('name', False):
            # Bug fix: the original message read '"name"parameter' (the two
            # adjacent string literals concatenated without a space) and
            # misspelled "autogenerate" as "autogenarate".
            raise InvalidConstraintError('You must either set "name" '
                'parameter or "columns" to autogenerate it.')
        colnames, table = self._normalize_columns(cols)
        table = kwargs.pop('table', table)
        schema.CheckConstraint.__init__(self, sqltext, *args, **kwargs)
        if table is not None:
            self._set_parent(table)
        self.colnames = colnames

    def autoname(self):
        """Mimic the database's automatic constraint names"""
        return "%(table)s_%(cols)s_check" % \
                dict(table=self.table.name, cols="_".join(self.colnames))
class UniqueConstraint(ConstraintChangeset, schema.UniqueConstraint):
    """Construct UniqueConstraint

    Migrate's additional parameters:

    :param cols: Columns in constraint.
    :param table: If columns are passed as strings, this kw is required
    :type table: Table instance
    :type cols: strings or Column instances

    .. versionadded:: 0.6.0
    """

    __migrate_visit_name__ = 'migrate_unique_constraint'

    def __init__(self, *cols, **kwargs):
        # colnames is kept on the instance for autoname().
        self.colnames, table = self._normalize_columns(cols)
        table = kwargs.pop('table', table)
        super(UniqueConstraint, self).__init__(*self.colnames, **kwargs)
        if table is not None:
            self._set_parent(table)

    def autoname(self):
        """Mimic the database's automatic constraint names"""
        return "%s_%s_key" % (self.table.name, self.colnames[0])
| mit | 784,172,165,800,105,000 | 35.773869 | 80 | 0.618202 | false |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/learn/python/learn/utils/export.py | 48 | 13721 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_first_op_from_collection(collection_name):
  """Return the first op registered under `collection_name`, or None."""
  # An empty or missing collection both yield None.
  elements = ops.get_collection(collection_name)
  if elements:
    return elements[0]
  return None
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_saver():
  """Lazy init and return saver.

  Looks for an existing Saver in the SAVERS collection; if none exists and
  the graph has global variables, a fresh Saver is created and registered.
  """
  saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
  if saver is not None:
    if saver:
      # NOTE(review): indexing here suggests the collection entry may itself
      # be a list of savers — confirm; _get_first_op_from_collection already
      # unwrapped one level.
      saver = saver[0]
    else:
      saver = None
  if saver is None and variables.global_variables():
    saver = tf_saver.Saver()
    ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
  return saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_graph(graph, saver, checkpoint_path, export_dir,
                  default_graph_signature, named_graph_signatures,
                  exports_to_keep):
  """Exports graph via session_bundle, by creating a Session."""
  with graph.as_default():
    with tf_session.Session('') as session:
      # NOTE(review): these two calls create initializer ops but their
      # results are unused here; the actual init_op passed to export.init()
      # below groups fresh initializers — confirm the bare calls are needed.
      variables.local_variables_initializer()
      lookup_ops.tables_initializer()
      # Restore trained weights before exporting.
      saver.restore(session, checkpoint_path)

      export = exporter.Exporter(saver)
      export.init(
          init_op=control_flow_ops.group(
              variables.local_variables_initializer(),
              lookup_ops.tables_initializer()),
          default_graph_signature=default_graph_signature,
          named_graph_signatures=named_graph_signatures,
          assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))
      return export.export(export_dir, contrib_variables.get_global_step(),
                           session, exports_to_keep=exports_to_keep)
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def generic_signature_fn(examples, unused_features, predictions):
  """Creates generic signature from given examples and predictions.

  This is needed for backward compatibility with default behavior of
  export_estimator.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or `dict` of `Tensor`s.

  Returns:
    Tuple of default signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  # Bundle the input tensor with every prediction tensor under one signature.
  tensors = {'inputs': examples}
  if isinstance(predictions, dict):
    tensors.update(predictions)
  else:
    tensors['outputs'] = predictions
  return exporter.generic_signature(tensors), {}
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or dict of tensors that contains the classes tensor
      as in {'classes': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  # Accept either a bare classes tensor or a dict holding one.
  classes = predictions['classes'] if isinstance(predictions, dict) \
      else predictions
  default_signature = exporter.classification_signature(
      examples, classes_tensor=classes)
  return default_signature, {}
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn_with_prob(
    examples, unused_features, predictions):
  """Classification signature from given examples and predicted probabilities.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of predicted probabilities or dict that contains the
      probabilities tensor as in {'probabilities', `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  # Accept either a bare probabilities tensor or a dict holding one.
  scores = predictions['probabilities'] if isinstance(predictions, dict) \
      else predictions
  default_signature = exporter.classification_signature(
      examples, scores_tensor=scores)
  return default_signature, {}
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def regression_signature_fn(examples, unused_features, predictions):
  """Creates regression signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor`.

  Returns:
    Tuple of default regression signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  signature = exporter.regression_signature(input_tensor=examples,
                                            output_tensor=predictions)
  return signature, {}
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def logistic_regression_signature_fn(examples, unused_features, predictions):
  """Creates logistic regression signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
      dict that contains the probabilities tensor as in
      {'probabilities', `Tensor`}.

  Returns:
    Tuple of default regression signature and named signature.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  if isinstance(predictions, dict):
    predictions_tensor = predictions['probabilities']
  else:
    predictions_tensor = predictions
  # predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
  # while second column is P(Y=1|x). We are only interested in the second
  # column for inference.
  predictions_shape = predictions_tensor.get_shape()
  predictions_rank = len(predictions_shape)
  if predictions_rank != 2:
    logging.fatal(
        'Expected predictions to have rank 2, but received predictions with '
        'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
  if predictions_shape[1] != 2:
    logging.fatal(
        'Expected predictions to have 2nd dimension: 2, but received '
        'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
        'regression_signature_fn or classification_signature_fn_with_prob '
        'instead?'.format(predictions_shape[1], predictions_shape))

  # Slice out P(Y=1|x) and export it as a regression output.
  positive_predictions = predictions_tensor[:, 1]
  default_signature = exporter.regression_signature(
      input_tensor=examples, output_tensor=positive_predictions)
  return default_signature, {}
# pylint: disable=protected-access
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _default_input_fn(estimator, examples):
  """Creates default input parsing using Estimator's feature signatures."""
  # Delegates to the estimator's own feature-parsing logic (protected API).
  return estimator._get_feature_ops_from_example(examples)
@deprecated('2016-09-23', 'Please use Estimator.export_savedmodel() instead.')
def export_estimator(estimator,
                     export_dir,
                     signature_fn=None,
                     input_fn=_default_input_fn,
                     default_batch_size=1,
                     exports_to_keep=None):
  """Deprecated, please use Estimator.export_savedmodel().

  Thin public wrapper that forwards to _export_estimator with the legacy
  (deprecated) input_fn calling convention.
  """
  _export_estimator(estimator=estimator,
                    export_dir=export_dir,
                    signature_fn=signature_fn,
                    input_fn=input_fn,
                    default_batch_size=default_batch_size,
                    exports_to_keep=exports_to_keep)
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_estimator(estimator,
                      export_dir,
                      signature_fn,
                      input_fn,
                      default_batch_size,
                      exports_to_keep,
                      input_feature_key=None,
                      use_deprecated_input_fn=True,
                      prediction_key=None,
                      checkpoint_path=None):
  """Builds an inference graph for `estimator` and exports it via session_bundle.

  Supports two input conventions: the deprecated one (input_fn(estimator,
  examples) fed from a string placeholder) and the modern one
  (input_fn() -> (features, labels)).  Signatures come from `signature_fn`,
  the estimator's own signature fn, or a generic fallback, in that order.
  """
  if use_deprecated_input_fn:
    input_fn = input_fn or _default_input_fn
  elif input_fn is None:
    raise ValueError('input_fn must be defined.')

  # If checkpoint_path is specified, use the specified checkpoint path.
  checkpoint_path = (checkpoint_path or
                     tf_saver.latest_checkpoint(estimator._model_dir))
  with ops.Graph().as_default() as g:
    contrib_variables.create_global_step(g)

    if use_deprecated_input_fn:
      examples = array_ops.placeholder(dtype=dtypes.string,
                                       shape=[default_batch_size],
                                       name='input_example_tensor')
      features = input_fn(estimator, examples)
    else:
      features, _ = input_fn()
      examples = None
      if input_feature_key is not None:
        # The raw input tensor is carved out of the feature dict.
        examples = features.pop(input_feature_key)

    if (not features) and (examples is None):
      raise ValueError('Either features or examples must be defined.')

    predictions = estimator._get_predict_ops(features).predictions

    if prediction_key is not None:
      predictions = predictions[prediction_key]

    # Explicit signature_fn takes priority
    if signature_fn:
      default_signature, named_graph_signatures = signature_fn(examples,
                                                               features,
                                                               predictions)
    else:
      try:
        # Some estimators provide a signature function.
        # TODO(zakaria): check if the estimator has this function,
        # raise helpful error if not
        signature_fn = estimator._create_signature_fn()

        default_signature, named_graph_signatures = (
            signature_fn(examples, features, predictions))
      except AttributeError:
        logging.warn(
            'Change warning: `signature_fn` will be required after'
            '2016-08-01.\n'
            'Using generic signatures for now. To maintain this behavior, '
            'pass:\n'
            ' signature_fn=export.generic_signature_fn\n'
            'Also consider passing a regression or classification signature; '
            'see cl/126430915 for an example.')
        default_signature, named_graph_signatures = generic_signature_fn(
            examples, features, predictions)
    if exports_to_keep is not None:
      exports_to_keep = gc.largest_export_versions(exports_to_keep)
    return _export_graph(
        g,
        _get_saver(),
        checkpoint_path,
        export_dir,
        default_graph_signature=default_signature,
        named_graph_signatures=named_graph_signatures,
        exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
| apache-2.0 | -6,981,926,323,205,534,000 | 37.759887 | 82 | 0.674732 | false |
caseyrollins/osf.io | addons/base/generic_views.py | 9 | 4519 | """Generic add-on view factories"""
# -*- coding: utf-8 -*-
import httplib as http
from flask import request
from framework.exceptions import HTTPError, PermissionsError
from framework.auth.decorators import must_be_logged_in
from osf.models import ExternalAccount
from osf.utils import permissions
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_valid_project
)
def import_auth(addon_short_name, Serializer):
    """Factory: build a view importing a user's add-on credentials to a node."""
    @must_have_addon(addon_short_name, 'user')
    @must_have_addon(addon_short_name, 'node')
    @must_have_permission(permissions.WRITE)
    def _import_auth(auth, node_addon, user_addon, **kwargs):
        """Import add-on credentials from the currently logged-in user to a node.
        """
        external_account = ExternalAccount.load(
            request.json['external_account_id']
        )
        # The account must actually belong to the requesting user.
        if not user_addon.external_accounts.filter(id=external_account.id).exists():
            raise HTTPError(http.FORBIDDEN)

        try:
            node_addon.set_auth(external_account, user_addon.owner)
        except PermissionsError:
            raise HTTPError(http.FORBIDDEN)

        node_addon.save()
        return {
            'result': Serializer().serialize_settings(node_addon, auth.user),
            'message': 'Successfully imported access token from profile.',
        }
    # Views need distinct names per add-on for URL routing/debugging.
    _import_auth.__name__ = '{0}_import_auth'.format(addon_short_name)
    return _import_auth
def account_list(addon_short_name, Serializer):
    """Factory: build a view listing the current user's add-on accounts."""
    @must_be_logged_in
    def _account_list(auth):
        settings_for_user = auth.user.get_addon(addon_short_name)
        return Serializer(user_settings=settings_for_user).serialized_user_settings
    _account_list.__name__ = '{0}_account_list'.format(addon_short_name)
    return _account_list
def folder_list(addon_short_name, addon_full_name, get_folders):
    """Factory: build a view listing remote folders for a node's add-on."""
    # TODO [OSF-6678]: Generalize this for API use after node settings have been refactored
    @must_have_addon(addon_short_name, 'node')
    @must_be_addon_authorizer(addon_short_name)
    def _folder_list(node_addon, **kwargs):
        """Returns a list of folders"""
        if not node_addon.has_auth:
            raise HTTPError(http.FORBIDDEN)
        # Optional parent folder to list children of.
        folder_id = request.args.get('folderId')
        return get_folders(node_addon, folder_id)
    _folder_list.__name__ = '{0}_folder_list'.format(addon_short_name)
    return _folder_list
def get_config(addon_short_name, Serializer):
    """Factory: build a view returning the node's serialized add-on settings."""
    @must_be_logged_in
    @must_have_addon(addon_short_name, 'node')
    @must_be_valid_project
    @must_have_permission(permissions.WRITE)
    def _get_config(node_addon, auth, **kwargs):
        """API that returns the serialized node settings."""
        return {
            'result': Serializer().serialize_settings(
                node_addon,
                auth.user
            )
        }
    _get_config.__name__ = '{0}_get_config'.format(addon_short_name)
    return _get_config
def set_config(addon_short_name, addon_full_name, Serializer, set_folder):
    """Factory: build a view that changes which remote folder a node links."""
    @must_not_be_registration
    @must_have_addon(addon_short_name, 'user')
    @must_have_addon(addon_short_name, 'node')
    @must_be_addon_authorizer(addon_short_name)
    @must_have_permission(permissions.WRITE)
    def _set_config(node_addon, user_addon, auth, **kwargs):
        """View for changing a node's linked folder."""
        folder = request.json.get('selected')
        set_folder(node_addon, folder, auth)

        path = node_addon.folder_path

        return {
            'result': {
                'folder': {
                    # A path of '/' means the whole provider is linked.
                    'name': path.replace('All Files', '') if path != '/' else '/ (Full {0})'.format(
                        addon_full_name
                    ),
                    'path': path,
                },
                'urls': Serializer(node_settings=node_addon).addon_serialized_urls,
            },
            'message': 'Successfully updated settings.',
        }
    _set_config.__name__ = '{0}_set_config'.format(addon_short_name)
    return _set_config
def deauthorize_node(addon_short_name):
    """Factory: build a view that removes add-on authorization from a node."""
    @must_not_be_registration
    @must_have_addon(addon_short_name, 'node')
    @must_have_permission(permissions.WRITE)
    def _deauthorize_node(auth, node_addon, **kwargs):
        # Returns nothing; success is an empty 200 response.
        node_addon.deauthorize(auth=auth)
        node_addon.save()
    _deauthorize_node.__name__ = '{0}_deauthorize_node'.format(addon_short_name)
    return _deauthorize_node
| apache-2.0 | -6,626,591,843,576,257,000 | 36.347107 | 100 | 0.634211 | false |
nemesisdesign/django | django/contrib/sessions/backends/base.py | 27 | 12374 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
    """Raised when a session backend cannot create a new session record.

    Used internally as a consistent exception type to catch from save (see
    the docstring for SessionBase.save() for details).
    """
class UpdateError(Exception):
    """Raised when Django tries to update a session that was deleted."""
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
__not_given = object()
    def __init__(self, session_key=None):
        # accessed/modified track whether the session was read or written
        # during the request; the serializer class is resolved from settings.
        self._session_key = session_key
        self.accessed = False
        self.modified = False
        self.serializer = import_string(settings.SESSION_SERIALIZER)
    def __contains__(self, key):
        # Supports ``key in session``.
        return key in self._session
    def __getitem__(self, key):
        # Supports ``session[key]``; raises KeyError if absent.
        return self._session[key]
    def __setitem__(self, key, value):
        # Any write marks the session dirty so it gets persisted.
        self._session[key] = value
        self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=__not_given):
self.modified = self.modified or key in self._session
args = () if default is self.__not_given else (default,)
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
Validate session key on assignment. Invalid values will set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, while retaining the current session data.
"""
try:
data = self._session_cache
except AttributeError:
data = {}
key = self.session_key
self.create()
self._session_cache = data
if key:
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() only updates an existing object and does not create one
(an UpdateError is raised).
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
| bsd-3-clause | 8,310,941,662,121,159,000 | 32.808743 | 94 | 0.61306 | false |
SebasSBM/django | django/template/smartif.py | 275 | 6643 | """
Parser and utilities for the smart 'if' tag
"""
import warnings
from django.utils.deprecation import RemovedInDjango110Warning
# Using a simple top down parser, as described here:
# http://effbot.org/zone/simple-top-down-parsing.htm.
# 'led' = left denotation
# 'nud' = null denotation
# 'bp' = binding power (left = lbp, right = rbp)
class TokenBase(object):
    """
    Common ancestor of operators and literals in the smart-if parser.
    Exists mainly for debugging output and for raising helpful syntax
    errors when a token appears in an illegal position.
    """
    id = None              # node/token type name
    value = None           # payload, used by literals
    first = second = None  # child nodes, used by operator trees

    def nud(self, parser):
        # Null denotation: called when the token appears in prefix
        # position. Tokens are not allowed there by default.
        raise parser.error_class(
            "Not expecting '%s' in this position in if tag." % self.id
        )

    def led(self, left, parser):
        # Left denotation: called when the token appears in infix
        # position. Tokens are not infix operators by default.
        raise parser.error_class(
            "Not expecting '%s' as infix operator in if tag." % self.id
        )

    def display(self):
        """
        Returns what to display in error messages for this node
        """
        return self.id

    def __repr__(self):
        parts = (self.id, self.first, self.second)
        return "(%s)" % " ".join(str(part) for part in parts if part is not None)
def infix(bp, func):
    """
    Build a binary (infix) operator token class.

    ``bp`` is the operator's binding power; ``func`` is invoked as
    ``func(context, left_node, right_node)`` to evaluate the node.
    """
    class Operator(TokenBase):
        lbp = bp

        def led(self, left, parser):
            # Infix position: keep the already-parsed left operand and
            # parse the right-hand side with our own binding power.
            self.first = left
            self.second = parser.expression(bp)
            return self

        def eval(self, context):
            # Template rendering must never raise; e.g. {% if foo in bar %}
            # where 'bar' does not support 'in' simply evaluates to False.
            try:
                return func(context, self.first, self.second)
            except Exception:
                return False

    return Operator
def prefix(bp, func):
    """
    Build a unary (prefix) operator token class.

    ``bp`` is the operator's binding power; ``func`` is invoked as
    ``func(context, operand_node)`` to evaluate the node.
    """
    class Operator(TokenBase):
        lbp = bp

        def nud(self, parser):
            # Prefix position: parse our single operand.
            self.first = parser.expression(bp)
            self.second = None
            return self

        def eval(self, context):
            # Never raise while rendering a template; fall back to False.
            try:
                return func(context, self.first)
            except Exception:
                return False

    return Operator
# Operator precedence follows Python.
# NB - we can get slightly more accurate syntax error messages by not using the
# same object for '==' and '='.
# We defer variable evaluation to the lambda to ensure that terms are
# lazily evaluated using Python's boolean parsing logic.
# Binding powers: 'or' (6) binds loosest, then 'and' (7), 'not' (8),
# membership tests (9), and comparisons (10) bind tightest.
OPERATORS = {
    'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)),
    'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)),
    'not': prefix(8, lambda context, x: not x.eval(context)),
    'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)),
    'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)),
    # This should be removed in Django 1.10:
    '=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
    '==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
    '!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)),
    '>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)),
    '>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)),
    '<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)),
    '<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)),
}
# Assign 'id' to each:
for key, op in OPERATORS.items():
    op.id = key
class Literal(TokenBase):
    """
    A basic self-resolvable terminal node, similar to a Django template
    variable.
    """
    # IfParser uses Literal in create_var, but TemplateIfParser overrides
    # create_var so that a proper implementation that actually resolves
    # variables, filters etc is used.
    id = "literal"
    lbp = 0

    def __init__(self, value):
        self.value = value

    def nud(self, parser):
        # A literal is valid in prefix position and is already complete.
        return self

    def eval(self, context):
        return self.value

    def display(self):
        return repr(self.value)

    def __repr__(self):
        return "({0} {1!r})".format(self.id, self.value)
class EndToken(TokenBase):
    # Sentinel returned once the token stream is exhausted; reaching it in
    # prefix position means the expression ended too early.
    lbp = 0
    def nud(self, parser):
        raise parser.error_class("Unexpected end of expression in if tag.")
# Only one instance is ever needed, so the class name is rebound to its
# singleton instance.
EndToken = EndToken()
class IfParser(object):
    """Top-down (Pratt) parser for smart-if expressions.

    Takes a sequence of string tokens, maps them to operator/literal token
    objects and builds an evaluable expression tree via parse().
    """
    error_class = ValueError
    def __init__(self, tokens):
        # pre-pass necessary to turn 'not','in' into single token
        l = len(tokens)
        mapped_tokens = []
        i = 0
        while i < l:
            token = tokens[i]
            if token == "not" and i + 1 < l and tokens[i + 1] == "in":
                token = "not in"
                i += 1  # skip 'in'
            mapped_tokens.append(self.translate_token(token))
            i += 1
        self.tokens = mapped_tokens
        self.pos = 0
        self.current_token = self.next_token()
    def translate_token(self, token):
        """Map a string token to an operator instance or a variable node."""
        try:
            op = OPERATORS[token]
        except (KeyError, TypeError):
            # Not an operator: treat it as a literal/variable.
            return self.create_var(token)
        else:
            if token == '=':
                warnings.warn(
                    "Operator '=' is deprecated and will be removed in Django 1.10. Use '==' instead.",
                    RemovedInDjango110Warning, stacklevel=2
                )
            return op()
    def next_token(self):
        # Returns the EndToken sentinel once the stream is exhausted.
        if self.pos >= len(self.tokens):
            return EndToken
        else:
            retval = self.tokens[self.pos]
            self.pos += 1
            return retval
    def parse(self):
        """Parse the whole token stream and return the root node."""
        retval = self.expression()
        # Check that we have exhausted all the tokens
        if self.current_token is not EndToken:
            raise self.error_class("Unused '%s' at end of if expression." %
                                   self.current_token.display())
        return retval
    def expression(self, rbp=0):
        # Standard Pratt expression loop: consume a prefix token, then keep
        # folding infix operators while they bind tighter than rbp.
        t = self.current_token
        self.current_token = self.next_token()
        left = t.nud(self)
        while rbp < self.current_token.lbp:
            t = self.current_token
            self.current_token = self.next_token()
            left = t.led(left, self)
        return left
    def create_var(self, value):
        # Subclasses override this to resolve real template variables.
        return Literal(value)
| bsd-3-clause | 5,249,933,136,682,386,000 | 29.897674 | 103 | 0.578052 | false |
Benoss/elastic_admin | elastic_utils/utils.py | 1 | 1080 | import time
def singleton(cls):
    """Class decorator: replace *cls* with a zero-argument factory that
    builds the instance on first call and returns that same instance on
    every subsequent call.
    """
    cache = {}

    def getinstance():
        try:
            return cache[cls]
        except KeyError:
            cache[cls] = cls()
            return cache[cls]

    return getinstance
class Timer(object):
    """Context manager that measures the wall-clock time of its block.

    On exit, the elapsed time is stored on the instance as ``secs`` and
    ``msecs`` and, depending on the constructor flags, printed and/or
    passed to ``logger`` as a formatted string.
    """

    def __init__(self, name='elapsed time', logger=None, print_result=False):
        self.name = name
        self.logger = logger
        self.verbose = print_result

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # milliseconds
        if self.verbose:
            print(self.get_formatted_string())
        if self.logger:
            self.logger(self.get_formatted_string())

    def get_formatted_string(self):
        """Return e.g. ``'elapsed time: 12.3 ms'``."""
        return '{}: {:.1f} ms'.format(self.name, self.msecs)
def new_index_from_name(base_name):
    """
    Return a new index name with a timestamp added at the end
    :param base_name: str
    :return: str
    """
    timestamp = int(time.time())
    return '{0}.{1}'.format(base_name, timestamp)
quantum13/hgh | hgh/urls.py | 1 | 1675 | # coding: utf-8
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL routing table (old-style django.conf.urls.patterns syntax).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'hgh.views.home', name='home'),
    # url(r'^hgh/', include('hgh.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
    # Authentication (logout redirects back to the landing page).
    url(r'^login/$', 'apps.main.views.login', name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page':'/'}, name='logout'),
    url(r'^error_url/$', 'apps.main.views.login_error', name='login_error'),
    url(r'^$', 'apps.main.views.home', name='home'),
    # Leaderboards: same view parameterized by ranking type.
    url(r'^rating/experience/$', 'apps.main.views.rating', {'type':'experience'}, name='rating_exp'),
    url(r'^rating/power/$', 'apps.main.views.rating', {'type':'power'}, name='rating_pow'),
    url(r'^profile/$', 'apps.main.views.profile', name='profile'),
    # Battle flow: pre-battle setup, the battle itself, then results.
    url(r'^fight/$', 'apps.main.views.prebattle', name='prebattle'),
    url(r'^battle/$', 'apps.main.views.battle', name='battle'),
    url(r'^battleresult/$', 'apps.main.views.postbattle', name='postbattle'),
    url(r'^spellgettargets/(?P<id>\d+)/$', 'apps.main.views.get_target', name='spellgettargets'),
    url(r'^battleinfo/(?P<id>\d+)/$', 'apps.main.views.battle_info', name='battle_info'),
    url(r'^info/(?P<login>[^/]+)/$', 'apps.main.views.info', name='info'),
    # Third-party authentication routes (django-social-auth).
    url(r'', include('social_auth.urls')),
)
SophieBartmann/Faust-Bot | FaustBot/Modules/CustomUserModules/ICDObserver.py | 1 | 1273 | import csv
import re
from FaustBot.Communication.Connection import Connection
from FaustBot.Modules.PrivMsgObserverPrototype import PrivMsgObserverPrototype
class ICDObserver(PrivMsgObserverPrototype):
    """Watches channel messages for ICD-10-looking codes (e.g. ``J45.0``)
    and replies with the matching description from ``care_icd10_de.csv``."""

    @staticmethod
    def cmd():
        # Passive observer: not bound to an explicit command.
        return None

    @staticmethod
    def help():
        return None

    def get_icd(self, code):
        """Look up *code* in the ICD-10 catalogue.

        Returns ``'<code> - <description>'`` on a match, ``""`` for codes
        that are skipped on purpose, and the sentinel ``0`` when the code
        is unknown (callers compare against ``0`` explicitly).
        """
        if code in ("C64", "P20"):
            # Skipped on purpose — presumably frequent false positives in
            # chat; confirm before changing.
            return ""
        # 'with' guarantees the catalogue file is closed even on the early
        # return below; the previous version leaked one handle per lookup.
        with open('care_icd10_de.csv', 'r', encoding='utf8') as icd10_codes:
            icd10 = csv.reader(icd10_codes, delimiter=';', quotechar='"')
            for row in icd10:
                if row[0] == code:
                    return code + ' - ' + row[1]
        return 0

    def update_on_priv_msg(self, data, connection: Connection):
        """Scan an incoming channel message for ICD-10-shaped tokens and
        send back the catalogue entry for each recognised code."""
        if data['channel'] != connection.details.get_channel():
            return
        # Letter + two digits, optional dot and sub-digit, e.g. 'J45.0'.
        regex = r'\b(\w\d{2}\.?\d?)\b'
        codes = re.findall(regex, data['message'])
        for code in codes:
            code = code.capitalize()
            text = self.get_icd(code)
            if text == 0:
                # Retry with the "unspecified subcode" notation used by the
                # catalogue: 'J45' -> 'J45.-', 'J45.' -> 'J45.-'.
                if code.find('.') != -1:
                    code += '-'
                else:
                    code += '.-'
                text = self.get_icd(code)
            if text != 0:
                connection.send_back(text, data)
F1000Research/khmer | sandbox/count-kmers-single.py | 1 | 3273 | #! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) University of California, Davis, 2015. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: [email protected]
#
# pylint: disable=missing-docstring,invalid-name
"""
Produce k-mer counts for all the k-mers in the given sequence file,
using the given countgraph.
% python sandbox/count-kmers-single.py <fasta/fastq>
Use '-h' for parameter help.
"""
from __future__ import print_function
import sys
import khmer
import argparse
import screed
import csv
from khmer.khmer_args import (build_counting_args, report_on_config, info,
add_threading_args)
from khmer.kfile import (check_input_files, check_space,
check_space_for_graph)
import threading
def get_parser():
    """Build the argument parser: khmer counting/threading options plus the
    input filename and an optional CSV output file."""
    parser = build_counting_args(
        descr="Output abundances of the k-mers in the sequence file.")
    add_threading_args(parser)
    parser.add_argument('input_sequence_filename', help='The input'
                        ' FAST[AQ] sequence file.')
    parser.add_argument('-o', '--out', metavar="output_file",
                        dest='output_file',
                        type=argparse.FileType('w'),
                        default=None, help='output counts to this file')
    return parser
def main():
    """Count all k-mers in the input file and write one CSV row per unique
    k-mer with its abundance."""
    info('count-kmers-single.py', ['counting'])
    args = get_parser().parse_args()
    check_input_files(args.input_sequence_filename, False)
    print ('making k-mer countgraph', file=sys.stderr)
    countgraph = khmer.Countgraph(args.ksize, args.max_tablesize,
                                  args.n_tables)
    # @CTB countgraph.set_use_bigcount(args.bigcount)
    kmer_size = countgraph.ksize()
    hashsizes = countgraph.hashsizes()
    # 'tracking' remembers which k-mers were already written so each unique
    # k-mer is reported exactly once.
    tracking = khmer._Nodegraph(  # pylint: disable=protected-access
        kmer_size, hashsizes)
    print ('kmer_size: %s' % countgraph.ksize(), file=sys.stderr)
    print ('k-mer countgraph sizes: %s' % (countgraph.hashsizes(),),
           file=sys.stderr)
    if args.output_file is None:
        args.output_file = sys.stdout
    writer = csv.writer(args.output_file)
    # start loading: consume the input in parallel to fill the countgraph.
    rparser = khmer.ReadParser(args.input_sequence_filename)
    threads = []
    print ('consuming input, round 1 -- %s' % (args.input_sequence_filename),
           file=sys.stderr)
    for _ in range(args.threads):
        thread = \
            threading.Thread(
                target=countgraph.consume_fasta_with_reads_parser,
                args=(rparser, )
            )
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    # Second pass: walk every k-mer of every read and emit unseen ones.
    for record in screed.open(args.input_sequence_filename):
        seq = record.sequence.replace('N', 'A')
        for i in range(len(seq) - kmer_size + 1):
            kmer = seq[i:i+kmer_size]
            if not tracking.get(kmer):
                tracking.count(kmer)
                writer.writerow([kmer, str(countgraph.get(kmer))])
    print ('Total number of unique k-mers: {0}'.format(
        countgraph.n_unique_kmers()), file=sys.stderr)
if __name__ == '__main__':
    main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
Ken69267/config-stuff | .vim/eclim/autoload/eclim/python/rope/refactor/occurrences.py | 91 | 10704 | import re
import rope.base.pynames
from rope.base import pynames, pyobjects, codeanalyze, evaluate, exceptions, utils, worder
class Finder(object):
    """For finding occurrences of a name
    The constructor takes a `filters` argument.  It should be a list
    of functions that take a single argument.  For each possible
    occurrence, these functions are called in order with the an
    instance of `Occurrence`:
    * If it returns `None` other filters are tried.
    * If it returns `True`, the occurrence will be a match.
    * If it returns `False`, the occurrence will be skipped.
    * If all of the filters return `None`, it is skipped also.
    """
    # NOTE(review): the mutable default for `filters` is shared across
    # calls; it is only iterated here, never mutated — confirm before
    # changing.
    def __init__(self, pycore, name, filters=[lambda o: True], docs=False):
        self.pycore = pycore
        self.name = name
        self.docs = docs
        self.filters = filters
        self._textual_finder = _TextualFinder(name, docs=docs)
    def find_occurrences(self, resource=None, pymodule=None):
        """Generate `Occurrence` instances"""
        tools = _OccurrenceToolsCreator(self.pycore, resource=resource,
                                        pymodule=pymodule, docs=self.docs)
        for offset in self._textual_finder.find_offsets(tools.source_code):
            occurrence = Occurrence(tools, offset)
            for filter in self.filters:
                result = filter(occurrence)
                if result is None:
                    # This filter abstains; ask the next one.
                    continue
                if result:
                    yield occurrence
                # First non-None filter result decides this occurrence.
                break
def create_finder(pycore, name, pyname, only_calls=False, imports=True,
                  unsure=None, docs=False, instance=None, in_hierarchy=False):
    """A factory for `Finder`

    Based on the arguments it creates a list of filters.  `instance`
    argument is needed only when you want implicit interfaces to be
    considered.
    """
    # Renamed from `pynames`: the old local shadowed the module-level
    # `pynames` import inside this function.
    names = set([pyname])
    filters = []
    if only_calls:
        filters.append(CallsFilter())
    if not imports:
        filters.append(NoImportsFilter())
    if isinstance(instance, rope.base.pynames.ParameterName):
        # Implicit interfaces: also match the attribute on every object the
        # parameter may hold.
        for pyobject in instance.get_objects():
            try:
                names.add(pyobject[name])
            except exceptions.AttributeNotFoundError:
                pass
    for pyname in names:
        filters.append(PyNameFilter(pyname))
        if in_hierarchy:
            filters.append(InHierarchyFilter(pyname))
    if unsure:
        filters.append(UnsureFilter(unsure))
    return Finder(pycore, name, filters=filters, docs=docs)
class Occurrence(object):
    """One candidate occurrence of the searched name at `offset`.

    Most accessors are memoized via `utils.saveit` since filters may query
    the same occurrence repeatedly.
    """
    def __init__(self, tools, offset):
        self.tools = tools
        self.offset = offset
        self.resource = tools.resource
    @utils.saveit
    def get_word_range(self):
        return self.tools.word_finder.get_word_range(self.offset)
    @utils.saveit
    def get_primary_range(self):
        return self.tools.word_finder.get_primary_range(self.offset)
    @utils.saveit
    def get_pyname(self):
        # Returns None when the offset is not a valid identifier.
        try:
            return self.tools.name_finder.get_pyname_at(self.offset)
        except exceptions.BadIdentifierError:
            pass
    @utils.saveit
    def get_primary_and_pyname(self):
        try:
            return self.tools.name_finder.get_primary_and_pyname_at(self.offset)
        except exceptions.BadIdentifierError:
            pass
    @utils.saveit
    def is_in_import_statement(self):
        return (self.tools.word_finder.is_from_statement(self.offset) or
                self.tools.word_finder.is_import_statement(self.offset))
    def is_called(self):
        return self.tools.word_finder.is_a_function_being_called(self.offset)
    def is_defined(self):
        return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset)
    def is_a_fixed_primary(self):
        return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset) or \
            self.tools.word_finder.is_a_name_after_from_import(self.offset)
    def is_written(self):
        return self.tools.word_finder.is_assigned_here(self.offset)
    def is_unsure(self):
        return unsure_pyname(self.get_pyname())
    @property
    @utils.saveit
    def lineno(self):
        # 1-based line number of the occurrence's word start.
        offset = self.get_word_range()[0]
        return self.tools.pymodule.lines.get_line_number(offset)
def same_pyname(expected, pyname):
    """Check whether `expected` and `pyname` are the same"""
    if expected is None or pyname is None:
        return False
    if expected == pyname:
        return True
    # Two distinct imported-name objects can still denote the same thing;
    # anything else that failed the equality check above is different.
    imported_types = (pynames.ImportedModule, pynames.ImportedName)
    if type(expected) not in imported_types and type(pyname) not in imported_types:
        return False
    return (expected.get_definition_location() == pyname.get_definition_location()
            and expected.get_object() == pyname.get_object())
def unsure_pyname(pyname, unbound=True):
    """Return `True` if we don't know what this name references.

    A missing pyname, an unbound name (when `unbound` filtering is on), or
    a name bound to the "unknown" object all count as unsure.
    """
    if pyname is None:
        return True
    if unbound and not isinstance(pyname, pynames.UnboundName):
        return False
    if pyname.get_object() == pyobjects.get_unknown():
        return True
    # Previously control fell off the end and returned None; make the
    # falsy result explicit to honor the documented boolean contract.
    return False
class PyNameFilter(object):
    """Accept occurrences whose pyname matches the one given at creation."""

    def __init__(self, pyname):
        self.pyname = pyname

    def __call__(self, occurrence):
        # Implicitly returns None (abstain) when the pynames differ.
        if same_pyname(self.pyname, occurrence.get_pyname()):
            return True
class InHierarchyFilter(object):
    """Accept occurrences of the same method/attribute name anywhere in the
    class hierarchy of the given pyname (implicit interfaces)."""
    def __init__(self, pyname, implementations_only=False):
        self.pyname = pyname
        self.impl_only = implementations_only
        self.pyclass = self._get_containing_class(pyname)
        if self.pyclass is not None:
            self.name = pyname.get_object().get_name()
            self.roots = self._get_root_classes(self.pyclass, self.name)
        else:
            # Not defined inside a class: this filter can never match.
            self.roots = None
    def __call__(self, occurrence):
        if self.roots is None:
            return
        pyclass = self._get_containing_class(occurrence.get_pyname())
        if pyclass is not None:
            roots = self._get_root_classes(pyclass, self.name)
            # Shared root class => same hierarchy => match.
            if self.roots.intersection(roots):
                return True
    def _get_containing_class(self, pyname):
        # Returns the class pyobject a DefinedName lives in, else None.
        if isinstance(pyname, pynames.DefinedName):
            scope = pyname.get_object().get_scope()
            parent = scope.parent
            if parent is not None and parent.get_kind() == 'Class':
                return parent.pyobject
    def _get_root_classes(self, pyclass, name):
        # Walk superclasses to find the topmost classes defining `name`.
        if self.impl_only and pyclass == self.pyclass:
            return set([pyclass])
        result = set()
        for superclass in pyclass.get_superclasses():
            if name in superclass:
                result.update(self._get_root_classes(superclass, name))
        if not result:
            # No superclass defines the name: pyclass itself is a root.
            return set([pyclass])
        return result
class UnsureFilter(object):
    """Accept unsure occurrences that also pass a caller-supplied predicate."""

    def __init__(self, unsure):
        self.unsure = unsure

    def __call__(self, occurrence):
        # Abstains (None) unless both checks agree.
        if occurrence.is_unsure() and self.unsure(occurrence):
            return True
class NoImportsFilter(object):
    """Reject occurrences that appear inside import statements."""

    def __call__(self, occurrence):
        # Explicit veto for imports; otherwise abstain (None).
        if occurrence.is_in_import_statement():
            return False
class CallsFilter(object):
    """Reject occurrences where the name is not being called."""

    def __call__(self, occurrence):
        # Explicit veto for non-calls; otherwise abstain (None).
        if not occurrence.is_called():
            return False
class _TextualFinder(object):
    """Fast textual scan for candidate offsets of `name` in source code.

    Uses a regex that also matches comments and strings so that, outside
    docs mode, occurrences inside them can be told apart by group name.
    """
    def __init__(self, name, docs=False):
        self.name = name
        self.docs = docs
        self.comment_pattern = _TextualFinder.any('comment', [r'#[^\n]*'])
        self.string_pattern = _TextualFinder.any(
            'string', [codeanalyze.get_string_pattern()])
        self.pattern = self._get_occurrence_pattern(self.name)
    def find_offsets(self, source):
        """Yield candidate offsets of the name in `source`."""
        if not self._fast_file_query(source):
            # Name does not appear at all; nothing to scan.
            return
        if self.docs:
            # Docs mode also reports matches in comments/strings.
            searcher = self._normal_search
        else:
            searcher = self._re_search
        for matched in searcher(source):
            yield matched
    def _re_search(self, source):
        # Only yield matches captured by the 'occurrence' group; matches of
        # the comment/string alternatives are skipped.
        for match in self.pattern.finditer(source):
            for key, value in match.groupdict().items():
                if value and key == 'occurrence':
                    yield match.start(key)
    def _normal_search(self, source):
        # Plain substring scan with manual word-boundary checks.
        current = 0
        while True:
            try:
                found = source.index(self.name, current)
                current = found + len(self.name)
                if (found == 0 or not self._is_id_char(source[found - 1])) and \
                    (current == len(source) or not self._is_id_char(source[current])):
                    yield found
            except ValueError:
                break
    def _is_id_char(self, c):
        return c.isalnum() or c == '_'
    def _fast_file_query(self, source):
        # Cheap containment pre-check before any regex work.
        try:
            source.index(self.name)
            return True
        except ValueError:
            return False
    def _get_source(self, resource, pymodule):
        if resource is not None:
            return resource.read()
        else:
            return pymodule.source_code
    def _get_occurrence_pattern(self, name):
        # Alternation of occurrence | comment | string; group names let the
        # searcher tell which alternative matched.
        occurrence_pattern = _TextualFinder.any('occurrence',
                                                ['\\b' + name + '\\b'])
        pattern = re.compile(occurrence_pattern + '|' + self.comment_pattern +
                             '|' + self.string_pattern)
        return pattern
    @staticmethod
    def any(name, list_):
        """Build a named alternation group: ``(?P<name>alt1|alt2|...)``."""
        return '(?P<%s>' % name + '|'.join(list_) + ')'
class _OccurrenceToolsCreator(object):
    """Lazily builds (and memoizes) the helpers an `Occurrence` needs,
    starting from either a resource or an already-parsed pymodule."""
    def __init__(self, pycore, resource=None, pymodule=None, docs=False):
        self.pycore = pycore
        self.__resource = resource
        self.__pymodule = pymodule
        self.docs = docs
    @property
    @utils.saveit
    def name_finder(self):
        return evaluate.ScopeNameFinder(self.pymodule)
    @property
    @utils.saveit
    def source_code(self):
        # Prefer reading the resource directly when one was supplied.
        if self.__resource is not None:
            return self.resource.read()
        else:
            return self.pymodule.source_code
    @property
    @utils.saveit
    def word_finder(self):
        return worder.Worder(self.source_code, self.docs)
    @property
    @utils.saveit
    def resource(self):
        # May be None when only a resource-less pymodule was given.
        if self.__resource is not None:
            return self.__resource
        if self.__pymodule is not None:
            return self.__pymodule.resource
    @property
    @utils.saveit
    def pymodule(self):
        # Parse on demand when only a resource was given.
        if self.__pymodule is not None:
            return self.__pymodule
        return self.pycore.resource_to_pyobject(self.resource)
Pexego/odoo | addons/hw_scanner/controllers/main.py | 51 | 7441 | # -*- coding: utf-8 -*-
import logging
import os
import time
from os import listdir
from os.path import join
from threading import Thread, Lock
from select import select
from Queue import Queue, Empty
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
try:
import evdev
except ImportError:
_logger.error('OpenERP module hw_scanner depends on the evdev python module')
evdev = None
class Scanner(Thread):
    def __init__(self):
        """Set up thread state, status reporting and the keycode map.

        `keymap` maps Linux input-event keycodes to a
        ``(character, shifted_character)`` pair; keycodes handled specially
        (backspace, tab, enter, ctrl, shift) are noted inline.
        """
        Thread.__init__(self)
        self.lock = Lock()
        self.status = {'status':'connecting', 'messages':[]}
        self.input_dir = '/dev/input/by-id/'
        self.barcodes = Queue()
        self.keymap = {
            2: ("1","!"),
            3: ("2","@"),
            4: ("3","#"),
            5: ("4","$"),
            6: ("5","%"),
            7: ("6","^"),
            8: ("7","&"),
            9: ("8","*"),
            10:("9","("),
            11:("0",")"),
            12:("-","_"),
            13:("=","+"),
            # 14 BACKSPACE
            # 15 TAB
            16:("q","Q"),
            17:("w","W"),
            18:("e","E"),
            19:("r","R"),
            20:("t","T"),
            21:("y","Y"),
            22:("u","U"),
            23:("i","I"),
            24:("o","O"),
            25:("p","P"),
            26:("[","{"),
            27:("]","}"),
            # 28 ENTER
            # 29 LEFT_CTRL
            30:("a","A"),
            31:("s","S"),
            32:("d","D"),
            33:("f","F"),
            34:("g","G"),
            35:("h","H"),
            36:("j","J"),
            37:("k","K"),
            38:("l","L"),
            39:(";",":"),
            40:("'","\""),
            41:("`","~"),
            # 42 LEFT SHIFT
            43:("\\","|"),
            44:("z","Z"),
            45:("x","X"),
            46:("c","C"),
            47:("v","V"),
            48:("b","B"),
            49:("n","N"),
            50:("m","M"),
            51:(",","<"),
            52:(".",">"),
            53:("/","?"),
            # 54 RIGHT SHIFT
            57:(" "," "),
        }
    def lockedstart(self):
        # Start the polling thread exactly once; the lock keeps two
        # concurrent callers from both calling start().
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()
def set_status(self, status, message = None):
if status == self.status['status']:
if message != None and message != self.status['messages'][-1]:
self.status['messages'].append(message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('Barcode Scanner Error: '+message)
elif status == 'disconnected' and message:
_logger.warning('Disconnected Barcode Scanner: '+message)
def get_device(self):
try:
if not evdev:
return None
devices = [ device for device in listdir(self.input_dir)]
keyboards = [ device for device in devices if ('kbd' in device) and ('keyboard' not in device.lower())]
scanners = [ device for device in devices if ('barcode' in device.lower()) or ('scanner' in device.lower())]
if len(scanners) > 0:
self.set_status('connected','Connected to '+scanners[0])
return evdev.InputDevice(join(self.input_dir,scanners[0]))
elif len(keyboards) > 0:
self.set_status('connected','Connected to '+keyboards[0])
return evdev.InputDevice(join(self.input_dir,keyboards[0]))
else:
self.set_status('disconnected','Barcode Scanner Not Found')
return None
except Exception as e:
self.set_status('error',str(e))
return None
def get_barcode(self):
""" Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
return barcode scanned in the past if they are not older than 5 seconds and have not
been returned before. This is necessary to catch barcodes scanned while the POS is
busy reading another barcode
"""
self.lockedstart()
while True:
try:
timestamp, barcode = self.barcodes.get(True, 5)
if timestamp > time.time() - 5:
return barcode
except Empty:
return ''
def get_status(self):
self.lockedstart()
return self.status
def run(self):
""" This will start a loop that catches all keyboard events, parse barcode
sequences and put them on a timestamped queue that can be consumed by
the point of sale's requests for barcode events
"""
self.barcodes = Queue()
barcode = []
shift = False
device = None
while True: # barcodes loop
if device: # ungrab device between barcodes and timeouts for plug & play
try:
device.ungrab()
except Exception as e:
self.set_status('error',str(e))
device = self.get_device()
if not device:
time.sleep(5) # wait until a suitable device is plugged
else:
try:
device.grab()
shift = False
barcode = []
while True: # keycode loop
r,w,x = select([device],[],[],5)
if len(r) == 0: # timeout
break
events = device.read()
for event in events:
if event.type == evdev.ecodes.EV_KEY:
#_logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
if event.value == 1: # keydown events
if event.code in self.keymap:
if shift:
barcode.append(self.keymap[event.code][1])
else:
barcode.append(self.keymap[event.code][0])
elif event.code == 42 or event.code == 54: # SHIFT
shift = True
elif event.code == 28: # ENTER, end of barcode
self.barcodes.put( (time.time(),''.join(barcode)) )
barcode = []
elif event.value == 0: #keyup events
if event.code == 42 or event.code == 54: # LEFT SHIFT
shift = False
except Exception as e:
self.set_status('error',str(e))
s = Scanner()
hw_proxy.drivers['scanner'] = s
class ScannerDriver(hw_proxy.Proxy):
@http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
def scanner(self):
return s.get_barcode()
| agpl-3.0 | 9,023,831,003,358,163,000 | 33.449074 | 121 | 0.436097 | false |
Hodorable/0602 | openstack_dashboard/dashboards/project/databases/tables.py | 30 | 13199 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import urlresolvers
from django.template import defaultfilters as d_filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.database_backups \
import tables as backup_tables
ACTIVE_STATES = ("ACTIVE",)
class TerminateInstance(tables.BatchAction):
help_text = _("Terminated instances are not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Terminate Instance",
u"Terminate Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled termination of Instance",
u"Scheduled termination of Instances",
count
)
name = "terminate"
classes = ("btn-danger", )
icon = "remove"
def action(self, request, obj_id):
api.trove.instance_delete(request, obj_id)
class RestartInstance(tables.BatchAction):
help_text = _("Restarted instances will lose any data not"
" saved in persistent storage.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Restart Instance",
u"Restart Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Restarted Instance",
u"Restarted Instances",
count
)
name = "restart"
classes = ('btn-danger', 'btn-reboot')
def allowed(self, request, instance=None):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTDOWN'))
def action(self, request, obj_id):
api.trove.instance_restart(request, obj_id)
class DetachReplica(tables.BatchAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Detach Replica",
u"Detach Replicas",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Replica Detached",
u"Replicas Detached",
count
)
name = "detach_replica"
classes = ('btn-danger', 'btn-detach-replica')
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES
and hasattr(instance, 'replica_of'))
def action(self, request, obj_id):
api.trove.instance_detach_replica(request, obj_id)
class DeleteUser(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete User",
u"Delete Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted User",
u"Deleted Users",
count
)
def delete(self, request, obj_id):
datum = self.table.get_object_by_id(obj_id)
try:
api.trove.user_delete(request, datum.instance.id, datum.name)
except Exception:
msg = _('Error deleting database user.')
exceptions.handle(request, msg)
class DeleteDatabase(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Database",
u"Delete Databases",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Database",
u"Deleted Databases",
count
)
def delete(self, request, obj_id):
datum = self.table.get_object_by_id(obj_id)
try:
api.trove.database_delete(request, datum.instance.id, datum.name)
except Exception:
msg = _('Error deleting database on instance.')
exceptions.handle(request, msg)
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:databases:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
class CreateBackup(tables.LinkAction):
name = "backup"
verbose_name = _("Create Backup")
url = "horizon:project:database_backups:create"
classes = ("ajax-modal",)
icon = "camera"
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES and
request.user.has_perm('openstack.services.object-store'))
def get_link_url(self, datam):
url = urlresolvers.reverse(self.url)
return url + "?instance=%s" % datam.id
class ResizeVolume(tables.LinkAction):
name = "resize_volume"
verbose_name = _("Resize Volume")
url = "horizon:project:databases:resize_volume"
classes = ("ajax-modal", "btn-resize")
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class ResizeInstance(tables.LinkAction):
name = "resize_instance"
verbose_name = _("Resize Instance")
url = "horizon:project:databases:resize_instance"
classes = ("ajax-modal", "btn-resize")
def allowed(self, request, instance=None):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.trove.instance_get(request, instance_id)
try:
flavor_id = instance.flavor['id']
instance.full_flavor = api.trove.flavor_get(request, flavor_id)
except Exception:
pass
instance.host = get_host(instance)
return instance
def get_datastore(instance):
if hasattr(instance, "datastore"):
return instance.datastore["type"]
return _("Not available")
def get_datastore_version(instance):
if hasattr(instance, "datastore"):
return instance.datastore["version"]
return _("Not available")
def get_host(instance):
if hasattr(instance, "hostname"):
return instance.hostname
elif hasattr(instance, "ip") and instance.ip:
return instance.ip[0]
return _("Not Assigned")
def get_size(instance):
if hasattr(instance, "full_flavor"):
size_string = _("%(name)s | %(RAM)s RAM")
vals = {'name': instance.full_flavor.name,
'RAM': sizeformat.mb_float_format(instance.full_flavor.ram)}
return size_string % vals
return _("Not available")
def get_volume_size(instance):
if hasattr(instance, "volume"):
return sizeformat.diskgbformat(instance.volume.get("size"))
return _("Not available")
def get_databases(user):
if hasattr(user, "access"):
databases = [db.name for db in user.access]
databases.sort()
return ', '.join(databases)
return _("-")
class InstancesTable(tables.DataTable):
STATUS_CHOICES = (
("ACTIVE", True),
("BLOCKED", True),
("BUILD", None),
("FAILED", False),
("REBOOT", None),
("RESIZE", None),
("BACKUP", None),
("SHUTDOWN", False),
("ERROR", False),
("RESTART_REQUIRED", None),
)
STATUS_DISPLAY_CHOICES = (
("ACTIVE", pgettext_lazy("Current status of a Database Instance",
u"Active")),
("BLOCKED", pgettext_lazy("Current status of a Database Instance",
u"Blocked")),
("BUILD", pgettext_lazy("Current status of a Database Instance",
u"Build")),
("FAILED", pgettext_lazy("Current status of a Database Instance",
u"Failed")),
("REBOOT", pgettext_lazy("Current status of a Database Instance",
u"Reboot")),
("RESIZE", pgettext_lazy("Current status of a Database Instance",
u"Resize")),
("BACKUP", pgettext_lazy("Current status of a Database Instance",
u"Backup")),
("SHUTDOWN", pgettext_lazy("Current status of a Database Instance",
u"Shutdown")),
("ERROR", pgettext_lazy("Current status of a Database Instance",
u"Error")),
("RESTART_REQUIRED",
pgettext_lazy("Current status of a Database Instance",
u"Restart Required")),
)
name = tables.Column("name",
link="horizon:project:databases:detail",
verbose_name=_("Instance Name"))
datastore = tables.Column(get_datastore,
verbose_name=_("Datastore"))
datastore_version = tables.Column(get_datastore_version,
verbose_name=_("Datastore Version"))
host = tables.Column(get_host, verbose_name=_("Host"))
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
volume = tables.Column(get_volume_size,
verbose_name=_("Volume Size"),
attrs={'data-type': 'size'})
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "databases"
verbose_name = _("Instances")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (LaunchLink, TerminateInstance)
row_actions = (CreateBackup,
ResizeVolume,
ResizeInstance,
RestartInstance,
DetachReplica,
TerminateInstance)
class UsersTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("User Name"))
host = tables.Column("host", verbose_name=_("Allowed Host"))
databases = tables.Column(get_databases, verbose_name=_("Databases"))
class Meta(object):
name = "users"
verbose_name = _("Users")
table_actions = [DeleteUser]
row_actions = [DeleteUser]
def get_object_id(self, datum):
return datum.name
class DatabaseTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Database Name"))
class Meta(object):
name = "databases"
verbose_name = _("Databases")
table_actions = [DeleteDatabase]
row_actions = [DeleteDatabase]
def get_object_id(self, datum):
return datum.name
def is_incremental(obj):
return hasattr(obj, 'parent_id') and obj.parent_id is not None
class InstanceBackupsTable(tables.DataTable):
name = tables.Column("name",
link="horizon:project:database_backups:detail",
verbose_name=_("Name"))
created = tables.Column("created", verbose_name=_("Created"),
filters=[filters.parse_isotime])
location = tables.Column(lambda obj: _("Download"),
link=lambda obj: obj.locationRef,
verbose_name=_("Backup File"))
incremental = tables.Column(is_incremental,
verbose_name=_("Incremental"),
filters=(d_filters.yesno,
d_filters.capfirst))
status = tables.Column(
"status",
verbose_name=_("Status"),
status=True,
status_choices=backup_tables.STATUS_CHOICES,
display_choices=backup_tables.STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "backups"
verbose_name = _("Backups")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (backup_tables.LaunchLink, backup_tables.DeleteBackup)
row_actions = (backup_tables.RestoreLink, backup_tables.DeleteBackup)
| apache-2.0 | 5,125,708,908,031,542,000 | 31.192683 | 78 | 0.587014 | false |
louyihua/edx-platform | lms/djangoapps/branding/models.py | 63 | 1687 | """
Model used by Video module for Branding configuration.
Includes:
BrandingInfoConfig: A ConfigurationModel for managing how Video Module will
use Branding.
"""
import json
from django.db.models import TextField
from django.core.exceptions import ValidationError
from config_models.models import ConfigurationModel
class BrandingInfoConfig(ConfigurationModel):
"""
Configuration for Branding.
Example of configuration that must be stored:
{
"CN": {
"url": "http://www.xuetangx.com",
"logo_src": "http://www.xuetangx.com/static/images/logo.png",
"logo_tag": "Video hosted by XuetangX.com"
}
}
"""
class Meta(ConfigurationModel.Meta):
app_label = "branding"
configuration = TextField(
help_text="JSON data of Configuration for Video Branding."
)
def clean(self):
"""
Validates configuration text field.
"""
try:
json.loads(self.configuration)
except ValueError:
raise ValidationError('Must be valid JSON string.')
@classmethod
def get_config(cls):
"""
Get the Video Branding Configuration.
"""
info = cls.current()
return json.loads(info.configuration) if info.enabled else {}
class BrandingApiConfig(ConfigurationModel):
"""Configure Branding api's
Enable or disable api's functionality.
When this flag is disabled, the api will return 404.
When the flag is enabled, the api will returns the valid reponse.
"""
class Meta(ConfigurationModel.Meta):
app_label = "branding"
| agpl-3.0 | -5,193,001,282,957,235,000 | 26.655738 | 81 | 0.631298 | false |
MRigal/django | tests/queryset_pickle/tests.py | 209 | 6081 | from __future__ import unicode_literals
import datetime
import pickle
import unittest
import warnings
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.version import get_version
from .models import Container, Event, Group, Happening, M2MModel
class PickleabilityTestCase(TestCase):
def setUp(self):
Happening.objects.create() # make sure the defaults are working (#20158)
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_string_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
@unittest.skipIf(six.PY2, "Field doesn't exist on Python 2.")
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_filter_reverse_fk(self):
self.assert_pickles(Group.objects.filter(event=1))
def test_doesnotexist_exception(self):
# Ticket #17776
original = Event.DoesNotExist("Doesn't exist")
unpickled = pickle.loads(pickle.dumps(original))
# Exceptions are not equal to equivalent instances of themselves, so
# can't just use assertEqual(original, unpickled)
self.assertEqual(original.__class__, unpickled.__class__)
self.assertEqual(original.args, unpickled.args)
def test_manager_pickle(self):
pickle.loads(pickle.dumps(Happening.objects))
def test_model_pickle(self):
"""
Test that a model not defined on module level is pickleable.
"""
original = Container.SomeModel(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
# Also, deferred dynamic model works
Container.SomeModel.objects.create(somefield=1)
original = Container.SomeModel.objects.defer('somefield')[0]
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertEqual(original.somefield, reloaded.somefield)
def test_model_pickle_m2m(self):
"""
Test intentionally the automatically created through model.
"""
m1 = M2MModel.objects.create()
g1 = Group.objects.create(name='foof')
m1.groups.add(g1)
m2m_through = M2MModel._meta.get_field('groups').remote_field.through
original = m2m_through.objects.get()
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
def test_model_pickle_dynamic(self):
class Meta:
proxy = True
dynclass = type(str("DynamicEventSubclass"), (Event, ),
{'Meta': Meta, '__module__': Event.__module__})
original = dynclass(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertIs(reloaded.__class__, dynclass)
def test_specialized_queryset(self):
self.assert_pickles(Happening.objects.values('name'))
self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
# With related field (#14515)
self.assert_pickles(
Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
)
def test_pickle_prefetch_related_idempotence(self):
g = Group.objects.create(name='foo')
groups = Group.objects.prefetch_related('event_set')
# First pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
# Second pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self):
"""
#24831 -- Cached properties on ManyToOneRel created in QuerySet.delete()
caused subsequent QuerySet pickling to fail.
"""
g = Group.objects.create(name='foo')
m2m = M2MModel.objects.create()
m2m.groups.add(g)
Group.objects.all().delete()
m2ms = M2MModel.objects.prefetch_related('groups')
m2ms = pickle.loads(pickle.dumps(m2ms))
self.assertQuerysetEqual(m2ms, [m2m], lambda x: x)
def test_missing_django_version_unpickling(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled without a Django version
"""
qs = Group.missing_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(msg,
"Pickled queryset instance's Django version is not specified.")
def test_unsupported_unpickle(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled with a different Django version than the current
"""
qs = Group.previous_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(
msg,
"Pickled queryset instance's Django version 1.0 does not "
"match the current version %s." % get_version()
)
| bsd-3-clause | 8,018,006,163,063,547,000 | 37.487342 | 103 | 0.6494 | false |
marcusramberg/dotfiles | bin/.venv-ansible-venv/lib/python2.6/site-packages/ansible/modules/extras/system/at.py | 28 | 6404 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Richard Isaacson <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: at
short_description: Schedule the execution of a command or script file via the at command.
description:
- Use this module to schedule a command or script file to run once in the future.
- All jobs are executed in the 'a' queue.
version_added: "1.5"
options:
command:
description:
- A command to be executed in the future.
required: false
default: null
script_file:
description:
- An existing script file to be executed in the future.
required: false
default: null
count:
description:
- The count of units in the future to execute the command or script file.
required: true
units:
description:
- The type of units in the future to execute the command or script file.
required: true
choices: ["minutes", "hours", "days", "weeks"]
state:
description:
- The state dictates if the command or script file should be evaluated as present(added) or absent(deleted).
required: false
choices: ["present", "absent"]
default: "present"
unique:
description:
- If a matching job is present a new job will not be added.
required: false
default: false
requirements:
- at
author: Richard Isaacson
'''
EXAMPLES = '''
# Schedule a command to execute in 20 minutes as root.
- at: command="ls -d / > /dev/null" count=20 units="minutes"
# Match a command to an existing job and delete the job.
- at: command="ls -d / > /dev/null" state="absent"
# Schedule a command to execute in 20 minutes making sure it is unique in the queue.
- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes"
'''
import os
import tempfile
def add_job(module, result, at_cmd, count, units, command, script_file):
at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units)
rc, out, err = module.run_command(at_command, check_rc=True)
if command:
os.unlink(script_file)
result['changed'] = True
def delete_job(module, result, at_cmd, command, script_file):
for matching_job in get_matching_jobs(module, at_cmd, script_file):
at_command = "%s -d %s" % (at_cmd, matching_job)
rc, out, err = module.run_command(at_command, check_rc=True)
result['changed'] = True
if command:
os.unlink(script_file)
module.exit_json(**result)
def get_matching_jobs(module, at_cmd, script_file):
matching_jobs = []
atq_cmd = module.get_bin_path('atq', True)
# Get list of job numbers for the user.
atq_command = "%s" % atq_cmd
rc, out, err = module.run_command(atq_command, check_rc=True)
current_jobs = out.splitlines()
if len(current_jobs) == 0:
return matching_jobs
# Read script_file into a string.
script_file_string = open(script_file).read().strip()
# Loop through the jobs.
# If the script text is contained in a job add job number to list.
for current_job in current_jobs:
split_current_job = current_job.split()
at_command = "%s -c %s" % (at_cmd, split_current_job[0])
rc, out, err = module.run_command(at_command, check_rc=True)
if script_file_string in out:
matching_jobs.append(split_current_job[0])
# Return the list.
return matching_jobs
def create_tempfile(command):
filed, script_file = tempfile.mkstemp(prefix='at')
fileh = os.fdopen(filed, 'w')
fileh.write(command)
fileh.close()
return script_file
def main():
module = AnsibleModule(
argument_spec = dict(
command=dict(required=False,
type='str'),
script_file=dict(required=False,
type='str'),
count=dict(required=False,
type='int'),
units=dict(required=False,
default=None,
choices=['minutes', 'hours', 'days', 'weeks'],
type='str'),
state=dict(required=False,
default='present',
choices=['present', 'absent'],
type='str'),
unique=dict(required=False,
default=False,
type='bool')
),
mutually_exclusive=[['command', 'script_file']],
required_one_of=[['command', 'script_file']],
supports_check_mode=False
)
at_cmd = module.get_bin_path('at', True)
command = module.params['command']
script_file = module.params['script_file']
count = module.params['count']
units = module.params['units']
state = module.params['state']
unique = module.params['unique']
if (state == 'present') and (not count or not units):
module.fail_json(msg="present state requires count and units")
result = {'state': state, 'changed': False}
# If command transform it into a script_file
if command:
script_file = create_tempfile(command)
# if absent remove existing and return
if state == 'absent':
delete_job(module, result, at_cmd, command, script_file)
# if unique if existing return unchanged
if unique:
if len(get_matching_jobs(module, at_cmd, script_file)) != 0:
if command:
os.unlink(script_file)
module.exit_json(**result)
result['script_file'] = script_file
result['count'] = count
result['units'] = units
add_job(module, result, at_cmd, count, units, command, script_file)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| mit | -1,881,615,504,702,092,300 | 31.02 | 113 | 0.623204 | false |
gorakhargosh/mom | mom/os/path.py | 1 | 5970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: mom.os.path
:synopsis: Directory walking, listing, and path sanitizing functions.
Functions
---------
.. autofunction:: get_dir_walker
.. autofunction:: walk
.. autofunction:: listdir
.. autofunction:: list_directories
.. autofunction:: list_files
.. autofunction:: absolute_path
.. autofunction:: real_absolute_path
.. autofunction:: parent_dir_path
"""
from __future__ import absolute_import
import functools
import os
from mom import builtins
__author__ = "[email protected] (Yesudeep Mangalapilly)"
__all__ = [
"absolute_path",
"get_dir_walker",
"list_directories",
"list_files",
"listdir",
"parent_dir_path",
"real_absolute_path",
"walk",
]
def get_dir_walker(recursive, topdown=True, followlinks=False):
"""
Returns a recursive or a non-recursive directory walker.
:param recursive:
``True`` produces a recursive walker; ``False`` produces a non-recursive
walker.
:returns:
A walker function.
"""
if recursive:
walker = functools.partial(os.walk,
topdown=topdown,
followlinks=followlinks)
else:
def walker(path, topdown=topdown, followlinks=followlinks):
"""Alternative walker."""
yield builtins.next(os.walk(path,
topdown=topdown,
followlinks=followlinks))
return walker
def walk(dir_pathname, recursive=True, topdown=True, followlinks=False):
"""
Walks a directory tree optionally recursively. Works exactly like
:func:`os.walk` only adding the `recursive` argument.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
walk_func = get_dir_walker(recursive, topdown, followlinks)
for root, dir_names, file_names in walk_func(dir_pathname):
yield (root, dir_names, file_names)
def listdir(dir_pathname,
recursive=True,
topdown=True,
followlinks=False):
"""
Enlists all items using their absolute paths in a directory, optionally
non-recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` (default) for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dir_names, file_names in walk(dir_pathname,
recursive, topdown, followlinks):
for dir_name in dir_names:
yield absolute_path(os.path.join(root, dir_name))
for file_name in file_names:
yield absolute_path(os.path.join(root, file_name))
def list_directories(dir_pathname, recursive=True, topdown=True,
followlinks=False):
"""
Enlists all the directories using their absolute paths within the
specified directory, optionally non-recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` (default) for walking recursively through the directory
tree; ``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dir_names, _ in walk(dir_pathname, recursive, topdown, followlinks):
for dir_name in dir_names:
yield absolute_path(os.path.join(root, dir_name))
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):
"""
Enlists all the files using their absolute paths within the
specified directory, optionally recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, _, file_names in walk(dir_pathname,
recursive, topdown, followlinks):
for file_name in file_names:
yield absolute_path(os.path.join(root, file_name))
def absolute_path(path):
"""
Returns the absolute path for the given path and normalizes the
path.
:param path:
Path for which the absolute normalized path will be found.
:returns:
Absolute normalized path.
"""
return os.path.abspath(os.path.normpath(path))
def real_absolute_path(path):
"""
Returns the real absolute normalized path for the given path.
:param path:
Path for which the real absolute normalized path will be found.
:returns:
Real absolute normalized path.
"""
return os.path.realpath(absolute_path(path))
def parent_dir_path(path):
"""
Returns the parent directory path.
:param path:
Path for which the parent directory will be obtained.
:returns:
Parent directory path.
"""
return absolute_path(os.path.dirname(path))
| apache-2.0 | -7,895,896,464,503,193,000 | 28.264706 | 80 | 0.673199 | false |
UnicornButter/external_chromium | chrome/common/extensions/docs/server/chromeextensionsdocs.py | 64 | 15204 | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cgi
import logging
import re
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import memcache
from google.appengine.api import urlfetch
# TODO(nickbaum): unit tests
# TODO(nickbaum): is this the right way to do constants?
class Channel():
  """A release channel (dev/beta/stable/trunk) of the documentation.

  name: the channel's directory name as it appears in request paths.
  tag:  branch label string associated with the channel (empty when the
        channel has no tagged branch, e.g. stable/trunk).
  """
  def __init__(self, name, tag):
    self.name = name
    self.tag = tag

  def matchPath(self, path):
    """Return True if |path| lives under this channel's directory.

    E.g. for the "dev" channel, matches paths starting with "/dev/".

    BUG FIX: the original returned the undefined names ``true``/``false``
    (lowercase), which raised NameError at runtime; Python's booleans are
    ``True``/``False``.
    """
    return path.startswith("/" + self.name + "/")
# The known release channels. The tag string appears to identify the
# release branch for the channel (empty for stable and trunk) -- it is
# consumed via getBranch(), which is not visible here; confirm there.
Channel.DEV = Channel("dev", "2.0-dev")
Channel.BETA = Channel("beta", "1.1-beta")
Channel.STABLE = Channel("stable", "")
Channel.CHANNELS = [Channel.DEV, Channel.BETA, Channel.STABLE]
Channel.TRUNK = Channel("trunk", "")
Channel.DEFAULT = Channel.STABLE

# Expiration (in seconds) passed to memcache.add() for fetched pages.
DEFAULT_CACHE_TIME = 300
class MainPage(webapp.RequestHandler):
# get page from memcache, or else fetch it from src
def get(self):
path = os.path.realpath(os.path.join('/', self.request.path))
# special path to invoke the unit tests
# TODO(nickbaum): is there a less ghetto way to invoke the unit test?
if path == "/test":
self.unitTest()
return
# if root, redirect to index.html
# TODO(nickbaum): this doesn't handle /chrome/extensions/trunk, etc
if (path == "/chrome/extensions") or (path == "chrome/extensions/"):
self.redirect("/chrome/extensions/index.html")
return
# else remove prefix
if(path[:18] == "/chrome/extensions"):
path = path[18:]
# TODO(nickbaum): there's a subtle bug here: if there are two instances of the app,
# their default caches will override each other. This is bad!
result = memcache.get(path)
if result is None:
logging.info("Cache miss: " + path)
url = self.getSrcUrl(path)
if (url[1] is not Channel.TRUNK) and (url[0] != "http://src.chromium.org/favicon.ico"):
branch = self.getBranch(url[1])
url = url[0] % branch
else:
url = url[0]
logging.info("Path: " + self.request.path)
logging.info("Url: " + url)
try:
result = urlfetch.fetch(url + self.request.query_string)
if result.status_code != 200:
logging.error("urlfetch failed: " + url)
# TODO(nickbaum): what should we do when the urlfetch fails?
except:
logging.error("urlfetch failed: " + url)
# TODO(nickbaum): what should we do when the urlfetch fails?
try:
if not memcache.add(path, result, DEFAULT_CACHE_TIME):
logging.error("Memcache set failed.")
except:
logging.error("Memcache set failed.")
for key in result.headers:
self.response.headers[key] = result.headers[key]
self.response.out.write(result.content)
def head(self):
self.get()
# get the src url corresponding to the request
# returns a tuple of the url and the branch
# this function is the only part that is unit tested
def getSrcUrl(self, path):
# from the path they provided, figure out which channel they requested
# TODO(nickbaum) clean this logic up
# find the first subdirectory of the path
path = path.split('/', 2)
url = "http://src.chromium.org/viewvc/chrome/"
channel = None
# if there's no subdirectory, choose the default channel
# otherwise, figure out if the subdirectory corresponds to a channel
if len(path) == 2:
path.append("")
if path[1] == "":
channel = Channel.DEFAULT
if(Channel.DEFAULT == Channel.TRUNK):
url = url + "trunk/src/chrome/"
else:
url = url + "branches/%s/src/chrome/"
path = ""
elif path[1] == Channel.TRUNK.name:
url = url + "trunk/src/chrome/"
channel = Channel.TRUNK
path = path[2]
else:
# otherwise, run through the different channel options
for c in Channel.CHANNELS:
if(path[1] == c.name):
channel = c
url = url + "branches/%s/src/chrome/"
path = path[2]
break
# if the subdirectory doesn't correspond to a channel, use the default
if channel is None:
channel = Channel.DEFAULT
if(Channel.DEFAULT == Channel.TRUNK):
url = url + "trunk/src/chrome/"
else:
url = url + "branches/%s/src/chrome/"
if path[2] != "":
path = path[1] + "/" + path[2]
else:
path = path[1]
# special cases
# TODO(nickbaum): this is super cumbersome to maintain
if path == "third_party/jstemplate/jstemplate_compiled.js":
url = url + path
elif path == "api/extension_api.json":
url = url + "common/extensions/" + path
elif path == "favicon.ico":
url = "http://src.chromium.org/favicon.ico"
else:
if path == "":
path = "index.html"
url = url + "common/extensions/docs/" + path
return [url, channel]
# get the current version number for the channel requested (dev, beta or stable)
# TODO(nickbaum): move to Channel object
def getBranch(self, channel):
branch = memcache.get(channel.name)
if branch is None:
# query Omaha to figure out which version corresponds to this channel
postdata = """<?xml version="1.0" encoding="UTF-8"?>
<o:gupdate xmlns:o="http://www.google.com/update2/request" protocol="2.0" testsource="crxdocs">
<o:app appid="{8A69D345-D564-463C-AFF1-A69D9E530F96}" version="0.0.0.0" lang="">
<o:updatecheck tag="%s" installsource="ondemandcheckforupdates" />
</o:app>
</o:gupdate>
""" % channel.tag
result = urlfetch.fetch(url="https://tools.google.com/service/update2",
payload=postdata,
method=urlfetch.POST,
headers={'Content-Type': 'application/x-www-form-urlencoded',
'X-USER-IP': '72.1.1.1'})
if result.status_code != 200:
logging.error("urlfetch failed.")
# TODO(nickbaum): what should we do when the urlfetch fails?
# find branch in response
match = re.search(r'<updatecheck Version="\d+\.\d+\.(\d+)\.\d+"', result.content)
if match is None:
logging.error("Version number not found: " + result.content)
#TODO(nickbaum): should we fall back on trunk in this case?
branch = match.group(1)
# TODO(nickbaum): make cache time a constant
if not memcache.add(channel.name, branch, DEFAULT_CACHE_TIME):
logging.error("Memcache set failed.")
return branch
# TODO(nickbaum): is there a more elegant way to write this unit test?
# I deliberately kept it dumb to avoid errors sneaking in, but it's so verbose...
# TODO(nickbaum): should I break this up into multiple files?
def unitTest(self):
self.response.out.write("Testing TRUNK<br/>")
self.check("/trunk/", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/trunk/index.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/trunk/getstarted.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/getstarted.html", Channel.TRUNK)
self.check("/trunk/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.TRUNK)
self.response.out.write("<br/>Testing DEV<br/>")
self.check("/dev/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/dev/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/dev/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.DEV)
self.check("/dev/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.DEV)
self.response.out.write("<br/>Testing BETA<br/>")
self.check("/beta/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.BETA)
self.check("/beta/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.BETA)
self.check("/beta/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.BETA)
self.check("/beta/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.BETA)
self.response.out.write("<br/>Testing STABLE<br/>")
self.check("/stable/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.STABLE)
self.check("/stable/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.STABLE)
self.check("/stable/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.STABLE)
self.check("/stable/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.STABLE)
self.response.out.write("<br/>Testing jstemplate_compiled.js<br/>")
self.check("/trunk/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.TRUNK)
self.check("/dev/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.DEV)
self.check("/beta/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.BETA)
self.check("/stable/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.STABLE)
self.response.out.write("<br/>Testing extension_api.json<br/>")
self.check("/trunk/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/api/extension_api.json", Channel.TRUNK)
self.check("/dev/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.DEV)
self.check("/beta/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.BETA)
self.check("/stable/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.STABLE)
self.response.out.write("<br/>Testing favicon.ico<br/>")
self.check("/trunk/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.TRUNK)
self.check("/dev/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.DEV)
self.check("/beta/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.BETA)
self.check("/stable/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.STABLE)
self.response.out.write("<br/>Testing DEFAULT<br/>")
temp = Channel.DEFAULT
Channel.DEFAULT = Channel.DEV
self.check("/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.DEV)
self.check("/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.DEV)
self.check("/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.DEV)
self.check("/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.DEV)
self.check("/css/ApiRefStyles.css", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/css/ApiRefStyles.css", Channel.DEV)
self.check("/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.DEV)
self.response.out.write("<br/>Testing DEFAULT (trunk)<br/>")
Channel.DEFAULT = Channel.TRUNK
self.check("/", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/index.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/getstarted.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/getstarted.html", Channel.TRUNK)
self.check("/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.TRUNK)
self.check("/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.TRUNK)
self.check("/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/api/extension_api.json", Channel.TRUNK)
self.check("/css/ApiRefStyles.css", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/css/ApiRefStyles.css", Channel.TRUNK)
self.check("/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.TRUNK)
Channel.DEFAULT = temp
return
# utility function for my unit test
# checks that getSrcUrl(path) returns the expected values
# TODO(nickbaum): can this be replaced by assert or something similar?
def check(self, path, expectedUrl, expectedChannel):
actual = self.getSrcUrl(path)
if (actual[0] != expectedUrl):
self.response.out.write('<span style="color:#f00;">Failure:</span> path ' + path + " gave url " + actual[0] + "<br/>")
elif (actual[1] != expectedChannel):
self.response.out.write('<span style="color:#f00;">Failure:</span> path ' + path + " gave branch " + actual[1].name + "<br/>")
else:
self.response.out.write("Path " + path + ' <span style="color:#0f0;">OK</span><br/>')
return
application = webapp.WSGIApplication([
('/.*', MainPage),
], debug=False)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| bsd-3-clause | 204,547,551,515,499,780 | 53.3 | 197 | 0.680084 | false |
dhuppenkothen/stingray | stingray/conftest.py | 31 | 1240 | # this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
from astropy.tests.pytest_plugins import *
## Uncomment the following line to treat all DeprecationWarnings as
## exceptions
# enable_deprecations_as_exceptions()
## Uncomment and customize the following lines to add/remove entries
## from the list of packages for which version numbers are displayed
## when running the tests
# try:
# PYTEST_HEADER_MODULES['Astropy'] = 'astropy'
# PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
# del PYTEST_HEADER_MODULES['h5py']
# except NameError: # needed to support Astropy < 1.0
# pass
## Uncomment the following lines to display the version number of the
## package rather than the version number of Astropy in the top line when
## running the tests.
# import os
#
## This is to figure out the affiliated package version, rather than
## using Astropy's
# from . import version
#
# try:
# packagename = os.path.basename(os.path.dirname(__file__))
# TESTED_VERSIONS[packagename] = version.version
# except NameError: # Needed to support Astropy <= 1.0.0
# pass
| mit | 3,487,190,192,756,946,000 | 35.470588 | 73 | 0.734677 | false |
dongjoon-hyun/DIGITS | digits/scheduler.py | 3 | 19330 | # Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from collections import OrderedDict
import os
import re
import shutil
import signal
import time
import traceback
import flask
import gevent
import gevent.event
import gevent.queue
from . import utils
from .config import config_value
from .dataset import DatasetJob
from .job import Job
from .log import logger
from .model import ModelJob
from .status import Status
from digits.utils import errors
"""
This constant configures how long to wait before automatically
deleting completed non-persistent jobs
"""
NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS = 3600
class Resource(object):
"""
Stores information about which tasks are using a resource
"""
class ResourceAllocation(object):
"""
Marks that a task is using [part of] a resource
"""
def __init__(self, task, value):
"""
Arguments:
task -- which task is using the resource
value -- how much of the resource is being used
"""
self.task = task
self.value = value
def __init__(self, identifier=None, max_value=1):
"""
Keyword arguments:
identifier -- some way to identify this resource
max_value -- a numeric representation of the capacity of this resource
"""
if identifier is None:
self.identifier = id(self)
else:
self.identifier = identifier
self.max_value = max_value
self.allocations = []
def remaining(self):
"""
Returns the amount of this resource that is not being used
"""
return self.max_value - sum(a.value for a in self.allocations)
def allocate(self, task, value):
"""
A task is requesting to use this resource
"""
if self.remaining() - value < 0:
raise RuntimeError('Resource is already maxed out at %s/%s' % (
self.remaining(),
self.max_value)
)
self.allocations.append(self.ResourceAllocation(task, value))
def deallocate(self, task):
"""
The task has finished using this resource
"""
for i, a in enumerate(self.allocations):
if id(task) == id(a.task):
self.allocations.pop(i)
return True
return False
class Scheduler:
"""
Coordinates execution of Jobs
"""
def __init__(self, gpu_list=None, verbose=False):
"""
Keyword arguments:
gpu_list -- a comma-separated string which is a list of GPU id's
verbose -- if True, print more errors
"""
self.jobs = OrderedDict()
self.verbose = verbose
# Keeps track of resource usage
self.resources = {
# TODO: break this into CPU cores, memory usage, IO usage, etc.
'parse_folder_task_pool': [Resource()],
'create_db_task_pool': [Resource(max_value=2)],
'analyze_db_task_pool': [Resource(max_value=4)],
'inference_task_pool': [Resource(max_value=4)],
'gpus': [Resource(identifier=index)
for index in gpu_list.split(',')] if gpu_list else [],
}
self.running = False
self.shutdown = gevent.event.Event()
def load_past_jobs(self):
"""
Look in the jobs directory and load all valid jobs
"""
loaded_jobs = []
failed_jobs = []
for dir_name in sorted(os.listdir(config_value('jobs_dir'))):
if os.path.isdir(os.path.join(config_value('jobs_dir'), dir_name)):
# Make sure it hasn't already been loaded
if dir_name in self.jobs:
continue
try:
job = Job.load(dir_name)
# The server might have crashed
if job.status.is_running():
job.status = Status.ABORT
for task in job.tasks:
if task.status.is_running():
task.status = Status.ABORT
# We might have changed some attributes here or in __setstate__
job.save()
loaded_jobs.append(job)
except Exception as e:
failed_jobs.append((dir_name, e))
# add DatasetJobs
for job in loaded_jobs:
if isinstance(job, DatasetJob):
self.jobs[job.id()] = job
# add ModelJobs
for job in loaded_jobs:
if isinstance(job, ModelJob):
try:
# load the DatasetJob
job.load_dataset()
self.jobs[job.id()] = job
except Exception as e:
failed_jobs.append((dir_name, e))
logger.info('Loaded %d jobs.' % len(self.jobs))
if len(failed_jobs):
logger.warning('Failed to load %d jobs.' % len(failed_jobs))
if self.verbose:
for job_id, e in failed_jobs:
logger.debug('%s - %s: %s' % (job_id, type(e).__name__, str(e)))
def add_job(self, job):
"""
Add a job to self.jobs
"""
if not self.running:
logger.error('Scheduler not running. Cannot add job.')
return False
else:
self.jobs[job.id()] = job
# Need to fix this properly
# if True or flask._app_ctx_stack.top is not None:
from digits.webapp import app, socketio
with app.app_context():
# send message to job_management room that the job is added
socketio.emit('job update',
{
'update': 'added',
'job_id': job.id(),
},
namespace='/jobs',
room='job_management',
)
if 'DIGITS_MODE_TEST' not in os.environ:
# Let the scheduler do a little work before returning
time.sleep(utils.wait_time())
return True
def get_job(self, job_id):
"""
Look through self.jobs to try to find the Job
Returns None if not found
"""
if job_id is None:
return None
return self.jobs.get(job_id, None)
def get_related_jobs(self, job):
"""
Look through self.jobs to try to find the Jobs
whose parent contains job
"""
related_jobs = []
if isinstance(job, ModelJob):
datajob = job.dataset
related_jobs.append(datajob)
elif isinstance(job, DatasetJob):
datajob = job
else:
raise ValueError("Unhandled job type %s" % job.job_type())
for j in self.jobs.values():
# Any model that shares (this/the same) dataset should be added too:
if isinstance(j, ModelJob):
if datajob == j.train_task().dataset and j.id() != job.id():
related_jobs.append(j)
return related_jobs
def abort_job(self, job_id):
"""
Aborts a running Job
Returns True if the job was found and aborted
"""
job = self.get_job(job_id)
if job is None or not job.status.is_running():
return False
job.abort()
logger.info('Job aborted.', job_id=job_id)
return True
def delete_job(self, job):
"""
Deletes an entire job folder from disk
Returns True if the Job was found and deleted
"""
if isinstance(job, str) or isinstance(job, unicode):
job_id = str(job)
elif isinstance(job, Job):
job_id = job.id()
else:
raise ValueError('called delete_job with a %s' % type(job))
dependent_jobs = []
# try to find the job
job = self.jobs.get(job_id, None)
if job:
if isinstance(job, DatasetJob):
# check for dependencies
for j in self.jobs.values():
if isinstance(j, ModelJob) and j.dataset_id == job.id():
logger.error('Cannot delete "%s" (%s) because "%s" (%s) depends on it.' % (job.name(), job.id(), j.name(), j.id()))
dependent_jobs.append(j.name())
if len(dependent_jobs)>0:
error_message = 'Cannot delete "%s" because %d model%s depend%s on it: %s' % (
job.name(),
len(dependent_jobs),
('s' if len(dependent_jobs) != 1 else ''),
('s' if len(dependent_jobs) == 1 else ''),
', '.join(['"%s"' % j for j in dependent_jobs]))
raise errors.DeleteError(error_message)
self.jobs.pop(job_id, None)
job.abort()
if os.path.exists(job.dir()):
shutil.rmtree(job.dir())
logger.info('Job deleted.', job_id=job_id)
from digits.webapp import socketio
socketio.emit('job update',
{
'update': 'deleted',
'job_id': job.id()
},
namespace='/jobs',
room='job_management',
)
return True
# see if the folder exists on disk
path = os.path.join(config_value('jobs_dir'), job_id)
path = os.path.normpath(path)
if os.path.dirname(path) == config_value('jobs_dir') and os.path.exists(path):
shutil.rmtree(path)
return True
return False
def running_dataset_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, DatasetJob) and j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def completed_dataset_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, DatasetJob) and not j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def running_model_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, ModelJob) and j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def completed_model_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, ModelJob) and not j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def start(self):
"""
Start the Scheduler
Returns True on success
"""
if self.running:
return True
gevent.spawn(self.main_thread)
self.running = True
return True
def stop(self):
"""
Stop the Scheduler
Returns True if the shutdown was graceful
"""
self.shutdown.set()
wait_limit = 5
start = time.time()
while self.running:
if time.time() - start > wait_limit:
return False
time.sleep(0.1)
return True
def main_thread(self):
"""
Monitors the jobs in current_jobs, updates their statuses,
and puts their tasks in queues to be processed by other threads
"""
signal.signal(signal.SIGTERM, self.sigterm_handler)
try:
last_saved = None
while not self.shutdown.is_set():
# Iterate backwards so we can delete jobs
for job in self.jobs.values():
if job.status == Status.INIT:
def start_this_job(job):
if isinstance(job, ModelJob):
if job.dataset.status == Status.DONE:
job.status = Status.RUN
elif job.dataset.status in [Status.ABORT, Status.ERROR]:
job.abort()
else:
job.status = Status.WAIT
else:
job.status = Status.RUN
if 'DIGITS_MODE_TEST' in os.environ:
start_this_job(job)
else:
# Delay start by one second for initial page load
gevent.spawn_later(1, start_this_job, job)
if job.status == Status.WAIT:
if isinstance(job, ModelJob):
if job.dataset.status == Status.DONE:
job.status = Status.RUN
elif job.dataset.status in [Status.ABORT, Status.ERROR]:
job.abort()
else:
job.status = Status.RUN
if job.status == Status.RUN:
alldone = True
for task in job.tasks:
if task.status in [Status.INIT, Status.WAIT]:
alldone = False
# try to start the task
if task.ready_to_queue():
requested_resources = task.offer_resources(self.resources)
if requested_resources is None:
task.status = Status.WAIT
else:
if self.reserve_resources(task, requested_resources):
gevent.spawn(self.run_task,
task, requested_resources)
elif task.status == Status.RUN:
# job is not done
alldone = False
elif task.status in [Status.DONE, Status.ABORT]:
# job is done
pass
elif task.status == Status.ERROR:
# propagate error status up to job
job.status = Status.ERROR
alldone = False
break
else:
logger.warning('Unrecognized task status: "%s"', task.status, job_id=job.id())
if alldone:
job.status = Status.DONE
logger.info('Job complete.', job_id=job.id())
job.save()
# save running jobs every 15 seconds
if not last_saved or time.time()-last_saved > 15:
for job in self.jobs.values():
if job.status.is_running():
if job.is_persistent():
job.save()
elif (not job.is_persistent()) and (time.time() - job.status_history[-1][1] > NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS):
# job has been unclaimed for far too long => proceed to garbage collection
self.delete_job(job)
last_saved = time.time()
if 'DIGITS_MODE_TEST' not in os.environ:
time.sleep(utils.wait_time())
else:
time.sleep(0.05)
except KeyboardInterrupt:
pass
# Shutdown
for job in self.jobs.values():
job.abort()
job.save()
self.running = False
def sigterm_handler(self, signal, frame):
"""
Gunicorn shuts down workers with SIGTERM, not SIGKILL
"""
self.shutdown.set()
def task_error(self, task, error):
"""
Handle an error while executing a task
"""
logger.error('%s: %s' % (type(error).__name__, error), job_id=task.job_id)
task.exception = error
task.traceback = traceback.format_exc()
task.status = Status.ERROR
def reserve_resources(self, task, resources):
"""
Reserve resources for a task
"""
try:
# reserve resources
for resource_type, requests in resources.iteritems():
for identifier, value in requests:
found = False
for resource in self.resources[resource_type]:
if resource.identifier == identifier:
resource.allocate(task, value)
self.emit_gpus_available()
found = True
break
if not found:
raise RuntimeError('Resource "%s" with identifier="%s" not found' % (
resource_type, identifier))
task.current_resources = resources
return True
except Exception as e:
self.task_error(task, e)
self.release_resources(task, resources)
return False
def release_resources(self, task, resources):
"""
Release resources previously reserved for a task
"""
# release resources
for resource_type, requests in resources.iteritems():
for identifier, value in requests:
for resource in self.resources[resource_type]:
if resource.identifier == identifier:
resource.deallocate(task)
self.emit_gpus_available()
task.current_resources = None
def run_task(self, task, resources):
"""
Executes a task
Arguments:
task -- the task to run
resources -- the resources allocated for this task
a dict mapping resource_type to lists of (identifier, value) tuples
"""
try:
task.run(resources)
except Exception as e:
self.task_error(task, e)
finally:
self.release_resources(task, resources)
def emit_gpus_available(self):
"""
Call socketio.emit gpu availability
"""
from digits.webapp import scheduler, socketio
socketio.emit('server update',
{
'update': 'gpus_available',
'total_gpu_count': len(self.resources['gpus']),
'remaining_gpu_count': sum(r.remaining() for r in scheduler.resources['gpus']),
},
namespace='/jobs',
room='job_management'
)
| bsd-3-clause | -5,483,175,396,097,687,000 | 35.749049 | 145 | 0.482825 | false |
dreadsci/forget-me-not | test_structures.py | 2 | 13775 | import unittest
from datasets import *
class TestSimplest(Structure):
    """Minimal Structure: one positional base field plus one keyword param."""
    _fields = [
        Parsable('base', positional=True, keyword=False, required=True),
        Parsable('myParam', positional=False, keyword=True, required=True),
    ]
def test_simplest():
    """The dir string joins the bare positional value and the keyworded one."""
    inst = TestSimplest(base='a', myParam='b')
    expected = 'a/myParam-b'
    assert inst.get_dir_string() == expected
def test_simpler_strings():
    """Argument string and directory string both reflect the field values."""
    inst = TestSimplest(base='baseInst', myParam='paramInst')
    assert inst.get_arg_string() == '--base baseInst --myParam paramInst'
    assert inst.get_dir_string() == 'baseInst/myParam-paramInst'
def test_simple_assert_matches():
    """matches(assert_vals=...) passes only when the field's value is listed.

    Uses plain assertions instead of ``== True`` / ``== False`` comparisons
    (flake8 E712); the pass/fail semantics are unchanged.
    """
    inst = TestSimplest(base='baseInst', myParam='paramInst')
    # 'base' holds 'baseInst', which is in the asserted list -> match
    assert inst.matches(assert_vals={'base': ['baseInst', 'other']})
    # 'myParam' holds 'paramInst', which is not listed -> no match
    assert not inst.matches(assert_vals={'myParam': ['baseInst', 'other']})
def test_simple_negative_matches():
    """matches(drop_vals=...) rejects structures whose value is blacklisted.

    Uses plain assertions instead of ``== True`` / ``== False`` comparisons
    (flake8 E712); the pass/fail semantics are unchanged.
    """
    inst = TestSimplest(base='baseInst', myParam='paramInst')
    # 'base' holds a dropped value -> no match
    assert not inst.matches(drop_vals={'base': ['baseInst', 'other']})
    # 'myParam' holds none of the dropped values -> match
    assert inst.matches(drop_vals={'myParam': ['baseInst', 'other']})
def test_extra_matches():
    """Asserting on unknown keys or empty lists never matches; dropping on
    unknown keys always matches.

    Uses plain assertions instead of ``== True`` / ``== False`` comparisons
    (flake8 E712) and drops a redundant re-construction of an identical
    instance that the original performed mid-test.
    """
    inst = TestSimplest(base='baseInst', myParam='paramInst')
    # A field the structure doesn't define can never satisfy an assertion.
    assert not inst.matches(assert_vals={'base2': ['baseInst', 'other']})
    # An empty list of acceptable values can never be satisfied either.
    assert not inst.matches(assert_vals={'base': []})
    # Dropping on an unknown field excludes nothing.
    assert inst.matches(drop_vals={'base2': ['baseInst', 'other']})
def test_simple_arg_parse():
    """A structure round-trips through its own argument string."""
    original = TestSimplest(base='a', myParam='b')
    arg_string = original.get_arg_string()
    assert arg_string == '--base a --myParam b'
    rebuilt = TestSimplest.from_args(arg_string.split())
    assert rebuilt.get_dir_string() == original.get_dir_string()
def test_simple_none():
    """None is accepted as a field value and rendered literally as 'None'."""
    inst = TestSimplest(base='a', myParam=None)
    assert inst.myParam is None
    assert inst.get_dir_string() == 'a/myParam-None'
class TestNonList(Structure):
    """Structure exercising string defaults for all three field flavors."""
    _fields = [
        Parsable('mine', keyword=False, positional=True, default='sub-1'),
        Listable('mylist', keyword=False, positional=True, default='0-1-2'),
        Keyed('mykey', keyword=False, positional=True, default='a-0_b-1'),
    ]
def test_nonlist():
    """String defaults are parsed into their native Python types."""
    inst = TestNonList()
    assert inst.mine == 'sub-1'
    # Listable default '0-1-2' becomes a list of ints.
    assert inst.mylist == [0, 1, 2]
    # Keyed default 'a-0_b-1' becomes a key->int mapping.
    assert inst.mykey['a'] == 0
    assert inst.mykey['b'] == 1
def test_nonlist_string():
    """All three keyword-less fields render positionally, slash-joined."""
    dir_string = TestNonList().get_dir_string()
    print(dir_string)  # visible in pytest output on failure
    assert dir_string == 'sub-1/0-1-2/a-0_b-1'
def test_funky_matches():
    """matches() with several criteria: every one must hold; unknown keys fail.

    Uses plain assertions instead of ``== True`` / ``== False`` comparisons
    (flake8 E712); the pass/fail semantics are unchanged.
    """
    inst = TestNonList()
    # Every field's value appears in its candidate list -> match.
    assert inst.matches(assert_vals={'mine': ['sub-1', 'sub-2'],
                                     'mylist': ['0-1-2', '1-2-3'],
                                     'mykey': ['a-0_b-0', 'a-0_b-1']})
    # 'mykey' value 'a-0_b-1' is not in the candidate list -> no match.
    assert not inst.matches(assert_vals={'mine': ['sub-1', 'sub-2'],
                                         'mylist': ['0-1-2', '1-2-3'],
                                         'mykey': ['a-0_b-0', 'a-1_b-1']})
    # Asserting on a key the structure doesn't define -> no match.
    assert not inst.matches(assert_vals={'mine': ['sub-1', 'sub-2'],
                                         'mylist': ['0-1-2', '1-2-3'],
                                         'mykey': ['a-0_b-0', 'a-0_b-1'],
                                         'other': ['a', 'b']})
class TestTypes(Structure):
    """Covers every supported required/positional/keyword combination.

    Field names encode their flags: R = required, P = positional,
    K = keyword, with 'no' negating the following flag. Optional fields
    without a keyword are deliberately absent — they would be too
    ambiguous to parse back out of a directory string.
    """
    _fields = [
        Parsable('RPK', required=True, positional=True, keyword=True),
        Parsable('RPnoK', required=True, positional=True, keyword=False),
        Parsable('RnoPK', required=True, positional=False, keyword=True),
        Parsable('RnoPnoK', required=True, positional=False, keyword=False),
        Parsable('noRPK', required=False, positional=True, keyword=True),
        Parsable('noRnoPK', required=False, positional=False, keyword=True),
    ]
def test_all_type_config():
    """All six field flavors render into the expected directory string."""
    inst = TestTypes(RPK="overkill", RPnoK="arrogant", RnoPK="simple",
                     RnoPnoK="pushy", noRPK="verbose", noRnoPK="simpleopt")
    dir_string = inst.get_dir_string()
    print(dir_string)  # visible in pytest output on failure
    assert dir_string == "RPK-overkill/arrogant/noRPK-verbose/pushy_RnoPK-simple_noRnoPK-simpleopt"
    # The supplied optional positional field is set on the instance.
    assert hasattr(inst, 'noRPK')
def test_nonreq_type():
    """Omitted optional fields drop out of the dir string and the instance."""
    inst = TestTypes(RPK="overkill", RPnoK="arrogant",
                     RnoPK="simple", RnoPnoK="pushy")
    assert inst.get_dir_string() == "RPK-overkill/arrogant/pushy_RnoPK-simple"
    # The unset optional positional field is not even an attribute.
    assert not hasattr(inst, 'noRPK')
def test_reverse_types():
    """A full directory string parses back into the original field values."""
    parsed = TestTypes.params_from_dir_string(
        "RPK-overkill/arrogant/noRPK-verbose/pushy_RnoPK-simple_noRnoPK-simpleopt")
    print(parsed)  # visible in pytest output on failure
    expected = {'RPK': 'overkill', 'RPnoK': 'arrogant', 'RnoPK': 'simple',
                'noRPK': 'verbose', 'noRnoPK': 'simpleopt'}
    for key, value in expected.items():
        assert parsed[key] == value
def test_missing_opt():
    """A dir string lacking the optional keyword field omits it when parsed."""
    parsed = TestTypes.params_from_dir_string(
        "RPK-overkill/arrogant/noRPK-verbose/pushy_RnoPK-simple")
    print(parsed)  # visible in pytest output on failure
    for key, value in [('RPK', 'overkill'), ('RPnoK', 'arrogant'),
                       ('RnoPK', 'simple'), ('noRPK', 'verbose')]:
        assert parsed[key] == value
    # The absent optional keyword field must not appear in the result.
    assert 'noRnoPK' not in parsed
def test_missing_pos():
    """A dir string lacking the optional positional field omits it when parsed."""
    parsed = TestTypes.params_from_dir_string(
        "RPK-overkill/arrogant/pushy_RnoPK-simple_noRnoPK-simpleopt")
    print(parsed)  # visible in pytest output on failure
    for key, value in [('RPK', 'overkill'), ('RPnoK', 'arrogant'),
                       ('RnoPK', 'simple'), ('noRnoPK', 'simpleopt')]:
        assert parsed[key] == value
    # The absent optional positional field must not appear in the result.
    assert 'noRPK' not in parsed
class TestBools(Structure):
    """Structure mixing two booleans (opposite defaults) and one parsable."""
    _fields = [
        Boolean('safety', default=True),
        Boolean('verbose', default=False),
        Parsable('hello', default='world'),
    ]
def test_falses():
    """'_off' command-line switches force both booleans to False."""
    cfg = TestBools.from_args(["--safety_off", "--verbose_off"])
    assert cfg.safety == False
    assert cfg.verbose == False
    print(cfg.get_arg_string())
    assert cfg.get_arg_string() == "--safety_off --verbose_off --hello world"
def test_trues():
    """Plain switches enable the booleans; the keyword overrides its default."""
    cfg = TestBools.from_args(["--safety", "--verbose", "--hello", "universe"])
    assert cfg.safety
    assert cfg.verbose
    assert cfg.get_arg_string() == "--safety --verbose --hello universe"
class TestImpossibleParsable(Structure):
    """Required, non-positional fields: only expressible via keywords."""
    _fields = [
        Parsable('opt', required=True, positional=False),
        Parsable('helpful', required=True, positional=False, default='nada'),
    ]
def test_impossible_string():
    """Keyword-only fields are rendered keyed and sorted in the dir string."""
    cfg = TestImpossibleParsable(opt='hello')
    print(cfg.get_dir_string())
    assert cfg.get_dir_string() == 'helpful-nada_opt-hello'
def test_gottaGiveSomething():
    """Omitting a required field with no default must raise TypeError.

    Bug fix: the original version *returned* True/False instead of
    asserting -- pytest ignores return values, so the test could never
    fail.  The failure path is now an explicit assert.
    """
    TestImpossibleParsable(opt='hello')  # sanity check: supplying 'opt' works
    try:
        TestImpossibleParsable()
    except TypeError:
        return  # expected: 'opt' is required and has no default
    assert False, "TestImpossibleParsable() without 'opt' should raise TypeError"
class TestKeywordlessKeyed(Structure):
    """A Keyed field that is positional and rendered without its keyword."""
    _fields = [
        Keyed('myReq', required=True, keyword=False, positional=True),
        Parsable('hello', required=True, default='world'),
    ]
def test_keywordless():
    """Keywordless Keyed value appears bare at the front of the dir string."""
    cfg = TestKeywordlessKeyed(myReq='got-it')
    print(cfg.get_dir_string())
    assert cfg.get_dir_string() == 'got-it/hello-world'
class TestKeywordedKeyed(Structure):
    """A Keyed field rendered with its keyword, after a positional parsable."""
    _fields = [
        Keyed('myKeyReq', required=True, keyword=True),
        Parsable('hello', required=True, default='world', positional=True),
    ]
def test_keyworded():
    """Keyworded Keyed value is appended as 'name-value' after positionals."""
    cfg = TestKeywordedKeyed(myKeyReq='got-it')
    print(cfg.get_dir_string())
    assert cfg.get_dir_string() == 'hello-world/myKeyReq-got-it'
class TestDefaults(Structure):
    """Default-value semantics.

    If `empty` is used as the default, the attribute only exists once it
    has been set explicitly; a default of None is treated as a real,
    valid value.
    """
    _fields = [
        Parsable('must', required=True),
        Parsable('default', required=False, default=None),
        Parsable('conditional', required=False, default=empty),
    ]
def test_defaults_set():
    """Round-trip through the arg string when every default is overridden.

    Bug fix: the final assertion compared ``t2.get_dir_string()`` with
    itself, which is vacuously true; it now compares the round-tripped
    instance against the original.
    """
    t = TestDefaults(must='hello', default='world', conditional='hi')
    assert t.get_dir_string() == 'conditional-hi_default-world_must-hello'
    tas = t.get_arg_string()
    assert tas == '--must hello --default world --conditional hi'
    t2 = TestDefaults.from_args(tas.split())
    assert t.get_dir_string() == t2.get_dir_string()
def test_defaults_not_set():
    """Unset conditional field stays absent; a None default round-trips.

    Bug fix: the final assertion compared ``t2.get_dir_string()`` with
    itself; it now compares the round-tripped instance against the
    original.
    """
    t = TestDefaults(must='hello')
    assert t.default == None
    assert t.get_dir_string() == 'default-None_must-hello'
    tas = t.get_arg_string()
    assert tas == '--must hello --default None '
    t2 = TestDefaults.from_args(tas.split())
    assert t.get_dir_string() == t2.get_dir_string()
class TestListables(Structure):
    """Two required list-valued fields: one keywordless, one keyworded."""
    _fields = [
        Listable('myPrimeList', required=True, keyword=False),
        Listable('myOtherList', required=True, keyword=True),
    ]
def test_list_params():
    """List fields serialise dash-joined and parse back from a dir string."""
    cfg = TestListables(myPrimeList='a-b-c', myOtherList='0-1-2')
    dir_string = cfg.get_dir_string()
    print(dir_string)
    print("npnk key: ", cfg._has_npnk)
    assert dir_string == 'a-b-c_myOtherList-0-1-2'
    params = cfg.params_from_dir_string(dir_string)
    print(params)
    assert params['base_dir'] == ''
    assert params['myPrimeList'] == 'a-b-c'
    assert params['myOtherList'] == '0-1-2'
def test_number_list():
    """Numeric-looking list entries are coerced to ints and floats."""
    cfg = TestListables(myPrimeList='0-1-2', myOtherList='0.99')
    assert cfg.myPrimeList == [0, 1, 2]
    assert cfg.myOtherList == [0.99]
class TestStructs(Structure):
    """A structure embedding two child Struct fields."""
    _fields = [
        Parsable('nom', default='hello', required=True),
        Struct('child', dtype=TestSimplest),
        Struct('problem_child', dtype=TestDefaults),
    ]
def test_simple_inherit():
    """Keyword args are routed to the matching child struct's fields."""
    cfg = TestStructs(nom='hi', base='a', myParam='b', must='hello')
    assert cfg.child.base == 'a'
    assert cfg.nom == 'hi'
class TestChoices(Structure):
    """A single field constrained to an explicit choice list."""
    _fields = [Parsable('pick1', choices=['a', 'b', 'c'])]
def test_choices():
    """A value from the choices list is accepted as-is."""
    cfg = TestChoices(pick1='a')
    assert cfg.pick1 == 'a'
def test_bad_choices():
    """A value outside the choices list raises TypeError."""
    try:
        TestChoices(pick1='d')
        assert False
    except TypeError:
        assert True
class TestNargs(Structure):
    """nargs variants: exactly one, one-or-more, zero-or-more."""
    _fields = [
        Parsable('must', nargs=1),
        Parsable('may', nargs='+'),
        Parsable('might', nargs='*'),
    ]
def test_simple_nargs():
    """Command-line parsing honours each field's nargs cardinality."""
    cfg = TestNargs.from_args('--must hello --may be --might somewhere'.split())
    assert cfg.must == ['hello']
    assert cfg.may == ['be']
    assert cfg.might == ['somewhere']
    cfg = TestNargs.from_args('--must be there --may be here --might be somewhere'.split())
    assert cfg.must == ['be']
    assert cfg.may == ['be', 'here']
    assert cfg.might == ['be', 'somewhere']
def test_nargs_direct():
    """Direct keyword construction wraps scalars and truncates nargs=1 lists."""
    cfg = TestNargs(must='hello', may='be', might='somewhere')
    assert cfg.must == ['hello']
    assert cfg.may == ['be']
    assert cfg.might == ['somewhere']
    cfg = TestNargs(must=['hello'], may=['be'], might=['somewhere'])
    assert cfg.must == ['hello']
    assert cfg.may == ['be']
    assert cfg.might == ['somewhere']
    cfg = TestNargs(must=['be', 'there'], may=['be', 'here'],
                    might=['be', 'somewhere'])
    assert cfg.must == ['be']
    assert cfg.may == ['be', 'here']
    assert cfg.might == ['be', 'somewhere']
def test_missing_narg():
    """argparse exits when required narg values are absent; extras truncate."""
    try:
        TestNargs.from_args('--must --may --might'.split())
        assert False
    except SystemExit:
        assert True
    cfg = TestNargs.from_args('--must too many --may one --might two'.split())
    assert cfg.must == ['too']
    cfg = TestNargs.from_args('--must just --may one --might'.split())
    assert cfg.must == ['just']
    assert cfg.may == ['one']
    assert cfg.might == []
def test_missing_narg_keyword():
    """Keyword construction raises TypeError for missing required fields."""
    try:
        TestNargs()
        assert False
    except TypeError:
        assert True
    cfg = TestNargs(must=['too', 'many'], may='one', might='two')
    assert cfg.must == ['too']
    assert cfg.may == ['one']
    assert cfg.might == ['two']
    cfg = TestNargs(must=['just'], may='one')
    assert cfg.must == ['just']
    assert cfg.may == ['one']
    assert not hasattr(cfg, 'might')  # TODO: is this difference going to be problematic?
class TestAllNargs(Structure):
    """All four nargs forms: fixed count, optional single, star and plus."""
    _fields = [
        Parsable('strict', nargs=2),
        Integer('binary', nargs='?'),
        Parsable('flexible', nargs='*'),
        Parsable('something', nargs='+'),
    ]
def test_all_nargs_given():
    """Every nargs field populated via keywords; Integer coerces its value."""
    cfg = TestAllNargs(strict=['a', 'b'], binary='0', flexible=['1', '2', '3'],
                       something=['d', 'e'])
    assert cfg.strict == ['a', 'b']
    assert cfg.binary == [0]
    assert cfg.flexible == ['1', '2', '3']
    assert cfg.something == ['d', 'e']
def test_mandatory_nargs_given():
    """Only the mandatory nargs fields supplied; scalars are list-wrapped."""
    cfg = TestAllNargs(strict=['a', 'b'], something='a')
    assert cfg.strict == ['a', 'b']
    assert cfg.something == ['a']
def test_all_nargs_args():
    """Same coverage as test_all_nargs_given, but via command-line parsing."""
    cfg = TestAllNargs.from_args(
        '--strict a b --binary 0 --flexible 1 2 3 --something d e'.split())
    assert cfg.strict == ['a', 'b']
    assert cfg.binary == [0]
    assert cfg.flexible == ['1', '2', '3']
    assert cfg.something == ['d', 'e']
class TestChoiceNargs(Structure):
    """choices combined with nargs, for both strings and integers."""
    _fields = [
        Parsable('select', choices=['a', 'b', 'c']),
        Parsable('letters', nargs='+'),
        Integer('numbers', choices=[0, 1, 2], nargs='+'),
    ]
def test_chosen_nargs():
    """Valid choice values pass through for scalar and list fields alike."""
    cfg = TestChoiceNargs(select='a', letters=['d', 'e', 'f'], numbers=[0, 1])
    assert cfg.select == 'a'
    assert cfg.letters == ['d', 'e', 'f']
    assert cfg.numbers == [0, 1]
def test_invalid_narg_choices():
    """Each element of an nargs list is validated against choices."""
    cfg = TestChoiceNargs(select='a', letters='a', numbers=0)
    assert cfg.numbers == [0]
    try:
        TestChoiceNargs(select='a', letters='a', numbers=99)
        assert False
    except TypeError:
        assert True
if __name__ == '__main__':
    # NOTE(review): these tests are plain pytest-style functions, not
    # unittest.TestCase subclasses, so unittest.main() will not discover
    # them -- running this file directly is likely a no-op.  Confirm, and
    # consider running the file under pytest instead.
    unittest.main()
40223143/2015_0505 | static/Brython3.1.1-20150328-091302/Lib/ui/slider.py | 603 | 2394 | from . import widget
from browser import doc,html
class Slider(widget.Widget):
    """A horizontal, jQuery-UI styled slider: a DIV shell plus an anchor
    element used as the drag handle.

    NOTE(review): the mouse handling below mixes ``self._handle.style.left``,
    ``self._handle.left`` and ``ev.x``; presumably these all resolve through
    Brython's DOM wrappers -- confirm against the browser module.
    """
    def __init__(self, id=None, label=False):
        # Outer shell carries the jQuery-UI slider classes for styling.
        self._div_shell=html.DIV(Class="ui-slider ui-slider-horizontal ui-widget ui-widget-content ui-corner-all")
        widget.Widget.__init__(self, self._div_shell, 'slider', id)
        # The draggable handle, initially docked at the left edge.
        self._handle=html.A(Class="ui-slider-handle ui-state-default ui-corner-all",
                            Href='#', style={'left': '0px'})
        self._value=0            # slider value; only set here, never updated by drags
        self._isMouseDown=False  # True while a drag is in progress
        self.m0 = [None, None]   # NOTE(review): appears unused in this class -- confirm
        def startSlide(ev):
            # Begin a drag: remember where the mouse went down and the
            # handle's current offset so moves can be applied relatively.
            self._isMouseDown=True
            self._upperBound = self._div_shell.offsetWidth - self._handle.offsetWidth
            pos = widget.getMousePosition(ev)
            self._startMouseX=pos['x']
            print('left', self._handle.style.left,'ev.x',ev.x)
            self._lastElementLeft = int(self._handle.left)
            print('left', self._lastElementLeft)
            updatePosition(ev)
        def updatePosition(ev):
            #pos = widget.getMousePosition(ev)
            #print('mose pos',pos)
            # Clamp the new handle offset into [0, upper bound].
            _newPos = self._lastElementLeft + ev.x - self._startMouseX
            _newPos = max(0, _newPos)
            _newPos = min(_newPos, self._upperBound)
            self._handle.left = _newPos
            print('new position',self._handle.style.left)
            self._lastElementLeft = _newPos
        def moving(e):
            # Track mouse movement only while a drag is active.
            if self._isMouseDown:
                updatePosition(e)
        def dropCallback(e):
            # End of drag: stop tracking moves until the next mousedown.
            self._isMouseDown=False
            self._handle.unbind('mousemove', moving)
        self._handle.bind('mousemove', moving)
        self._handle.bind('mouseup', dropCallback)
        #self._handle.bind('mouseout', dropCallback)
        self._handle.bind('mousedown', startSlide)
        def mouseover(e):
            # Add the jQuery-UI hover class while the pointer is over the handle.
            _class=self._handle.getAttribute('class')
            self._handle.setAttribute('class', '%s %s' % (_class, 'ui-state-hover'))
        def mouseout(e):
            # Leaving the handle drops the hover class and cancels any drag.
            self._isMouseDown=False
            _class=self._handle.getAttribute('class')
            self._handle.setAttribute('class', _class.replace('ui-state-hover', ''))
        self._handle.bind('mouseover', mouseover)
        self._handle.bind('mouseout', mouseout)
        self._div_shell <= self._handle
    def get_value(self):
        """Return the slider's value (set once at construction)."""
        return self._value
    #def set_value(self, value):
    #   self._value=value
    #   self._handle.style.left='%spx' % value
| agpl-3.0 | 4,770,960,762,299,990,000 | 30.92 | 112 | 0.59858 | false |
reversefold/mysql-connector-python | lib/mysql/connector/catch23.py | 26 | 3574 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Python v2 to v3 migration module"""
from decimal import Decimal
import struct
import sys
from .custom_types import HexLiteral
# pylint: disable=E0602,E1103
# True when running under a Python 2 interpreter.
PY2 = sys.version_info[0] == 2
if PY2:
    # Python 2: long and unicode are distinct types.
    NUMERIC_TYPES = (int, float, Decimal, HexLiteral, long)
    INT_TYPES = (int, long)
    UNICODE_TYPES = (unicode,)
    STRING_TYPES = (str, unicode)
    BYTE_TYPES = (bytearray,)
else:
    # Python 3: str is unicode and bytes is a first-class type.
    NUMERIC_TYPES = (int, float, Decimal, HexLiteral)
    INT_TYPES = (int,)
    UNICODE_TYPES = (str,)
    STRING_TYPES = (str,)
    BYTE_TYPES = (bytearray, bytes)
def init_bytearray(payload=b'', encoding='utf-8'):
    """Build a bytearray from *payload*.

    Bytearrays pass through unchanged (same object, no copy).  On
    Python 2 any other payload is handed straight to bytearray().  On
    Python 3 an int pre-allocates that many zero bytes, bytes are
    copied, and any other object must expose .encode(); otherwise a
    ValueError is raised.
    """
    if isinstance(payload, bytearray):
        return payload
    if PY2:
        return bytearray(payload)
    if isinstance(payload, (int, bytes)):
        return bytearray(payload)
    try:
        encoded = payload.encode(encoding=encoding)
    except AttributeError:
        raise ValueError("payload must be a str or bytes")
    return bytearray(encoded)
def isstr(obj):
    """Return whether *obj* is a string type for the running interpreter."""
    string_type = basestring if PY2 else str
    return isinstance(obj, string_type)
def isunicode(obj):
    """Return whether *obj* is a unicode string for the running interpreter."""
    unicode_type = unicode if PY2 else str
    return isinstance(obj, unicode_type)
if PY2:
    def struct_unpack(fmt, buf):
        """Wrapper around struct.unpack handling buffer as bytes and strings"""
        # On Py2, wrap bytearray/bytes in buffer() so unpack_from can read
        # them without an extra copy.
        if isinstance(buf, (bytearray, bytes)):
            return struct.unpack_from(fmt, buffer(buf))
        return struct.unpack_from(fmt, buf)
else:
    # On Py3 struct.unpack already accepts any buffer-protocol object.
    struct_unpack = struct.unpack # pylint: disable=C0103
def make_abc(base_class):
    """Decorator factory used to create an abstract base class.

    We use this decorator to create abstract base classes instead of
    using the abc-module directly.  It makes the same declaration work
    in both Python v2 and v3 code.
    """
    def wrapper(class_):
        """Rebuild the decorated class through *base_class* (a metaclass)."""
        # Copy the class attributes, dropping the auto-generated ones
        # that must not be passed to the metaclass constructor.
        attrs = class_.__dict__.copy()
        for attr in '__dict__', '__weakref__':
            attrs.pop(attr, None) # ignore missing attributes
        bases = class_.__bases__
        if PY2:
            # Py2 spelling: declare the metaclass as a class attribute.
            attrs['__metaclass__'] = class_
        else:
            # Py3 spelling: inject the original class as an extra base.
            bases = (class_,) + bases
        return base_class(class_.__name__, bases, attrs)
    return wrapper
| gpl-2.0 | 5,147,846,902,702,296,000 | 30.350877 | 79 | 0.671237 | false |
Shekharrajak/password-alert | server/admin.py | 5 | 3396 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Administrative frontend for viewing reports and setting status of hosts."""
__author__ = '[email protected] (Drew Hintz)'
import json
import logging
import os
import auth
import datastore
import jinja2
import password_change
import webapp2
import xsrf
from google.appengine.ext import db
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
JINJA_ENVIRONMENT.globals['xsrf_token'] = xsrf.xsrf_token
class MainHandler(webapp2.RequestHandler):
    """Displays the list of recent reports from users."""
    @auth.admin_authorization_required
    def get(self):
        """Render the 100 most recent reports for the current domain."""
        query = datastore.Report.all().order('-date')
        query.filter('domain =', datastore.CURRENT_DOMAIN)
        reports = query.fetch(100)
        if not reports:
            reports = None  # Conversion for templating.
        template_values = {
            'reports': reports,
            'current_domain': datastore.CURRENT_DOMAIN,
            'xsrf_token': xsrf.xsrf_token()
        }
        template = JINJA_ENVIRONMENT.get_template('templates/admin.html')
        self.response.write(template.render(template_values))
class HostsHandler(webapp2.RequestHandler):
    """Display the list of allowed hosts."""
    @auth.admin_authorization_required
    def get(self):
        """Render up to 100 allowed hosts for the current domain."""
        query = datastore.Host.all()
        query.filter('domain =', datastore.CURRENT_DOMAIN)
        query.filter('status = ', datastore.ALLOWED)
        query.order('host')
        hosts = query.fetch(100)
        template_values = {
            'hosts': hosts,
            'current_domain': datastore.CURRENT_DOMAIN
        }
        template = JINJA_ENVIRONMENT.get_template('templates/hosts.html')
        self.response.write(template.render(template_values))
    @xsrf.xsrf_protect
    @auth.admin_authorization_required
    def post(self):
        """Create or update a host entry, keyed as '<domain>:<raw host>'."""
        host = datastore.Host(
            key=db.Key.from_path(
                'Host',
                datastore.CURRENT_DOMAIN + ':' + self.request.get('host')))
        host.domain = datastore.CURRENT_DOMAIN
        # NOTE(review): the key above uses the raw 'host' parameter while the
        # stored field is normalized -- confirm this mismatch is intentional.
        host.host = datastore.NormalizeUrl(self.request.get('host'))
        host.status = datastore.GetStatus(self.request.get('updatedHostStatusName'))
        host.put()
        self.response.write('{}')  # core-ajax library expects a JSON response.
class PasswordHandler(webapp2.RequestHandler):
    """Expires user passwords."""
    @xsrf.xsrf_protect
    @auth.admin_authorization_required
    def post(self):
        """Force the given user to change their password at next login."""
        email = self.request.get('email')
        logging.info('Expiring password for: %s', email)
        result = password_change.ChangePasswordAtNextLogin(email)
        self.response.headers['Content-Type'] = 'application/json'
        return self.response.out.write(json.dumps(result))
# URL routing for the admin frontend.
application = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/hosts', HostsHandler),
    ('/password', PasswordHandler)
])
| apache-2.0 | -179,779,346,076,768,960 | 31.037736 | 80 | 0.709658 | false |
samthetechie/pyFolia | venv/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/command/bdist_rpm.py | 470 | 2025 | # This is just a kludge so that bdist_rpm doesn't guess wrong about the
# distribution name and version, if the egg_info command is going to alter
# them, another kludge to allow you to build old-style non-egg RPMs, and
# finally, a kludge to track .rpm files for uploading when run on Python <2.5.
from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm
import sys, os
class bdist_rpm(_bdist_rpm):
    """bdist_rpm variant that cooperates with setuptools' egg_info.

    Refreshes the distribution name/version before building and rewrites
    the generated spec file so RPM sees a '-'-free version while the
    tarball keeps the original ("unmangled") version string.
    """
    def initialize_options(self):
        _bdist_rpm.initialize_options(self)
        self.no_egg = None   # option allowing old-style, non-egg RPM builds
    if sys.version<"2.5":
        # Track for uploading any .rpm file(s) moved to self.dist_dir
        def move_file(self, src, dst, level=1):
            _bdist_rpm.move_file(self, src, dst, level)
            if dst==self.dist_dir and src.endswith('.rpm'):
                # Record (command, python tag, path) for upload tracking;
                # source RPMs get the 'any' tag.
                getattr(self.distribution,'dist_files',[]).append(
                    ('bdist_rpm',
                    src.endswith('.src.rpm') and 'any' or sys.version[:3],
                    os.path.join(dst, os.path.basename(src)))
                )
    def run(self):
        self.run_command('egg_info') # ensure distro name is up-to-date
        _bdist_rpm.run(self)
    def _make_spec_file(self):
        """Rewrite the distutils spec file to use the unmangled version."""
        version = self.distribution.get_version()
        rpmversion = version.replace('-','_')   # RPM forbids '-' in versions
        spec = _bdist_rpm._make_spec_file(self)
        line23 = '%define version '+version
        line24 = '%define version '+rpmversion
        # Patch every line: point Source0/%setup at the unmangled version,
        # force single-version-externally-managed installs, and swap the
        # version define for its RPM-safe spelling.
        spec = [
            line.replace(
                "Source0: %{name}-%{version}.tar",
                "Source0: %{name}-%{unmangled_version}.tar"
            ).replace(
                "setup.py install ",
                "setup.py install --single-version-externally-managed "
            ).replace(
                "%setup",
                "%setup -n %{name}-%{unmangled_version}"
            ).replace(line23,line24)
            for line in spec
        ]
        spec.insert(spec.index(line24)+1, "%define unmangled_version "+version)
        return spec
| gpl-3.0 | -2,203,616,605,688,565,200 | 23.695122 | 79 | 0.561481 | false |
fengjiang96/tushare | tushare/stock/cons.py | 10 | 8788 | # -*- coding:utf-8 -*-
"""
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
VERSION = '0.3.6'
K_LABELS = ['D', 'W', 'M']
K_MIN_LABELS = ['5', '15', '30', '60']
K_TYPE = {'D': 'akdaily', 'W': 'akweekly', 'M': 'akmonthly'}
INDEX_LABELS = ['sh', 'sz', 'hs300', 'sz50', 'cyb', 'zxb']
INDEX_LIST = {'sh': 'sh000001', 'sz': 'sz399001', 'hs300': 'sz399300',
'sz50': 'sh000016', 'zxb': 'sz399005', 'cyb': 'sz399006'}
P_TYPE = {'http': 'http://', 'ftp': 'ftp://'}
PAGE_NUM = [38, 60, 80, 100]
FORMAT = lambda x: '%.2f' % x
DOMAINS = {'sina': 'sina.com.cn', 'sinahq': 'sinajs.cn',
'ifeng': 'ifeng.com', 'sf': 'finance.sina.com.cn',
'vsf': 'vip.stock.finance.sina.com.cn',
'idx': 'www.csindex.com.cn', '163': 'money.163.com',
'em': 'eastmoney.com', 'sseq': 'query.sse.com.cn',
'sse': 'www.sse.com.cn', 'szse': 'www.szse.cn',
'oss': '218.244.146.57',
'shibor': 'www.shibor.org'}
PAGES = {'fd': 'index.phtml', 'dl': 'downxls.php', 'jv': 'json_v2.php',
'cpt': 'newFLJK.php', 'ids': 'newSinaHy.php', 'lnews':'rollnews_ch_out_interface.php',
'ntinfo':'vCB_BulletinGather.php', 'hs300b':'000300cons.xls',
'hs300w':'000300closeweight.xls','sz50b':'000016cons.xls',
'dp':'all_fpya.php', '163dp':'fpyg.html',
'emxsg':'JS.aspx', '163fh':'jjcgph.php',
'newstock':'vRPD_NewStockIssue.php', 'zz500b':'000905cons.xls',
't_ticks':'vMS_tradedetail.php', 'dw': 'downLoad.html',
'qmd':'queryMargin.do', 'szsefc':'FrontController.szse',
'ssecq':'commonQuery.do'}
TICK_COLUMNS = ['time', 'price', 'change', 'volume', 'amount', 'type']
TODAY_TICK_COLUMNS = ['time', 'price', 'pchange', 'change', 'volume', 'amount', 'type']
DAY_TRADING_COLUMNS = ['code', 'symbol', 'name', 'changepercent',
'trade', 'open', 'high', 'low', 'settlement', 'volume', 'turnoverratio']
REPORT_COLS = ['code', 'name', 'eps', 'eps_yoy', 'bvps', 'roe',
'epcf', 'net_profits', 'profits_yoy', 'distrib', 'report_date']
FORECAST_COLS = ['code', 'name', 'type', 'report_date', 'pre_eps', 'range']
PROFIT_COLS = ['code', 'name', 'roe', 'net_profit_ratio',
'gross_profit_rate', 'net_profits', 'eps', 'business_income', 'bips']
OPERATION_COLS = ['code', 'name', 'arturnover', 'arturndays', 'inventory_turnover',
'inventory_days', 'currentasset_turnover', 'currentasset_days']
GROWTH_COLS = ['code', 'name', 'mbrg', 'nprg', 'nav', 'targ', 'epsg', 'seg']
DEBTPAYING_COLS = ['code', 'name', 'currentratio',
'quickratio', 'cashratio', 'icratio', 'sheqratio', 'adratio']
CASHFLOW_COLS = ['code', 'name', 'cf_sales', 'rateofreturn',
'cf_nm', 'cf_liabilities', 'cashflowratio']
DAY_PRICE_COLUMNS = ['date', 'open', 'high', 'close', 'low', 'volume', 'price_change', 'p_change',
'ma5', 'ma10', 'ma20', 'v_ma5', 'v_ma10', 'v_ma20', 'turnover']
INX_DAY_PRICE_COLUMNS = ['date', 'open', 'high', 'close', 'low', 'volume', 'price_change', 'p_change',
'ma5', 'ma10', 'ma20', 'v_ma5', 'v_ma10', 'v_ma20']
LIVE_DATA_COLS = ['name', 'open', 'pre_close', 'price', 'high', 'low', 'bid', 'ask', 'volume', 'amount',
'b1_v', 'b1_p', 'b2_v', 'b2_p', 'b3_v', 'b3_p', 'b4_v', 'b4_p', 'b5_v', 'b5_p',
'a1_v', 'a1_p', 'a2_v', 'a2_p', 'a3_v', 'a3_p', 'a4_v', 'a4_p', 'a5_v', 'a5_p', 'date', 'time', 's']
FOR_CLASSIFY_B_COLS = ['code','name']
FOR_CLASSIFY_W_COLS = ['date','code','weight']
THE_FIELDS = ['code','symbol','name','changepercent','trade','open','high','low','settlement','volume','turnoverratio']
TICK_PRICE_URL = '%smarket.%s/%s?date=%s&symbol=%s'
TODAY_TICKS_PAGE_URL = '%s%s/quotes_service/api/%s/CN_Transactions.getAllPageTime?date=%s&symbol=%s'
TODAY_TICKS_URL = '%s%s/quotes_service/view/%s?symbol=%s&date=%s&page=%s'
DAY_PRICE_URL = '%sapi.finance.%s/%s/?code=%s&type=last'
LIVE_DATA_URL = '%shq.%s/rn=%s&list=%s'
DAY_PRICE_MIN_URL = '%sapi.finance.%s/akmin?scode=%s&type=%s'
SINA_DAY_PRICE_URL = '%s%s/quotes_service/api/%s/Market_Center.getHQNodeData?num=80&sort=changepercent&asc=0&node=hs_a&symbol=&_s_r_a=page&page=%s'
REPORT_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/mainindex/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
FORECAST_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/performance/%s?s_i=&s_a=&s_c=&s_type=&reportdate=%s&quarter=%s&p=%s&num=%s'
PROFIT_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/profit/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
OPERATION_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/operation/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
GROWTH_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/grow/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
DEBTPAYING_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/debtpaying/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
CASHFLOW_URL = '%s%s/q/go.php/vFinanceAnalyze/kind/cashflow/%s?s_i=&s_a=&s_c=&reportdate=%s&quarter=%s&p=%s&num=%s'
SHIBOR_TYPE ={'Shibor': 'Shibor数据', 'Quote': '报价数据', 'Tendency': 'Shibor均值数据',
'LPR': 'LPR数据', 'LPR_Tendency': 'LPR均值数据'}
SHIBOR_DATA_URL = '%s%s/shibor/web/html/%s?nameNew=Historical_%s_Data_%s.xls&downLoadPath=data&nameOld=%s%s.xls&shiborSrc=http://www.shibor.org/shibor/'
ALL_STOCK_BASICS_FILE = '%s%s/static/all.csv'%(P_TYPE['http'], DOMAINS['oss'])
SINA_CONCEPTS_INDEX_URL = '%smoney.%s/q/view/%s?param=class'
SINA_INDUSTRY_INDEX_URL = '%s%s/q/view/%s'
SINA_DATA_DETAIL_URL = '%s%s/quotes_service/api/%s/Market_Center.getHQNodeData?page=1&num=400&sort=symbol&asc=1&node=%s&symbol=&_s_r_a=page'
INDEX_C_COMM = 'sseportal/ps/zhs/hqjt/csi'
HS300_CLASSIFY_URL = '%s%s/%s/%s'
HIST_FQ_URL = '%s%s/corp/go.php/vMS_FuQuanMarketHistory/stockid/%s.phtml?year=%s&jidu=%s'
HIST_INDEX_URL = '%s%s/corp/go.php/vMS_MarketHistory/stockid/%s/type/S.phtml?year=%s&jidu=%s'
HIST_FQ_FACTOR_URL = '%s%s/api/json.php/BasicStockSrv.getStockFuQuanData?symbol=%s&type=hfq'
INDEX_HQ_URL = '''%shq.%s/rn=xppzh&list=sh000001,sh000002,sh000003,sh000008,sh000009,sh000010,sh000011,sh000012,sh000016,sh000017,sh000300,sz399001,sz399002,sz399003,sz399004,sz399005,sz399006,sz399100,sz399101,sz399106,sz399107,sz399108,sz399333,sz399606'''
SSEQ_CQ_REF_URL = '%s%s/assortment/stock/list/name'
ALL_STK_URL = '%s%s/all.csv'
SHIBOR_COLS = ['date', 'ON', '1W', '2W', '1M', '3M', '6M', '9M', '1Y']
QUOTE_COLS = ['date', 'bank', 'ON_B', 'ON_A', '1W_B', '1W_A', '2W_B', '2W_A', '1M_B', '1M_A',
'3M_B', '3M_A', '6M_B', '6M_A', '9M_B', '9M_A', '1Y_B', '1Y_A']
SHIBOR_MA_COLS = ['date', 'ON_5', 'ON_10', 'ON_20', '1W_5', '1W_10', '1W_20','2W_5', '2W_10', '2W_20',
'1M_5', '1M_10', '1M_20', '3M_5', '3M_10', '3M_20', '6M_5', '6M_10', '6M_20',
'9M_5', '9M_10', '9M_20','1Y_5', '1Y_10', '1Y_20']
LPR_COLS = ['date', '1Y']
LPR_MA_COLS = ['date', '1Y_5', '1Y_10', '1Y_20']
INDEX_HEADER = 'code,name,open,preclose,close,high,low,0,0,volume,amount,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,d,c,3\n'
INDEX_COLS = ['code', 'name', 'change', 'open', 'preclose', 'close', 'high', 'low', 'volume', 'amount']
HIST_FQ_COLS = ['date', 'open', 'high', 'close', 'low', 'volume', 'amount', 'factor']
HIST_FQ_FACTOR_COLS = ['code','value']
DATA_GETTING_TIPS = '[Getting data:]'
DATA_GETTING_FLAG = '#'
DATA_ROWS_TIPS = '%s rows data found.Please wait for a moment.'
DATA_INPUT_ERROR_MSG = 'date input error.'
NETWORK_URL_ERROR_MSG = '获取失败,请检查网络和URL'
DATE_CHK_MSG = '年度输入错误:请输入1989年以后的年份数字,格式:YYYY'
DATE_CHK_Q_MSG = '季度输入错误:请输入1、2、3或4数字'
TOP_PARAS_MSG = 'top有误,请输入整数或all.'
LHB_MSG = '周期输入有误,请输入数字5、10、30或60'
import sys
PY3 = (sys.version_info[0] >= 3)
def _write_head():
    """Print the data-fetch progress header (no trailing newline)."""
    sys.stdout.write(DATA_GETTING_TIPS)
    sys.stdout.flush()
def _write_console():
    """Print one progress tick (no trailing newline)."""
    sys.stdout.write(DATA_GETTING_FLAG)
    sys.stdout.flush()
def _write_tips(tip):
    """Print how many rows of data were found (interpolated into the template)."""
    sys.stdout.write(DATA_ROWS_TIPS%tip)
    sys.stdout.flush()
def _write_msg(msg):
sys.stdout.write(msg)
sys.stdout.flush()
def _check_input(year, quarter):
    """Validate a (year, quarter) pair; raise TypeError on bad input.

    Years must be numeric and >= 1989; quarters must be one of 1-4.
    Returns True when both are acceptable.
    """
    if isinstance(year, str) or year < 1989:
        raise TypeError(DATE_CHK_MSG)
    if quarter is None or isinstance(quarter, str) or quarter not in [1, 2, 3, 4]:
        raise TypeError(DATE_CHK_Q_MSG)
    return True
def _check_lhb_input(last):
    """Validate the top-list period; raise TypeError unless 5, 10, 30 or 60."""
    if last in (5, 10, 30, 60):
        return True
    raise TypeError(LHB_MSG)
f3r/scikit-learn | benchmarks/bench_plot_randomized_svd.py | 38 | 17557 | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policy for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varing datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be dowloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
    """Load a CIFAR-style pickle file and return its 'data' payload."""
    with open(file_name, 'rb') as handle:
        batch = pickle.load(handle, encoding='latin1')
    return batch["data"]
def handle_missing_dataset(file_folder):
    """Return 0 (skip marker) when *file_folder* is absent, else None.

    NOTE(review): callers compare the result against both 0 and "skip";
    only the comparison against 0 can ever match this return value.
    """
    if os.path.isdir(file_folder):
        return None
    print("%s file folder not found. Test skipped." % file_folder)
    return 0
def get_data(dataset_name):
    """Load (or synthesize) the dataset named *dataset_name* and return X.

    Returns None when a manually-downloaded dataset folder is missing.

    Bug fix: the CIFAR branch compared handle_missing_dataset()'s result
    against the string "skip", but that helper returns 0 for a missing
    folder -- the guard could never trigger and the unpickle calls would
    crash.  Both guards now test for 0, matching the SVHN branch.
    """
    print("Getting dataset: %s" % dataset_name)
    if dataset_name == 'lfw_people':
        X = fetch_lfw_people().data
    elif dataset_name == '20newsgroups':
        X = fetch_20newsgroups_vectorized().data[:, :100000]
    elif dataset_name == 'olivetti_faces':
        X = fetch_olivetti_faces().data
    elif dataset_name == 'rcv1':
        X = fetch_rcv1().data
    elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
            return
        X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
              for i in range(5)]
        X = np.vstack(X1)
        del X1
    elif dataset_name == 'SVHN':
        if handle_missing_dataset(SVHN_FOLDER) == 0:
            return
        # Flatten each 32x32x3 image into a single row vector.
        X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
        X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
        X = np.vstack(X2)
        del X1
        del X2
    elif dataset_name == 'low rank matrix':
        X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
                                 effective_rank=100, tail_strength=.5,
                                 random_state=random_state)
    elif dataset_name == 'uncorrelated matrix':
        X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
                                        random_state=random_state)
    elif dataset_name == 'big sparse matrix':
        # 1e6 nonzeros (normal values, each repeated 10 times) scattered
        # uniformly over a (1e6 x 1e4) CSR matrix.
        sparsity = np.int(1e6)
        size = np.int(1e6)
        small_size = np.int(1e4)
        data = np.random.normal(0, 1, np.int(sparsity/10))
        data = np.repeat(data, 10)
        row = np.random.uniform(0, small_size, sparsity)
        col = np.random.uniform(0, small_size, sparsity)
        X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
        del data
        del row
        del col
    else:
        # Fall back to mldata.org by name.
        X = fetch_mldata(dataset_name).data
    return X
def plot_time_vs_s(time, norm, point_labels, title):
    """Line-plot norm discrepancy vs running time, one series per method."""
    plt.figure()
    colors = ['g', 'b', 'y']
    for i, l in enumerate(sorted(norm.keys())):
        # `is not` on a string literal is an identity check that only works
        # by accident (CPython interning) and raises a SyntaxWarning on
        # modern Pythons; use != for string comparison.
        if l != "fbpca":
            plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
        else:
            plt.plot(time[l], norm[l], label=l, marker='^', c='red')
        for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
            plt.annotate(label, xy=(x, y), xytext=(0, -20),
                         textcoords='offset points', ha='right', va='bottom')
    plt.legend(loc="upper right")
    plt.suptitle(title)
    plt.ylabel("norm discrepancy")
    plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
    """Scatter-plot norm discrepancy vs running time, annotating each point
    with its dataset name."""
    plt.figure()
    size = 100
    for i, l in enumerate(sorted(norm.keys())):
        # String comparison must use !=, not `is not` (identity check that
        # only works by accident via string interning).
        if l != "fbpca":
            plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
            for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
                plt.annotate(label, xy=(x, y), xytext=(0, -80),
                             textcoords='offset points', ha='right',
                             arrowprops=dict(arrowstyle="->",
                                             connectionstyle="arc3"),
                             va='bottom', size=11, rotation=90)
        else:
            plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
            for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
                plt.annotate(label, xy=(x, y), xytext=(0, 30),
                             textcoords='offset points', ha='right',
                             arrowprops=dict(arrowstyle="->",
                                             connectionstyle="arc3"),
                             va='bottom', size=11, rotation=90)
    plt.legend(loc="best")
    plt.suptitle(title)
    plt.ylabel("norm discrepancy")
    plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
    """Plot norm discrepancy as a function of the number of power iterations,
    one line per labelled configuration."""
    plt.figure()
    for series_label in sorted(s.keys()):
        plt.plot(power_iter, s[series_label], label=series_label, marker='o')
    plt.legend(loc="lower right", prop={'size': 10})
    plt.suptitle(title)
    plt.ylabel("norm discrepancy")
    plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
               power_iteration_normalizer='auto', method=None):
    """
    Run one randomized SVD (sklearn's randomized_svd, or fbpca.pca when
    method == 'fbpca') and measure the wall-clock time.

    Returns (U, s, V, elapsed_seconds).
    """
    print("... running SVD ...")
    # `is not` compares identity, not equality; use != for string literals.
    if method != 'fbpca':
        gc.collect()
        t0 = time()
        U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
                                  power_iteration_normalizer,
                                  random_state=random_state, transpose=False)
        call_time = time() - t0
    else:
        gc.collect()
        t0 = time()
        # fbpca's l is the total number of random projections, i.e.
        # oversamples + components (different convention from sklearn).
        U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
                             l=n_oversamples+n_comps)
        call_time = time() - t0
    return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
    """
    Compute the norm diff with the original matrix, when randomized
    SVD is called with *params.
    norm: 2 => spectral; 'fro' => Frobenius
    """
    if msg:
        print("... computing %s norm ..." % norm)
    if norm == 2:
        # The largest singular value equals the spectral norm; svds is much
        # faster than a dense sp.linalg.norm(A, ord=2).
        return sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
    if sp.sparse.issparse(A):
        return sp.sparse.linalg.norm(A, ord=norm)
    return sp.linalg.norm(A, ord=norm)
def scalable_frobenius_norm_discrepancy(X, U, s, V):
    """Frobenius norm of X - U @ diag(s) @ V, computed by row batches when
    the full residual would not fit in memory."""
    # if the input is not too big, just call scipy
    if X.shape[0] * X.shape[1] < MAX_MEMORY:
        A = X - U.dot(np.diag(s).dot(V))
        return norm_diff(A, norm='fro')
    print("... computing fro norm by batches...")
    batch_size = 1000
    Vhat = np.diag(s).dot(V)
    cum_norm = .0
    for batch in gen_batches(X.shape[0], batch_size):
        M = X[batch, :] - U[batch, :].dot(Vhat)
        # ||A||_F^2 equals the sum of the SQUARED Frobenius norms of its
        # row blocks; the original summed the un-squared batch norms, which
        # overestimates the total norm.
        cum_norm += norm_diff(M, norm='fro', msg=False) ** 2
    return np.sqrt(cum_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
    """Benchmark sklearn (each power-iteration normalizer) and fbpca on X
    across the given n_iter values, then plot discrepancy vs time."""
    all_time = defaultdict(list)
    if enable_spectral_norm:
        all_spectral = defaultdict(list)
        X_spectral_norm = norm_diff(X, norm=2, msg=False)
    all_frobenius = defaultdict(list)
    X_fro_norm = norm_diff(X, norm='fro', msg=False)
    for pi in power_iter:
        for pm in ['none', 'LU', 'QR']:
            print("n_iter = %d on sklearn - %s" % (pi, pm))
            # Named `elapsed`, not `time`: the original shadowed the
            # imported time() function.
            U, s, V, elapsed = svd_timing(X, n_comps, n_iter=pi,
                                          power_iteration_normalizer=pm,
                                          n_oversamples=n_oversamples)
            label = "sklearn - %s" % pm
            all_time[label].append(elapsed)
            if enable_spectral_norm:
                A = U.dot(np.diag(s).dot(V))
                all_spectral[label].append(norm_diff(X - A, norm=2) /
                                           X_spectral_norm)
            f = scalable_frobenius_norm_discrepancy(X, U, s, V)
            all_frobenius[label].append(f / X_fro_norm)
        if fbpca_available:
            # Original log message had a typo ("fbca").
            print("n_iter = %d on fbpca" % (pi))
            # power_iteration_normalizer is ignored on the fbpca path; pm
            # still holds 'QR' from the loop above.
            U, s, V, elapsed = svd_timing(X, n_comps, n_iter=pi,
                                          power_iteration_normalizer=pm,
                                          n_oversamples=n_oversamples,
                                          method='fbpca')
            label = "fbpca"
            all_time[label].append(elapsed)
            if enable_spectral_norm:
                A = U.dot(np.diag(s).dot(V))
                all_spectral[label].append(norm_diff(X - A, norm=2) /
                                           X_spectral_norm)
            f = scalable_frobenius_norm_discrepancy(X, U, s, V)
            all_frobenius[label].append(f / X_fro_norm)
    if enable_spectral_norm:
        title = "%s: spectral norm diff vs running time" % (dataset_name)
        plot_time_vs_s(all_time, all_spectral, power_iter, title)
    title = "%s: Frobenius norm diff vs running time" % (dataset_name)
    plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
    """Benchmark the effect of n_iter on simulated low-rank matrices of
    varying effective rank and component counts."""
    n_samples, n_features = 1000, 10000
    data_params = {'n_samples': n_samples, 'n_features': n_features,
                   'tail_strength': .7, 'random_state': random_state}
    dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
    ranks = [10, 50, 100]
    if enable_spectral_norm:
        all_spectral = defaultdict(list)
    all_frobenius = defaultdict(list)
    for rank in ranks:
        X = make_low_rank_matrix(effective_rank=rank, **data_params)
        if enable_spectral_norm:
            X_spectral_norm = norm_diff(X, norm=2, msg=False)
        X_fro_norm = norm_diff(X, norm='fro', msg=False)
        # np.int was removed in NumPy 1.24; builtin int behaves the same.
        for n_comp in [int(rank / 2), rank, rank * 2]:
            label = "rank=%d, n_comp=%d" % (rank, n_comp)
            print(label)
            for pi in power_list:
                U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
                                        power_iteration_normalizer='LU')
                if enable_spectral_norm:
                    A = U.dot(np.diag(s).dot(V))
                    all_spectral[label].append(norm_diff(X - A, norm=2) /
                                               X_spectral_norm)
                f = scalable_frobenius_norm_discrepancy(X, U, s, V)
                all_frobenius[label].append(f / X_fro_norm)
    if enable_spectral_norm:
        title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        # Plot against the n_iter values actually benchmarked (power_list);
        # the original referenced the module-level `power_iter` instead.
        plot_power_iter_vs_s(power_list, all_spectral, title)
    title = "%s: frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
    """Benchmark sklearn vs fbpca default configurations on the given
    datasets and scatter-plot discrepancy vs time."""
    all_time = defaultdict(list)
    if enable_spectral_norm:
        all_spectral = defaultdict(list)
    all_frobenius = defaultdict(list)
    for dataset_name in datasets:
        X = get_data(dataset_name)
        if X is None:
            continue
        if enable_spectral_norm:
            X_spectral_norm = norm_diff(X, norm=2, msg=False)
        X_fro_norm = norm_diff(X, norm='fro', msg=False)
        # Clamp per dataset WITHOUT clobbering the n_comps parameter: the
        # original reassigned n_comps, so one small dataset permanently
        # shrank the component count for every later dataset.
        n_comps_eff = np.minimum(n_comps, np.min(X.shape))
        label = "sklearn"
        print("%s %d x %d - %s" %
              (dataset_name, X.shape[0], X.shape[1], label))
        # `elapsed` avoids shadowing the imported time() function.
        U, s, V, elapsed = svd_timing(X, n_comps_eff, n_iter=2,
                                      n_oversamples=10, method=label)
        all_time[label].append(elapsed)
        if enable_spectral_norm:
            A = U.dot(np.diag(s).dot(V))
            all_spectral[label].append(norm_diff(X - A, norm=2) /
                                       X_spectral_norm)
        f = scalable_frobenius_norm_discrepancy(X, U, s, V)
        all_frobenius[label].append(f / X_fro_norm)
        if fbpca_available:
            label = "fbpca"
            print("%s %d x %d - %s" %
                  (dataset_name, X.shape[0], X.shape[1], label))
            U, s, V, elapsed = svd_timing(X, n_comps_eff, n_iter=2,
                                          n_oversamples=2, method=label)
            all_time[label].append(elapsed)
            if enable_spectral_norm:
                A = U.dot(np.diag(s).dot(V))
                all_spectral[label].append(norm_diff(X - A, norm=2) /
                                           X_spectral_norm)
            f = scalable_frobenius_norm_discrepancy(X, U, s, V)
            all_frobenius[label].append(f / X_fro_norm)
    if len(all_time) == 0:
        raise ValueError("No tests ran. Aborting.")
    if enable_spectral_norm:
        title = "normalized spectral norm diff vs running time"
        scatter_time_vs_s(all_time, all_spectral, datasets, title)
    title = "normalized Frobenius norm diff vs running time"
    scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
    # Fixed seed so repeated benchmark runs are comparable.
    random_state = check_random_state(1234)
    # n_iter values 0..6 for the power-iteration benchmarks.
    power_iter = np.linspace(0, 6, 7, dtype=int)
    n_comps = 50
    # Bench A: every dataset, varying n_iter and normalizer.
    for dataset_name in datasets:
        X = get_data(dataset_name)
        if X is None:
            # Locally-stored dataset missing on disk; skip it.
            continue
        print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
              (dataset_name, X.shape[0], X.shape[1]))
        bench_a(X, dataset_name, power_iter, n_oversamples=2,
                n_comps=np.minimum(n_comps, np.min(X.shape)))
    # Bench B: synthetic low-rank matrices with varying effective rank.
    print(" >>>>>> Benching on simulated low rank matrix with variable rank")
    bench_b(power_iter)
    # Bench C: default configurations, including the big sparse datasets.
    print(" >>>>>> Benching sklearn and fbpca default configurations")
    bench_c(datasets + big_sparse_datasets, n_comps)
    plt.show()
| bsd-3-clause | -7,443,027,630,697,702,000 | 37.586813 | 79 | 0.586319 | false |
Serag8/Bachelor | google_appengine/google/appengine/ext/ndb/django_middleware.py | 20 | 2158 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Django middleware for NDB."""
__author__ = 'James A. Morrison'
from . import eventloop, tasklets
class NdbDjangoMiddleware(object):
  """Django middleware for NDB.

  To use NDB with django, add
    'ndb.NdbDjangoMiddleware',
  to the MIDDLEWARE_CLASSES entry in your Django settings.py file.
  Or, if you are using the ndb version from the SDK, use
    'google.appengine.ext.ndb.NdbDjangoMiddleware',

  It's best to insert it in front of any other middleware classes,
  since some other middleware may make datastore calls and those won't be
  handled properly if that middleware is invoked before this middleware.

  See http://docs.djangoproject.com/en/dev/topics/http/middleware/.
  """

  def process_request(self, unused_request):
    """Called by Django before deciding which view to execute."""
    # Compare to the first half of toplevel() in context.py.
    # Discard tasklets left over from a previous (possibly failed) request
    # so they cannot leak into this one.
    tasklets._state.clear_all_pending()
    # Create and install a new context.
    ctx = tasklets.make_default_context()
    tasklets.set_context(ctx)

  @staticmethod
  def _finish():
    """Uninstall the context, flush pending work and drain the event loop.

    Shared teardown for both the response and the exception hooks.
    """
    # Compare to the finally clause in toplevel() in context.py.
    # Order matters: clear the context first, then flush its batched
    # operations and run the event loop until everything completes.
    ctx = tasklets.get_context()
    tasklets.set_context(None)
    ctx.flush().check_success()
    eventloop.run()  # Ensure writes are flushed, etc.

  def process_response(self, request, response):
    """Called by Django just before returning a response."""
    self._finish()
    return response

  def process_exception(self, unused_request, unused_exception):
    """Called by Django when a view raises an exception."""
    self._finish()
    # Returning None lets Django's default exception handling proceed.
    return None
| mit | -5,888,150,266,021,499,000 | 32.2 | 74 | 0.722428 | false |
achang97/YouTunes | lib/python2.7/site-packages/youtube_dl/extractor/ard.py | 8 | 12072 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
qualities,
int_or_none,
parse_duration,
unified_strdate,
xpath_text,
update_url_query,
)
from ..compat import compat_etree_fromstring
class ARDMediathekIE(InfoExtractor):
IE_NAME = 'ARD:mediathek'
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.(?:daserste|rbb-online)\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
_TESTS = [{
'url': 'http://www.ardmediathek.de/tv/Dokumentation-und-Reportage/Ich-liebe-das-Leben-trotzdem/rbb-Fernsehen/Video?documentId=29582122&bcastId=3822114',
'info_dict': {
'id': '29582122',
'ext': 'mp4',
'title': 'Ich liebe das Leben trotzdem',
'description': 'md5:45e4c225c72b27993314b31a84a5261c',
'duration': 4557,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video/Das-Erste/Video?documentId=29522730&bcastId=602916',
'md5': 'f4d98b10759ac06c0072bbcd1f0b9e3e',
'info_dict': {
'id': '29522730',
'ext': 'mp4',
'title': 'Tatort: Scheinwelten - Hörfassung (Video tgl. ab 20 Uhr)',
'description': 'md5:196392e79876d0ac94c94e8cdb2875f1',
'duration': 5252,
},
'skip': 'HTTP Error 404: Not Found',
}, {
# audio
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
'md5': '219d94d8980b4f538c7fcb0865eb7f2c',
'info_dict': {
'id': '28488308',
'ext': 'mp3',
'title': 'Tod eines Fußballers',
'description': 'md5:f6e39f3461f0e1f54bfa48c8875c86ef',
'duration': 3240,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
'only_matching': True,
}, {
# audio
'url': 'http://mediathek.rbb-online.de/radio/Hörspiel/Vor-dem-Fest/kulturradio/Audio?documentId=30796318&topRessort=radio&bcastId=9839158',
'md5': '4e8f00631aac0395fee17368ac0e9867',
'info_dict': {
'id': '30796318',
'ext': 'mp3',
'title': 'Vor dem Fest',
'description': 'md5:c0c1c8048514deaed2a73b3a60eecacb',
'duration': 3287,
},
'skip': 'Video is no longer available',
}]
def _extract_media_info(self, media_info_url, webpage, video_id):
media_info = self._download_json(
media_info_url, video_id, 'Downloading media JSON')
formats = self._extract_formats(media_info, video_id)
if not formats:
if '"fsk"' in webpage:
raise ExtractorError(
'This video is only available after 20:00', expected=True)
elif media_info.get('_geoblocked'):
raise ExtractorError('This video is not available due to geo restriction', expected=True)
self._sort_formats(formats)
duration = int_or_none(media_info.get('_duration'))
thumbnail = media_info.get('_previewImage')
is_live = media_info.get('_isLive') is True
subtitles = {}
subtitle_url = media_info.get('_subtitleUrl')
if subtitle_url:
subtitles['de'] = [{
'ext': 'ttml',
'url': subtitle_url,
}]
return {
'id': video_id,
'duration': duration,
'thumbnail': thumbnail,
'is_live': is_live,
'formats': formats,
'subtitles': subtitles,
}
def _extract_formats(self, media_info, video_id):
type_ = media_info.get('_type')
media_array = media_info.get('_mediaArray', [])
formats = []
for num, media in enumerate(media_array):
for stream in media.get('_mediaStreamArray', []):
stream_urls = stream.get('_stream')
if not stream_urls:
continue
if not isinstance(stream_urls, list):
stream_urls = [stream_urls]
quality = stream.get('_quality')
server = stream.get('_server')
for stream_url in stream_urls:
ext = determine_ext(stream_url)
if quality != 'auto' and ext in ('f4m', 'm3u8'):
continue
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(stream_url, {
'hdcore': '3.1.1',
'plugin': 'aasp-3.1.1.69.124'
}),
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
if server and server.startswith('rtmp'):
f = {
'url': server,
'play_path': stream_url,
'format_id': 'a%s-rtmp-%s' % (num, quality),
}
elif stream_url.startswith('http'):
f = {
'url': stream_url,
'format_id': 'a%s-%s-%s' % (num, ext, quality)
}
else:
continue
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
if type_ == 'audio':
f['vcodec'] = 'none'
formats.append(f)
return formats
def _real_extract(self, url):
# determine video id from url
m = re.match(self._VALID_URL, url)
document_id = None
numid = re.search(r'documentId=([0-9]+)', url)
if numid:
document_id = video_id = numid.group(1)
else:
video_id = m.group('video_id')
webpage = self._download_webpage(url, video_id)
ERRORS = (
('>Leider liegt eine Störung vor.', 'Video %s is unavailable'),
('>Der gewünschte Beitrag ist nicht mehr verfügbar.<',
'Video %s is no longer available'),
)
for pattern, message in ERRORS:
if pattern in webpage:
raise ExtractorError(message % video_id, expected=True)
if re.search(r'[\?&]rss($|[=&])', url):
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return GenericIE()._extract_rss(url, video_id, doc)
title = self._html_search_regex(
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
r'<meta name="dcterms.title" content="(.*?)"/>',
r'<h4 class="headline">(.*?)</h4>'],
webpage, 'title')
description = self._html_search_meta(
'dcterms.abstract', webpage, 'description', default=None)
if description is None:
description = self._html_search_meta(
'description', webpage, 'meta description')
# Thumbnail is sometimes not present.
# It is in the mobile version, but that seems to use a different URL
# structure altogether.
thumbnail = self._og_search_thumbnail(webpage, default=None)
media_streams = re.findall(r'''(?x)
mediaCollection\.addMediaStream\([0-9]+,\s*[0-9]+,\s*"[^"]*",\s*
"([^"]+)"''', webpage)
if media_streams:
QUALITIES = qualities(['lo', 'hi', 'hq'])
formats = []
for furl in set(media_streams):
if furl.endswith('.f4m'):
fid = 'f4m'
else:
fid_m = re.match(r'.*\.([^.]+)\.[^.]+$', furl)
fid = fid_m.group(1) if fid_m else None
formats.append({
'quality': QUALITIES(fid),
'format_id': fid,
'url': furl,
})
self._sort_formats(formats)
info = {
'formats': formats,
}
else: # request JSON file
if not document_id:
video_id = self._search_regex(
r'/play/(?:config|media)/(\d+)', webpage, 'media id')
info = self._extract_media_info(
'http://www.ardmediathek.de/play/media/%s' % video_id,
webpage, video_id)
info.update({
'id': video_id,
'title': self._live_title(title) if info.get('is_live') else title,
'description': description,
'thumbnail': thumbnail,
})
return info
class ARDIE(InfoExtractor):
    """Extractor for classic video pages on daserste.de; all metadata comes
    from a '~playerXml.xml' document located next to the page URL."""
    _VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
    _TEST = {
        'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
        'md5': 'd216c3a86493f9322545e045ddc3eb35',
        'info_dict': {
            'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge',
            'id': '100',
            'ext': 'mp4',
            'duration': 2600,
            'title': 'Die Story im Ersten: Mission unter falscher Flagge',
            'upload_date': '20140804',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'skip': 'HTTP Error 404: Not Found',
    }
    def _real_extract(self, url):
        """Fetch the player XML next to the page URL and build the info dict."""
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')
        # The player metadata XML lives right next to the HTML page.
        player_url = mobj.group('mainurl') + '~playerXml.xml'
        doc = self._download_xml(player_url, display_id)
        video_node = doc.find('./video')
        upload_date = unified_strdate(xpath_text(
            video_node, './broadcastDate'))
        thumbnail = xpath_text(video_node, './/teaserImage//variant/url')
        formats = []
        for a in video_node.findall('.//asset'):
            f = {
                'format_id': a.attrib['type'],
                'width': int_or_none(a.find('./frameWidth').text),
                'height': int_or_none(a.find('./frameHeight').text),
                'vbr': int_or_none(a.find('./bitrateVideo').text),
                'abr': int_or_none(a.find('./bitrateAudio').text),
                'vcodec': a.find('./codecVideo').text,
                'tbr': int_or_none(a.find('./totalBitrate').text),
            }
            # A non-empty serverPrefix indicates an RTMP-style server +
            # play-path pair; otherwise fileName is a direct URL.
            if a.find('./serverPrefix').text:
                f['url'] = a.find('./serverPrefix').text
                f['playpath'] = a.find('./fileName').text
            else:
                f['url'] = a.find('./fileName').text
            formats.append(f)
        self._sort_formats(formats)
        return {
            'id': mobj.group('id'),
            'formats': formats,
            'display_id': display_id,
            'title': video_node.find('./title').text,
            'duration': parse_duration(video_node.find('./duration').text),
            'upload_date': upload_date,
            'thumbnail': thumbnail,
        }
| mit | 9,169,110,217,642,017,000 | 38.302932 | 160 | 0.490966 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.