repo_name | path | copies | size | content | license
---|---|---|---|---|---|
hachard/Cra-Magnet | flask/lib/python3.5/site-packages/migrate/changeset/schema.py | 66 | 24237 |
"""
Schema module providing common schema operations.
"""
import abc
try: # Python 3
from collections import MutableMapping as DictMixin
except ImportError: # Python 2
from UserDict import DictMixin
import warnings
import six
import sqlalchemy
from sqlalchemy.schema import ForeignKeyConstraint
from sqlalchemy.schema import UniqueConstraint
from migrate.exceptions import *
from migrate.changeset import SQLA_07, SQLA_08
from migrate.changeset import util
from migrate.changeset.databases.visitor import (get_engine_visitor,
run_single_visitor)
__all__ = [
'create_column',
'drop_column',
'alter_column',
'rename_table',
'rename_index',
'ChangesetTable',
'ChangesetColumn',
'ChangesetIndex',
'ChangesetDefaultClause',
'ColumnDelta',
]
def create_column(column, table=None, *p, **kw):
"""Create a column, given the table.
API to :meth:`ChangesetColumn.create`.
"""
if table is not None:
return table.create_column(column, *p, **kw)
return column.create(*p, **kw)
def drop_column(column, table=None, *p, **kw):
"""Drop a column, given the table.
API to :meth:`ChangesetColumn.drop`.
"""
if table is not None:
return table.drop_column(column, *p, **kw)
return column.drop(*p, **kw)
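# Illustrative usage sketch, not part of the original module; the engine URL,
# table and column names below are hypothetical. Both helpers operate on a
# bound, reflected table:
#
#     from sqlalchemy import Column, Integer, MetaData, Table, create_engine
#     engine = create_engine('sqlite:///example.db')
#     meta = MetaData(bind=engine)
#     account = Table('account', meta, autoload=True)
#     create_column(Column('login_count', Integer), table=account)
#     drop_column('login_count', table=account)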
def rename_table(table, name, engine=None, **kw):
"""Rename a table.
If Table instance is given, engine is not used.
API to :meth:`ChangesetTable.rename`.
:param table: Table to be renamed.
:param name: New name for Table.
:param engine: Engine instance.
:type table: string or Table instance
:type name: string
:type engine: obj
"""
table = _to_table(table, engine)
table.rename(name, **kw)
def rename_index(index, name, table=None, engine=None, **kw):
"""Rename an index.
If Index instance is given,
table and engine are not used.
API to :meth:`ChangesetIndex.rename`.
:param index: Index to be renamed.
:param name: New name for index.
    :param table: Table to which the Index refers.
:param engine: Engine instance.
:type index: string or Index instance
:type name: string
:type table: string or Table instance
:type engine: obj
"""
index = _to_index(index, table, engine)
index.rename(name, **kw)
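# Illustrative sketch, not part of the original module; the table, index and
# engine names are hypothetical:
#
#     rename_table('account', 'users', engine=engine)
#     rename_index('ix_account_login', 'ix_users_login',
#                  table='users', engine=engine)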
def alter_column(*p, **k):
"""Alter a column.
This is a helper function that creates a :class:`ColumnDelta` and
runs it.
:argument column:
The name of the column to be altered or a
:class:`ChangesetColumn` column representing it.
:param table:
      A :class:`~sqlalchemy.schema.Table` or table name
for the table where the column will be changed.
:param engine:
The :class:`~sqlalchemy.engine.base.Engine` to use for table
reflection and schema alterations.
:returns: A :class:`ColumnDelta` instance representing the change.
"""
if 'table' not in k and isinstance(p[0], sqlalchemy.Column):
k['table'] = p[0].table
if 'engine' not in k:
k['engine'] = k['table'].bind
# deprecation
if len(p) >= 2 and isinstance(p[1], sqlalchemy.Column):
warnings.warn(
"Passing a Column object to alter_column is deprecated."
" Just pass in keyword parameters instead.",
MigrateDeprecationWarning
)
engine = k['engine']
# enough tests seem to break when metadata is always altered
# that this crutch has to be left in until they can be sorted
# out
k['alter_metadata']=True
delta = ColumnDelta(*p, **k)
visitorcallable = get_engine_visitor(engine, 'schemachanger')
engine._run_visitor(visitorcallable, delta)
return delta
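# Illustrative sketch, not part of the original module. Assuming the bound,
# hypothetical ``account`` table from the sketch above, a column can be renamed
# and retyped in one call:
#
#     from sqlalchemy import String
#     alter_column('login', name='username', type=String(80), table=account)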
def _to_table(table, engine=None):
"""Return if instance of Table, else construct new with metadata"""
if isinstance(table, sqlalchemy.Table):
return table
# Given: table name, maybe an engine
meta = sqlalchemy.MetaData()
if engine is not None:
meta.bind = engine
return sqlalchemy.Table(table, meta)
def _to_index(index, table=None, engine=None):
"""Return if instance of Index, else construct new with metadata"""
if isinstance(index, sqlalchemy.Index):
return index
# Given: index name; table name required
table = _to_table(table, engine)
ret = sqlalchemy.Index(index)
ret.table = table
return ret
# Python3: if we just use:
#
# class ColumnDelta(DictMixin, sqlalchemy.schema.SchemaItem):
# ...
#
# We get the following error:
# TypeError: metaclass conflict: the metaclass of a derived class must be a
# (non-strict) subclass of the metaclasses of all its bases.
#
# The complete inheritance/metaclass relationship list of ColumnDelta can be
# summarized by this following dot file:
#
# digraph test123 {
# ColumnDelta -> MutableMapping;
# MutableMapping -> Mapping;
# Mapping -> {Sized Iterable Container};
# {Sized Iterable Container} -> ABCMeta[style=dashed];
#
# ColumnDelta -> SchemaItem;
# SchemaItem -> {SchemaEventTarget Visitable};
# SchemaEventTarget -> object;
# Visitable -> {VisitableType object} [style=dashed];
# VisitableType -> type;
# }
#
# We need to use a metaclass that inherits from all the metaclasses of
# DictMixin and sqlalchemy.schema.SchemaItem. Let's call it "MyMeta".
class MyMeta(sqlalchemy.sql.visitors.VisitableType, abc.ABCMeta, object):
pass
class ColumnDelta(six.with_metaclass(MyMeta, DictMixin, sqlalchemy.schema.SchemaItem)):
"""Extracts the differences between two columns/column-parameters
May receive parameters arranged in several different ways:
* **current_column, new_column, \*p, \*\*kw**
Additional parameters can be specified to override column
differences.
* **current_column, \*p, \*\*kw**
Additional parameters alter current_column. Table name is extracted
from current_column object.
Name is changed to current_column.name from current_name,
if current_name is specified.
* **current_col_name, \*p, \*\*kw**
            Table kw must be specified.
    :param table: Table to which the current Column should be bound.\
    If a table name is given, reflection will be used.
:type table: string or Table instance
:param metadata: A :class:`MetaData` instance to store
reflected table names
:param engine: When reflecting tables, either engine or metadata must \
be specified to acquire engine object.
:type engine: :class:`Engine` instance
    :returns: :class:`ColumnDelta` instance, which provides a dict-like interface to \
    the altered attributes of `result_column`.
* :class:`ColumnDelta`.result_column is altered column with new attributes
* :class:`ColumnDelta`.current_name is current name of column in db
"""
# Column attributes that can be altered
diff_keys = ('name', 'type', 'primary_key', 'nullable',
'server_onupdate', 'server_default', 'autoincrement')
diffs = dict()
__visit_name__ = 'column'
def __init__(self, *p, **kw):
# 'alter_metadata' is not a public api. It exists purely
# as a crutch until the tests that fail when 'alter_metadata'
# behaviour always happens can be sorted out
self.alter_metadata = kw.pop("alter_metadata", False)
self.meta = kw.pop("metadata", None)
self.engine = kw.pop("engine", None)
# Things are initialized differently depending on how many column
# parameters are given. Figure out how many and call the appropriate
# method.
if len(p) >= 1 and isinstance(p[0], sqlalchemy.Column):
# At least one column specified
if len(p) >= 2 and isinstance(p[1], sqlalchemy.Column):
# Two columns specified
diffs = self.compare_2_columns(*p, **kw)
else:
# Exactly one column specified
diffs = self.compare_1_column(*p, **kw)
else:
# Zero columns specified
if not len(p) or not isinstance(p[0], six.string_types):
raise ValueError("First argument must be column name")
diffs = self.compare_parameters(*p, **kw)
self.apply_diffs(diffs)
def __repr__(self):
return '<ColumnDelta altermetadata=%r, %s>' % (
self.alter_metadata,
super(ColumnDelta, self).__repr__()
)
def __getitem__(self, key):
if key not in self.keys():
raise KeyError("No such diff key, available: %s" % self.diffs )
return getattr(self.result_column, key)
def __setitem__(self, key, value):
if key not in self.keys():
raise KeyError("No such diff key, available: %s" % self.diffs )
setattr(self.result_column, key, value)
def __delitem__(self, key):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def keys(self):
return self.diffs.keys()
def compare_parameters(self, current_name, *p, **k):
"""Compares Column objects with reflection"""
self.table = k.pop('table')
self.result_column = self._table.c.get(current_name)
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def compare_1_column(self, col, *p, **k):
"""Compares one Column object"""
self.table = k.pop('table', None)
if self.table is None:
self.table = col.table
self.result_column = col
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def compare_2_columns(self, old_col, new_col, *p, **k):
"""Compares two Column objects"""
self.process_column(new_col)
self.table = k.pop('table', None)
# we cannot use bool() on table in SA06
if self.table is None:
self.table = old_col.table
        if self.table is None:
            self.table = new_col.table
self.result_column = old_col
# set differences
# leave out some stuff for later comp
for key in (set(self.diff_keys) - set(('type',))):
val = getattr(new_col, key, None)
if getattr(self.result_column, key, None) != val:
k.setdefault(key, val)
# inspect types
if not self.are_column_types_eq(self.result_column.type, new_col.type):
k.setdefault('type', new_col.type)
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def apply_diffs(self, diffs):
"""Populate dict and column object with new values"""
self.diffs = diffs
for key in self.diff_keys:
if key in diffs:
setattr(self.result_column, key, diffs[key])
self.process_column(self.result_column)
# create an instance of class type if not yet
if 'type' in diffs and callable(self.result_column.type):
self.result_column.type = self.result_column.type()
# add column to the table
if self.table is not None and self.alter_metadata:
self.result_column.add_to_table(self.table)
def are_column_types_eq(self, old_type, new_type):
"""Compares two types to be equal"""
ret = old_type.__class__ == new_type.__class__
# String length is a special case
if ret and isinstance(new_type, sqlalchemy.types.String):
ret = (getattr(old_type, 'length', None) == \
getattr(new_type, 'length', None))
return ret
def _extract_parameters(self, p, k, column):
"""Extracts data from p and modifies diffs"""
p = list(p)
while len(p):
if isinstance(p[0], six.string_types):
k.setdefault('name', p.pop(0))
elif isinstance(p[0], sqlalchemy.types.TypeEngine):
k.setdefault('type', p.pop(0))
elif callable(p[0]):
p[0] = p[0]()
else:
break
if len(p):
new_col = column.copy_fixed()
new_col._init_items(*p)
k = self.compare_2_columns(column, new_col, **k)
return k
def process_column(self, column):
"""Processes default values for column"""
# XXX: this is a snippet from SA processing of positional parameters
toinit = list()
if column.server_default is not None:
if isinstance(column.server_default, sqlalchemy.FetchedValue):
toinit.append(column.server_default)
else:
toinit.append(sqlalchemy.DefaultClause(column.server_default))
if column.server_onupdate is not None:
            if isinstance(column.server_onupdate, sqlalchemy.FetchedValue):
                toinit.append(column.server_onupdate)
else:
toinit.append(sqlalchemy.DefaultClause(column.server_onupdate,
for_update=True))
if toinit:
column._init_items(*toinit)
def _get_table(self):
return getattr(self, '_table', None)
def _set_table(self, table):
if isinstance(table, six.string_types):
if self.alter_metadata:
if not self.meta:
raise ValueError("metadata must be specified for table"
" reflection when using alter_metadata")
meta = self.meta
if self.engine:
meta.bind = self.engine
else:
if not self.engine and not self.meta:
raise ValueError("engine or metadata must be specified"
" to reflect tables")
if not self.engine:
self.engine = self.meta.bind
meta = sqlalchemy.MetaData(bind=self.engine)
self._table = sqlalchemy.Table(table, meta, autoload=True)
elif isinstance(table, sqlalchemy.Table):
self._table = table
if not self.alter_metadata:
self._table.meta = sqlalchemy.MetaData(bind=self._table.bind)
def _get_result_column(self):
return getattr(self, '_result_column', None)
def _set_result_column(self, column):
"""Set Column to Table based on alter_metadata evaluation."""
self.process_column(column)
if not hasattr(self, 'current_name'):
self.current_name = column.name
if self.alter_metadata:
self._result_column = column
else:
self._result_column = column.copy_fixed()
table = property(_get_table, _set_table)
result_column = property(_get_result_column, _set_result_column)
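# Illustrative sketch, not part of the original module: a ColumnDelta can be
# inspected through its dict-like interface. Names are hypothetical and assume
# the bound ``account`` table from the earlier sketches:
#
#     delta = ColumnDelta(account.c.login, name='username', table=account)
#     list(delta.keys())    # ['name']
#     delta['name']         # 'username'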
class ChangesetTable(object):
"""Changeset extensions to SQLAlchemy tables."""
def create_column(self, column, *p, **kw):
"""Creates a column.
The column parameter may be a column definition or the name of
a column in this table.
API to :meth:`ChangesetColumn.create`
:param column: Column to be created
:type column: Column instance or string
"""
if not isinstance(column, sqlalchemy.Column):
# It's a column name
column = getattr(self.c, str(column))
column.create(table=self, *p, **kw)
def drop_column(self, column, *p, **kw):
"""Drop a column, given its name or definition.
API to :meth:`ChangesetColumn.drop`
        :param column: Column to be dropped
:type column: Column instance or string
"""
if not isinstance(column, sqlalchemy.Column):
# It's a column name
try:
column = getattr(self.c, str(column))
except AttributeError:
# That column isn't part of the table. We don't need
# its entire definition to drop the column, just its
# name, so create a dummy column with the same name.
column = sqlalchemy.Column(str(column), sqlalchemy.Integer())
column.drop(table=self, *p, **kw)
def rename(self, name, connection=None, **kwargs):
"""Rename this table.
:param name: New name of the table.
:type name: string
        :param connection: reuse connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
engine = self.bind
self.new_name = name
visitorcallable = get_engine_visitor(engine, 'schemachanger')
run_single_visitor(engine, visitorcallable, self, connection, **kwargs)
# Fix metadata registration
self.name = name
self.deregister()
self._set_parent(self.metadata)
def _meta_key(self):
"""Get the meta key for this table."""
return sqlalchemy.schema._get_table_key(self.name, self.schema)
def deregister(self):
"""Remove this table from its metadata"""
if SQLA_07:
self.metadata._remove_table(self.name, self.schema)
else:
key = self._meta_key()
meta = self.metadata
if key in meta.tables:
del meta.tables[key]
class ChangesetColumn(object):
"""Changeset extensions to SQLAlchemy columns."""
def alter(self, *p, **k):
"""Makes a call to :func:`alter_column` for the column this
method is called on.
"""
if 'table' not in k:
k['table'] = self.table
if 'engine' not in k:
k['engine'] = k['table'].bind
return alter_column(self, *p, **k)
def create(self, table=None, index_name=None, unique_name=None,
primary_key_name=None, populate_default=True, connection=None, **kwargs):
"""Create this column in the database.
Assumes the given table exists. ``ALTER TABLE ADD COLUMN``,
for most databases.
:param table: Table instance to create on.
:param index_name: Creates :class:`ChangesetIndex` on this column.
:param unique_name: Creates :class:\
`~migrate.changeset.constraint.UniqueConstraint` on this column.
:param primary_key_name: Creates :class:\
`~migrate.changeset.constraint.PrimaryKeyConstraint` on this column.
:param populate_default: If True, created column will be \
populated with defaults
        :param connection: reuse connection instead of creating a new one.
:type table: Table instance
:type index_name: string
:type unique_name: string
:type primary_key_name: string
:type populate_default: bool
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
:returns: self
"""
self.populate_default = populate_default
self.index_name = index_name
self.unique_name = unique_name
self.primary_key_name = primary_key_name
for cons in ('index_name', 'unique_name', 'primary_key_name'):
self._check_sanity_constraints(cons)
self.add_to_table(table)
engine = self.table.bind
visitorcallable = get_engine_visitor(engine, 'columngenerator')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
# TODO: reuse existing connection
if self.populate_default and self.default is not None:
stmt = table.update().values({self: engine._execute_default(self.default)})
engine.execute(stmt)
return self
def drop(self, table=None, connection=None, **kwargs):
"""Drop this column from the database, leaving its table intact.
``ALTER TABLE DROP COLUMN``, for most databases.
        :param connection: reuse connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
if table is not None:
self.table = table
engine = self.table.bind
visitorcallable = get_engine_visitor(engine, 'columndropper')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
self.remove_from_table(self.table, unset_table=False)
self.table = None
return self
def add_to_table(self, table):
if table is not None and self.table is None:
if SQLA_07:
table.append_column(self)
else:
self._set_parent(table)
def _col_name_in_constraint(self,cons,name):
return False
def remove_from_table(self, table, unset_table=True):
# TODO: remove primary keys, constraints, etc
if unset_table:
self.table = None
to_drop = set()
for index in table.indexes:
columns = []
for col in index.columns:
if col.name!=self.name:
columns.append(col)
if columns:
index.columns = columns
if SQLA_08:
index.expressions = columns
else:
to_drop.add(index)
table.indexes = table.indexes - to_drop
to_drop = set()
for cons in table.constraints:
# TODO: deal with other types of constraint
if isinstance(cons,(ForeignKeyConstraint,
UniqueConstraint)):
for col_name in cons.columns:
if not isinstance(col_name,six.string_types):
col_name = col_name.name
if self.name==col_name:
to_drop.add(cons)
table.constraints = table.constraints - to_drop
if table.c.contains_column(self):
if SQLA_07:
table._columns.remove(self)
else:
table.c.remove(self)
# TODO: this is fixed in 0.6
def copy_fixed(self, **kw):
"""Create a copy of this ``Column``, with all attributes."""
q = util.safe_quote(self)
return sqlalchemy.Column(self.name, self.type, self.default,
key=self.key,
primary_key=self.primary_key,
nullable=self.nullable,
quote=q,
index=self.index,
unique=self.unique,
onupdate=self.onupdate,
autoincrement=self.autoincrement,
server_default=self.server_default,
server_onupdate=self.server_onupdate,
*[c.copy(**kw) for c in self.constraints])
def _check_sanity_constraints(self, name):
"""Check if constraints names are correct"""
obj = getattr(self, name)
if (getattr(self, name[:-5]) and not obj):
raise InvalidConstraintError("Column.create() accepts index_name,"
" primary_key_name and unique_name to generate constraints")
if not isinstance(obj, six.string_types) and obj is not None:
raise InvalidConstraintError(
"%s argument for column must be constraint name" % name)
class ChangesetIndex(object):
"""Changeset extensions to SQLAlchemy Indexes."""
__visit_name__ = 'index'
def rename(self, name, connection=None, **kwargs):
"""Change the name of an index.
:param name: New name of the Index.
:type name: string
        :param connection: reuse connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
engine = self.table.bind
self.new_name = name
visitorcallable = get_engine_visitor(engine, 'schemachanger')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
self.name = name
class ChangesetDefaultClause(object):
"""Implements comparison between :class:`DefaultClause` instances"""
def __eq__(self, other):
if isinstance(other, self.__class__):
if self.arg == other.arg:
return True
def __ne__(self, other):
return not self.__eq__(other)
| gpl-3.0 |
mvaled/OpenUpgrade | openerp/report/print_fnc.py | 458 | 1318 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
functions = {
'today': lambda x: time.strftime('%d/%m/%Y', time.localtime()).decode('latin1')
}
#
# TODO: call an object internal function too
#
def print_fnc(fnc, arg):
if fnc in functions:
return functions[fnc](arg)
return ''
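# Illustrative sketch, not part of the original module: the 'today' helper
# ignores its argument, and unknown function names fall back to ''.
#
#     print_fnc('today', None)     # e.g. u'25/06/2014'
#     print_fnc('unknown', None)   # ''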
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ohsangjin/git-core | contrib/hg-to-git/hg-to-git.py | 342 | 8074 |
#!/usr/bin/env python
""" hg-to-git.py - A Mercurial to GIT converter
Copyright (C)2007 Stelian Pop <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import os, os.path, sys
import tempfile, pickle, getopt
import re
if sys.hexversion < 0x02030000:
# The behavior of the pickle module changed significantly in 2.3
sys.stderr.write("hg-to-git.py: requires Python 2.3 or later.\n")
sys.exit(1)
# Maps hg version -> git version
hgvers = {}
# List of children for each hg revision
hgchildren = {}
# List of parents for each hg revision
hgparents = {}
# Current branch for each hg revision
hgbranch = {}
# Number of new changesets converted from hg
hgnewcsets = 0
#------------------------------------------------------------------------------
def usage():
print """\
%s: [OPTIONS] <hgprj>
options:
-s, --gitstate=FILE: name of the state to be saved/read
for incrementals
-n, --nrepack=INT: number of changesets that will trigger
a repack (default=0, -1 to deactivate)
-v, --verbose: be verbose
required:
hgprj: name of the HG project to import (directory)
""" % sys.argv[0]
#------------------------------------------------------------------------------
def getgitenv(user, date):
env = ''
elems = re.compile('(.*?)\s+<(.*)>').match(user)
if elems:
env += 'export GIT_AUTHOR_NAME="%s" ;' % elems.group(1)
env += 'export GIT_COMMITTER_NAME="%s" ;' % elems.group(1)
env += 'export GIT_AUTHOR_EMAIL="%s" ;' % elems.group(2)
env += 'export GIT_COMMITTER_EMAIL="%s" ;' % elems.group(2)
else:
env += 'export GIT_AUTHOR_NAME="%s" ;' % user
env += 'export GIT_COMMITTER_NAME="%s" ;' % user
env += 'export GIT_AUTHOR_EMAIL= ;'
env += 'export GIT_COMMITTER_EMAIL= ;'
env += 'export GIT_AUTHOR_DATE="%s" ;' % date
env += 'export GIT_COMMITTER_DATE="%s" ;' % date
return env
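# Illustrative sketch, not part of the original script; the author string and
# date below are hypothetical. getgitenv() builds a shell prefix such as:
#
#   getgitenv('Jane Doe <jane@example.com>', 'Mon Jan 01 12:00:00 2007 +0000')
#   # -> 'export GIT_AUTHOR_NAME="Jane Doe" ;export GIT_COMMITTER_NAME="Jane Doe" ;...'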
#------------------------------------------------------------------------------
state = ''
opt_nrepack = 0
verbose = False
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:n:v', ['gitstate=', 'tempdir=', 'nrepack=', 'verbose'])
for o, a in opts:
if o in ('-s', '--gitstate'):
state = a
state = os.path.abspath(state)
if o in ('-n', '--nrepack'):
opt_nrepack = int(a)
if o in ('-v', '--verbose'):
verbose = True
if len(args) != 1:
raise Exception('params')
except:
usage()
sys.exit(1)
hgprj = args[0]
os.chdir(hgprj)
if state:
if os.path.exists(state):
if verbose:
print 'State does exist, reading'
f = open(state, 'r')
hgvers = pickle.load(f)
else:
print 'State does not exist, first run'
sock = os.popen('hg tip --template "{rev}"')
tip = sock.read()
if sock.close():
sys.exit(1)
if verbose:
print 'tip is', tip
# Calculate the branches
if verbose:
print 'analysing the branches...'
hgchildren["0"] = ()
hgparents["0"] = (None, None)
hgbranch["0"] = "master"
for cset in range(1, int(tip) + 1):
hgchildren[str(cset)] = ()
prnts = os.popen('hg log -r %d --template "{parents}"' % cset).read().strip().split(' ')
prnts = map(lambda x: x[:x.find(':')], prnts)
if prnts[0] != '':
parent = prnts[0].strip()
else:
parent = str(cset - 1)
hgchildren[parent] += ( str(cset), )
if len(prnts) > 1:
mparent = prnts[1].strip()
hgchildren[mparent] += ( str(cset), )
else:
mparent = None
hgparents[str(cset)] = (parent, mparent)
if mparent:
# For merge changesets, take either one, preferably the 'master' branch
if hgbranch[mparent] == 'master':
hgbranch[str(cset)] = 'master'
else:
hgbranch[str(cset)] = hgbranch[parent]
else:
# Normal changesets
# For first children, take the parent branch, for the others create a new branch
if hgchildren[parent][0] == str(cset):
hgbranch[str(cset)] = hgbranch[parent]
else:
hgbranch[str(cset)] = "branch-" + str(cset)
if not hgvers.has_key("0"):
print 'creating repository'
os.system('git init')
# loop through every hg changeset
for cset in range(int(tip) + 1):
# incremental, already seen
if hgvers.has_key(str(cset)):
continue
hgnewcsets += 1
# get info
log_data = os.popen('hg log -r %d --template "{tags}\n{date|date}\n{author}\n"' % cset).readlines()
tag = log_data[0].strip()
date = log_data[1].strip()
user = log_data[2].strip()
parent = hgparents[str(cset)][0]
mparent = hgparents[str(cset)][1]
#get comment
(fdcomment, filecomment) = tempfile.mkstemp()
csetcomment = os.popen('hg log -r %d --template "{desc}"' % cset).read().strip()
os.write(fdcomment, csetcomment)
os.close(fdcomment)
print '-----------------------------------------'
print 'cset:', cset
print 'branch:', hgbranch[str(cset)]
print 'user:', user
print 'date:', date
print 'comment:', csetcomment
if parent:
print 'parent:', parent
if mparent:
print 'mparent:', mparent
if tag:
print 'tag:', tag
print '-----------------------------------------'
# checkout the parent if necessary
if cset != 0:
if hgbranch[str(cset)] == "branch-" + str(cset):
print 'creating new branch', hgbranch[str(cset)]
os.system('git checkout -b %s %s' % (hgbranch[str(cset)], hgvers[parent]))
else:
print 'checking out branch', hgbranch[str(cset)]
os.system('git checkout %s' % hgbranch[str(cset)])
# merge
if mparent:
if hgbranch[parent] == hgbranch[str(cset)]:
otherbranch = hgbranch[mparent]
else:
otherbranch = hgbranch[parent]
print 'merging', otherbranch, 'into', hgbranch[str(cset)]
os.system(getgitenv(user, date) + 'git merge --no-commit -s ours "" %s %s' % (hgbranch[str(cset)], otherbranch))
# remove everything except .git and .hg directories
os.system('find . \( -path "./.hg" -o -path "./.git" \) -prune -o ! -name "." -print | xargs rm -rf')
# repopulate with checked-out files
os.system('hg update -C %d' % cset)
# add new files
os.system('git ls-files -x .hg --others | git update-index --add --stdin')
# delete removed files
os.system('git ls-files -x .hg --deleted | git update-index --remove --stdin')
# commit
os.system(getgitenv(user, date) + 'git commit --allow-empty --allow-empty-message -a -F %s' % filecomment)
os.unlink(filecomment)
# tag
if tag and tag != 'tip':
os.system(getgitenv(user, date) + 'git tag %s' % tag)
# delete branch if not used anymore...
if mparent and len(hgchildren[str(cset)]):
print "Deleting unused branch:", otherbranch
os.system('git branch -d %s' % otherbranch)
# retrieve and record the version
vvv = os.popen('git show --quiet --pretty=format:%H').read()
print 'record', cset, '->', vvv
hgvers[str(cset)] = vvv
if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
os.system('git repack -a -d')
# write the state for incrementals
if state:
if verbose:
print 'Writing state'
f = open(state, 'w')
pickle.dump(hgvers, f)
# vim: et ts=8 sw=4 sts=4
| gpl-2.0 |
minhphung171093/GreenERP | openerp/tools/pdf_utils.py | 72 | 2702 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:[email protected]
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
field names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
return "\xfe\xff" + "".join( [ "\x00"+c for c in f ] )
def extract_keys(lines):
keys = []
for line in lines:
if line.startswith('/V'):
pass #print 'value',line
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
# write_field(out, key+"a", value) # pour copie-carbone sur autres pages
out.write(TAIL)
def extract_keys_from_pdf(filename):
# what about using 'pdftk filename dump_data_fields' and parsing the output ?
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
with open(tmp_file, "r") as ofile:
lines = ofile.readlines()
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
with open(tmp_file, "w") as ofile:
write_fields(ofile, fields)
os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
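# Illustrative sketch, not part of the original module; the file and field
# names are hypothetical and pdftk must be available on the PATH:
#
#     fill_pdf('invoice_form.pdf', 'invoice_filled.pdf',
#              {'customer_name': 'ACME', 'amount': '100.00'})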
def testfill_pdf(infile, outfile):
    keys = extract_keys_from_pdf(infile)
    # write_fields() expects a mapping of field name -> value; fill each field
    # with its own name so the resulting document is easy to inspect
    fields = {}
    for key in keys:
        fields[key] = key
    fill_pdf(infile, outfile, fields)
| gpl-3.0 |
smartdata-x/robots | pylib/Twisted/twisted/web/test/test_static.py | 28 | 56293 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import inspect
import mimetypes
import os
import re
import StringIO
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.server import UnsupportedMethod
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class StaticDataTests(TestCase):
"""
Tests for L{Data}.
"""
def test_headRequest(self):
"""
L{Data.render} returns an empty response body for a I{HEAD} request.
"""
data = static.Data("foo", "bar")
request = DummyRequest([''])
request.method = 'HEAD'
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), "")
d.addCallback(cbRendered)
return d
def test_invalidMethod(self):
"""
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
data = static.Data("foo", "bar")
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(UnsupportedMethod, data.render, request)
class StaticFileTests(TestCase):
"""
Tests for the basic behavior of L{File}.
"""
def _render(self, resource, request):
return _render(resource, request)
def test_invalidMethod(self):
"""
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
request = DummyRequest([''])
request.method = 'POST'
path = FilePath(self.mktemp())
path.setContent("foo")
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
def test_notFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_emptyChild(self):
"""
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
def test_securityViolationNotFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_forbiddenResource(self):
"""
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
"""
base = FilePath(self.mktemp())
base.setContent('')
# Make sure we can delete the file later.
self.addCleanup(base.chmod, 0700)
# Get rid of our own read permission.
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
if platform.isWindows():
test_forbiddenResource.skip = "Cannot remove read permission on Windows"
def test_indexNames(self):
"""
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFile(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
request = DummyRequest(['foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFileDeletedGetChild(self):
"""
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
child = staticFile.getChild("foo.bar", request)
self.assertEqual(child, staticFile.childNotFound)
def test_staticFileDeletedRender(self):
"""
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
request2 = DummyRequest(['foo.bar'])
d = self._render(staticFile, request)
d2 = self._render(staticFile.childNotFound, request2)
def cbRendered2(ignored):
def cbRendered(ignored):
self.assertEqual(''.join(request.written),
''.join(request2.written))
d.addCallback(cbRendered)
return d
d2.addCallback(cbRendered2)
return d2
def test_headRequest(self):
"""
L{static.File.render} returns an empty response body for I{HEAD}
requests.
"""
path = FilePath(self.mktemp())
path.setContent("foo")
file = static.File(path.path)
request = DummyRequest([''])
request.method = 'HEAD'
d = _render(file, request)
def cbRendered(ignored):
self.assertEqual("".join(request.written), "")
d.addCallback(cbRendered)
return d
def test_processors(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(
"from twisted.web.static import Data\n"
"resource = Data('dynamic world','text/plain')\n")
file = static.File(base.path)
file.processors = {'.bar': script.ResourceScript}
request = DummyRequest(["foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'dynamic world')
self.assertEqual(request.outgoingHeaders['content-length'], '13')
d.addCallback(cbRendered)
return d
def test_ignoreExt(self):
"""
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
"""
file = static.File(".")
self.assertEqual(file.ignoredExts, [])
file.ignoreExt(".foo")
file.ignoreExt(".bar")
self.assertEqual(file.ignoredExts, [".foo", ".bar"])
file = static.File(".", ignoredExts=(".bar", ".baz"))
self.assertEqual(file.ignoredExts, [".bar", ".baz"])
def test_ignoredExtensionsIgnored(self):
"""
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent('baz')
base.child('foo.quux').setContent('foobar')
file = static.File(base.path, ignoredExts=(".bar",))
request = DummyRequest(["foo"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
d.addCallback(cbRendered)
return d
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
def makeResourceWithContent(self, content, type=None, encoding=None):
"""
Make a L{static.File} resource that has C{content} for its content.
@param content: The bytes to use as the contents of the resource.
@param type: Optional value for the content type of the resource.
"""
fileName = self.mktemp()
fileObject = open(fileName, 'w')
fileObject.write(content)
fileObject.close()
resource = static.File(fileName)
resource.encoding = encoding
resource.type = type
return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
This returns the subset of C{request.outgoingHeaders} of headers that
start with 'content-'.
"""
contentHeaders = {}
for k, v in request.outgoingHeaders.iteritems():
if k.startswith('content-'):
contentHeaders[k] = v
return contentHeaders
def test_noRangeHeaderGivesNoRangeStaticProducer(self):
"""
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.NoRangeStaticProducer)
def test_noRangeHeaderSets200OK(self):
"""
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(http.OK, request.responseCode)
def test_noRangeHeaderSetsContentHeaders(self):
"""
makeProducer when no Range header is set sets the Content-* headers
for the response.
"""
length = 123
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(
'a'*length, type=contentType, encoding=contentEncoding)
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-length': str(length),
'content-encoding': contentEncoding},
self.contentHeaders(request))
def test_singleRangeGivesSingleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_singleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent('abcdef', type=contentType, encoding=contentEncoding)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-encoding': contentEncoding,
'content-range': 'bytes 1-3/6', 'content-length': '3'},
self.contentHeaders(request))
def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleUnsatisfiableRangeSets416ReqestedRangeNotSatisfiable(self):
"""
        makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests a single
unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '0',
'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_singlePartiallyOverlappingRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=2-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '1',
'content-range': 'bytes 2-2/3'},
self.contentHeaders(request))
def test_multipleRangeGivesMultipleRangeStaticProducer(self):
"""
        makeProducer when the Range header requests multiple byte ranges
returns an instance of MultipleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_mutipleRangeSetsContentHeaders(self):
"""
        makeProducer when the Range header requests multiple satisfiable byte
        ranges sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent(
'abcdefghijkl', encoding='gzip')
producer = resource.makeProducer(request, resource.openForReading())
contentHeaders = self.contentHeaders(request)
# The only content-* headers set are content-type and content-length.
self.assertEqual(
set(['content-length', 'content-type']),
set(contentHeaders.keys()))
# The content-length depends on the boundary used in the response.
expectedLength = 5
for boundary, offset, size in producer.rangeInfo:
expectedLength += len(boundary)
self.assertEqual(expectedLength, contentHeaders['content-length'])
# Content-type should be set to a value indicating a multipart
# response and the boundary used to separate the parts.
self.assertIn('content-type', contentHeaders)
contentType = contentHeaders['content-type']
self.assertNotIdentical(
None, re.match(
'multipart/byteranges; boundary="[^"]*"\Z', contentType))
# Content-encoding is not set in the response to a multiple range
# response, which is a bit wussy but works well enough with the way
# static.File does content-encodings...
self.assertNotIn('content-encoding', contentHeaders)
def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self):
"""
        makeProducer still returns an instance of L{MultipleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleUnsatisfiableRangesSets416ReqestedRangeNotSatisfiable(self):
"""
        makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests multiple ranges,
none of which are satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_multipleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
contentType = "text/plain"
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-length': '0', 'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_oneSatisfiableRangeIsEnough(self):
"""
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,100-200'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
class StaticProducerTests(TestCase):
"""
Tests for the abstract L{StaticProducer}.
"""
def test_stopProducingClosesFile(self):
"""
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(None, fileObject)
producer.stopProducing()
self.assertTrue(fileObject.closed)
def test_stopProducingSetsRequestToNone(self):
"""
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(DummyRequest([]), fileObject)
producer.stopProducing()
self.assertIdentical(None, producer.request)
class NoRangeStaticProducerTests(TestCase):
"""
Tests for L{NoRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{NoRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.NoRangeStaticProducer(None, None))
def test_resumeProducingProducesContent(self):
"""
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual(content, ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'a' * (2*bufferSize + 1)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
expected = [
content[0:bufferSize],
content[bufferSize:2*bufferSize],
content[2*bufferSize:]
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO('abcdef'))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class SingleRangeStaticProducerTests(TestCase):
"""
Tests for L{SingleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{SingleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.SingleRangeStaticProducer(None, None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, 3)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(content[1:4], ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'abc' * bufferSize
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, bufferSize+10)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
content[1:bufferSize+1],
content[bufferSize+1:bufferSize+11],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), 1, 1)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class MultipleRangeStaticProducerTests(TestCase):
"""
Tests for L{MultipleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.MultipleRangeStaticProducer(None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content), [('1', 1, 3), ('2', 5, 1)])
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual('1bcd2f', ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
is less than C{bufferSize} but first boundary plus the first chunk
plus the second boundary is more, but this is unimportant as in
practice the boundaries are fairly small. On the other side, it is
important for performance to bundle up several small chunks into one
call to request.write.
"""
request = DummyRequest([])
content = '0123456789' * 2
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content),
[('a', 0, 2), ('b', 5, 10), ('c', 0, 0)])
producer.bufferSize = 10
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
'a' + content[0:2] + 'b' + content[5:11],
content[11:15] + 'c',
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), [('', 1, 2)])
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class RangeTests(TestCase):
"""
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
"""
def setUp(self):
"""
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
"""
path = FilePath(self.mktemp())
# This is just a jumble of random stuff. It's supposed to be a good
# set of data for this test, particularly in order to avoid
# accidentally seeing the right result by having a byte sequence
# repeated at different locations or by having byte values which are
# somehow correlated with their position in the string.
self.payload = ('\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7'
'\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0'
'\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d'
'&\xfd%\xdd\x82q/A\x10Y\x8b')
path.setContent(self.payload)
self.file = path.open()
self.resource = static.File(self.file.name)
self.resource.isLeaf = 1
self.request = DummyRequest([''])
self.request.uri = self.file.name
self.catcher = []
log.addObserver(self.catcher.append)
def tearDown(self):
"""
Clean up the resource file and the log observer.
"""
self.file.close()
log.removeObserver(self.catcher.append)
def _assertLogged(self, expected):
"""
Asserts that a given log message occurred with an expected message.
"""
logItem = self.catcher.pop()
self.assertEqual(logItem["message"][0], expected)
self.assertEqual(
            self.catcher, [], "An additional log occurred: %r" % (logItem,))
def test_invalidRanges(self):
"""
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
"""
f = self.resource._parseRangeHeader
# there's no =
self.assertRaises(ValueError, f, 'bytes')
# unknown isn't a valid Bytes-Unit
self.assertRaises(ValueError, f, 'unknown=1-2')
# there's no - in =stuff
self.assertRaises(ValueError, f, 'bytes=3')
# both start and end are empty
self.assertRaises(ValueError, f, 'bytes=-')
# start isn't an integer
self.assertRaises(ValueError, f, 'bytes=foo-')
# end isn't an integer
self.assertRaises(ValueError, f, 'bytes=-foo')
# end isn't equal to or greater than start
self.assertRaises(ValueError, f, 'bytes=5-4')
def test_rangeMissingStop(self):
"""
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and C{None}.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=0-'), [(0, None)])
def test_rangeMissingStart(self):
"""
A single bytes range without an explicit start position is parsed into
a two-tuple of C{None} and the end position.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=-3'), [(None, 3)])
def test_range(self):
"""
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=2-5'), [(2, 5)])
def test_rangeWithSpace(self):
"""
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
"""
self.assertEqual(
self.resource._parseRangeHeader(' bytes=1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes =1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes= 1-2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1 -2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1- 2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2 '), [(1, 2)])
def test_nullRangeElements(self):
"""
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,\r\n, ,\t'), [(1, 2)])
def test_multipleRanges(self):
"""
If multiple byte ranges are specified their starts and stops are
returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,3-4'),
[(1, 2), (3, 4)])
def test_bodyLength(self):
"""
A correct response to a range request is as long as the length of the
requested range.
"""
self.request.headers['range'] = 'bytes=0-43'
self.resource.render(self.request)
self.assertEqual(len(''.join(self.request.written)), 44)
def test_invalidRangeRequest(self):
"""
An incorrect range request (RFC 2616 defines a correct range request as
a Bytes-Unit followed by a '=' character followed by a specific range.
Only 'bytes' is defined) results in the range header value being logged
and a normal 200 response being sent.
"""
self.request.headers['range'] = range = 'foobar=0-43'
self.resource.render(self.request)
expected = "Ignoring malformed Range header %r" % (range,)
self._assertLogged(expected)
self.assertEqual(''.join(self.request.written), self.payload)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def parseMultipartBody(self, body, boundary):
"""
Parse C{body} as a multipart MIME response separated by C{boundary}.
        Note that this will fail the calling test on certain syntactic
        problems.
"""
sep = "\r\n--" + boundary
parts = ''.join(body).split(sep)
self.assertEqual('', parts[0])
self.assertEqual('--\r\n', parts[-1])
parsed_parts = []
for part in parts[1:-1]:
before, header1, header2, blank, partBody = part.split('\r\n', 4)
headers = header1 + '\n' + header2
self.assertEqual('', before)
self.assertEqual('', blank)
partContentTypeValue = re.search(
'^content-type: (.*)$', headers, re.I|re.M).group(1)
start, end, size = re.search(
'^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$',
headers, re.I|re.M).groups()
parsed_parts.append(
{'contentType': partContentTypeValue,
'contentRange': (start, end, size),
'body': partBody})
return parsed_parts
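    # For reference, the multipart bodies parsed above begin with CRLF, "--"
    # and the boundary; each part then carries a Content-Type header, a
    # Content-Range header of the form "bytes <start>-<end>/<total>", a blank
    # line and the chunk itself, and the body ends with the boundary followed
    # by "--".  (Restated from the parsing logic as a reading aid only.)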
def test_multipleRangeRequest(self):
"""
        The response to a request for multiple byte ranges is a MIME-ish
        multipart response.
"""
startEnds = [(0, 2), (20, 30), (40, 50)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), e)
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_multipleRangeRequestWithRangeOverlappingEnd(self):
"""
        The response to a request for multiple byte ranges is a MIME-ish
        multipart response, even when one of the ranges extends past the end
        of the resource.
"""
startEnds = [(0, 2), (40, len(self.payload) + 10)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), min(e, self.resource.getFileSize()-1))
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_implicitEnd(self):
"""
        If the end byte position is omitted, the range is treated as
        extending to the end of the resource.
"""
self.request.headers['range'] = 'bytes=23-'
self.resource.render(self.request)
self.assertEqual(''.join(self.request.written), self.payload[23:])
self.assertEqual(len(''.join(self.request.written)), 41)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 23-63/64')
self.assertEqual(self.request.outgoingHeaders['content-length'], '41')
def test_implicitStart(self):
"""
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
"""
self.request.headers['range'] = 'bytes=-17'
self.resource.render(self.request)
self.assertEqual(''.join(self.request.written), self.payload[-17:])
self.assertEqual(len(''.join(self.request.written)), 17)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 47-63/64')
self.assertEqual(self.request.outgoingHeaders['content-length'], '17')
def test_explicitRange(self):
"""
A correct response to a bytes range header request from A to B starts
with the A'th byte and ends with (including) the B'th byte. The first
byte of a page is numbered with 0.
"""
self.request.headers['range'] = 'bytes=3-43'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEqual(written, self.payload[3:44])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 3-43/64')
self.assertEqual(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_explicitRangeOverlappingEnd(self):
"""
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered with 0.
"""
self.request.headers['range'] = 'bytes=40-100'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEqual(written, self.payload[40:])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 40-63/64')
self.assertEqual(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_statusCodeRequestedRangeNotSatisfiable(self):
"""
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
"""
self.request.headers['range'] = 'bytes=20-13'
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(''.join(self.request.written), self.payload)
self.assertEqual(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def test_invalidStartBytePos(self):
"""
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
"""
self.request.headers['range'] = 'bytes=67-108'
self.resource.render(self.request)
self.assertEqual(
self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE)
self.assertEqual(''.join(self.request.written), '')
self.assertEqual(self.request.outgoingHeaders['content-length'], '0')
# Sections 10.4.17 and 14.16
self.assertEqual(
self.request.outgoingHeaders['content-range'],
'bytes */%d' % (len(self.payload),))
class DirectoryListerTest(TestCase):
"""
Tests for L{static.DirectoryLister}.
"""
def _request(self, uri):
request = DummyRequest([''])
request.uri = uri
return request
def test_renderHeader(self):
"""
        L{static.DirectoryLister} prints the request uri as the header of the
        rendered content.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
self.assertIn("<h1>Directory listing for foo</h1>", data)
self.assertIn("<title>Directory listing for foo</title>", data)
def test_renderUnquoteHeader(self):
"""
        L{static.DirectoryLister} unquotes the request uri before printing it.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%20bar'))
self.assertIn("<h1>Directory listing for foo bar</h1>", data)
self.assertIn("<title>Directory listing for foo bar</title>", data)
def test_escapeHeader(self):
"""
        L{static.DirectoryLister} escapes "&", "<" and ">" after unquoting the
        request uri.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%26bar'))
self.assertIn("<h1>Directory listing for foo&bar</h1>", data)
self.assertIn("<title>Directory listing for foo&bar</title>", data)
def test_renderFiles(self):
"""
L{static.DirectoryLister} is able to list all the files inside a
directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1').setContent("content1")
path.child('file2').setContent("content2" * 1000)
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="file1">file1</a></td>
<td>8B</td>
<td>[text/html]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="file2">file2</a></td>
<td>7K</td>
<td>[text/html]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderDirectories(self):
"""
L{static.DirectoryLister} is able to list all the directories inside
a directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2 & 3').makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir2%20%26%203/">dir2 & 3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderFiltered(self):
"""
        L{static.DirectoryLister} takes an optional C{dirs} argument that
        filters the list of directories and files printed.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2').makedirs()
path.child('dir3').makedirs()
lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"])
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir3/">dir3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_oddAndEven(self):
"""
        L{static.DirectoryLister} alternates the CSS class between odd and
        even rows of the table.
"""
lister = static.DirectoryLister(None)
elements = [{"href": "", "text": "", "size": "", "type": "",
"encoding": ""} for i in xrange(5)]
content = lister._buildTableContent(elements)
self.assertEqual(len(content), 5)
self.assertTrue(content[0].startswith('<tr class="odd">'))
self.assertTrue(content[1].startswith('<tr class="even">'))
self.assertTrue(content[2].startswith('<tr class="odd">'))
self.assertTrue(content[3].startswith('<tr class="even">'))
self.assertTrue(content[4].startswith('<tr class="odd">'))
def test_contentType(self):
"""
L{static.DirectoryLister} produces a MIME-type that indicates that it is
HTML, and includes its charset (UTF-8).
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
req = self._request('')
lister.render(req)
self.assertEqual(req.outgoingHeaders['content-type'],
"text/html; charset=utf-8")
def test_mimeTypeAndEncodings(self):
"""
L{static.DirectoryLister} is able to detect mimetype and encoding of
listed files.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1.txt').setContent("file1")
path.child('file2.py').setContent("python")
path.child('file3.conf.gz').setContent("conf compressed")
path.child('file4.diff.bz2').setContent("diff compressed")
directory = os.listdir(path.path)
directory.sort()
contentTypes = {
".txt": "text/plain",
".py": "text/python",
".conf": "text/configuration",
".diff": "text/diff"
}
lister = static.DirectoryLister(path.path, contentTypes=contentTypes)
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [
{'encoding': '',
'href': 'file1.txt',
'size': '5B',
'text': 'file1.txt',
'type': '[text/plain]'},
{'encoding': '',
'href': 'file2.py',
'size': '6B',
'text': 'file2.py',
'type': '[text/python]'},
{'encoding': '[gzip]',
'href': 'file3.conf.gz',
'size': '15B',
'text': 'file3.conf.gz',
'type': '[text/configuration]'},
{'encoding': '[bzip2]',
'href': 'file4.diff.bz2',
'size': '15B',
'text': 'file4.diff.bz2',
'type': '[text/diff]'}])
def test_brokenSymlink(self):
"""
        If a file in the listing is a broken symlink, it is not returned by
        L{static.DirectoryLister._getFilesAndDirectories}.
"""
path = FilePath(self.mktemp())
path.makedirs()
file1 = path.child('file1')
file1.setContent("file1")
file1.linkTo(path.child("file2"))
file1.remove()
lister = static.DirectoryLister(path.path)
directory = os.listdir(path.path)
directory.sort()
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [])
if getattr(os, "symlink", None) is None:
test_brokenSymlink.skip = "No symlink support"
def test_childrenNotFound(self):
"""
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
request = self._request('')
child = resource.getChildForRequest(lister, request)
result = _render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, http.NOT_FOUND)
result.addCallback(cbRendered)
return result
def test_repr(self):
"""
L{static.DirectoryLister.__repr__} gives the path of the lister.
"""
path = FilePath(self.mktemp())
lister = static.DirectoryLister(path.path)
self.assertEqual(repr(lister),
"<DirectoryLister of %r>" % (path.path,))
self.assertEqual(str(lister),
"<DirectoryLister of %r>" % (path.path,))
def test_formatFileSize(self):
"""
        L{static.formatFileSize} formats a number of bytes into a more
        readable string.
"""
self.assertEqual(static.formatFileSize(0), "0B")
self.assertEqual(static.formatFileSize(123), "123B")
self.assertEqual(static.formatFileSize(4567), "4K")
self.assertEqual(static.formatFileSize(8900000), "8M")
self.assertEqual(static.formatFileSize(1234000000), "1G")
self.assertEqual(static.formatFileSize(1234567890000), "1149G")
class LoadMimeTypesTests(TestCase):
"""
Tests for the MIME type loading routine.
@cvar UNSET: A sentinel to signify that C{self.paths} has not been set by
the mock init.
"""
UNSET = object()
def setUp(self):
self.paths = self.UNSET
def _fakeInit(self, paths):
"""
A mock L{mimetypes.init} that records the value of the passed C{paths}
argument.
@param paths: The paths that will be recorded.
"""
self.paths = paths
def test_defaultArgumentIsNone(self):
"""
By default, C{None} is passed to C{mimetypes.init}.
"""
static.loadMimeTypes(init=self._fakeInit)
self.assertIdentical(self.paths, None)
def test_extraLocationsWork(self):
"""
Passed MIME type files are passed to C{mimetypes.init}.
"""
paths = ["x", "y", "z"]
static.loadMimeTypes(paths, init=self._fakeInit)
self.assertIdentical(self.paths, paths)
def test_usesGlobalInitFunction(self):
"""
By default, C{mimetypes.init} is called.
"""
# Checking mimetypes.inited doesn't always work, because
# something, somewhere, calls mimetypes.init. Yay global
# mutable state :)
args, _, _, defaults = inspect.getargspec(static.loadMimeTypes)
defaultInit = defaults[args.index("init")]
self.assertIdentical(defaultInit, mimetypes.init)
|
apache-2.0
|
MattsFleaMarket/python-for-android
|
python3-alpha/python3-src/Lib/idlelib/SearchDialogBase.py
|
55
|
4385
|
from tkinter import *
class SearchDialogBase:
title = "Search Dialog"
icon = "Search"
needwrapbutton = 1
def __init__(self, root, engine):
self.root = root
self.engine = engine
self.top = None
def open(self, text, searchphrase=None):
self.text = text
if not self.top:
self.create_widgets()
else:
self.top.deiconify()
self.top.tkraise()
if searchphrase:
self.ent.delete(0,"end")
self.ent.insert("end",searchphrase)
self.ent.focus_set()
self.ent.selection_range(0, "end")
self.ent.icursor(0)
self.top.grab_set()
def close(self, event=None):
if self.top:
self.top.grab_release()
self.top.withdraw()
def create_widgets(self):
top = Toplevel(self.root)
top.bind("<Return>", self.default_command)
top.bind("<Escape>", self.close)
top.protocol("WM_DELETE_WINDOW", self.close)
top.wm_title(self.title)
top.wm_iconname(self.icon)
self.top = top
self.row = 0
self.top.grid_columnconfigure(0, pad=2, weight=0)
self.top.grid_columnconfigure(1, pad=2, minsize=100, weight=100)
self.create_entries()
self.create_option_buttons()
self.create_other_buttons()
return self.create_command_buttons()
def make_entry(self, label, var):
l = Label(self.top, text=label)
l.grid(row=self.row, column=0, sticky="nw")
e = Entry(self.top, textvariable=var, exportselection=0)
e.grid(row=self.row, column=1, sticky="nwe")
self.row = self.row + 1
return e
def make_frame(self,labeltext=None):
if labeltext:
l = Label(self.top, text=labeltext)
l.grid(row=self.row, column=0, sticky="nw")
f = Frame(self.top)
f.grid(row=self.row, column=1, columnspan=1, sticky="nwe")
self.row = self.row + 1
return f
def make_button(self, label, command, isdef=0):
b = Button(self.buttonframe,
text=label, command=command,
default=isdef and "active" or "normal")
cols,rows=self.buttonframe.grid_size()
b.grid(pady=1,row=rows,column=0,sticky="ew")
self.buttonframe.grid(rowspan=rows+1)
return b
def create_entries(self):
self.ent = self.make_entry("Find:", self.engine.patvar)
def create_option_buttons(self):
f = self.make_frame("Options")
btn = Checkbutton(f, anchor="w",
variable=self.engine.revar,
text="Regular expression")
btn.pack(side="left", fill="both")
if self.engine.isre():
btn.select()
btn = Checkbutton(f, anchor="w",
variable=self.engine.casevar,
text="Match case")
btn.pack(side="left", fill="both")
if self.engine.iscase():
btn.select()
btn = Checkbutton(f, anchor="w",
variable=self.engine.wordvar,
text="Whole word")
btn.pack(side="left", fill="both")
if self.engine.isword():
btn.select()
if self.needwrapbutton:
btn = Checkbutton(f, anchor="w",
variable=self.engine.wrapvar,
text="Wrap around")
btn.pack(side="left", fill="both")
if self.engine.iswrap():
btn.select()
def create_other_buttons(self):
f = self.make_frame("Direction")
#lbl = Label(f, text="Direction: ")
#lbl.pack(side="left")
btn = Radiobutton(f, anchor="w",
variable=self.engine.backvar, value=1,
text="Up")
btn.pack(side="left", fill="both")
if self.engine.isback():
btn.select()
btn = Radiobutton(f, anchor="w",
variable=self.engine.backvar, value=0,
text="Down")
btn.pack(side="left", fill="both")
if not self.engine.isback():
btn.select()
def create_command_buttons(self):
#
# place button frame on the right
f = self.buttonframe = Frame(self.top)
f.grid(row=0,column=2,padx=2,pady=2,ipadx=2,ipady=2)
b = self.make_button("close", self.close)
b.lower()
|
apache-2.0
|
cfelton/minnesota
|
examples/cores/fpgalink/led/fpgalink_led.py
|
1
|
2713
|
from myhdl import *
from mn.cores.usb_ext import fl_fx2
from mn.cores.usb_ext import fpgalink_fx2
def fpgalink_led(
# ~~ FX2 interface signals ~~
IFCLK, # 48 MHz clock from FX2
RST, # active low async reset
SLWR, # active low write strobe
SLRD, # active low read strobe
SLOE, # active low output enable
FDI, # input data bus
FDO, # output data bus
FDS, # data select
ADDR, # 2bit address (fifo select)
FLAGA, # not used
FLAGB, # gotroom
FLAGC, # gotdata
FLAGD, # not used
PKTEND, # submit partial (less than 512)
# ~~ peripherals interfaces ~~
LEDS # external LEDs
):
"""
"""
# get the local references for the top-level
clock,reset,fx2_bus,fl_bus = fl_fx2.get_interfaces()
clock = IFCLK
reset = RST
fx2_bus.data_i = FDI
fx2_bus.data_o = FDO
fx2_bus.data_t = FDS
fx2_bus.gotdata = FLAGC
fx2_bus.gotroom = FLAGB
fx2_bus.write = SLWR
fx2_bus.read = SLRD
#SLOE = SLRD now shadowed signals for conversion
fx2_bus.pktend = PKTEND
# instantiate the fpgalink interface
g_fli = fpgalink_fx2(clock, reset, fx2_bus, fl_bus)
# ~~~~~~
lreg = Signal(intbv(0)[8:])
f2hValid_in = fl_bus.valid_i
h2fReady_in = fl_bus.ready_i
h2fValid_out = fl_bus.valid_o
chanAddr_out = fl_bus.chan_addr
f2hData_in = fl_bus.data_i
h2fData_out = fl_bus.data_o
fifosel = fx2_bus.fifosel
@always_comb
def hdl_assigns():
ADDR.next[0] = False
ADDR.next[1] = fifosel
SLOE.next = SLRD
f2hValid_in.next = True
h2fReady_in.next = True
LEDS.next = lreg
if chanAddr_out == 0:
f2hData_in.next = 0xCE
elif chanAddr_out == 1:
f2hData_in.next = lreg
else:
f2hData_in.next = 0x55
@always_seq(clock.posedge, reset=reset)
def hdl_fl():
if h2fValid_out and chanAddr_out == 1:
lreg.next = h2fData_out
return g_fli, hdl_fl, hdl_assigns
def convert():
FDO = Signal(intbv(0)[8:])
FDI = Signal(intbv(0)[8:])
FDS = Signal(bool(0))
SLWR,SLRD,SLOE = [Signal(bool(0)) for ii in range(3)]
FLAGA,FLAGB,FLAGC,FLAGD = [Signal(bool(0)) for ii in range(4)]
ADDR = Signal(intbv(0)[2:])
IFCLK = Signal(bool(0))
RST = ResetSignal(bool(1), active=0, async=True)
LEDS = Signal(intbv(0)[8:])
PKTEND = Signal(bool(0))
toVerilog(fpgalink_led, IFCLK, RST, SLWR, SLRD, SLOE,
FDI, FDO, FDS, ADDR, FLAGA, FLAGB, FLAGC, FLAGD, PKTEND,
LEDS)
if __name__ == '__main__':
convert()
|
gpl-3.0
|
huawei-cloud/compass
|
compass/hdsdiscovery/utils.py
|
4
|
4810
|
"""Utility functions
Including functions of get/getbulk/walk/set of snmp for three versions
"""
import imp
import re
import logging
def load_module(mod_name, path, host=None, credential=None):
""" Load a module instance.
:param str mod_name: module name
:param str path: directory of the module
:param str host: switch ip address
:param str credential: credential used to access switch
"""
instance = None
try:
file, path, descr = imp.find_module(mod_name, [path])
if file:
mod = imp.load_module(mod_name, file, path, descr)
if host and credential:
instance = getattr(mod, mod.CLASS_NAME)(host, credential)
else:
instance = getattr(mod, mod.CLASS_NAME)()
except ImportError as exc:
logging.error('No such plugin : %s', mod_name)
logging.exception(exc)
finally:
return instance
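# Illustrative usage sketch (the module name, path and credential below are
# hypothetical):
#   plugin = load_module('huawei', '/opt/compass/vendors',
#                        host='10.145.88.1',
#                        credential={'Version': 'v2c', 'Community': 'public'})
#   if plugin is None:
#       # the import failed; load_module has already logged the reason
#       pass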
def ssh_remote_execute(host, username, password, cmd, *args):
"""SSH to execute script on remote machine
:param host: ip of the remote machine
:param username: username to access the remote machine
:param password: password to access the remote machine
:param cmd: command to execute
"""
    client = None
    try:
import paramiko
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=username, password=password)
stdin, stdout, stderr = client.exec_command(cmd)
return stdout.readlines()
except ImportError as exc:
logging.error("[hdsdiscovery][utils][ssh_remote_execute] failed to"
"load module 'paramiko', donnot exist!")
logging.exception(exc)
return None
except Exception as exc:
logging.error("[hdsdiscovery][utils][ssh_remote_execute] failed: %s",
cmd)
logging.exception(exc)
return None
    finally:
        # Only close the client if it was actually created; otherwise the
        # finally block itself would raise a NameError.
        if client is not None:
            client.close()
def valid_ip_format(ip_address):
"""Valid the format of an Ip address"""
if not re.match(r'^((([0-2]?\d{0,2}\.){3}([0-2]?\d{0,2}))'
'|(([\da-fA-F]{1,4}:){7}([\da-fA-F]{1,4})))$',
ip_address):
# check IP's format is match ipv4 or ipv6 by regex
return False
return True
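# Illustrative behaviour of the regex above (examples only):
#   valid_ip_format('10.145.88.211') -> True
#   valid_ip_format('not-an-ip')     -> False
#   valid_ip_format('fe80::1')       -> False  (only fully expanded, 8-group
#                                               IPv6 addresses match)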
#################################################################
# Implement snmpwalk and snmpget functionality
# The structure of the returned dictionary will be tag/iid/value/type
#################################################################
AUTH_VERSIONS = {'v1': 1,
'v2c': 2,
'v3': 3}
def snmp_walk(host, credential, *args):
"""Impelmentation of snmpwalk functionality
:param host: switch ip
:param credential: credential to access switch
:param args: OIDs
"""
try:
import netsnmp
except ImportError:
logging.error("Module 'netsnmp' do not exist! Please install it first")
return None
if 'Version' not in credential or 'Community' not in credential:
logging.error("[utils] missing 'Version' and 'Community' in %s",
credential)
return None
if credential['Version'] in AUTH_VERSIONS:
version = AUTH_VERSIONS[credential['Version']]
credential['Version'] = version
varbind_list = []
for arg in args:
varbind = netsnmp.Varbind(arg)
varbind_list.append(varbind)
var_list = netsnmp.VarList(*varbind_list)
res = netsnmp.snmpwalk(var_list, DestHost=host, **credential)
result = []
for var in var_list:
response = {}
response['elem_name'] = var.tag
response['iid'] = var.iid
response['value'] = var.val
response['type'] = var.type
result.append(response)
return result
def snmp_get(host, credential, object_type):
"""Impelmentation of snmp get functionality
:param object_type: mib object
:param host: switch ip
:param credential: the dict of credential to access switch
"""
try:
import netsnmp
except ImportError:
logging.error("Module 'netsnmp' do not exist! Please install it first")
return None
if 'Version' not in credential or 'Community' not in credential:
        logging.error('[utils][snmp_get] missing keywords in %s for %s',
credential, host)
return None
if credential['Version'] in AUTH_VERSIONS:
version = AUTH_VERSIONS[credential['Version']]
credential['Version'] = version
varbind = netsnmp.Varbind(object_type)
res = netsnmp.snmpget(varbind, DestHost=host, **credential)
if not res:
logging.error('no result found for %s %s', host, credential)
return None
return res[0]
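if __name__ == '__main__':
    # Minimal smoke-test sketch; the switch address and OIDs below are
    # hypothetical and require the 'netsnmp' bindings plus a reachable device.
    example_credential = {'Version': 'v2c', 'Community': 'public'}
    print(snmp_get('10.145.88.1', example_credential, 'sysDescr.0'))
    print(snmp_walk('10.145.88.1', example_credential, 'ifDescr'))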
|
apache-2.0
|
miguelparaiso/PracticaOdoo
|
addons/website_membership/models/product.py
|
338
|
1264
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_template(osv.Model):
_inherit = 'product.template'
_columns = {
'website_published': fields.boolean('Available in the website', copy=False),
}
_defaults = {
'website_published': False,
}
|
agpl-3.0
|
google/cloudprint_logocert
|
_ticket.py
|
1
|
5318
|
"""Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Model that represents the CloudJobTicket that's used to submit print jobs to the
Google Cloud Print Service /submit interface.
CloudJobTicket provides methods to set the various fields of a job ticket.
"""
class CloudJobTicket(object):
"""Represents the print job specifications sent to the printer on
job submission."""
def __init__(self, version = '1.0'):
"""Get a reference to a logger object.
Args:
version: string, gcp version
"""
self.val = {}
self.val['print'] = {}
self.val['version'] = version
def AddColorOption(self, color_type):
"""
Specify the print job's color scheme
Args:
color_type: string, STANDARD_COLOR or STANDARD_MONOCHROME
"""
self.val['print']['color'] = {'type': color_type}
def AddCopiesOption(self, num_copies):
"""
Specify the number of copies to print
Args:
num_copies: integer, number of copies to print
"""
self.val['print']['copies'] = {'copies': num_copies}
def AddDuplexOption(self, duplex_type):
"""
Specify the duplexing type of the print job
Args:
duplex_type: string, NO_DUPLEX, LONG_EDGE, or SHORT_EDGE
"""
self.val['print']['duplex'] = {'type': duplex_type}
def AddPageOrientationOption(self, orientation_type):
"""
Specify the page orientation of the print job
Args:
orientation_type: string, PORTRAIT, LANDSCAPE, or AUTO
"""
self.val['print']['page_orientation'] = {'type': orientation_type}
def AddDpiOption(self, horizontal_dpi, vertical_dpi):
"""
Specify the DPI for the print job
Args:
horizontal_dpi: integer, horizontal dpi
vertical_dpi : integer, vertical dpi
"""
self.val['print']['dpi'] = {'horizontal_dpi': horizontal_dpi,
'vertical_dpi': vertical_dpi}
def AddMarginOption(self, top, right, bottom, left):
"""
Specify the margins for the print job
Args:
top, int, top margin in microns
right, int, right margin in microns
bottom, int, bottom margin in microns
left, int, left margin in microns
"""
self.val['print']['margins'] = {'top_microns': top,
'right_microns': right,
'bottom_microns': bottom,
'left_microns': left}
def AddSizeOption(self, height_microns, width_microns):
"""
Specify the size of the print job
Args:
height_microns: integer, height in microns
width_microns : integer, width in microns
"""
self.val['print']['media_size'] = {'height_microns': height_microns,
'width_microns': width_microns}
def AddReverseOption(self):
"""
Enable the reverse print option
"""
self.val['print']['reverse_order'] = {'reverse_order': True}
def AddFitToPageOption(self, type):
"""
Specify the size of the print job
Args:
type: string, NO_FITTING, FIT_TO_PAGE, GROW_TO_PAGE, SHRINK_TO_PAGE,
or FILL_PAGE
"""
self.val['print']['fit_to_page'] = {'type': type}
def AddPageRangeOption(self, start, end = None):
"""
Specify a range of pages to print
Args:
start: integer, Beginning of the print interval (inclusive)
end : integer, The last page of the range to print (inclusive).
If not specified, all pages after 'start' are printed
"""
# If this is the first page range for this CJT, start with an empty array;
# otherwise, get the existing array
page_ranges = ([] if 'page_range' not in self.val['print'] else
self.val['print']['page_range']['interval'])
new_range = {'start': start}
if end is not None:
new_range['end']= end
page_ranges.append(new_range)
self.val['print']['page_range']= {'interval': page_ranges}
class GCPConstants(object):
"""A class that holds constants that are used in a GCP"""
#
# CJT (Cloud Job Ticket) constants
#
# Color scheme
MONOCHROME = 'STANDARD_MONOCHROME'
COLOR = 'STANDARD_COLOR'
# Page orientation
LANDSCAPE = 'LANDSCAPE'
PORTRAIT = 'PORTRAIT'
# Duplexing
LONG_EDGE = 'LONG_EDGE'
SHORT_EDGE = 'SHORT_EDGE'
# Page fit
NO_FIT = 'NO_FITTING'
FIT = 'FIT_TO_PAGE'
GROW = 'GROW_TO_PAGE'
SHRINK = 'SHRINK_TO_PAGE'
FILL = 'FILL_PAGE'
# A4 size in microns
A4_HEIGHT = 297000
A4_WIDTH = 210000
#
# CJS (Cloud Job State) constants
#
DRAFT = 'DRAFT'
HELD = 'HELD'
QUEUED = 'QUEUED'
IN_PROGRESS = 'IN_PROGRESS'
STOPPED = 'STOPPED'
DONE = 'DONE'
ABORTED = 'ABORTED'
|
apache-2.0
|
Acehaidrey/incubator-airflow
|
airflow/migrations/versions/1507a7289a2f_create_is_encrypted.py
|
8
|
2164
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""create is_encrypted
Revision ID: 1507a7289a2f
Revises: e3a246e0dc1
Create Date: 2015-08-18 18:57:51.927315
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = '1507a7289a2f'
down_revision = 'e3a246e0dc1'
branch_labels = None
depends_on = None
connectionhelper = sa.Table(
'connection', sa.MetaData(), sa.Column('id', sa.Integer, primary_key=True), sa.Column('is_encrypted')
)
def upgrade(): # noqa: D103
# first check if the user already has this done. This should only be
# true for users who are upgrading from a previous version of Airflow
# that predates Alembic integration
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
# this will only be true if 'connection' already exists in the db,
# but not if alembic created it in a previous migration
if 'connection' in inspector.get_table_names():
col_names = [c['name'] for c in inspector.get_columns('connection')]
if 'is_encrypted' in col_names:
return
op.add_column('connection', sa.Column('is_encrypted', sa.Boolean, unique=False, default=False))
conn = op.get_bind()
conn.execute(connectionhelper.update().values(is_encrypted=False))
def downgrade(): # noqa: D103
op.drop_column('connection', 'is_encrypted')
|
apache-2.0
|
JCBarahona/edX
|
common/djangoapps/student/helpers.py
|
9
|
10576
|
"""Helpers for the student app. """
from datetime import datetime
import urllib
from pytz import UTC
from django.core.urlresolvers import reverse, NoReverseMatch
import third_party_auth
from verify_student.models import VerificationDeadline, SoftwareSecurePhotoVerification # pylint: disable=import-error
from course_modes.models import CourseMode
# Enumeration of per-course verification statuses
# we display on the student dashboard.
VERIFY_STATUS_NEED_TO_VERIFY = "verify_need_to_verify"
VERIFY_STATUS_SUBMITTED = "verify_submitted"
VERIFY_STATUS_APPROVED = "verify_approved"
VERIFY_STATUS_MISSED_DEADLINE = "verify_missed_deadline"
VERIFY_STATUS_NEED_TO_REVERIFY = "verify_need_to_reverify"
DISABLE_UNENROLL_CERT_STATES = [
'generating',
'ready',
]
def check_verify_status_by_course(user, course_enrollments):
"""
Determine the per-course verification statuses for a given user.
The possible statuses are:
* VERIFY_STATUS_NEED_TO_VERIFY: The student has not yet submitted photos for verification.
* VERIFY_STATUS_SUBMITTED: The student has submitted photos for verification,
      but has not yet been approved.
* VERIFY_STATUS_APPROVED: The student has been successfully verified.
* VERIFY_STATUS_MISSED_DEADLINE: The student did not submit photos within the course's deadline.
* VERIFY_STATUS_NEED_TO_REVERIFY: The student has an active verification, but it is
set to expire before the verification deadline for the course.
    It is also possible that a course does NOT have a verification status if:
* The user is not enrolled in a verified mode, meaning that the user didn't pay.
* The course does not offer a verified mode.
* The user submitted photos but an error occurred while verifying them.
* The user submitted photos but the verification was denied.
In the last two cases, we rely on messages in the sidebar rather than displaying
messages for each course.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): The courses the user is enrolled in.
Returns:
        dict: Mapping of course keys to verification status dictionaries.
If no verification status is applicable to a course, it will not
be included in the dictionary.
The dictionaries have these keys:
* status (str): One of the enumerated status codes.
* days_until_deadline (int): Number of days until the verification deadline.
* verification_good_until (str): Date string for the verification expiration date.
"""
status_by_course = {}
# Retrieve all verifications for the user, sorted in descending
# order by submission datetime
verifications = SoftwareSecurePhotoVerification.objects.filter(user=user)
# Check whether the user has an active or pending verification attempt
# To avoid another database hit, we re-use the queryset we have already retrieved.
has_active_or_pending = SoftwareSecurePhotoVerification.user_has_valid_or_pending(
user, queryset=verifications
)
# Retrieve verification deadlines for the enrolled courses
enrolled_course_keys = [enrollment.course_id for enrollment in course_enrollments]
course_deadlines = VerificationDeadline.deadlines_for_courses(enrolled_course_keys)
recent_verification_datetime = None
for enrollment in course_enrollments:
# If the user hasn't enrolled as verified, then the course
# won't display state related to its verification status.
if enrollment.mode in CourseMode.VERIFIED_MODES:
# Retrieve the verification deadline associated with the course.
# This could be None if the course doesn't have a deadline.
deadline = course_deadlines.get(enrollment.course_id)
relevant_verification = SoftwareSecurePhotoVerification.verification_for_datetime(deadline, verifications)
# Picking the max verification datetime on each iteration only with approved status
if relevant_verification is not None and relevant_verification.status == "approved":
recent_verification_datetime = max(
recent_verification_datetime if recent_verification_datetime is not None
else relevant_verification.expiration_datetime,
relevant_verification.expiration_datetime
)
# By default, don't show any status related to verification
status = None
# Check whether the user was approved or is awaiting approval
if relevant_verification is not None:
if relevant_verification.status == "approved":
status = VERIFY_STATUS_APPROVED
elif relevant_verification.status == "submitted":
status = VERIFY_STATUS_SUBMITTED
# If the user didn't submit at all, then tell them they need to verify
# If the deadline has already passed, then tell them they missed it.
# If they submitted but something went wrong (error or denied),
# then don't show any messaging next to the course, since we already
# show messages related to this on the left sidebar.
submitted = (
relevant_verification is not None and
relevant_verification.status not in ["created", "ready"]
)
if status is None and not submitted:
if deadline is None or deadline > datetime.now(UTC):
if has_active_or_pending:
# The user has an active verification, but the verification
# is set to expire before the deadline. Tell the student
# to reverify.
status = VERIFY_STATUS_NEED_TO_REVERIFY
else:
status = VERIFY_STATUS_NEED_TO_VERIFY
else:
# If a user currently has an active or pending verification,
# then they may have submitted an additional attempt after
# the verification deadline passed. This can occur,
# for example, when the support team asks a student
# to reverify after the deadline so they can receive
# a verified certificate.
# In this case, we still want to show them as "verified"
# on the dashboard.
if has_active_or_pending:
status = VERIFY_STATUS_APPROVED
# Otherwise, the student missed the deadline, so show
# them as "honor" (the kind of certificate they will receive).
else:
status = VERIFY_STATUS_MISSED_DEADLINE
# Set the status for the course only if we're displaying some kind of message
# Otherwise, leave the course out of the dictionary.
if status is not None:
days_until_deadline = None
now = datetime.now(UTC)
if deadline is not None and deadline > now:
days_until_deadline = (deadline - now).days
status_by_course[enrollment.course_id] = {
'status': status,
'days_until_deadline': days_until_deadline
}
if recent_verification_datetime:
for key, value in status_by_course.iteritems(): # pylint: disable=unused-variable
status_by_course[key]['verification_good_until'] = recent_verification_datetime.strftime("%m/%d/%Y")
return status_by_course
def auth_pipeline_urls(auth_entry, redirect_url=None):
"""Retrieve URLs for each enabled third-party auth provider.
These URLs are used on the "sign up" and "sign in" buttons
on the login/registration forms to allow users to begin
authentication with a third-party provider.
Optionally, we can redirect the user to an arbitrary
url after auth completes successfully. We use this
to redirect the user to a page that required login,
or to send users to the payment flow when enrolling
in a course.
Args:
auth_entry (string): Either `pipeline.AUTH_ENTRY_LOGIN` or `pipeline.AUTH_ENTRY_REGISTER`
Keyword Args:
redirect_url (unicode): If provided, send users to this URL
after they successfully authenticate.
Returns:
dict mapping provider IDs to URLs
"""
if not third_party_auth.is_enabled():
return {}
return {
provider.provider_id: third_party_auth.pipeline.get_login_url(
provider.provider_id, auth_entry, redirect_url=redirect_url
) for provider in third_party_auth.provider.Registry.accepting_logins()
}
# Query string parameters that can be passed to the "finish_auth" view to manage
# things like auto-enrollment.
POST_AUTH_PARAMS = ('course_id', 'enrollment_action', 'course_mode', 'email_opt_in')
def get_next_url_for_login_page(request):
"""
Determine the URL to redirect to following login/registration/third_party_auth
    The user is currently on a login or registration page.
If 'course_id' is set, or other POST_AUTH_PARAMS, we will need to send the user to the
/account/finish_auth/ view following login, which will take care of auto-enrollment in
the specified course.
Otherwise, we go to the ?next= query param or to the dashboard if nothing else is
specified.
"""
redirect_to = request.GET.get('next', None)
if not redirect_to:
try:
redirect_to = reverse('dashboard')
except NoReverseMatch:
redirect_to = reverse('home')
if any(param in request.GET for param in POST_AUTH_PARAMS):
# Before we redirect to next/dashboard, we need to handle auto-enrollment:
params = [(param, request.GET[param]) for param in POST_AUTH_PARAMS if param in request.GET]
params.append(('next', redirect_to)) # After auto-enrollment, user will be sent to payment page or to this URL
redirect_to = '{}?{}'.format(reverse('finish_auth'), urllib.urlencode(params))
# Note: if we are resuming a third party auth pipeline, then the next URL will already
# be saved in the session as part of the pipeline state. That URL will take priority
# over this one.
return redirect_to
|
agpl-3.0
|
ric2b/Vivaldi-browser
|
chromium/build/toolchain/win/tool_wrapper.py
|
1
|
8967
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
This file is copied to the build directory as part of toolchain setup and
is used to set up calls to tools used by the build that need wrappers.
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import stat
import sys
# Embedded vpython spec to provide `win32file` when this is invoked with
# vpython on windows.
#
# [VPYTHON:BEGIN]
# wheel: <
# name: "infra/python/wheels/pypiwin32/${vpython_platform}"
# version: "version:219"
# match_tag: < platform: "win32" >
# match_tag: < platform: "win_amd64" >
# >
# [VPYTHON:END]
if sys.platform == "win32":
try:
# First, try the normal way. This will work for python installations which
# have win32file already, or for vpython invocations of this script.
import win32file
except ImportError:
# Otherwise, do a hack to locate the depot_tools specific version of
# win32file.
#
# tool_wrapper.py doesn't get invoked through python.bat so the Python bin
# directory doesn't get added to the path. The Python module search logic
# handles this fine and finds win32file.pyd. However the Windows module
# search logic then looks for pywintypes27.dll and other DLLs in the path
# and if it finds versions with a different bitness first then win32file.pyd
# will fail to load with a cryptic error:
# ImportError: DLL load failed: %1 is not a valid Win32 application.
if sys.platform == 'win32':
os.environ['PATH'] = os.path.dirname(sys.executable) + \
os.pathsep + os.environ['PATH']
import win32file # pylint: disable=import-error
def superflush(pe_name):
# Flush the file buffers to try to work around a Windows 10 kernel bug,
# https://crbug.com/644525
output_handle = win32file.CreateFile(pe_name, win32file.GENERIC_WRITE,
0, None, win32file.OPEN_EXISTING, 0, 0)
win32file.FlushFileBuffers(output_handle)
output_handle.Close()
else:
def superflush(pe_name):
return None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
exit_code = WinTool().Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
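    # For example (illustrative values only), a saved environment file reading
    #   'PATH=C:\\tools\x00TMP=C:\\temp\x00\x00'
    # is parsed into {'PATH': 'C:\\tools', 'TMP': 'C:\\temp'}.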
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
def ExecDeleteFile(self, path):
"""Simple file delete command."""
if os.path.exists(path):
os.unlink(path)
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, dummy_excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
# Try to diagnose crbug.com/741603
if not os.path.exists(dest):
raise Exception("Copying of %s to %s failed" % (source, dest))
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
if sys.platform == 'win32':
args = list(args) # *args is a tuple by default, which is read-only.
args[0] = args[0].replace('/', '\\')
# https://docs.python.org/2/library/subprocess.html:
# "On Unix with shell=True [...] if args is a sequence, the first item
# specifies the command string, and any additional items will be treated as
# additional arguments to the shell itself. That is to say, Popen does the
# equivalent of:
# Popen(['/bin/sh', '-c', args[0], args[1], ...])"
# For that reason, since going through the shell doesn't seem necessary on
# non-Windows don't do that there.
pe_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
pe_name = m.group('out')
link = subprocess.Popen(args, shell=sys.platform == 'win32', env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Read output one line at a time as it shows up to avoid OOM failures when
# GBs of output is produced.
for line in link.stdout:
if (not line.startswith(' Creating library ') and
not line.startswith('Generating code') and
not line.startswith('Finished generating code')):
print(line)
result = link.wait()
if result == 0 and sys.platform == 'win32':
superflush(pe_name)
return result
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
if sys.platform == 'win32':
# Windows ARM64 uses clang-cl as assembler which has '/' as path
# separator, convert it to '\\' when running on Windows.
args = list(args) # *args is a tuple by default, which is read-only
args[0] = args[0].replace('/', '\\')
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if not line.startswith(' Assembling: '):
print(line)
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Converts .rc files to .res files."""
env = self._GetEnv(arch)
args = list(args)
rcpy_args = args[:]
rcpy_args[0:1] = [sys.executable, os.path.join(BASE_DIR, 'rc', 'rc.py')]
rcpy_args.append('/showIncludes')
return subprocess.call(rcpy_args, env=env)
def ExecActionWrapper(self, arch, rspfile, *dirname):
"""Runs an action command line from a response file using the environment
for |arch|. If |dirname| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after GN-time. http://crbug.com/333738.
for k, v in os.environ.items():
if k not in env:
env[k] = v
args = open(rspfile).read()
dirname = dirname[0] if dirname else None
return subprocess.call(args, shell=True, env=env, cwd=dirname)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
bsd-3-clause
|
acenario/Payable
|
lib/python2.7/site-packages/django/template/engine.py
|
38
|
10794
|
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache, six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import Context, Lexer, Parser, Template, TemplateDoesNotExist
from .context import _builtin_context_processors
_context_instance_undefined = object()
_dictionary_undefined = object()
_dirs_undefined = object()
class Engine(object):
def __init__(self, dirs=None, app_dirs=False,
allowed_include_roots=None, context_processors=None,
debug=False, loaders=None, string_if_invalid='',
file_charset='utf-8'):
if dirs is None:
dirs = []
if allowed_include_roots is None:
allowed_include_roots = []
if context_processors is None:
context_processors = []
if loaders is None:
loaders = ['django.template.loaders.filesystem.Loader']
if app_dirs:
loaders += ['django.template.loaders.app_directories.Loader']
else:
if app_dirs:
raise ImproperlyConfigured(
"app_dirs must not be set when loaders is defined.")
if isinstance(allowed_include_roots, six.string_types):
raise ImproperlyConfigured(
"allowed_include_roots must be a tuple, not a string.")
self.dirs = dirs
self.app_dirs = app_dirs
self.allowed_include_roots = allowed_include_roots
self.context_processors = context_processors
self.debug = debug
self.loaders = loaders
self.string_if_invalid = string_if_invalid
self.file_charset = file_charset
@staticmethod
@lru_cache.lru_cache()
def get_default():
"""
When only one DjangoTemplates backend is configured, returns it.
Raises ImproperlyConfigured otherwise.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
"""
# Since Engine is imported in django.template and since
# DjangoTemplates is a wrapper around this Engine class,
# local imports are required to avoid import loops.
from django.template import engines
from django.template.backends.django import DjangoTemplates
django_engines = [engine for engine in engines.all()
if isinstance(engine, DjangoTemplates)]
if len(django_engines) == 1:
# Unwrap the Engine instance inside DjangoTemplates
return django_engines[0].engine
elif len(django_engines) == 0:
raise ImproperlyConfigured(
"No DjangoTemplates backend is configured.")
else:
raise ImproperlyConfigured(
"Several DjangoTemplates backends are configured. "
"You must select one explicitly.")
@cached_property
def template_context_processors(self):
context_processors = _builtin_context_processors
context_processors += tuple(self.context_processors)
return tuple(import_string(path) for path in context_processors)
@cached_property
def template_loaders(self):
return self.get_template_loaders(self.loaders)
def get_template_loaders(self, template_loaders):
loaders = []
for template_loader in template_loaders:
loader = self.find_template_loader(template_loader)
if loader is not None:
loaders.append(loader)
return loaders
def find_template_loader(self, loader):
if isinstance(loader, (tuple, list)):
args = list(loader[1:])
loader = loader[0]
else:
args = []
if isinstance(loader, six.string_types):
loader_class = import_string(loader)
if getattr(loader_class, '_accepts_engine_in_init', False):
args.insert(0, self)
else:
warnings.warn(
"%s inherits from django.template.loader.BaseLoader "
"instead of django.template.loaders.base.Loader. " %
loader, RemovedInDjango110Warning, stacklevel=2)
loader_instance = loader_class(*args)
if not loader_instance.is_usable:
warnings.warn(
"Your template loaders configuration includes %r, but "
"your Python installation doesn't support that type of "
"template loading. Consider removing that line from "
"your settings." % loader)
return None
else:
return loader_instance
else:
raise ImproperlyConfigured(
"Invalid value in template loaders configuration: %r" % loader)
def find_template(self, name, dirs=None):
for loader in self.template_loaders:
try:
source, display_name = loader(name, dirs)
origin = self.make_origin(display_name, loader, name, dirs)
return source, origin
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def from_string(self, template_code):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(template_code, engine=self)
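# Hedged sketch (not part of Django's source): rendering a one-off template
# from a string, assuming 'engine' is an Engine instance configured as above.
#
#   template = engine.from_string("Hello {{ name }}!")
#   rendered = template.render(Context({'name': 'world'}))  # 'Hello world!'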
def get_template(self, template_name, dirs=_dirs_undefined):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
if dirs is _dirs_undefined:
dirs = None
else:
warnings.warn(
"The dirs argument of get_template is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
template, origin = self.find_template(template_name, dirs)
if not hasattr(template, 'render'):
# template needs to be compiled
template = Template(template, origin, template_name, engine=self)
return template
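# Hedged sketch (not part of Django's source): loading a template by name,
# assuming 'engine' was built with dirs pointing at a directory containing an
# (illustrative) file named 'index.html'.
#
#   template = engine.get_template('index.html')
#   html = template.render(Context({'title': 'Home'}))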
# This method was originally a function defined in django.template.loader.
# It was moved here in Django 1.8 when encapsulating the Django template
# engine in this Engine class. It's still called by deprecated code but it
# will be removed in Django 1.10. It's superseded by a new render_to_string
# function in django.template.loader.
def render_to_string(self, template_name, context=None,
context_instance=_context_instance_undefined,
dirs=_dirs_undefined,
dictionary=_dictionary_undefined):
if context_instance is _context_instance_undefined:
context_instance = None
else:
warnings.warn(
"The context_instance argument of render_to_string is "
"deprecated.", RemovedInDjango110Warning, stacklevel=2)
if dirs is _dirs_undefined:
# Do not set dirs to None here to avoid triggering the deprecation
# warning in select_template or get_template.
pass
else:
warnings.warn(
"The dirs argument of render_to_string is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
if dictionary is _dictionary_undefined:
dictionary = None
else:
warnings.warn(
"The dictionary argument of render_to_string was renamed to "
"context.", RemovedInDjango110Warning, stacklevel=2)
context = dictionary
if isinstance(template_name, (list, tuple)):
t = self.select_template(template_name, dirs)
else:
t = self.get_template(template_name, dirs)
if not context_instance:
# Django < 1.8 accepted a Context in `context` even though that's
# unintended. Preserve this ability but don't rewrap `context`.
if isinstance(context, Context):
return t.render(context)
else:
return t.render(Context(context))
if not context:
return t.render(context_instance)
# Add the context to the context stack, ensuring it gets removed again
# to keep the context_instance in the same state it started in.
with context_instance.push(context):
return t.render(context_instance)
def select_template(self, template_name_list, dirs=_dirs_undefined):
"""
Given a list of template names, returns the first that can be loaded.
"""
if dirs is _dirs_undefined:
# Do not set dirs to None here to avoid triggering the deprecation
# warning in get_template.
pass
else:
warnings.warn(
"The dirs argument of select_template is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return self.get_template(template_name, dirs)
except TemplateDoesNotExist as exc:
if exc.args[0] not in not_found:
not_found.append(exc.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(not_found))
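# Hedged sketch (not part of Django's source): trying several candidate names
# and rendering the first template that can be loaded; the file names are
# illustrative.
#
#   template = engine.select_template(['home/index.html', 'index.html'])
#   html = template.render(Context({}))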
def compile_string(self, template_string, origin):
"""
Compiles template_string into a NodeList ready for rendering.
"""
if self.debug:
from .debug import DebugLexer, DebugParser
lexer_class, parser_class = DebugLexer, DebugParser
else:
lexer_class, parser_class = Lexer, Parser
lexer = lexer_class(template_string, origin)
tokens = lexer.tokenize()
parser = parser_class(tokens)
return parser.parse()
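# Hedged sketch (not part of Django's source): compile_string is the low-level
# step behind Template(); it returns a NodeList that can be rendered directly.
#
#   nodelist = engine.compile_string("{{ greeting }}, world", None)
#   text = nodelist.render(Context({'greeting': 'Hello'}))  # 'Hello, world'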
def make_origin(self, display_name, loader, name, dirs):
if self.debug and display_name:
# Inner import to avoid circular dependency
from .loader import LoaderOrigin
return LoaderOrigin(display_name, loader, name, dirs)
else:
return None
|
mit
|
tadeegan/eiger-application-aware
|
test/system/test_thrift_server.py
|
3
|
101018
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to run a single test, run from trunk/:
# PYTHONPATH=test nosetests --tests=system.test_thrift_server:TestMutations.test_empty_range
import os, sys, time, struct, uuid
from . import root, ThriftTester
from . import thrift_client as client
from thrift.Thrift import TApplicationException
from ttypes import *
from constants import VERSION
def _i64(n):
return struct.pack('>q', n) # big endian = network order
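# e.g. _i64(5) == '\x00\x00\x00\x00\x00\x00\x00\x05' (an 8-byte big-endian long)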
_SIMPLE_COLUMNS = [Column('c1', 'value1', 0),
Column('c2', 'value2', 0)]
_SUPER_COLUMNS = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 0),
Column(_i64(6), 'value6', 0)])]
def _assert_column(column_family, key, column, value, ts = 0):
try:
assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
except NotFoundException:
raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value) )
def _assert_columnpath_exists(key, column_path):
try:
assert client.get(key, column_path, ConsistencyLevel.ONE)
except NotFoundException:
raise Exception('expected %s with %s but was not present.' % (key, column_path) )
def _assert_no_columnpath(key, column_path):
try:
client.get(key, column_path, ConsistencyLevel.ONE)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def _insert_simple(block=True):
# The 'block' flag is accepted for call-site symmetry with _insert_batch but is not used here.
return _insert_multi(['key1'])
def _insert_batch(block):
return _insert_multi_batch(['key1'], block)
def _insert_multi(keys):
CL = ConsistencyLevel.ONE
for key in keys:
client.insert(key, ColumnParent('Standard1'), Column('c1', 'value1', 0), CL)
client.insert(key, ColumnParent('Standard1'), Column('c2', 'value2', 0), CL)
def _insert_multi_batch(keys, block):
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
for key in keys:
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
def _big_slice(key, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
def _big_multislice(keys, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
def _verify_batch():
_verify_simple()
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard2'))]
assert L == _SIMPLE_COLUMNS, L
def _verify_simple():
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE).column == Column('c1', 'value1', 0)
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert L == _SIMPLE_COLUMNS, L
def _insert_super(key='key1'):
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_range():
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c2', 'value2', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_range():
client.add('key1', ColumnParent('Counter1'), CounterColumn('c1', 1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c2', 2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c3', 3), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c3'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _verify_counter_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c1'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c3'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _set_keyspace(keyspace):
client.set_keyspace(keyspace)
def _insert_super_range():
client.insert('key1', ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc3'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_super_range():
client.add('key1', ColumnParent('SuperCounter1', 'sc1'), CounterColumn(_i64(4), 4), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(5), 5), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(6), 6), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc3'), CounterColumn(_i64(7), 7), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc2'
assert result[1].super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc3'
assert result[1].super_column.name == 'sc2'
def _verify_counter_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc2'
assert result[1].counter_super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc3'
assert result[1].counter_super_column.name == 'sc2'
def _verify_super(supercf='Super1', key='key1'):
assert client.get(key, ColumnPath(supercf, 'sc1', _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), 'value4', 0)
slice = [result.super_column
for result in _big_slice(key, ColumnParent('Super1'))]
assert slice == _SUPER_COLUMNS, slice
def _expect_exception(fn, type_):
try:
r = fn()
except type_, t:
return t
else:
raise Exception('expected %s; got %s' % (type_.__name__, r))
def _expect_missing(fn):
_expect_exception(fn, NotFoundException)
def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
kr = KeyRange(start, end, count=count, row_filter=row_filter)
return client.get_range_slices(parent, predicate, kr, cl)
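# Hedged usage sketch: fetch up to 100 rows of Standard1 over the full key
# range, returning column 'c1' for each row (the values are illustrative).
#
#   rows = get_range_slice(client, ColumnParent('Standard1'),
#                          SlicePredicate(column_names=['c1']),
#                          '', '', 100, ConsistencyLevel.ONE)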
class TestMutations(ThriftTester):
def test_insert(self):
_set_keyspace('Keyspace1')
_insert_simple(False)
time.sleep(0.1)
_verify_simple()
def test_empty_slice(self):
_set_keyspace('Keyspace1')
assert _big_slice('key1', ColumnParent('Standard2')) == []
assert _big_slice('key1', ColumnParent('Super1')) == []
def test_missing_super(self):
_set_keyspace('Keyspace1')
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
_insert_super()
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
def test_count(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
assert client.get_count('key1', ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
# Let's make that a little more interesting
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c4', 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c5', 'value5', 0), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('c2', 'c4', False, 1000))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
def test_count_paging(self):
_set_keyspace('Keyspace1')
_insert_simple()
# Exercise paging
column_parent = ColumnParent('Standard1')
super_column_parent = ColumnParent('Super1', 'sc3')
# Paging for small columns starts at 1024 columns
columns_to_insert = [Column('c%d' % (i,), 'value%d' % (i,), 0) for i in xrange(3, 1026)]
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
client.batch_mutate({'key1' : cfmap }, ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('', '', False, 2000))
assert client.get_count('key1', column_parent, p, ConsistencyLevel.ONE) == 1025
# Ensure that the count limit isn't clobbered
p = SlicePredicate(slice_range=SliceRange('', '', False, 10))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
def test_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_simple()
_verify_simple()
def test_super_insert(self):
_set_keyspace('Keyspace1')
_insert_super()
_verify_super()
def test_super_get(self):
_set_keyspace('Keyspace1')
_insert_super()
result = client.get('key1', ColumnPath('Super1', 'sc2'), ConsistencyLevel.ONE).super_column
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
_set_keyspace('Keyspace1')
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
column_parent = ColumnParent('Super1', 'sc2')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(5), 'value5', 0)], slice
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(6), 'value6', 0)], slice
def test_long_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i < stop:
yield i
i += step
L = []
for i in long_xrange(0, 104294967296, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardLong1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardLong1'))]
assert slice == L, slice
def test_integer_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i >= stop:
yield i
i -= step
L = []
for i in long_xrange(104294967296, 0, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardInteger1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardInteger1'))]
L.sort()
assert slice == L, slice
def test_time_uuid(self):
import uuid
L = []
_set_keyspace('Keyspace2')
# 100 isn't enough to fail reliably if the comparator is borked
for i in xrange(500):
L.append(uuid.uuid1())
client.insert('key1', ColumnParent('Super4', 'sc1'), Column(L[-1].bytes, 'value%s' % i, i), ConsistencyLevel.ONE)
slice = _big_slice('key1', ColumnParent('Super4', 'sc1'))
assert len(slice) == 500, len(slice)
for i in xrange(500):
u = slice[i].column
assert u.value == 'value%s' % i
assert u.name == L[i].bytes
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[0].bytes, 'value0', 0),
Column(L[1].bytes, 'value1', 1),
Column(L[2].bytes, 'value2', 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2),
Column(L[1].bytes, 'value1', 1),
Column(L[0].bytes, 'value0', 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2)], slice
def test_long_remove(self):
column_parent = ColumnParent('StandardLong1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardLong1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_integer_remove(self):
column_parent = ColumnParent('StandardInteger1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardInteger1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_batch_insert(self):
_set_keyspace('Keyspace1')
_insert_batch(False)
time.sleep(0.1)
_verify_batch()
def test_batch_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_batch(True)
_verify_batch()
def test_batch_mutate_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(27,32)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_standard_columns_blocking(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(38,46)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_remove_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_standard_row(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20))]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_super_columns_with_standard_under(self):
_set_keyspace('Keyspace1')
column_families = ['Super1', 'Super2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc.columns:
names.append(c.name)
mutations.append(Mutation(deletion=Deletion(20, super_column=c.name, predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
for sc in _SUPER_COLUMNS:
mutations.append(Mutation(deletion=Deletion(20,
super_column=sc.name)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_super_columns_entire_row(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
mutations.append(Mutation(deletion=Deletion(20)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_insertions_and_deletions(self):
_set_keyspace('Keyspace1')
first_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
second_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
first_deletion = {'super_column': "sc1",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
second_deletion = {'super_column': "sc2",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
keys = ['key_30', 'key_31']
for key in keys:
sc = SuperColumn('sc1',[Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
sc2 = SuperColumn('sc2', [Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
cfmap3 = {
'Super1' : [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
Mutation(deletion=Deletion(3, **first_deletion))],
'Super2' : [Mutation(deletion=Deletion(2, **second_deletion)),
Mutation(ColumnOrSuperColumn(super_column=second_insert))]
}
keyed_mutations = dict((key, cfmap3) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for key in keys:
for c in [_i64(22), _i64(23)]:
_assert_no_columnpath(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_no_columnpath(key, ColumnPath('Super2', super_column='sc2', column=c))
for c in [_i64(20), _i64(21)]:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_columnpath_exists(key, ColumnPath('Super2', super_column='sc1', column=c))
def test_bad_system_calls(self):
def duplicate_index_names():
_set_keyspace('Keyspace1')
cd1 = ColumnDef('foo', 'BytesType', IndexType.KEYS, 'i')
cd2 = ColumnDef('bar', 'BytesType', IndexType.KEYS, 'i')
cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
client.system_add_column_family(cf)
_expect_exception(duplicate_index_names, InvalidRequestException)
def test_bad_batch_calls(self):
# mutate_does_not_accept_cosc_and_deletion_in_same_mutation
def too_full():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
dele = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_34': {'Standard1': [Mutation(col, dele)]}},
ConsistencyLevel.ONE)
_expect_exception(too_full, InvalidRequestException)
# test_batch_mutate_does_not_yet_accept_slice_ranges
def send_range():
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange(start='0', finish="", count=10))
d = Deletion(2, predicate=sp)
client.batch_mutate({'key_35': {'Standard1':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(send_range, InvalidRequestException)
# test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
def bad_cf():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
client.batch_mutate({'key_36': {'Undefined': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# test_batch_mutate_does_not_accept_deletion_on_undefined_cf
def bad_cf():
_set_keyspace('Keyspace1')
d = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_37': {'Undefined':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# a column value that does not match the declared validator
def send_string_instead_of_long():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column('birthdate', 'bar', 0))
client.batch_mutate({'key_38': {'Indexed1': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(send_string_instead_of_long, InvalidRequestException)
def test_column_name_lengths(self):
_set_keyspace('Keyspace1')
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
client.insert('key1', ColumnParent('Standard1'), Column('x'*1, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*127, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*128, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*129, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*255, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*256, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*257, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16 - 1), 'value', 0), ConsistencyLevel.ONE)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16), 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
def test_bad_calls(self):
_set_keyspace('Keyspace1')
# missing arguments
_expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1', 'x'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# no supercolumn in a super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# column but no supercolumn in remove
_expect_exception(lambda: client.remove('key1', ColumnPath('Super1', column='x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# super column in non-super CF
_expect_exception(lambda: client.remove('key1', ColumnPath('Standard1', 'y', 'x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# key too long
_expect_exception(lambda: client.get('x' * 2**16, ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
# empty key
_expect_exception(lambda: client.get('', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
_expect_exception(lambda: client.batch_mutate({'': cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
# empty column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', column=''), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', 'x', 'y'), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify supercolumn name
_expect_exception(lambda: client.get('key1', ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
# invalid CF
_expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=['', '']), '', '', 5, ConsistencyLevel.ONE), InvalidRequestException)
# 'x' is not a valid Long
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1', 'sc1'), Column('x', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# start is not a valid Long
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start is not a valid Long, supercolumn version
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, supercolumn version
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, key version
_expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['']), 'z', 'a', 1, ConsistencyLevel.ONE), InvalidRequestException)
# ttl must be positive
column = Column('cttl1', 'value1', 0, 0)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
InvalidRequestException)
# don't allow super_column in Deletion for standard ColumnFamily
deletion = Deletion(1, 'supercolumn', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Standard1' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
InvalidRequestException)
# 'x' is not a valid long
deletion = Deletion(1, 'x', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Super5' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
# counters don't support ANY
_expect_exception(lambda: client.add('key1', ColumnParent('Counter1', 'x'), CounterColumn('y', 1), ConsistencyLevel.ANY), InvalidRequestException)
def test_batch_insert_super(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_batch_insert_super_blocking(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE))
assert client.get('key1', ColumnPath('Standard1', column='c2'), ConsistencyLevel.ONE).column \
== Column('c2', 'value2', 0)
assert _big_slice('key1', ColumnParent('Standard1')) \
== [ColumnOrSuperColumn(column=Column('c2', 'value2', 0))]
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 2), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c1', 'value1', 2), Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
def test_cf_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Remove the key1:Standard1 cf; verify super is unaffected
client.remove('key1', ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
_verify_super()
# Test resurrection. First, re-insert a value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 4), ConsistencyLevel.ONE)
result = _big_slice('key1', ColumnParent('Standard1'))
assert result == [ColumnOrSuperColumn(column=Column('c1', 'value1', 4))], result
# check removing the entire super cf, too.
client.remove('key1', ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
def test_super_cf_remove_and_range_slice(self):
_set_keyspace('Keyspace1')
client.insert('key3', ColumnParent('Super1', 'sc1'), Column(_i64(1), 'v1', 0), ConsistencyLevel.ONE)
client.remove('key3', ColumnPath('Super1', 'sc1'), 5, ConsistencyLevel.ONE)
rows = {}
for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange('', '', False, 1000)), '', '', 1000, ConsistencyLevel.ONE):
scs = [cosc.super_column for cosc in row.columns]
rows[row.key] = scs
assert rows == {'key3': []}, rows
def test_super_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2', _i64(5)), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(6), 'value6', 0)])]
_verify_simple()
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
super_columns_expected = [SuperColumn(name='sc1',
columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2',
columns=[Column(_i64(6), 'value6', 0), Column(_i64(7), 'value7', 0)])]
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6),
Column(_i64(6), 'value6', 0),
Column(_i64(7), 'value7', 0)])]
assert super_columns == super_columns_expected, super_columns
# shouldn't be able to specify a column w/o a super column for remove
cp = ColumnPath(column_family='Super1', column='sc2')
e = _expect_exception(lambda: client.remove('key1', cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("column cannot be specified without") >= 0
def test_super_cf_remove_supercolumn(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2'), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = _big_slice('key1', ColumnParent('Super1', 'sc2'))
assert super_columns == [], super_columns
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)])]
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
_verify_simple()
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 1), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6)])]
assert super_columns == super_columns_expected, super_columns
# check slicing at the subcolumn level too
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
columns = [result.column
for result in client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)]
assert columns == [Column(_i64(5), 'value5', 6)], columns
def test_super_cf_resurrect_subcolumn(self):
_set_keyspace('Keyspace1')
key = 'vijay'
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.remove(key, ColumnPath('Super1', 'sc1'), 1, ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 2), ConsistencyLevel.ONE)
result = client.get(key, ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE)
assert result.super_column.columns is not None, result.super_column
def test_empty_range(self):
_set_keyspace('Keyspace1')
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
_insert_simple()
assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
def test_range_with_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='c2'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c2']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_with_remove_cf(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_collation(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '', 1000, ConsistencyLevel.ONE)
# note the collated ordering rather than ascii
L = ['0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27','28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', '-a', 'b', '-b']
assert len(slices) == len(L)
for key, ks in zip(L, slices):
assert key == ks.key
def test_range_partial(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
def check_slices_against_keys(keyList, sliceList):
assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
for key, ks in zip(keyList, sliceList):
assert key == ks.key
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), 'a', '', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['a', '-a', 'b', '-b'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '15', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['0', '1', '10', '11', '12', '13', '14', '15'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '50', '51', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['50', '51'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '1', '', 10, ConsistencyLevel.ONE)
check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
def test_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_range()
_verify_range()
def test_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_super_range()
_verify_super_range()
def test_get_range_slices_tokens(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
predicate = SlicePredicate(column_names=['col1', 'col3'])
range = KeyRange(start_token='55', end_token='55', count=100)
result = client.get_range_slices(cp, predicate, range, ConsistencyLevel.ONE)
assert len(result) == 5
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
def test_get_range_slice_super(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
cp = ColumnParent('Super3')
result = get_range_slice(client, cp, SlicePredicate(column_names=['sc1']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert list(set(row.columns[0].super_column.name for row in result))[0] == 'sc1'
def test_get_range_slice(self):
_set_keyspace('Keyspace1')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
# test empty slice
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key6', '', 1, ConsistencyLevel.ONE)
assert len(result) == 0
# test empty columns
result = get_range_slice(client, cp, SlicePredicate(column_names=['a']), 'key2', '', 1, ConsistencyLevel.ONE)
assert len(result) == 1
assert len(result[0].columns) == 0
# test column_names predicate
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3, result
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
# row limiting via count.
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# test column slice predicate
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].key == 'key1'
assert result[1].key == 'key2'
assert len(result[0].columns) == 3
assert result[0].columns[0].column.name == 'col2'
assert result[0].columns[2].column.name == 'col4'
# col limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=2)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result[0].columns) == 2
# and reversed
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col4', finish='col2', reversed=True, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert result[0].columns[0].column.name == 'col4'
assert result[0].columns[2].column.name == 'col2'
# row limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# removed data
client.remove('key1', ColumnPath('Standard1', column='col1'), 1, ConsistencyLevel.ONE)
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange('', '')), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2, result
assert result[0].columns[0].column.name == 'col2', result[0].columns[0].column.name
assert result[1].columns[0].column.name == 'col1'
def test_wrapped_range_slices(self):
_set_keyspace('Keyspace1')
def copp_token(key):
# I cheated and generated this from Java
return {'a': '00530000000100000001',
'b': '00540000000100000001',
'c': '00550000000100000001',
'd': '00560000000100000001',
'e': '00580000000100000001'}[key]
for key in ['a', 'b', 'c', 'd', 'e']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['d', 'e', 'a', 'b', 'c',], [row.key for row in result]
def test_get_slice_by_names(self):
_set_keyspace('Keyspace1')
_insert_range()
p = SlicePredicate(column_names=['c1', 'c2'])
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
_insert_super()
p = SlicePredicate(column_names=[_i64(4)])
result = client.get_slice('key1', ColumnParent('Super1', 'sc1'), p, ConsistencyLevel.ONE)
assert len(result) == 1
assert result[0].column.name == _i64(4)
def test_multiget_slice(self):
"""Insert multiple keys and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
# Generate a list of 10 keys and insert them
num_keys = 10
keys = ['key'+str(i) for i in range(1, num_keys+1)]
_insert_multi(keys)
# Retrieve all 10 key slices
rows = _big_multislice(keys, ColumnParent('Standard1'))
keys1 = sorted(rows.keys())
keys2 = sorted(keys)
# The returned row map should cover exactly the keys that were requested.
assert keys1 == keys2, (keys1, keys2)
columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
# Validate that the returned rows have the keys requested and that each row holds the ColumnOrSuperColumns that were inserted
for key in keys:
assert key in rows
assert columns == rows[key]
def test_multi_count(self):
"""Insert multiple keys and count them using the multiget interface"""
_set_keyspace('Keyspace1')
# Generate a list of 10 keys containing 1 to 10 columns and insert them
num_keys = 10
for i in range(1, num_keys+1):
key = 'key'+str(i)
for j in range(1, i+1):
client.insert(key, ColumnParent('Standard1'), Column('c'+str(j), 'value'+str(j), 0), ConsistencyLevel.ONE)
# Count columns in all 10 keys
keys = ['key'+str(i) for i in range(1, num_keys+1)]
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
# Check the returned counts
for i in range(1, num_keys+1):
key = 'key'+str(i)
assert counts[key] == i
def test_batch_mutate_super_deletion(self):
_set_keyspace('Keyspace1')
_insert_super('test')
d = Deletion(1, predicate=SlicePredicate(column_names=['sc1']))
cfmap = {'Super1': [Mutation(deletion=d)]}
client.batch_mutate({'test': cfmap}, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE))
def test_super_reinsert(self):
_set_keyspace('Keyspace1')
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x), 'value', 1), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x + 3), 'value', 3), ConsistencyLevel.ONE)
for n in xrange(1, 4):
p = SlicePredicate(slice_range=SliceRange('', '', False, n))
slice = client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)
assert len(slice) == n, "expected %s results; found %s" % (n, slice)
def test_describe_keyspace(self):
kspaces = client.describe_keyspaces()
assert len(kspaces) == 3, kspaces # ['Keyspace2', 'Keyspace1', 'system']
sysks = client.describe_keyspace("system")
assert sysks in kspaces
ks1 = client.describe_keyspace("Keyspace1")
assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
for cf in ks1.cf_defs:
if cf.name == "Standard1":
cf0 = cf
break
assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
def test_describe(self):
server_version = client.describe_version()
assert server_version == VERSION, (server_version, VERSION)
assert client.describe_cluster_name() == 'Test Cluster'
def test_describe_ring(self):
assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
def test_describe_partitioner(self):
# Make sure this just reads back the values from the config.
assert client.describe_partitioner() == "org.apache.cassandra.dht.CollatingOrderPreservingPartitioner"
def test_describe_snitch(self):
assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
def test_invalid_ks_names(self):
def invalid_keyspace():
client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
_expect_exception(invalid_keyspace, InvalidRequestException)
def test_invalid_strategy_class(self):
def add_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def update_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def test_invalid_cf_names(self):
def invalid_cf():
_set_keyspace('Keyspace1')
newcf = CfDef('Keyspace1', 'in-valid')
client.system_add_column_family(newcf)
_expect_exception(invalid_cf, InvalidRequestException)
def invalid_cf_inside_new_ks():
cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
_set_keyspace('system')
client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
_expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
def test_system_cf_recreate(self):
"ensures that keyspaces and column familes can be dropped and recreated in short order"
for x in range(2):
keyspace = 'test_cf_recreate'
cf_name = 'recreate_cf'
# create
newcf = CfDef(keyspace, cf_name)
newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[newcf])
client.system_add_keyspace(newks)
_set_keyspace(keyspace)
# insert
client.insert('key0', ColumnParent(cf_name), Column('colA', 'colA-value', 0), ConsistencyLevel.ONE)
col1 = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)[0].column
assert col1.name == 'colA' and col1.value == 'colA-value'
# drop
client.system_drop_column_family(cf_name)
# recreate
client.system_add_column_family(newcf)
# query
cosc_list = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)
# this was failing prior to CASSANDRA-1477.
assert len(cosc_list) == 0 , 'cosc length test failed'
client.system_drop_keyspace(keyspace)
def test_system_keyspace_operations(self):
# create. note large RF, this is OK
keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{'replication_factor': '10'},
cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
client.system_add_keyspace(keyspace)
newks = client.describe_keyspace('CreateKeyspace')
assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
_set_keyspace('CreateKeyspace')
# modify valid
modified_keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
{'replication_factor': '1'},
cf_defs=[])
client.system_update_keyspace(modified_keyspace)
modks = client.describe_keyspace('CreateKeyspace')
assert modks.strategy_class == modified_keyspace.strategy_class
assert modks.strategy_options == modified_keyspace.strategy_options
# drop
client.system_drop_keyspace('CreateKeyspace')
def get_second_ks():
client.describe_keyspace('CreateKeyspace')
_expect_exception(get_second_ks, NotFoundException)
def test_create_then_drop_ks(self):
keyspace = KsDef('AddThenDrop',
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor':'1'},
cf_defs=[])
def test_existence():
client.describe_keyspace(keyspace.name)
_expect_exception(test_existence, NotFoundException)
client.set_keyspace('system')
client.system_add_keyspace(keyspace)
test_existence()
client.system_drop_keyspace(keyspace.name)
def test_column_validators(self):
# columndef validation for regular CF
ks = 'Keyspace1'
_set_keyspace(ks)
cd = ColumnDef('col', 'LongType', None, None)
cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
cp = ColumnParent('ValidatorColumnFamily')
col0 = Column('col', _i64(42), 0)
col1 = Column('col', "ceci n'est pas 64bit", 0)
client.insert('key0', cp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef validation for super CF
scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
client.system_add_column_family(scf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
scp = ColumnParent('ValidatorSuperColumnFamily','sc1')
client.insert('key0', scp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef and cfdef default validation
cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
dcp = ColumnParent('DefaultValidatorColumnFamily')
# inserting a longtype into column 'col' is valid at the columndef level
client.insert('key0', dcp, col0, ConsistencyLevel.ONE)
# inserting a UTF8type into column 'col' fails at the columndef level
e = _expect_exception(lambda: client.insert('key1', dcp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a longtype into column 'fcol' should fail at the cfdef level
col2 = Column('fcol', _i64(4224), 0)
e = _expect_exception(lambda: client.insert('key1', dcp, col2, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a UTF8type into column 'fcol' is valid at the cfdef level
col3 = Column('fcol', "Stringin' it up in the Stringtel Stringifornia", 0)
client.insert('key0', dcp, col3, ConsistencyLevel.ONE)
def test_system_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' in [x.name for x in ks1.cf_defs]
cfid = [x.id for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
assert cfid > 1000
# modify invalid
modified_cf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
modified_cf.id = cfid
def fail_invalid_field():
modified_cf.comparator_type = 'LongType'
client.system_update_column_family(modified_cf)
_expect_exception(fail_invalid_field, InvalidRequestException)
# modify valid
modified_cf.comparator_type = 'BytesType' # revert back to old value.
modified_cf.row_cache_size = 25
modified_cf.gc_grace_seconds = 1
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
assert server_cf
assert server_cf.row_cache_size == 25
assert server_cf.gc_grace_seconds == 1
# drop
client.system_drop_column_family('NewColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
# Make a LongType CF and add a validator
newcf = CfDef('Keyspace1', 'NewLongColumnFamily', comparator_type='LongType')
client.system_add_column_family(newcf)
three = _i64(3)
cd = ColumnDef(three, 'LongType', None, None)
ks1 = client.describe_keyspace('Keyspace1')
modified_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
modified_cf.column_metadata = [cd]
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
assert server_cf.column_metadata[0].name == _i64(3), server_cf.column_metadata
def test_dynamic_indexes_creation_deletion(self):
_set_keyspace('Keyspace1')
cfdef = CfDef('Keyspace1', 'BlankCF')
client.system_add_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='BlankCF'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
modified_cf = CfDef('Keyspace1', 'BlankCF', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
# Add a second indexed CF ...
birthdate_coldef = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
age_coldef = ColumnDef('age', 'BytesType', IndexType.KEYS, 'age_index')
cfdef = CfDef('Keyspace1', 'BlankCF2', column_metadata=[birthdate_coldef, age_coldef])
client.system_add_column_family(cfdef)
# ... and update it to have a third index
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
name_coldef = ColumnDef('name', 'BytesType', IndexType.KEYS, 'name_index')
cfdef.column_metadata.append(name_coldef)
client.system_update_column_family(cfdef)
# Now drop the indexes
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
age_coldef = ColumnDef('age', 'BytesType', None, None)
name_coldef = ColumnDef('name', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef, age_coldef, name_coldef]
client.system_update_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef]
client.system_update_column_family(cfdef)
client.system_drop_column_family('BlankCF')
client.system_drop_column_family('BlankCF2')
def test_dynamic_indexes_with_system_update_cf(self):
_set_keyspace('Keyspace1')
cd = ColumnDef('birthdate', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'ToBeIndexed', default_validation_class='LongType', column_metadata=[cd])
client.system_add_column_family(newcf)
client.insert('key1', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# First without index
cp = ColumnParent('ToBeIndexed')
sp = SlicePredicate(slice_range=SliceRange('', ''))
clause = FilterClause([IndexExpression('birthdate', IndexOperator.EQ, _i64(1))])
result = get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# add an index on 'birthdate'
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, 'bd_index')
modified_cf = CfDef('Keyspace1', 'ToBeIndexed', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
assert server_cf
assert server_cf.column_metadata[0].index_type == modified_cd.index_type
assert server_cf.column_metadata[0].index_name == modified_cd.index_name
# sleep a bit to give time for the index to build.
time.sleep(0.5)
# repeat query on one index expression
result = get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
def test_system_super_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewSuperColumnFamily', 'Super', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' in [x.name for x in ks1.cf_defs]
# drop
client.system_drop_column_family('NewSuperColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
def test_insert_ttl(self):
""" Test simple insertion of a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl1', 'value1', 0, 5)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
assert client.get('key1', ColumnPath('Standard1', column='cttl1'), ConsistencyLevel.ONE).column == column
def test_simple_expiration(self):
""" Test that column ttled do expires """
_set_keyspace('Keyspace1')
column = Column('cttl3', 'value1', 0, 2)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column == column
time.sleep(2)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE))
def test_simple_expiration_batch_mutate(self):
""" Test that column ttled do expires using batch_mutate """
_set_keyspace('Keyspace1')
column = Column('cttl4', 'value1', 0, 2)
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(column))]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column
time.sleep(2)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE))
def test_update_expiring(self):
""" Test that updating a column with ttl override the ttl """
_set_keyspace('Keyspace1')
column1 = Column('cttl4', 'value1', 0, 1)
client.insert('key1', ColumnParent('Standard1'), column1, ConsistencyLevel.ONE)
column2 = Column('cttl4', 'value1', 1)
client.insert('key1', ColumnParent('Standard1'), column2, ConsistencyLevel.ONE)
time.sleep(1.5)
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column2
def test_remove_expiring(self):
""" Test removing a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl5', 'value1', 0, 10)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='cttl5'), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl5'), ConsistencyLevel.ONE))
def test_describe_ring_on_invalid_keyspace(self):
def req():
client.describe_ring('system')
_expect_exception(req, InvalidRequestException)
def test_incr_decr_standard_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
d3 = 35
# insert positive and negative values and check the counts
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_decr_super_add(self):
_set_keyspace('Keyspace1')
d1 = -234
d2 = 52345
d3 = 3123
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c2', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
assert rv1.counter_super_column.columns[0].value == d1
assert rv1.counter_super_column.columns[1].value == d2
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath('SuperCounter1', 'sc1', 'c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_batch_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
# insert positive and negative values and check the counts
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
def test_incr_decr_standard_batch_remove(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
# insert positive and negative values and check the counts
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
# remove the previous column and check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion(predicate=SlicePredicate(column_names=['c1']))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1+d2
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion()),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_standard_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
# insert positive and negative values and check the counts
counters = client.get_slice('key1', ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters[0].counter_column.value == d1+d2
assert counters[1].counter_column.value == d1
def test_incr_decr_standard_multiget_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
# insert positive and negative values and check the counts
counters = client.multiget_slice(['key1', 'key2'], ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters['key1'][0].counter_column.value == d1+d2
assert counters['key1'][1].counter_column.value == d1
assert counters['key2'][0].counter_column.value == d1+d2
assert counters['key2'][1].counter_column.value == d1
def test_counter_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_counter_range()
_verify_counter_range()
def test_counter_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_counter_super_range()
_verify_counter_super_range()
def test_index_scan(self):
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# simple query on one index expression
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
clause = FilterClause([IndexExpression('birthdate', IndexOperator.EQ, _i64(1))])
result = get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# without index
clause = FilterClause([IndexExpression('b', IndexOperator.EQ, _i64(1))])
result = get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause)
assert len(result) == 0, result
# but unindexed expression added to indexed one is ok
clause = FilterClause([IndexExpression('b', IndexOperator.EQ, _i64(3)),
IndexExpression('birthdate', IndexOperator.EQ, _i64(3))])
result = get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause)
assert len(result) == 1, result
assert result[0].key == 'key3'
assert len(result[0].columns) == 2, result[0].columns
def test_index_scan_uuid_names(self):
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
cp = ColumnParent('Indexed3') # timeuuid name, utf8 values
u = uuid.UUID('00000000-0000-1000-0000-000000000000').bytes
u2 = uuid.UUID('00000000-0000-1000-0000-000000000001').bytes
client.insert('key1', ColumnParent('Indexed3'), Column(u, 'a', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Indexed3'), Column(u2, 'b', 0), ConsistencyLevel.ONE)
# name comparator + data validator of incompatible types -- see CASSANDRA-2347
clause = FilterClause([IndexExpression(u, IndexOperator.EQ, 'a'),
IndexExpression(u2, IndexOperator.EQ, 'b')])
result = get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause)
assert len(result) == 1, result
cp = ColumnParent('Indexed2') # timeuuid name, long values
# name must be valid (TimeUUID)
clause = FilterClause([IndexExpression('foo', IndexOperator.EQ, uuid.UUID('00000000-0000-1000-0000-000000000000').bytes)])
_expect_exception(lambda: get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause), InvalidRequestException)
# value must be valid (TimeUUID)
clause = FilterClause([IndexExpression(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, IndexOperator.EQ, "foo")])
_expect_exception(lambda: get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause), InvalidRequestException)
def test_index_scan_expiring(self):
""" Test that column ttled expires from KEYS index"""
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0, 1), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
clause = FilterClause([IndexExpression('birthdate', IndexOperator.EQ, _i64(1))])
# query before expiration
result = get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause)
assert len(result) == 1, result
# wait for expiration and requery
time.sleep(2)
result = get_range_slice(client, cp, sp, '', '', 100, ConsistencyLevel.ONE, clause)
assert len(result) == 0, result
def test_column_not_found_quorum(self):
_set_keyspace('Keyspace1')
key = 'doesntexist'
column_path = ColumnPath(column_family="Standard1", column="idontexist")
try:
client.get(key, column_path, ConsistencyLevel.QUORUM)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def test_get_range_slice_after_deletion(self):
_set_keyspace('Keyspace2')
key = 'key1'
# three supercolumns, each with a "col1" subcolumn
for i in range(1,4):
client.insert(key, ColumnParent('Super3', 'sc%d' % i), Column('col1', 'val1', 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3')
predicate = SlicePredicate(slice_range=SliceRange('sc1', 'sc3', False, count=1))
k_range = KeyRange(start_key=key, end_key=key, count=1)
# validate count=1 restricts to 1 supercolumn
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
# remove sc1; add back subcolumn to override tombstone
client.remove(key, ColumnPath('Super3', 'sc1'), 1, ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
client.insert(key, ColumnParent('Super3', 'sc1'), Column('col1', 'val1', 2), ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].super_column.name == 'sc1'
class TestTruncate(ThriftTester):
def test_truncate(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# truncate Standard1
client.truncate('Standard1')
assert _big_slice('key1', ColumnParent('Standard1')) == []
# truncate Super1
client.truncate('Super1')
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
|
apache-2.0
|
brainstorm/python_koans
|
python3/koans/about_iteration.py
|
8
|
4399
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutIteration(Koan):
def test_iterators_are_a_type(self):
it = iter(range(1,6))
fib = 0
for num in it:
fib += num
self.assertEqual(__ , fib)
def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual(__, next(stages))
next(stages)
self.assertEqual(__, next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegexpMatches(err_msg, __)
# ------------------------------------------------------------------
def add_ten(self, item):
return item + 10
def test_map_transforms_elements_of_a_list(self):
seq = [1, 2, 3]
mapped_seq = list()
mapping = map(self.add_ten, seq)
self.assertNotEqual(list, type(mapping))
self.assertEqual(__, type(mapping).__name__)
# In Python 3 the built-in iterator functions return iterable view objects
# instead of lists
for item in mapping:
mapped_seq.append(item)
self.assertEqual(__, mapped_seq)
# Note: iterator methods actually return objects of iterator type in
# Python 3. In Python 2, map() would give you a list.
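# Illustration (not part of the koan): map() in Python 3 is lazy --
#   m = map(str.upper, ['a', 'b'])   # nothing is computed yet
#   list(m)                          # -> ['A', 'B']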
def test_filter_selects_certain_items_from_a_list(self):
def is_even(item):
return (item % 2) == 0
seq = [1, 2, 3, 4, 5, 6]
even_numbers = list()
for item in filter(is_even, seq):
even_numbers.append(item)
self.assertEqual(__, even_numbers)
def test_just_return_first_item_found(self):
def is_big_name(item):
return len(item) > 4
names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
name = None
iterator = filter(is_big_name, names)
try:
name = next(iterator)
except StopIteration:
msg = 'Ran out of big names'
self.assertEqual(__, name)
# ------------------------------------------------------------------
def add(self,accum,item):
return accum + item
def multiply(self,accum,item):
return accum * item
def test_reduce_will_blow_your_mind(self):
import functools
# As of Python 3 reduce() has been demoted from a builtin function
# to the functools module.
result = functools.reduce(self.add, [2, 3, 4])
self.assertEqual(__, type(result).__name__)
# Reduce() syntax is same as Python 2
self.assertEqual(__, result)
result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
self.assertEqual(__, result2)
# Extra Credit:
# Describe in your own words what reduce does.
# ------------------------------------------------------------------
def test_creating_lists_with_list_comprehensions(self):
feast = ['lambs', 'sloths', 'orangutans', 'breakfast cereals', 'fruit bats']
comprehension = [delicacy.capitalize() for delicacy in feast]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, comprehension[2])
def test_use_pass_for_iterations_with_no_body(self):
for num in range(1,5):
pass
self.assertEqual(__, num)
# ------------------------------------------------------------------
def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
# Ranges are an iterable sequence
result = map(self.add_ten, range(1,4))
self.assertEqual(__, list(result))
try:
file = open("example_file.txt")
try:
def make_upcase(line):
return line.strip().upper()
upcase_lines = map(make_upcase, file.readlines())
self.assertEqual(__, list(upcase_lines))
finally:
# Arg, this is ugly.
# We will figure out how to fix this later.
file.close()
except IOError:
# should never happen
self.fail()
|
mit
|
CriticalD20/Final-Project
|
ggame/headlessdeps.py
|
228
|
9672
|
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
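# For example (illustrative): module_exists('PIL') is True only when Pillow is
# importable, which is what gates the Image import just below.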
if module_exists('PIL'):
from PIL import Image
class _body(object):
def __init__(self):
self.events = {}
def appendChild(self, obj):
self.child = obj
def bind(self, evt, action):
self.events[evt] = action
print("Binding {} to {}".format(evt, action))
class _document(object):
def __init__(self):
self.body = _body()
class _window(object):
def __init__(self):
self.document = _document()
self.animatex = 0
def open(self, s1, s2):
return self
def requestAnimationFrame(self, target):
if self.animatex < 10:
self.animatex += 1
target('dummy')
print("Animation frame")
class _Container(object):
def __init__(self):
self.things = []
def destroy(self):
del self.things
def addChild(self, obj):
self.things.append(obj)
def removeChild(self, obj):
self.things.remove(obj)
class _Renderer(object):
def __init__(self, x, y, argsdict):
self.x = x
self.y = y
self.argsdict = argsdict
self.view = 'view'
print("Rendering created with {}x{} area".format(x, y))
def render(self, stage):
pass
class _GFX(object):
def __init__(self):
self.Container = _Container
self.autoDetectRenderer = _Renderer
window = _window()
GFX = _GFX()
#document = object()
def JSConstructor(cls):
return cls
def JSObject(obj):
return obj
class _GFX_Rectangle(object):
def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.width = w
self.height = h
GFX_Rectangle = _GFX_Rectangle
class _Texture(object):
def __init__(self, img='', crossdomain=False):
self.name = img
self.crossdomain = crossdomain
if img == '':
self.img = None
self.basewidth = 0
self.baseheight = 0
self.width = 0
self.height = 0
else:
self.img = Image.open(img)
self.basewidth = self.img.width
self.baseheight = self.img.height
self.width = self.basewidth
self.height = self.baseheight
print("Texture from image {}, {}x{} pixels".format(img, self.basewidth, self.baseheight))
self.baserect = _GFX_Rectangle(0, 0, self.basewidth, self.baseheight)
self.framerect = self.baserect
@classmethod
def fromTexture(cls, texture, frame):
inst = cls()
inst.img = texture.img
inst.name = texture.name
inst.basewidth = texture.basewidth
inst.baseheight = texture.baseheight
inst.baserect = texture.baserect
inst.framerect = frame
inst.width = frame.width
inst.height = frame.height
print("Texture from base texture {}, {}x{} subframe {}x{}".format(inst.name, inst.basewidth, inst.baseheight, inst.framerect.width, inst.framerect.height))
return inst
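# Hypothetical usage sketch (the file name is made up): carve a 32x32 sub-frame
# out of an existing texture via the fromTexture constructor.
#   base  = _Texture('spritesheet.png')
#   frame = _GFX_Rectangle(0, 0, 32, 32)
#   sub   = _Texture.fromTexture(base, frame)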
def destroy(self):
try:
self.img.close()
print("Destroying an image")
except:
print("Destroying a non-image")
GFX_Texture = _Texture.fromTexture
GFX_Texture_fromImage = _Texture
class vector(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise KeyError
class GFX_Sprite(object):
def __init__(self, texture):
self.texture = texture
self.visible = True
self.pos = vector(0,0)
self.anch = vector(0,0)
self.scal = vector(1.0, 1.0)
self.width = texture.width
self.height = texture.height
self.rotation = 0.0
@property
def position(self):
return self.pos
@position.setter
def position(self, value):
self.pos.x = value[0]
self.pos.y = value[1]
@property
def anchor(self):
return self.anch
@anchor.setter
def anchor(self, value):
self.anch.x = value[0]
self.anch.y = value[1]
@property
def scale(self):
return self.scal
@scale.setter
def scale(self, value):
self.scal.x = value[0]
self.scal.y = value[1]
def destroy(self):
pass
class _GFX_Graphics(object):
def __init__(self):
self.clear()
def clear(self):
self.cleared = True
self.visible = True
self.lwidth = None
self.color = None
self.alpha = None
self.fillcolor = None
self.fillalpha = None
self.x = None
self.y = None
self.rwidth = None
self.rheight = None
self.radius = None
self.ehw = None
self.ehh = None
self.xto = None
self.yto = None
self.jpath = None
self.width = None
self.height = None
self.position = vector(0,0)
def destroy(self):
self.clear()
def clone(self):
clone = type(self)()
clone.cleared = self.cleared
clone.visible = self.visible
clone.lwidth = self.lwidth
clone.color = self.color
clone.alpha = self.alpha
clone.fillalpha = self.fillalpha
clone.fillcolor = self.fillcolor
clone.x = self.x
clone.y = self.y
clone.rwidth = self.rwidth
clone.rheight = self.rheight
clone.radius = self.radius
clone.ehw = self.ehw
clone.ehh = self.ehh
clone.xto = self.xto
clone.yto = self.yto
clone.jpath = self.jpath
clone.width = self.width
clone.height = self.height
clone.position = self.position
return clone
def lineStyle(self, width, color, alpha):
self.lwidth = width
self.color = color
self.alpha = alpha
def beginFill(self, color, alpha):
self.fillcolor = color
self.fillalpha = alpha
def drawRect(self, x, y, w, h):
self.x = x
self.y = y
self.position = vector(x,y)
self.rwidth = w
self.rheight = h
self.width = w
self.height = h
self.cleared = False
print("Rectangle {}x{} at {},{}".format(w,h,x,y))
return self
def drawCircle(self, x, y, radius):
self.x = x
self.y = y
self.position = vector(x,y)
self.radius = radius
self.cleared = False
self.width = radius*2
self.height = radius*2
print("Circle, radius {} at {},{}".format(radius,x,y))
return self
def drawEllipse(self, x, y, hw, hh):
self.x = x
self.y = y
self.position = vector(x,y)
self.ehw = hw
self.ehh = hh
self.width = hw*2
self.height = hh*2
self.cleared = False
print("Ellipse, {}x{} at {},{}".format(hw,hh,x,y))
return self
def drawPolygon(self, jpath):
self.jpath = jpath
self.cleared = False
self.position = vector(jpath[0],jpath[1])
x = []
y = []
for i in range(0,len(jpath)-1,2):
x.append(jpath[i])
y.append(jpath[i+1])
self.width = max(x)-min(x)
self.height = max(y)-min(y)
print("Polygon")
return self
def moveTo(self, x, y):
self.x = x
self.y = y
self.position = vector(x,y)
return self
def lineTo(self, x, y):
self.xto = x
self.yto = y
self.width = abs(x)
self.height = abs(y)
self.cleared = False
print("Line from {},{} to {},{}".format(self.x, self.y, x, y))
return self
class _GFX_Text(object):
def __init__(self, text, styledict):
self.text = text
self.styledict = styledict
self.alpha = None
self.visible = None
self.width = 99
self.height = 99
self.position = vector(0,0)
print("Text: {} in {}".format(text, styledict['font']))
def clone(self):
clone = type(self)(self.text, self.styledict)
return clone
def destroy(self):
self.text = ''
GFX_Text = _GFX_Text
_globalGraphics = _GFX_Graphics()
GFX_Graphics = _globalGraphics
def GFX_DetectRenderer():
pass
class _SND_all(object):
def __init__(self):
pass
def stop(self):
print("Stopping all sounds")
class _SND(object):
def __init__(self):
self.all = _SND_all
SND = _SND()
class _SND_Sound(object):
def __init__(self, url):
self.url = url
print("Creating sound object {}".format(url))
def load(self):
pass
def play(self):
print("Playing sound object {}".format(self.url))
SND_Sound = _SND_Sound
class GFX_Window(object):
def __init__(self, width, height, onclose):
self._w = window.open("", "")
self.width = width if width != 0 else 100
self.height = height if height != 0 else 100
self._stage = JSConstructor(GFX.Container)()
self._renderer = GFX.autoDetectRenderer(width, height, {'transparent':True})
self._w.document.body.appendChild(self._renderer.view)
self._w.onunload = onclose
def bind(self, evtspec, callback):
self._w.document.body.bind(evtspec, callback)
def add(self, obj):
self._stage.addChild(obj)
def remove(self, obj):
self._stage.removeChild(obj)
def animate(self, stepcallback):
self._renderer.render(self._stage)
self._w.requestAnimationFrame(stepcallback)
def destroy(self):
SND.all().stop()
self._stage.destroy()
|
mit
|
laurentgo/pants
|
src/python/pants/backend/codegen/register.py
|
9
|
2481
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.codegen.targets.java_antlr_library import JavaAntlrLibrary
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_ragel_library import JavaRagelLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.java_wire_library import JavaWireLibrary
from pants.backend.codegen.targets.jaxb_library import JaxbLibrary
from pants.backend.codegen.targets.python_antlr_library import PythonAntlrLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.codegen.tasks.antlr_gen import AntlrGen
from pants.backend.codegen.tasks.apache_thrift_gen import ApacheThriftGen
from pants.backend.codegen.tasks.jaxb_gen import JaxbGen
from pants.backend.codegen.tasks.protobuf_gen import ProtobufGen
from pants.backend.codegen.tasks.ragel_gen import RagelGen
from pants.backend.codegen.tasks.wire_gen import WireGen
from pants.base.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
return BuildFileAliases.create(
targets={
'java_antlr_library': JavaAntlrLibrary,
'java_protobuf_library': JavaProtobufLibrary,
'java_ragel_library': JavaRagelLibrary,
'java_thrift_library': JavaThriftLibrary,
'java_wire_library': JavaWireLibrary,
'python_antlr_library': PythonAntlrLibrary,
'python_thrift_library': PythonThriftLibrary,
'jaxb_library': JaxbLibrary,
}
)
def register_goals():
task(name='thrift', action=ApacheThriftGen).install('gen').with_description('Generate code.')
# TODO(Garrett Malmquist): 'protoc' depends on a nonlocal goal (imports is in the jvm register).
# This should be cleaned up, with protobuf stuff moved to its own backend. (See John's comment on
# RB 592).
task(name='protoc', action=ProtobufGen).install('gen')
task(name='antlr', action=AntlrGen).install('gen')
task(name='ragel', action=RagelGen).install('gen')
task(name='jaxb', action=JaxbGen).install('gen')
task(name='wire', action=WireGen).install('gen')
|
apache-2.0
|
hajgato/easybuild-easyblocks
|
easybuild/easyblocks/s/suitesparse.py
|
4
|
7013
|
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for SuiteSparse, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import fileinput
import re
import os
import shutil
import sys
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.filetools import mkdir
from easybuild.tools.modules import get_software_root
class EB_SuiteSparse(ConfigureMake):
"""Support for building SuiteSparse."""
def __init__(self, *args, **kwargs):
"""Custom constructor for SuiteSparse easyblock, initialize custom class parameters."""
super(EB_SuiteSparse, self).__init__(*args, **kwargs)
self.config_name = None
def configure_step(self):
"""Configure build by patching UFconfig.mk or SuiteSparse_config.mk."""
if LooseVersion(self.version) < LooseVersion('4.0'):
self.config_name = 'UFconfig'
else:
self.config_name = 'SuiteSparse_config'
fp = os.path.join(self.cfg['start_dir'], self.config_name, '%s.mk' % self.config_name)
cfgvars = {
'CC': os.getenv('MPICC'),
'CFLAGS': os.getenv('CFLAGS'),
'CXX': os.getenv('MPICXX'),
'F77': os.getenv('MPIF77'),
'F77FLAGS': os.getenv('F77FLAGS'),
'BLAS': os.getenv('LIBBLAS_MT'),
'LAPACK': os.getenv('LIBLAPACK_MT'),
}
metis = get_software_root('METIS')
parmetis = get_software_root('ParMETIS')
if parmetis:
metis_path = parmetis
metis_libs = ' '.join([
os.path.join(parmetis, 'lib', 'libparmetis.a'),
os.path.join(parmetis, 'lib', 'metis.a'),
])
elif metis:
metis_path = metis
metis_libs = os.path.join(metis, 'lib', 'metis.a')
else:
self.log.error("Neither METIS or ParMETIS module loaded.")
cfgvars.update({
'METIS_PATH': metis_path,
'METIS': metis_libs,
})
# patch file
try:
for line in fileinput.input(fp, inplace=1, backup='.orig'):
for (k, v) in cfgvars.items():
line = re.sub(r"^(%s\s*=\s*).*$" % k, r"\1 %s # patched by EasyBuild" % v, line)
if k in line:
cfgvars.pop(k)
sys.stdout.write(line)
except IOError, err:
self.log.error("Failed to patch %s in: %s" % (fp, err))
# add remaining entries at the end
if cfgvars:
try:
f = open(fp, "a")
f.write("# lines below added automatically by EasyBuild")
for (k, v) in cfgvars.items():
f.write("%s = %s\n" % (k,v))
f.close()
except IOError, err:
self.log.error("Failed to complete %s: %s" % (fp, err))
def install_step(self):
"""Install by copying the contents of the builddir to the installdir (preserving permissions)"""
for x in os.listdir(self.cfg['start_dir']):
src = os.path.join(self.cfg['start_dir'], x)
dst = os.path.join(self.installdir, x)
try:
if os.path.isdir(src):
shutil.copytree(src, dst)
# symlink
# - dst/Lib to dst/lib
# - dst/Include to dst/include
for c in ['Lib', 'Include']:
nsrc = os.path.join(dst, c)
ndst = os.path.join(dst, c.lower())
if os.path.exists(nsrc):
os.symlink(nsrc, ndst)
else:
shutil.copy2(src, dst)
except:
self.log.exception("Copying src %s to dst %s failed" % (src, dst))
# some extra symlinks are necessary for UMFPACK to work.
paths = [
os.path.join('AMD', 'include', 'amd.h'),
os.path.join('AMD', 'include', 'amd_internal.h'),
os.path.join(self.config_name, '%s.h' % self.config_name),
os.path.join('AMD', 'lib', 'libamd.a')
]
for path in paths:
src = os.path.join(self.installdir, path)
dn = path.split(os.path.sep)[-2]
fn = path.split(os.path.sep)[-1]
dstdir = os.path.join(self.installdir, 'UMFPACK', dn)
mkdir(dstdir)
if os.path.exists(src):
try:
os.symlink(src, os.path.join(dstdir, fn))
except Exception, err:
self.log.error("Failed to make symbolic link from %s to %s: %s" % (src, dst, err))
def make_module_req_guess(self):
"""Add config dir to CPATH so include file is found."""
guesses = super(EB_SuiteSparse, self).make_module_req_guess()
guesses.update({'CPATH': [self.config_name]})
return guesses
def sanity_check_step(self):
"""Custom sanity check for SuiteSparse."""
if LooseVersion(self.version) < LooseVersion('4.0'):
csparse_dir = 'CSparse3'
else:
csparse_dir = 'CSparse'
custom_paths = {
'files': [os.path.join(x, 'lib', 'lib%s.a' % x.lower()) for x in ["AMD", "BTF", "CAMD", "CCOLAMD", "CHOLMOD",
"COLAMD", "CXSparse", "KLU", "LDL", "RBio",
"SPQR", "UMFPACK"]] +
[os.path.join(csparse_dir, 'lib', 'libcsparse.a')],
'dirs': ["MATLAB_Tools"],
}
super(EB_SuiteSparse, self).sanity_check_step(custom_paths=custom_paths)
|
gpl-2.0
|
AmurG/tardis
|
tardis/simulation.py
|
11
|
2036
|
import logging
import time
from pandas import HDFStore
import os
# Adding logging support
logger = logging.getLogger(__name__)
def run_radial1d(radial1d_model, history_fname=None):
if history_fname:
if os.path.exists(history_fname):
logger.warn('History file %s exists - it will be overwritten', history_fname)
os.remove(history_fname)
history_buffer = HDFStore(history_fname)
radial1d_model.atom_data.lines.to_hdf(history_buffer, 'atom_data/lines')
radial1d_model.atom_data.levels.to_hdf(history_buffer, 'atom_data/levels')
start_time = time.time()
initialize_j_blues = True
initialize_nlte = True
update_radiation_field = False
while radial1d_model.iterations_remaining > 1:
logger.info('Remaining run %d', radial1d_model.iterations_remaining)
radial1d_model.simulate(update_radiation_field=update_radiation_field, enable_virtual=False, initialize_nlte=initialize_nlte,
initialize_j_blues=initialize_j_blues)
initialize_j_blues=False
initialize_nlte=False
update_radiation_field = True
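# after the first pass, later iterations reuse the radiation field estimates
# instead of re-initializing j_blues and the NLTE populations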
if history_fname:
radial1d_model.to_hdf5(history_buffer, path='model%03d' % radial1d_model.iterations_executed, close_h5=False)
# Finished all iterations but the last one; do the final run with virtual packets enabled
logger.info('Doing last run')
if radial1d_model.tardis_config.montecarlo.last_no_of_packets is not None:
radial1d_model.current_no_of_packets = radial1d_model.tardis_config.montecarlo.last_no_of_packets
radial1d_model.simulate(enable_virtual=True, update_radiation_field=update_radiation_field, initialize_nlte=initialize_nlte,
initialize_j_blues=initialize_j_blues)
if history_fname:
radial1d_model.to_hdf5(history_buffer, path='model%03d' % radial1d_model.iterations_executed)
logger.info("Finished in %d iterations and took %.2f s", radial1d_model.iterations_executed, time.time()-start_time)
|
bsd-3-clause
|
detrout/debian-statsmodels
|
statsmodels/stats/tabledist.py
|
34
|
11643
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 01 20:20:16 2011
Author: Josef Perktold
License: BSD-3
TODO:
check orientation, size and alpha should be increasing for interp1d,
but what is alpha? can be either sf or cdf probability
change it to use one consistent notation
check: instead of bound checking I could use the fill-value of the interpolators
"""
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy.interpolate import interp1d, interp2d, Rbf
from statsmodels.tools.decorators import cache_readonly
class TableDist(object):
'''Distribution, critical values and p-values from tables
currently only 1 extra parameter, e.g. sample size
Parameters
----------
alpha : array_like, 1d
        probability in the table, could be either sf (right tail) or cdf (left
tail)
size : array_like, 1d
        second parameter in the table
crit_table : array_like, 2d
array with critical values for sample size in rows and probability in
columns
Notes
-----
size and alpha should be increasing
'''
def __init__(self, alpha, size, crit_table):
self.alpha = np.asarray(alpha)
self.size = np.asarray(size)
self.crit_table = np.asarray(crit_table)
self.n_alpha = len(alpha)
self.signcrit = np.sign(np.diff(self.crit_table, 1).mean())
if self.signcrit > 0: #increasing
self.critv_bounds = self.crit_table[:,[0,1]]
else:
self.critv_bounds = self.crit_table[:,[1,0]]
@cache_readonly
def polyn(self):
polyn = [interp1d(self.size, self.crit_table[:,i])
for i in range(self.n_alpha)]
return polyn
@cache_readonly
def poly2d(self):
#check for monotonicity ?
#fix this, interp needs increasing
poly2d = interp2d(self.size, self.alpha, self.crit_table)
return poly2d
@cache_readonly
def polyrbf(self):
xs, xa = np.meshgrid(self.size.astype(float), self.alpha)
polyrbf = Rbf(xs.ravel(), xa.ravel(), self.crit_table.T.ravel(),function='linear')
return polyrbf
def _critvals(self, n):
'''rows of the table, linearly interpolated for given sample size
Parameters
----------
n : float
sample size, second parameter of the table
Returns
-------
critv : ndarray, 1d
critical values (ppf) corresponding to a row of the table
Notes
-----
This is used in two step interpolation, or if we want to know the
critical values for all alphas for any sample size that we can obtain
through interpolation
'''
return np.array([p(n) for p in self.polyn])
def prob(self, x, n):
        '''find p-values by interpolation, either cdf(x) or sf(x)
        returns extreme probabilities, 0.001 and 0.2, for out of range values
Parameters
----------
x : array_like
observed value, assumed to follow the distribution in the table
n : float
sample size, second parameter of the table
Returns
-------
        prob : array_like
            probability for each value of x; if x is the observed test
            statistic, this is the p-value of the underlying statistical test.
'''
critv = self._critvals(n)
alpha = self.alpha
# if self.signcrit == 1:
# if x < critv[0]: #generalize: ? np.sign(x - critvals[0]) == self.signcrit:
# return alpha[0]
# elif x > critv[-1]:
# return alpha[-1]
# elif self.signcrit == -1:
# if x > critv[0]:
# return alpha[0]
# elif x < critv[-1]:
# return alpha[-1]
if self.signcrit < 1:
#reverse if critv is decreasing
critv, alpha = critv[::-1], alpha[::-1]
#now critv is increasing
if np.size(x) == 1:
if x < critv[0]:
return alpha[0]
elif x > critv[-1]:
return alpha[-1]
return interp1d(critv, alpha)(x)[()]
else:
#vectorized
cond_low = (x < critv[0])
cond_high = (x > critv[-1])
cond_interior = ~np.logical_or(cond_low, cond_high)
probs = np.nan * np.ones(x.shape) #mistake if nan left
probs[cond_low] = alpha[0]
            probs[cond_high] = alpha[-1]
probs[cond_interior] = interp1d(critv, alpha)(x[cond_interior])
return probs
def crit2(self, prob, n):
'''returns interpolated quantiles, similar to ppf or isf
this can be either cdf or sf depending on the table, twosided?
        note: this does not work reliably, interp2d raises a "no more knots" warning
'''
return self.poly2d(n, prob)
def crit(self, prob, n):
'''returns interpolated quantiles, similar to ppf or isf
use two sequential 1d interpolation, first by n then by prob
Parameters
----------
prob : array_like
probabilities corresponding to the definition of table columns
n : int or float
sample size, second parameter of the table
Returns
-------
ppf : array_like
critical values with same shape as prob
'''
prob = np.asarray(prob)
alpha = self.alpha
critv = self._critvals(n)
#vectorized
cond_ilow = (prob > alpha[0])
cond_ihigh = (prob < alpha[-1])
        cond_interior = np.logical_and(cond_ilow, cond_ihigh)
#scalar
if prob.size == 1:
if cond_interior:
return interp1d(alpha, critv)(prob)
else:
return np.nan
#vectorized
quantile = np.nan * np.ones(prob.shape) #nans for outside
quantile[cond_interior] = interp1d(alpha, critv)(prob[cond_interior])
return quantile
def crit3(self, prob, n):
'''returns interpolated quantiles, similar to ppf or isf
uses Rbf to interpolate critical values as function of `prob` and `n`
Parameters
----------
prob : array_like
probabilities corresponding to the definition of table columns
n : int or float
sample size, second parameter of the table
Returns
-------
ppf : array_like
critical values with same shape as prob, returns nan for arguments
that are outside of the table bounds
'''
prob = np.asarray(prob)
alpha = self.alpha
#vectorized
cond_ilow = (prob > alpha[0])
cond_ihigh = (prob < alpha[-1])
        cond_interior = np.logical_and(cond_ilow, cond_ihigh)
#scalar
if prob.size == 1:
if cond_interior:
return self.polyrbf(n, prob)
else:
return np.nan
#vectorized
quantile = np.nan * np.ones(prob.shape) #nans for outside
quantile[cond_interior] = self.polyrbf(n, prob[cond_interior])
return quantile
if __name__ == '__main__':
'''
example Lilliefors test for normality
    An Analytic Approximation to the Distribution of Lilliefors's Test Statistic
    for Normality
    Author(s): Gerard E. Dallal and Leland Wilkinson
    Source: The American Statistician, Vol. 40, No. 4 (Nov., 1986), pp. 294-296
    Published by: American Statistical Association
    Stable URL: http://www.jstor.org/stable/2684607
'''
#for this test alpha is sf probability, i.e. right tail probability
alpha = np.array([ 0.2 , 0.15 , 0.1 , 0.05 , 0.01 , 0.001])[::-1]
size = np.array([ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 25, 30, 40, 100, 400, 900], float)
#critical values, rows are by sample size, columns are by alpha
crit_lf = np.array( [[303, 321, 346, 376, 413, 433],
[289, 303, 319, 343, 397, 439],
[269, 281, 297, 323, 371, 424],
[252, 264, 280, 304, 351, 402],
[239, 250, 265, 288, 333, 384],
[227, 238, 252, 274, 317, 365],
[217, 228, 241, 262, 304, 352],
[208, 218, 231, 251, 291, 338],
[200, 210, 222, 242, 281, 325],
[193, 202, 215, 234, 271, 314],
[187, 196, 208, 226, 262, 305],
[181, 190, 201, 219, 254, 296],
[176, 184, 195, 213, 247, 287],
[171, 179, 190, 207, 240, 279],
[167, 175, 185, 202, 234, 273],
[163, 170, 181, 197, 228, 266],
[159, 166, 176, 192, 223, 260],
[143, 150, 159, 173, 201, 236],
[131, 138, 146, 159, 185, 217],
[115, 120, 128, 139, 162, 189],
[ 74, 77, 82, 89, 104, 122],
[ 37, 39, 41, 45, 52, 61],
[ 25, 26, 28, 30, 35, 42]])[:,::-1] / 1000.
lf = TableDist(alpha, size, crit_lf)
print(lf.prob(0.166, 20), 'should be:', 0.15)
print('')
print(lf.crit2(0.15, 20), 'should be:', 0.166, 'interp2d bad')
print(lf.crit(0.15, 20), 'should be:', 0.166, 'two 1d')
print(lf.crit3(0.15, 20), 'should be:', 0.166, 'Rbf')
print('')
print(lf.crit2(0.17, 20), 'should be in:', (.159, .166), 'interp2d bad')
print(lf.crit(0.17, 20), 'should be in:', (.159, .166), 'two 1d')
print(lf.crit3(0.17, 20), 'should be in:', (.159, .166), 'Rbf')
print('')
print(lf.crit2(0.19, 20), 'should be in:', (.159, .166), 'interp2d bad')
print(lf.crit(0.19, 20), 'should be in:', (.159, .166), 'two 1d')
print(lf.crit3(0.19, 20), 'should be in:', (.159, .166), 'Rbf')
print('')
print(lf.crit2(0.199, 20), 'should be in:', (.159, .166), 'interp2d bad')
print(lf.crit(0.199, 20), 'should be in:', (.159, .166), 'two 1d')
print(lf.crit3(0.199, 20), 'should be in:', (.159, .166), 'Rbf')
#testing
print(np.max(np.abs(np.array([lf.prob(c, size[i]) for i in range(len(size)) for c in crit_lf[i]]).reshape(-1,lf.n_alpha) - lf.alpha)))
#1.6653345369377348e-16
print(np.max(np.abs(np.array([lf.crit(c, size[i]) for i in range(len(size)) for c in lf.alpha]).reshape(-1,lf.n_alpha) - crit_lf)))
#6.9388939039072284e-18)
print(np.max(np.abs(np.array([lf.crit3(c, size[i]) for i in range(len(size)) for c in lf.alpha]).reshape(-1,lf.n_alpha) - crit_lf)))
#4.0615705243496336e-12)
print((np.array([lf.crit3(c, size[i]) for i in range(len(size)) for c in lf.alpha[:-1]*1.1]).reshape(-1,lf.n_alpha-1) < crit_lf[:,:-1]).all())
print((np.array([lf.crit3(c, size[i]) for i in range(len(size)) for c in lf.alpha[:-1]*1.1]).reshape(-1,lf.n_alpha-1) > crit_lf[:,1:]).all())
print((np.array([lf.prob(c*0.9, size[i]) for i in range(len(size)) for c in crit_lf[i,:-1]]).reshape(-1,lf.n_alpha-1) > lf.alpha[:-1]).all())
print((np.array([lf.prob(c*1.1, size[i]) for i in range(len(size)) for c in crit_lf[i,1:]]).reshape(-1,lf.n_alpha-1) < lf.alpha[1:]).all())
#start at size_idx=2 because of non-monotonicity of lf_crit
print((np.array([lf.prob(c, size[i]*0.9) for i in range(2,len(size)) for c in crit_lf[i,:-1]]).reshape(-1,lf.n_alpha-1) > lf.alpha[:-1]).all())
|
bsd-3-clause
|
ConeyLiu/spark
|
examples/src/main/python/ml/power_iteration_clustering_example.py
|
54
|
1604
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating PowerIterationClustering.
Run with:
bin/spark-submit examples/src/main/python/ml/power_iteration_clustering_example.py
"""
# $example on$
from pyspark.ml.clustering import PowerIterationClustering
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("PowerIterationClusteringExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame([
(0, 1, 1.0),
(0, 2, 1.0),
(1, 2, 1.0),
(3, 4, 1.0),
(4, 0, 0.1)
], ["src", "dst", "weight"])
pic = PowerIterationClustering(k=2, maxIter=20, initMode="degree", weightCol="weight")
# Shows the cluster assignment
pic.assignClusters(df).show()
# $example off$
spark.stop()
|
apache-2.0
|
willworth/thermos
|
thermos/Lib/site-packages/pip/utils/logging.py
|
516
|
3327
|
from __future__ import absolute_import
import contextlib
import logging
import logging.handlers
import os
try:
import threading
except ImportError:
import dummy_threading as threading
from pip.compat import WINDOWS
from pip.utils import ensure_dir
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
_log_state = threading.local()
_log_state.indentation = 0
@contextlib.contextmanager
def indent_log(num=2):
"""
A context manager which will cause the log output to be indented for any
log messages emitted inside it.
"""
_log_state.indentation += num
try:
yield
finally:
_log_state.indentation -= num
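# A minimal usage sketch (hypothetical caller code, not part of this module):
#
#     logger = logging.getLogger(__name__)
#     logger.info("collecting package")      # emitted flush left
#     with indent_log():
#         logger.info("downloading wheel")   # emitted indented by two spaces
#
# The indentation itself is applied by IndentingFormatter.format() below, which
# prefixes every line of a formatted record with get_indentation() spaces.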
def get_indentation():
return getattr(_log_state, 'indentation', 0)
class IndentingFormatter(logging.Formatter):
def format(self, record):
"""
Calls the standard formatter, but will indent all of the log messages
by our current indentation level.
"""
formatted = logging.Formatter.format(self, record)
formatted = "".join([
(" " * get_indentation()) + line
for line in formatted.splitlines(True)
])
return formatted
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
class ColorizedStreamHandler(logging.StreamHandler):
# Don't build up a list of colors if we don't have colorama
if colorama:
COLORS = [
# This needs to be in order from highest logging level to lowest.
(logging.ERROR, _color_wrap(colorama.Fore.RED)),
(logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
]
else:
COLORS = []
def __init__(self, stream=None):
logging.StreamHandler.__init__(self, stream)
if WINDOWS and colorama:
self.stream = colorama.AnsiToWin32(self.stream)
def should_color(self):
# Don't colorize things if we do not have colorama
if not colorama:
return False
real_stream = (
self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
else self.stream.wrapped
)
# If the stream is a tty we should color it
if hasattr(real_stream, "isatty") and real_stream.isatty():
return True
        # If we have an ANSI term we should color it
if os.environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
def format(self, record):
msg = logging.StreamHandler.format(self, record)
if self.should_color():
for level, color in self.COLORS:
if record.levelno >= level:
msg = color(msg)
break
return msg
class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
def _open(self):
ensure_dir(os.path.dirname(self.baseFilename))
return logging.handlers.RotatingFileHandler._open(self)
class MaxLevelFilter(logging.Filter):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
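# A minimal sketch of how MaxLevelFilter can be wired up (hypothetical caller
# code, not part of this module): route records below WARNING to stdout while a
# separate stderr handler takes WARNING and above.
#
#     import sys
#     stdout_handler = logging.StreamHandler(sys.stdout)
#     stdout_handler.addFilter(MaxLevelFilter(logging.WARNING))
#     stderr_handler = logging.StreamHandler(sys.stderr)
#     stderr_handler.setLevel(logging.WARNING)
#     root_logger = logging.getLogger()
#     root_logger.addHandler(stdout_handler)
#     root_logger.addHandler(stderr_handler)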
|
mit
|
littlepea/django-docs
|
docs/tests/runtests.py
|
1
|
1998
|
#!/usr/bin/env python
"""
This script is a trick to setup a fake Django environment, since this reusable
app will be developed and tested outside any specific Django project.
Via ``settings.configure`` you will be able to set all necessary settings
for your app and run the tests as if you were calling ``./manage.py test``.
"""
import os
import sys
from django.conf import settings
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
INTERNAL_APPS = [
'django_nose',
'docs',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
if not settings.configured:
settings.configure(
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF='docs.tests.urls',
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), '../../templates'),
),
COVERAGE_MODULE_EXCLUDES=COVERAGE_MODULE_EXCLUDES,
COVERAGE_REPORT_HTML_OUTPUT_DIR=os.path.join(
os.path.dirname(__file__), 'coverage'),
USE_TZ=True,
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',),
)
from django_nose import NoseTestSuiteRunner
class NoseCoverageTestRunner(NoseTestSuiteRunner):
"""Custom test runner that uses nose and coverage"""
pass
def runtests(*test_args):
failures = NoseCoverageTestRunner(verbosity=2, interactive=True).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
bsd-3-clause
|
fone4u/python-oauth2
|
tests/test_oauth.py
|
301
|
53269
|
# -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2009 Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import unittest
import oauth2 as oauth
import random
import time
import urllib
import urlparse
from types import ListType
import mock
import httplib2
# Fix for python2.5 compatibility
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
class TestError(unittest.TestCase):
def test_message(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEqual(e.message, 'OAuth error occurred.')
msg = 'OMG THINGS BROKE!!!!'
try:
raise oauth.Error(msg)
except oauth.Error, e:
self.assertEqual(e.message, msg)
def test_str(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEquals(str(e), 'OAuth error occurred.')
class TestGenerateFunctions(unittest.TestCase):
def test_build_auth_header(self):
header = oauth.build_authenticate_header()
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm=""')
self.assertEqual(len(header), 1)
realm = 'http://example.myrealm.com/'
header = oauth.build_authenticate_header(realm)
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm="%s"' %
realm)
self.assertEqual(len(header), 1)
def test_build_xoauth_string(self):
consumer = oauth.Consumer('consumer_token', 'consumer_secret')
token = oauth.Token('user_token', 'user_secret')
url = "https://mail.google.com/mail/b/[email protected]/imap/"
xoauth_string = oauth.build_xoauth_string(url, consumer, token)
method, oauth_url, oauth_string = xoauth_string.split(' ')
self.assertEqual("GET", method)
self.assertEqual(url, oauth_url)
returned = {}
parts = oauth_string.split(',')
for part in parts:
var, val = part.split('=')
returned[var] = val.strip('"')
self.assertEquals('HMAC-SHA1', returned['oauth_signature_method'])
self.assertEquals('user_token', returned['oauth_token'])
self.assertEquals('consumer_token', returned['oauth_consumer_key'])
self.assertTrue('oauth_signature' in returned, 'oauth_signature')
def test_escape(self):
string = 'http://whatever.com/~someuser/?test=test&other=other'
self.assert_('~' in oauth.escape(string))
string = '../../../../../../../etc/passwd'
self.assert_('../' not in oauth.escape(string))
def test_gen_nonce(self):
nonce = oauth.generate_nonce()
self.assertEqual(len(nonce), 8)
nonce = oauth.generate_nonce(20)
self.assertEqual(len(nonce), 20)
def test_gen_verifier(self):
verifier = oauth.generate_verifier()
self.assertEqual(len(verifier), 8)
verifier = oauth.generate_verifier(16)
self.assertEqual(len(verifier), 16)
def test_gen_timestamp(self):
exp = int(time.time())
now = oauth.generate_timestamp()
self.assertEqual(exp, now)
class TestConsumer(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.consumer = oauth.Consumer(key=self.key, secret=self.secret)
def test_init(self):
self.assertEqual(self.consumer.key, self.key)
self.assertEqual(self.consumer.secret, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Consumer(None, None))
self.assertRaises(ValueError, lambda: oauth.Consumer('asf', None))
self.assertRaises(ValueError, lambda: oauth.Consumer(None, 'dasf'))
def test_str(self):
res = dict(parse_qsl(str(self.consumer)))
self.assertTrue('oauth_consumer_key' in res)
self.assertTrue('oauth_consumer_secret' in res)
self.assertEquals(res['oauth_consumer_key'], self.consumer.key)
self.assertEquals(res['oauth_consumer_secret'], self.consumer.secret)
class TestToken(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.token = oauth.Token(self.key, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Token(None, None))
self.assertRaises(ValueError, lambda: oauth.Token('asf', None))
self.assertRaises(ValueError, lambda: oauth.Token(None, 'dasf'))
def test_init(self):
self.assertEqual(self.token.key, self.key)
self.assertEqual(self.token.secret, self.secret)
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
self.assertEqual(self.token.verifier, None)
def test_set_callback(self):
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
cb = 'http://www.example.com/my-callback'
self.token.set_callback(cb)
self.assertEqual(self.token.callback, cb)
self.assertEqual(self.token.callback_confirmed, 'true')
self.token.set_callback(None)
self.assertEqual(self.token.callback, None)
# TODO: The following test should probably not pass, but it does
# To fix this, check for None and unset 'true' in set_callback
# Additionally, should a confirmation truly be done of the callback?
self.assertEqual(self.token.callback_confirmed, 'true')
def test_set_verifier(self):
self.assertEqual(self.token.verifier, None)
v = oauth.generate_verifier()
self.token.set_verifier(v)
self.assertEqual(self.token.verifier, v)
self.token.set_verifier()
self.assertNotEqual(self.token.verifier, v)
self.token.set_verifier('')
self.assertEqual(self.token.verifier, '')
def test_get_callback_url(self):
self.assertEqual(self.token.get_callback_url(), None)
self.token.set_verifier()
self.assertEqual(self.token.get_callback_url(), None)
cb = 'http://www.example.com/my-callback?save=1&return=true'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '&oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
cb = 'http://www.example.com/my-callback-no-query'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '?oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
def test_to_string(self):
string = 'oauth_token_secret=%s&oauth_token=%s' % (self.secret,
self.key)
self.assertEqual(self.token.to_string(), string)
self.token.set_callback('http://www.example.com/my-callback')
string += '&oauth_callback_confirmed=true'
self.assertEqual(self.token.to_string(), string)
def _compare_tokens(self, new):
self.assertEqual(self.token.key, new.key)
self.assertEqual(self.token.secret, new.secret)
# TODO: What about copying the callback to the new token?
# self.assertEqual(self.token.callback, new.callback)
self.assertEqual(self.token.callback_confirmed,
new.callback_confirmed)
# TODO: What about copying the verifier to the new token?
# self.assertEqual(self.token.verifier, new.verifier)
    def test_to_string_simple(self):
tok = oauth.Token('tooken', 'seecret')
self.assertEqual(str(tok), 'oauth_token_secret=seecret&oauth_token=tooken')
def test_from_string(self):
self.assertRaises(ValueError, lambda: oauth.Token.from_string(''))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blahblahblah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blah=blah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=&oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=tooken%26oauth_token_secret=seecret'))
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
self.token.set_callback('http://www.example.com/my-callback')
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
class ReallyEqualMixin:
def failUnlessReallyEqual(self, a, b, msg=None):
self.failUnlessEqual(a, b, msg=msg)
self.failUnlessEqual(type(a), type(b), msg="a :: %r, b :: %r, %r" % (a, b, msg))
class TestFuncs(unittest.TestCase):
def test_to_unicode(self):
self.failUnlessRaises(TypeError, oauth.to_unicode, '\xae')
self.failUnlessRaises(TypeError, oauth.to_unicode_optional_iterator, '\xae')
self.failUnlessRaises(TypeError, oauth.to_unicode_optional_iterator, ['\xae'])
self.failUnlessEqual(oauth.to_unicode(':-)'), u':-)')
self.failUnlessEqual(oauth.to_unicode(u'\u00ae'), u'\u00ae')
self.failUnlessEqual(oauth.to_unicode('\xc2\xae'), u'\u00ae')
self.failUnlessEqual(oauth.to_unicode_optional_iterator([':-)']), [u':-)'])
self.failUnlessEqual(oauth.to_unicode_optional_iterator([u'\u00ae']), [u'\u00ae'])
class TestRequest(unittest.TestCase, ReallyEqualMixin):
def test_setter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method)
self.assertTrue(not hasattr(req, 'url') or req.url is None)
self.assertTrue(not hasattr(req, 'normalized_url') or req.normalized_url is None)
def test_deleter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method, url)
try:
del req.url
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_url(self):
url1 = "http://example.com:80/foo.php"
url2 = "https://example.com:443/foo.php"
exp1 = "http://example.com/foo.php"
exp2 = "https://example.com/foo.php"
method = "GET"
req = oauth.Request(method, url1)
self.assertEquals(req.normalized_url, exp1)
self.assertEquals(req.url, url1)
req = oauth.Request(method, url2)
self.assertEquals(req.normalized_url, exp2)
self.assertEquals(req.url, url2)
def test_bad_url(self):
request = oauth.Request()
try:
request.url = "ftp://example.com"
self.fail("Invalid URL scheme was accepted.")
except ValueError:
pass
def test_unset_consumer_and_token(self):
consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
token = oauth.Token('my_key', 'my_secret')
request = oauth.Request("GET", "http://example.com/fetch.php")
request.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer,
token)
self.assertEquals(consumer.key, request['oauth_consumer_key'])
self.assertEquals(token.key, request['oauth_token'])
def test_no_url_set(self):
consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
token = oauth.Token('my_key', 'my_secret')
request = oauth.Request()
try:
try:
request.sign_request(oauth.SignatureMethod_HMAC_SHA1(),
consumer, token)
except TypeError:
self.fail("Signature method didn't check for a normalized URL.")
except ValueError:
pass
def test_url_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
normalized_url = urlparse.urlunparse(urlparse.urlparse(url)[:3] + (None, None, None))
method = "GET"
req = oauth.Request(method, url)
self.assertEquals(req.url, url)
self.assertEquals(req.normalized_url, normalized_url)
def test_get_parameter(self):
url = "http://example.com"
method = "GET"
params = {'oauth_consumer' : 'asdf'}
req = oauth.Request(method, url, parameters=params)
self.assertEquals(req.get_parameter('oauth_consumer'), 'asdf')
self.assertRaises(oauth.Error, req.get_parameter, 'blah')
def test_get_nonoauth_parameters(self):
oauth_params = {
'oauth_consumer': 'asdfasdfasdf'
}
other_params = {
u'foo': u'baz',
u'bar': u'foo',
u'multi': [u'FOO',u'BAR'],
u'uni_utf8': u'\xae',
u'uni_unicode': u'\u00ae',
u'uni_unicode_2': u'åÅøØ',
}
params = oauth_params
params.update(other_params)
req = oauth.Request("GET", "http://example.com", params)
self.assertEquals(other_params, req.get_nonoauth_parameters())
def test_to_header(self):
realm = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
header, value = req.to_header(realm).items()[0]
parts = value.split('OAuth ')
vars = parts[1].split(', ')
self.assertTrue(len(vars), (len(params) + 1))
res = {}
for v in vars:
var, val = v.split('=')
res[var] = urllib.unquote(val.strip('"'))
self.assertEquals(realm, res['realm'])
del res['realm']
self.assertTrue(len(res), len(params))
for key, val in res.items():
self.assertEquals(val, params.get(key))
def test_to_postdata_nonascii(self):
realm = "http://sp.example.com/"
params = {
'nonasciithing': u'q\xbfu\xe9 ,aasp u?..a.s',
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
self.failUnlessReallyEqual(req.to_postdata(), 'nonasciithing=q%C2%BFu%C3%A9%20%2Caasp%20u%3F..a.s&oauth_nonce=4572616e48616d6d65724c61686176&oauth_timestamp=137131200&oauth_consumer_key=0685bd9184jfhq22&oauth_signature_method=HMAC-SHA1&oauth_version=1.0&oauth_token=ad180jjd733klru7&oauth_signature=wOJIO9A2W5mFwDgiDvZbTSMK%252FPY%253D')
def test_to_postdata(self):
realm = "http://sp.example.com/"
params = {
'multi': ['FOO','BAR'],
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
flat = [('multi','FOO'),('multi','BAR')]
del params['multi']
flat.extend(params.items())
kf = lambda x: x[0]
self.assertEquals(sorted(flat, key=kf), sorted(parse_qsl(req.to_postdata()), key=kf))
def test_to_url(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
exp = urlparse.urlparse("%s?%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertEquals(a, b)
def test_to_url_with_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
# Note: the url above already has query parameters, so append new ones with &
exp = urlparse.urlparse("%s&%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertTrue('alt' in b)
self.assertTrue('max-contacts' in b)
self.assertEquals(b['alt'], ['json'])
self.assertEquals(b['max-contacts'], ['10'])
self.assertEquals(a, b)
def test_signature_base_string_nonascii_nonutf8(self):
consumer = oauth.Consumer('consumer_token', 'consumer_secret')
url = u'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\u2766,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\xe2\x9d\xa6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = u'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
def test_signature_base_string_with_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
self.assertEquals(req.normalized_url, 'https://www.google.com/m8/feeds/contacts/default/full/')
self.assertEquals(req.url, 'https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10')
normalized_params = parse_qsl(req.get_normalized_parameters())
self.assertTrue(len(normalized_params), len(params) + 2)
normalized_params = dict(normalized_params)
for key, value in params.iteritems():
if key == 'oauth_signature':
continue
self.assertEquals(value, normalized_params[key])
self.assertEquals(normalized_params['alt'], 'json')
self.assertEquals(normalized_params['max-contacts'], '10')
def test_get_normalized_parameters_empty(self):
url = "http://sp.example.com/?empty="
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected='empty='
self.assertEquals(expected, res)
def test_get_normalized_parameters_duplicate(self):
url = "http://example.com/v2/search/videos?oauth_nonce=79815175&oauth_timestamp=1295397962&oauth_consumer_key=mykey&oauth_signature_method=HMAC-SHA1&q=car&oauth_version=1.0&offset=10&oauth_signature=spWLI%2FGQjid7sQVd5%2FarahRxzJg%3D"
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected='oauth_consumer_key=mykey&oauth_nonce=79815175&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1295397962&oauth_version=1.0&offset=10&q=car'
self.assertEquals(expected, res)
def test_get_normalized_parameters_from_url(self):
# example copied from
# https://github.com/ciaranj/node-oauth/blob/master/tests/oauth.js
# which in turns says that it was copied from
# http://oauth.net/core/1.0/#sig_base_example .
url = "http://photos.example.net/photos?file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original"
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected = 'file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original'
self.assertEquals(expected, res)
def test_signing_base(self):
# example copied from
# https://github.com/ciaranj/node-oauth/blob/master/tests/oauth.js
# which in turns says that it was copied from
# http://oauth.net/core/1.0/#sig_base_example .
url = "http://photos.example.net/photos?file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original"
req = oauth.Request("GET", url)
sm = oauth.SignatureMethod_HMAC_SHA1()
consumer = oauth.Consumer('dpf43f3p2l4k3l03', 'foo')
key, raw = sm.signing_base(req, consumer, None)
expected = 'GET&http%3A%2F%2Fphotos.example.net%2Fphotos&file%3Dvacation.jpg%26oauth_consumer_key%3Ddpf43f3p2l4k3l03%26oauth_nonce%3Dkllo9940pd9333jh%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1191242096%26oauth_token%3Dnnch734d00sl2jdk%26oauth_version%3D1.0%26size%3Doriginal'
self.assertEquals(expected, raw)
def test_get_normalized_parameters(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'multi': ['FOO','BAR', u'\u00ae', '\xc2\xae'],
'multi_same': ['FOO','FOO'],
'uni_utf8_bytes': '\xc2\xae',
'uni_unicode_object': u'\u00ae'
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected='multi=BAR&multi=FOO&multi=%C2%AE&multi=%C2%AE&multi_same=FOO&multi_same=FOO&oauth_consumer_key=0685bd9184jfhq22&oauth_nonce=4572616e48616d6d65724c61686176&oauth_signature_method=HMAC-SHA1&oauth_timestamp=137131200&oauth_token=ad180jjd733klru7&oauth_version=1.0&uni_unicode_object=%C2%AE&uni_utf8_bytes=%C2%AE'
self.assertEquals(expected, res)
def test_get_normalized_parameters_ignores_auth_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_signature': "some-random-signature-%d" % random.randint(1000, 2000),
'oauth_token': "ad180jjd733klru7",
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
self.assertNotEquals(urllib.urlencode(sorted(params.items())), res)
foo = params.copy()
del foo["oauth_signature"]
self.assertEqual(urllib.urlencode(sorted(foo.items())), res)
def test_set_signature_method(self):
consumer = oauth.Consumer('key', 'secret')
client = oauth.Client(consumer)
class Blah:
pass
try:
client.set_signature_method(Blah())
self.fail("Client.set_signature_method() accepted invalid method.")
except ValueError:
pass
m = oauth.SignatureMethod_HMAC_SHA1()
client.set_signature_method(m)
self.assertEquals(m, client.method)
def test_get_normalized_string_escapes_spaces_properly(self):
url = "http://sp.example.com/"
params = {
"some_random_data": random.randint(100, 1000),
"data": "This data with a random number (%d) has spaces!" % random.randint(1000, 2000),
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected = urllib.urlencode(sorted(params.items())).replace('+', '%20')
self.assertEqual(expected, res)
@mock.patch('oauth2.Request.make_timestamp')
@mock.patch('oauth2.Request.make_nonce')
def test_request_nonutf8_bytes(self, mock_make_nonce, mock_make_timestamp):
mock_make_nonce.return_value = 5
mock_make_timestamp.return_value = 6
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_token': tok.key,
'oauth_consumer_key': con.key
}
# If someone passes a sequence of bytes which is not ascii for
# url, we'll raise an exception as early as possible.
url = "http://sp.example.com/\x92" # It's actually cp1252-encoding...
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
        # And if they pass a unicode, then we'll use it.
url = u'http://sp.example.com/\u2019'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'cMzvCkhvLL57+sTIxLITTHfkqZk=')
# And if it is a utf-8-encoded-then-percent-encoded non-ascii
# thing, we'll decode it and use it.
url = "http://sp.example.com/%E2%80%99"
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'yMLKOyNKC/DkyhUOb8DLSvceEWE=')
# Same thing with the params.
url = "http://sp.example.com/"
# If someone passes a sequence of bytes which is not ascii in
# params, we'll raise an exception as early as possible.
params['non_oauth_thing'] = '\xae', # It's actually cp1252-encoding...
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
# And if they pass a unicode, then we'll use it.
params['non_oauth_thing'] = u'\u2019'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], '0GU50m0v60CVDB5JnoBXnvvvKx4=')
# And if it is a utf-8-encoded non-ascii thing, we'll decode
# it and use it.
params['non_oauth_thing'] = '\xc2\xae'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'pqOCu4qvRTiGiXB8Z61Jsey0pMM=')
# Also if there are non-utf8 bytes in the query args.
url = "http://sp.example.com/?q=\x92" # cp1252
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
def test_request_hash_of_body(self):
tok = oauth.Token(key="token", secret="tok-test-secret")
con = oauth.Consumer(key="consumer", secret="con-test-secret")
# Example 1a from Appendix A.1 of
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
        # Except that we get a different result than they do.
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 10288510250934,
'oauth_timestamp': 1236874155,
'oauth_consumer_key': con.key
}
url = u"http://www.example.com/resource"
req = oauth.Request(method="PUT", url=url, parameters=params, body="Hello World!", is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], 'Lve95gjOVATpfV8EL5X4nxwjKHE=')
self.failUnlessReallyEqual(req['oauth_signature'], 't+MX8l/0S8hdbVQL99nD0X1fPnM=')
# oauth-bodyhash.html A.1 has
# '08bUFF%2Fjmp59mWB7cSgCYBUpJ0U%3D', but I don't see how that
# is possible.
# Example 1b
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 10369470270925,
'oauth_timestamp': 1236874236,
'oauth_consumer_key': con.key
}
req = oauth.Request(method="PUT", url=url, parameters=params, body="Hello World!", is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], 'Lve95gjOVATpfV8EL5X4nxwjKHE=')
self.failUnlessReallyEqual(req['oauth_signature'], 'CTFmrqJIGT7NsWJ42OrujahTtTc=')
# Appendix A.2
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 8628868109991,
'oauth_timestamp': 1238395022,
'oauth_consumer_key': con.key
}
req = oauth.Request(method="GET", url=url, parameters=params, is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], '2jmj7l5rSw0yVb/vlWAYkK/YBwk=')
self.failUnlessReallyEqual(req['oauth_signature'], 'Zhl++aWSP0O3/hYQ0CuBc7jv38I=')
def test_sign_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200"
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
params['oauth_token'] = tok.key
params['oauth_consumer_key'] = con.key
req = oauth.Request(method="GET", url=url, parameters=params)
methods = {
'DX01TdHws7OninCLK9VztNTH1M4=': oauth.SignatureMethod_HMAC_SHA1(),
'con-test-secret&tok-test-secret': oauth.SignatureMethod_PLAINTEXT()
}
for exp, method in methods.items():
req.sign_request(method, con, tok)
self.assertEquals(req['oauth_signature_method'], method.name)
self.assertEquals(req['oauth_signature'], exp)
# Also if there are non-ascii chars in the URL.
url = "http://sp.example.com/\xe2\x80\x99" # utf-8 bytes
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
self.assertEquals(req['oauth_signature'], 'loFvp5xC7YbOgd9exIO6TxB7H4s=')
url = u'http://sp.example.com/\u2019' # Python unicode object
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
self.assertEquals(req['oauth_signature'], 'loFvp5xC7YbOgd9exIO6TxB7H4s=')
# Also if there are non-ascii chars in the query args.
url = "http://sp.example.com/?q=\xe2\x80\x99" # utf-8 bytes
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
self.assertEquals(req['oauth_signature'], 'IBw5mfvoCsDjgpcsVKbyvsDqQaU=')
url = u'http://sp.example.com/?q=\u2019' # Python unicode object
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
self.assertEquals(req['oauth_signature'], 'IBw5mfvoCsDjgpcsVKbyvsDqQaU=')
def test_from_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
headers = req.to_header()
# Test from the headers
req = oauth.Request.from_request("GET", url, headers)
self.assertEquals(req.method, "GET")
self.assertEquals(req.url, url)
self.assertEquals(params, req.copy())
# Test with bad OAuth headers
bad_headers = {
'Authorization' : 'OAuth this is a bad header'
}
self.assertRaises(oauth.Error, oauth.Request.from_request, "GET",
url, bad_headers)
# Test getting from query string
qs = urllib.urlencode(params)
req = oauth.Request.from_request("GET", url, query_string=qs)
exp = parse_qs(qs, keep_blank_values=False)
for k, v in exp.iteritems():
exp[k] = urllib.unquote(v[0])
self.assertEquals(exp, req.copy())
        # Test that a malformed from_request() call returns None
req = oauth.Request.from_request("GET", url)
self.assertEquals(None, req)
def test_from_token_and_callback(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
req = oauth.Request.from_token_and_callback(tok)
self.assertFalse('oauth_callback' in req)
self.assertEquals(req['oauth_token'], tok.key)
req = oauth.Request.from_token_and_callback(tok, callback=url)
self.assertTrue('oauth_callback' in req)
self.assertEquals(req['oauth_callback'], url)
def test_from_consumer_and_token(self):
url = "http://sp.example.com/"
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
tok.set_verifier('this_is_a_test_verifier')
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
req = oauth.Request.from_consumer_and_token(con, token=tok,
http_method="GET", http_url=url)
self.assertEquals(req['oauth_token'], tok.key)
self.assertEquals(req['oauth_consumer_key'], con.key)
self.assertEquals(tok.verifier, req['oauth_verifier'])
class SignatureMethod_Bad(oauth.SignatureMethod):
name = "BAD"
def signing_base(self, request, consumer, token):
return ""
def sign(self, request, consumer, token):
return "invalid-signature"
class TestServer(unittest.TestCase):
def setUp(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
def test_init(self):
server = oauth.Server(signature_methods={'HMAC-SHA1' : oauth.SignatureMethod_HMAC_SHA1()})
self.assertTrue('HMAC-SHA1' in server.signature_methods)
self.assertTrue(isinstance(server.signature_methods['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
server = oauth.Server()
self.assertEquals(server.signature_methods, {})
def test_add_signature_method(self):
server = oauth.Server()
res = server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertTrue(len(res) == 1)
self.assertTrue('HMAC-SHA1' in res)
self.assertTrue(isinstance(res['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
res = server.add_signature_method(oauth.SignatureMethod_PLAINTEXT())
self.assertTrue(len(res) == 2)
self.assertTrue('PLAINTEXT' in res)
self.assertTrue(isinstance(res['PLAINTEXT'],
oauth.SignatureMethod_PLAINTEXT))
def test_verify_request(self):
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
self.assertTrue('bar' in parameters)
self.assertTrue('foo' in parameters)
self.assertTrue('multi' in parameters)
self.assertEquals(parameters['bar'], 'blerg')
self.assertEquals(parameters['foo'], 59)
self.assertEquals(parameters['multi'], ['FOO','BAR'])
def test_build_authenticate_header(self):
server = oauth.Server()
headers = server.build_authenticate_header('example.com')
self.assertTrue('WWW-Authenticate' in headers)
self.assertEquals('OAuth realm="example.com"',
headers['WWW-Authenticate'])
def test_no_version(self):
url = "http://sp.example.com/"
params = {
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
def test_invalid_version(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '222.9922',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['foo','bar'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request, consumer, token)
def test_invalid_signature_method(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = SignatureMethod_Bad()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request,
consumer, token)
def test_missing_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
del request['oauth_signature']
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.MissingSignature, server.verify_request,
request, consumer, token)
# Request Token: http://oauth-sandbox.sevengoslings.net/request_token
# Auth: http://oauth-sandbox.sevengoslings.net/authorize
# Access Token: http://oauth-sandbox.sevengoslings.net/access_token
# Two-legged: http://oauth-sandbox.sevengoslings.net/two_legged
# Three-legged: http://oauth-sandbox.sevengoslings.net/three_legged
# Key: bd37aed57e15df53
# Secret: 0e9e6413a9ef49510a4f68ed02cd
class TestClient(unittest.TestCase):
# oauth_uris = {
# 'request_token': '/request_token.php',
# 'access_token': '/access_token.php'
# }
oauth_uris = {
'request_token': '/request_token',
'authorize': '/authorize',
'access_token': '/access_token',
'two_legged': '/two_legged',
'three_legged': '/three_legged'
}
consumer_key = 'bd37aed57e15df53'
consumer_secret = '0e9e6413a9ef49510a4f68ed02cd'
host = 'http://oauth-sandbox.sevengoslings.net'
def setUp(self):
self.consumer = oauth.Consumer(key=self.consumer_key,
secret=self.consumer_secret)
self.body = {
'foo': 'bar',
'bar': 'foo',
'multi': ['FOO','BAR'],
'blah': 599999
}
def _uri(self, type):
uri = self.oauth_uris.get(type)
if uri is None:
raise KeyError("%s is not a valid OAuth URI type." % type)
return "%s%s" % (self.host, uri)
def create_simple_multipart_data(self, data):
boundary = '---Boundary-%d' % random.randint(1,1000)
crlf = '\r\n'
items = []
for key, value in data.iteritems():
items += [
'--'+boundary,
'Content-Disposition: form-data; name="%s"'%str(key),
'',
str(value),
]
items += ['', '--'+boundary+'--', '']
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, crlf.join(items)
def test_init(self):
class Blah():
pass
try:
client = oauth.Client(Blah())
self.fail("Client.__init__() accepted invalid Consumer.")
except ValueError:
pass
consumer = oauth.Consumer('token', 'secret')
try:
client = oauth.Client(consumer, Blah())
self.fail("Client.__init__() accepted invalid Token.")
except ValueError:
pass
def test_access_token_get(self):
"""Test getting an access token via GET."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "GET")
self.assertEquals(int(resp['status']), 200)
def test_access_token_post(self):
"""Test getting an access token via POST."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "POST")
self.assertEquals(int(resp['status']), 200)
res = dict(parse_qsl(content))
self.assertTrue('oauth_token' in res)
self.assertTrue('oauth_token_secret' in res)
def _two_legged(self, method):
client = oauth.Client(self.consumer, None)
return client.request(self._uri('two_legged'), method,
body=urllib.urlencode(self.body))
def test_two_legged_post(self):
"""A test of a two-legged OAuth POST request."""
resp, content = self._two_legged("POST")
self.assertEquals(int(resp['status']), 200)
def test_two_legged_get(self):
"""A test of a two-legged OAuth GET request."""
resp, content = self._two_legged("GET")
self.assertEquals(int(resp['status']), 200)
@mock.patch('httplib2.Http.request')
def test_multipart_post_does_not_alter_body(self, mockHttpRequest):
random_result = random.randint(1,100)
data = {
'rand-%d'%random.randint(1,100):random.randint(1,100),
}
content_type, body = self.create_simple_multipart_data(data)
client = oauth.Client(self.consumer, None)
uri = self._uri('two_legged')
def mockrequest(cl, ur, **kw):
self.failUnless(cl is client)
self.failUnless(ur is uri)
self.failUnlessEqual(frozenset(kw.keys()), frozenset(['method', 'body', 'redirections', 'connection_type', 'headers']))
self.failUnlessEqual(kw['body'], body)
self.failUnlessEqual(kw['connection_type'], None)
self.failUnlessEqual(kw['method'], 'POST')
self.failUnlessEqual(kw['redirections'], httplib2.DEFAULT_MAX_REDIRECTS)
self.failUnless(isinstance(kw['headers'], dict))
return random_result
mockHttpRequest.side_effect = mockrequest
result = client.request(uri, 'POST', headers={'Content-Type':content_type}, body=body)
self.assertEqual(result, random_result)
@mock.patch('httplib2.Http.request')
def test_url_with_query_string(self, mockHttpRequest):
uri = 'http://example.com/foo/bar/?show=thundercats&character=snarf'
client = oauth.Client(self.consumer, None)
random_result = random.randint(1,100)
def mockrequest(cl, ur, **kw):
self.failUnless(cl is client)
self.failUnlessEqual(frozenset(kw.keys()), frozenset(['method', 'body', 'redirections', 'connection_type', 'headers']))
self.failUnlessEqual(kw['body'], '')
self.failUnlessEqual(kw['connection_type'], None)
self.failUnlessEqual(kw['method'], 'GET')
self.failUnlessEqual(kw['redirections'], httplib2.DEFAULT_MAX_REDIRECTS)
self.failUnless(isinstance(kw['headers'], dict))
req = oauth.Request.from_consumer_and_token(self.consumer, None,
http_method='GET', http_url=uri, parameters={})
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), self.consumer, None)
expected = parse_qsl(urlparse.urlparse(req.to_url()).query)
actual = parse_qsl(urlparse.urlparse(ur).query)
self.failUnlessEqual(len(expected), len(actual))
actual = dict(actual)
for key, value in expected:
if key not in ('oauth_signature', 'oauth_nonce', 'oauth_timestamp'):
self.failUnlessEqual(actual[key], value)
return random_result
mockHttpRequest.side_effect = mockrequest
client.request(uri, 'GET')
@mock.patch('httplib2.Http.request')
@mock.patch('oauth2.Request.from_consumer_and_token')
def test_multiple_values_for_a_key(self, mockReqConstructor, mockHttpRequest):
client = oauth.Client(self.consumer, None)
request = oauth.Request("GET", "http://example.com/fetch.php", parameters={'multi': ['1', '2']})
mockReqConstructor.return_value = request
client.request('http://whatever', 'POST', body='multi=1&multi=2')
self.failUnlessEqual(mockReqConstructor.call_count, 1)
self.failUnlessEqual(mockReqConstructor.call_args[1]['parameters'], {'multi': ['1', '2']})
self.failUnless('multi=1' in mockHttpRequest.call_args[1]['body'])
self.failUnless('multi=2' in mockHttpRequest.call_args[1]['body'])
if __name__ == "__main__":
unittest.main()
|
mit
|
helloiloveit/VkxPhoneProject
|
submodules/externals/libvpx/third_party/googletest/src/test/gtest_throw_on_failure_test.py
|
2917
|
5766
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
env_var_value: value of the GTEST_THROW_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_throw_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
|
gpl-2.0
|
CosmicFish/CosmicFish
|
bundled/doxygen/testing/testsqlite3.py
|
27
|
3977
|
#! /usr/bin/python
from xml.etree import cElementTree as ET
import os
import sqlite3
import sys
import getopt
# map XML attributes/elements to SQL rows
# --POC: iterate through the children and attributes of the memberdef element
# and search it in doxygen_sqlite3.db
g_conn=None
val=[]
def print_unprocessed_attributes(node):
for key in node.attrib:
print "WARNING: '%s' has unprocessed attr '%s'" % (node.tag,key)
def extract_attribute(node,attribute,pnl):
if not attribute in node.attrib:
return
pnl.append("%s = ?" % attribute)
val.append(node.attrib[attribute])
node.attrib.pop(attribute)
def extract_element(node,chld,pnl):
# deal with <tag />
if chld.text == None:
if len(chld.attrib)==0:
node.remove(chld)
return
a=chld.text.strip()
if not a == "":
pnl.append("%s =?" % chld.tag)
val.append(chld.text.strip())
else:
pnl.append("%s IS NULL OR %s = ''" % (chld.tag,chld.tag))
node.remove(chld)
def process_memberdef(node):
q=[]
for chld in node.getchildren():
if chld.tag == "referencedby":
continue
if chld.tag == "references":
continue
if chld.tag == "param":
continue
if chld.tag == "type":
continue
if chld.tag == "location":
extract_attribute(chld,"line",q)
extract_attribute(chld,"column",q)
extract_attribute(chld,"bodystart",q)
extract_attribute(chld,"bodyend",q)
q.append("id_bodyfile=(select id from files where name=?)")
val.append(chld.attrib["bodyfile"])
chld.attrib.pop("bodyfile")
q.append("id_file=(select id from files where name=?)")
val.append(chld.attrib["file"])
chld.attrib.pop("file")
print_unprocessed_attributes(chld)
if len(chld.attrib) == 0:
node.remove(chld)
else:
extract_element(node,chld,q)
for chld in node.getchildren():
print "WARNING: '%s' has unprocessed child elem '%s'" % (node.tag,chld.tag)
extract_attribute(node,"kind",q)
extract_attribute(node,"prot",q)
extract_attribute(node,"static",q)
extract_attribute(node,"mutable",q)
extract_attribute(node,"const",q)
extract_attribute(node,"virt",q)
extract_attribute(node,"explicit",q)
extract_attribute(node,"inline",q)
q.append("refid=?")
val.append(node.attrib['id'])
node.attrib.pop('id')
print_unprocessed_attributes(node)
query="SELECT * FROM memberdef WHERE %s" % " AND ".join(q)
r=[]
try:
r = g_conn.execute(query,val).fetchall()
except sqlite3.OperationalError,e:
print "SQL_ERROR:%s"%e
del val[:]
if not len(r) > 0:
print "TEST_ERROR: Member not found in SQL DB"
def load_xml(name):
context = ET.iterparse(name, events=("start", "end"))
event, root = context.next()
for event, elem in context:
if event == "end" and elem.tag == "memberdef":
process_memberdef(elem)
print "\n== Unprocessed XML =="
# ET.dump(root)
def open_db(dbname):
global g_conn
if dbname == None:
dbname = "doxygen_sqlite3.db"
if not os.path.isfile(dbname):
raise BaseException("No such file %s" % dbname )
g_conn = sqlite3.connect(dbname)
g_conn.execute('PRAGMA temp_store = MEMORY;')
g_conn.row_factory = sqlite3.Row
def main(argv):
try:
opts, args = getopt.getopt(argv, "hd:x:",["help"])
except getopt.GetoptError:
sys.exit(1)
dbname=None
xmlfile=None
for a, o in opts:
if a in ('-h', '--help'):
sys.exit(0)
elif a in ('-d'):
dbname=o
continue
elif a in ('-x'):
xmlfile=o
continue
open_db(dbname)
load_xml(xmlfile)
if __name__ == '__main__':
main(sys.argv[1:])
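# Typical invocation (file names below are only illustrative):
#   ./testsqlite3.py -d doxygen_sqlite3.db -x xml/class_example.xml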
|
gpl-3.0
|
OpenFacetracker/facetracker-core
|
lib/youtube-dl/youtube_dl/extractor/fourtube.py
|
39
|
3885
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
parse_duration,
parse_iso8601,
str_to_int,
)
class FourTubeIE(InfoExtractor):
IE_NAME = '4tube'
_VALID_URL = r'https?://(?:www\.)?4tube\.com/videos/(?P<id>\d+)'
_TEST = {
'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black',
'md5': '6516c8ac63b03de06bc8eac14362db4f',
'info_dict': {
'id': '209733',
'ext': 'mp4',
'title': 'Hot Babe Holly Michaels gets her ass stuffed by black',
'uploader': 'WCP Club',
'uploader_id': 'wcp-club',
'upload_date': '20131031',
'timestamp': 1383263892,
'duration': 583,
'view_count': int,
'like_count': int,
'categories': list,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_meta('name', webpage)
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage))
thumbnail = self._html_search_meta('thumbnailUrl', webpage)
uploader_id = self._html_search_regex(
r'<a class="img-avatar" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">',
webpage, 'uploader id')
uploader = self._html_search_regex(
r'<a class="img-avatar" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">',
webpage, 'uploader')
categories_html = self._search_regex(
r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="list">(.*?)</ul>',
webpage, 'categories', fatal=False)
categories = None
if categories_html:
categories = [
c.strip() for c in re.findall(
r'(?s)<li><a.*?>(.*?)</a>', categories_html)]
view_count = str_to_int(self._search_regex(
r'<meta itemprop="interactionCount" content="UserPlays:([0-9,]+)">',
webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
r'<meta itemprop="interactionCount" content="UserLikes:([0-9,]+)">',
webpage, 'like count', fatal=False))
duration = parse_duration(self._html_search_meta('duration', webpage))
params_js = self._search_regex(
r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
webpage, 'initialization parameters'
)
params = self._parse_json('[%s]' % params_js, video_id)
media_id = params[0]
sources = ['%s' % p for p in params[2]]
token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format(
media_id, '+'.join(sources))
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
b'Origin': b'http://www.4tube.com',
}
token_req = compat_urllib_request.Request(token_url, b'{}', headers)
tokens = self._download_json(token_req, video_id)
formats = [{
'url': tokens[format]['token'],
'format_id': format + 'p',
'resolution': format + 'p',
'quality': int(format),
} for format in sources]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'categories': categories,
'thumbnail': thumbnail,
'uploader': uploader,
'uploader_id': uploader_id,
'timestamp': timestamp,
'like_count': like_count,
'view_count': view_count,
'duration': duration,
'age_limit': 18,
}
|
gpl-2.0
|
aboutsajjad/Bridge
|
app_packages/youtube_dl/extractor/streetvoice.py
|
60
|
1615
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import unified_strdate
class StreetVoiceIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?streetvoice\.com/[^/]+/songs/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://streetvoice.com/skippylu/songs/94440/',
'md5': '15974627fc01a29e492c98593c2fd472',
'info_dict': {
'id': '94440',
'ext': 'mp3',
'title': '輸',
'description': 'Crispy脆樂團 - 輸',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 260,
'upload_date': '20091018',
'uploader': 'Crispy脆樂團',
'uploader_id': '627810',
}
}, {
'url': 'http://tw.streetvoice.com/skippylu/songs/94440/',
'only_matching': True,
}]
def _real_extract(self, url):
song_id = self._match_id(url)
song = self._download_json(
'https://streetvoice.com/api/v1/public/song/%s/' % song_id, song_id, data=b'')
title = song['name']
author = song['user']['nickname']
return {
'id': song_id,
'url': song['file'],
'title': title,
'description': '%s - %s' % (author, title),
'thumbnail': self._proto_relative_url(song.get('image'), 'http:'),
'duration': song.get('length'),
'upload_date': unified_strdate(song.get('created_at')),
'uploader': author,
'uploader_id': compat_str(song['user']['id']),
}
|
mit
|
txemi/ansible
|
lib/ansible/modules/network/panos/panos_address.py
|
78
|
6066
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_address
short_description: Create address service object on PanOS devices
description:
- Create address service object of different types [IP Range, FQDN, or IP Netmask].
author: "Luigi Mori (@jtschichold), Ken Celenza (@itdependsnetworks), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for authentication.
default: "admin"
password:
description:
- Password credentials to use for authentication.
required: true
address:
description:
- IP address with or without mask, range, or FQDN.
required: true
default: None
address_name:
description:
- Human readable name of the address.
required: true
default: None
type:
description:
- This is the type of the object created.
default: ip-netmask
choices: [ 'ip-netmask', 'fqdn', 'ip-range' ]
description:
description:
- Description of the address object.
default: None
tag:
description:
- Tag of the address object.
default: None
commit:
description:
- Commit configuration to the Firewall if it is changed.
default: true
'''
EXAMPLES = '''
- name: create IP-Netmask Object
panos_address:
ip_address: "192.168.1.1"
password: 'admin'
address_name: 'google_dns'
address: '8.8.8.8/32'
description: 'Google DNS'
tag: 'Outbound'
commit: False
- name: create IP-Range Object
panos_address:
ip_address: "192.168.1.1"
password: 'admin'
type: 'ip-range'
address_name: 'apple-range'
address: '17.0.0.0-17.255.255.255'
commit: False
- name: create FQDN Object
panos_address:
ip_address: "192.168.1.1"
password: 'admin'
type: 'fqdn'
address_name: 'google.com'
address: 'www.google.com'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_ADDRESS_XPATH = "/config/devices/entry[@name='localhost.localdomain']" + \
"/vsys/entry[@name='vsys1']" + \
"/address/entry[@name='%s']"
def address_exists(xapi, address_name):
xapi.get(_ADDRESS_XPATH % address_name)
e = xapi.element_root.find('.//entry')
if e is None:
return False
return True
def add_address(xapi, module, address, address_name, description, type, tag):
if address_exists(xapi, address_name):
return False
exml = []
exml.append('<%s>' % type)
exml.append('%s' % address)
exml.append('</%s>' % type)
if description:
exml.append('<description>')
exml.append('%s' % description)
exml.append('</description>')
if tag:
exml.append('<tag>')
exml.append('<member>%s</member>' % tag)
exml.append('</tag>')
exml = ''.join(exml)
xapi.set(xpath=_ADDRESS_XPATH % address_name, element=exml)
return True
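# For illustration only: with type='ip-netmask', address='8.8.8.8/32',
# description='Google DNS' and tag='Outbound' (as in EXAMPLES above), the element
# passed to xapi.set() is roughly:
#   <ip-netmask>8.8.8.8/32</ip-netmask><description>Google DNS</description>
#   <tag><member>Outbound</member></tag>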
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
address_name=dict(required=True),
address=dict(),
description=dict(),
tag=dict(),
type=dict(default='ip-netmask', choices=['ip-netmask', 'ip-range', 'fqdn']),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
address_name = module.params['address_name']
address = module.params['address']
commit = module.params['commit']
description = module.params['description']
tag = module.params['tag']
type = module.params['type']
changed = False
try:
changed = add_address(xapi, module,
address,
address_name,
description,
type,
tag)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
|
gpl-3.0
|
veloutin/papas
|
lib6ko/tests/mocks/interactive_console.py
|
1
|
2406
|
# PAPAS Access Point Administration System
# Copyright (c) 2010 Revolution Linux inc. <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python
import sys
import getpass
"""
Interactive console emulator.
Presents the caller with an MOTD and username and password prompts, and expects
the password to match the username.
It then behaves like a console prompt:
- ROOT_CMD allows becoming root by entering the password
- UNROOT_CMD allows leaving the root session
- EXIT_CMD exits
- everything else is echoed back if it contains "echo", and ignored otherwise
"""
PROMPT = "fake> "
RPROMPT = "fake# "
USERNAME = "Username :"
PASSWD = "Password :"
MOTD = "Welcome to this fake prompt!"
ROOT_CMD = "enable"
UNROOT_CMD = "disable"
EXIT_CMD = "exit"
def main():
print MOTD
username = raw_input(USERNAME)
password = getpass.getpass(PASSWD)
if username != password:
print "Failed."
sys.exit(1)
root = False
while True:
try:
if root:
line = raw_input(RPROMPT)
else:
line = raw_input(PROMPT)
except Exception:
sys.exit(0)
if line == ROOT_CMD:
if root:
print "Already Root!"
else:
root_pass = getpass.getpass(PASSWD)
if password == root_pass:
root = True
else:
print "Bad password!"
elif line == UNROOT_CMD:
if not root:
print "Not root!"
else:
root = False
elif line == EXIT_CMD:
print "Bye bye"
sys.exit(0)
else:
if "echo" in line:
print line
if __name__ == '__main__':
main()
|
agpl-3.0
|
konstruktoid/ansible-upstream
|
lib/ansible/module_utils/network/junos/junos.py
|
19
|
13610
|
#
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import collections
import json
from contextlib import contextmanager
from copy import deepcopy
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.netconf import NetconfConnection
from ansible.module_utils._text import to_text
try:
from lxml.etree import Element, SubElement, fromstring, tostring
HAS_LXML = True
except ImportError:
from xml.etree.ElementTree import Element, SubElement, fromstring, tostring
HAS_LXML = False
ACTIONS = frozenset(['merge', 'override', 'replace', 'update', 'set'])
JSON_ACTIONS = frozenset(['merge', 'override', 'update'])
FORMATS = frozenset(['xml', 'text', 'json'])
CONFIG_FORMATS = frozenset(['xml', 'text', 'json', 'set'])
junos_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
'transport': dict(default='netconf', choices=['cli', 'netconf'])
}
junos_argument_spec = {
'provider': dict(type='dict', options=junos_provider_spec),
}
junos_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'timeout': dict(removed_in_version=2.9, type='int'),
'transport': dict(removed_in_version=2.9)
}
junos_argument_spec.update(junos_top_spec)
def get_provider_argspec():
return junos_provider_spec
def get_connection(module):
if hasattr(module, '_junos_connection'):
return module._junos_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._junos_connection = Connection(module._socket_path)
elif network_api == 'netconf':
module._junos_connection = NetconfConnection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._junos_connection
def get_capabilities(module):
if hasattr(module, '_junos_capabilities'):
return module._junos_capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module._junos_capabilities = json.loads(capabilities)
return module._junos_capabilities
def _validate_rollback_id(module, value):
try:
if not 0 <= int(value) <= 49:
raise ValueError
except ValueError:
module.fail_json(msg='rollback must be between 0 and 49')
def load_configuration(module, candidate=None, action='merge', rollback=None, format='xml'):
if all((candidate is None, rollback is None)):
module.fail_json(msg='one of candidate or rollback must be specified')
elif all((candidate is not None, rollback is not None)):
module.fail_json(msg='candidate and rollback are mutually exclusive')
if format not in FORMATS:
module.fail_json(msg='invalid format specified')
if format == 'json' and action not in JSON_ACTIONS:
module.fail_json(msg='invalid action for format json')
elif format in ('text', 'xml') and action not in ACTIONS:
module.fail_json(msg='invalid action format %s' % format)
if action == 'set' and not format == 'text':
module.fail_json(msg='format must be text when action is set')
conn = get_connection(module)
if rollback is not None:
_validate_rollback_id(module, rollback)
obj = Element('load-configuration', {'rollback': str(rollback)})
conn.execute_rpc(tostring(obj))
else:
return conn.load_configuration(config=candidate, action=action, format=format)
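# Illustrative usage (the candidate payload below is hypothetical):
#   load_configuration(module, candidate='<interfaces>...</interfaces>',
#                      action='merge', format='xml')
#   load_configuration(module, rollback=1)  # or restore a previous configuration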
def get_configuration(module, compare=False, format='xml', rollback='0', filter=None):
if format not in CONFIG_FORMATS:
module.fail_json(msg='invalid config format specified')
conn = get_connection(module)
if compare:
xattrs = {'format': format}
_validate_rollback_id(module, rollback)
xattrs['compare'] = 'rollback'
xattrs['rollback'] = str(rollback)
reply = conn.execute_rpc(tostring(Element('get-configuration', xattrs)))
else:
reply = conn.get_configuration(format=format, filter=filter)
return reply
def commit_configuration(module, confirm=False, check=False, comment=None, confirm_timeout=None, synchronize=False,
at_time=None, exit=False):
conn = get_connection(module)
if check:
reply = conn.validate()
else:
reply = conn.commit(confirmed=confirm, timeout=confirm_timeout, comment=comment, synchronize=synchronize, at_time=at_time)
return reply
def command(module, cmd, format='text', rpc_only=False):
conn = get_connection(module)
if rpc_only:
cmd += ' | display xml rpc'
return conn.command(command=cmd, format=format)
def lock_configuration(x):
conn = get_connection(x)
return conn.lock()
def unlock_configuration(x):
conn = get_connection(x)
return conn.unlock()
@contextmanager
def locked_config(module):
try:
lock_configuration(module)
yield
finally:
unlock_configuration(module)
def discard_changes(module):
conn = get_connection(module)
return conn.discard_changes()
def get_diff(module, rollback='0'):
reply = get_configuration(module, compare=True, format='text', rollback=rollback)
# if a warning is received from the device, the diff is empty.
if isinstance(reply, list):
return None
output = reply.find('.//configuration-output')
if output is not None:
return to_text(output.text, encoding='latin-1').strip()
def load_config(module, candidate, warnings, action='merge', format='xml'):
get_connection(module)
if not candidate:
return
if isinstance(candidate, list):
candidate = '\n'.join(candidate)
reply = load_configuration(module, candidate, action=action, format=format)
if isinstance(reply, list):
warnings.extend(reply)
module._junos_connection.validate()
return get_diff(module)
def get_param(module, key):
if module.params.get(key):
value = module.params[key]
elif module.params.get('provider'):
value = module.params['provider'].get(key)
else:
value = None
return value
def map_params_to_obj(module, param_to_xpath_map, param=None):
"""
Creates a new dictionary whose keys are the xpaths corresponding to
params and whose values are lists of dicts with metadata and values
for each xpath.
Acceptable metadata keys:
'value': Value of param.
'tag_only': Value is indicated by tag only in xml hierarchy.
'leaf_only': If operation is to be added at leaf node only.
'value_req': If value(text) is required for leaf node.
'is_key': If the field is key or not.
eg: Output
{
'name': [{'value': 'ge-0/0/1'}]
'disable': [{'value': True, tag_only': True}]
}
:param module:
:param param_to_xpath_map: Modules params to xpath map
:return: obj
"""
if not param:
param = module.params
obj = collections.OrderedDict()
for key, attribute in param_to_xpath_map.items():
if key in param:
is_attribute_dict = False
value = param[key]
if not isinstance(value, (list, tuple)):
value = [value]
if isinstance(attribute, dict):
xpath = attribute.get('xpath')
is_attribute_dict = True
else:
xpath = attribute
if not obj.get(xpath):
obj[xpath] = list()
for val in value:
if is_attribute_dict:
attr = deepcopy(attribute)
del attr['xpath']
attr.update({'value': val})
obj[xpath].append(attr)
else:
obj[xpath].append({'value': val})
return obj
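# Illustrative usage (the xpath map and resulting values below are hypothetical):
#   param_to_xpath_map = {
#       'name': 'name',
#       'disable': {'xpath': 'disable', 'tag_only': True},
#   }
#   obj = map_params_to_obj(module, param_to_xpath_map)
#   # e.g. {'name': [{'value': 'ge-0/0/1'}], 'disable': [{'value': True, 'tag_only': True}]}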
def map_obj_to_ele(module, want, top, value_map=None, param=None):
if not HAS_LXML:
module.fail_json(msg='lxml is not installed.')
if not param:
param = module.params
root = Element('root')
top_ele = top.split('/')
ele = SubElement(root, top_ele[0])
if len(top_ele) > 1:
for item in top_ele[1:-1]:
ele = SubElement(ele, item)
container = ele
state = param.get('state')
active = param.get('active')
if active:
oper = 'active'
else:
oper = 'inactive'
# build xml subtree
if container.tag != top_ele[-1]:
node = SubElement(container, top_ele[-1])
else:
node = container
for fxpath, attributes in want.items():
for attr in attributes:
tag_only = attr.get('tag_only', False)
leaf_only = attr.get('leaf_only', False)
value_req = attr.get('value_req', False)
is_key = attr.get('is_key', False)
parent_attrib = attr.get('parent_attrib', True)
value = attr.get('value')
field_top = attr.get('top')
# operation 'delete' is added as element attribute
# only if it is key or leaf only node
if state == 'absent' and not (is_key or leaf_only):
continue
# convert param value to device specific value
if value_map and fxpath in value_map:
value = value_map[fxpath].get(value)
if (value is not None) or tag_only or leaf_only:
ele = node
if field_top:
# eg: top = 'system/syslog/file'
# field_top = 'system/syslog/file/contents'
# <file>
# <name>test</name>
# <contents>
# </contents>
# </file>
ele_list = root.xpath(top + '/' + field_top)
if not len(ele_list):
fields = field_top.split('/')
ele = node
for item in fields:
inner_ele = root.xpath(top + '/' + item)
if len(inner_ele):
ele = inner_ele[0]
else:
ele = SubElement(ele, item)
else:
ele = ele_list[0]
if value is not None and not isinstance(value, bool):
value = to_text(value, errors='surrogate_then_replace')
if fxpath:
tags = fxpath.split('/')
for item in tags:
ele = SubElement(ele, item)
if tag_only:
if state == 'present':
if not value:
# if value of tag_only node is false, delete the node
ele.set('delete', 'delete')
elif leaf_only:
if state == 'present':
ele.set(oper, oper)
ele.text = value
else:
ele.set('delete', 'delete')
# Add value of leaf node if required while deleting.
# in some cases if value is present while deleting, it
# can result in error, hence the check
if value_req:
ele.text = value
if is_key:
par = ele.getparent()
par.set('delete', 'delete')
else:
ele.text = value
par = ele.getparent()
if parent_attrib:
if state == 'present':
# set replace attribute at parent node
if not par.attrib.get('replace'):
par.set('replace', 'replace')
# set active/inactive at parent node
if not par.attrib.get(oper):
par.set(oper, oper)
else:
par.set('delete', 'delete')
return root.getchildren()[0]
def to_param_list(module):
aggregate = module.params.get('aggregate')
if aggregate:
if isinstance(aggregate, dict):
return [aggregate]
else:
return aggregate
else:
return [module.params]
|
gpl-3.0
|
home-assistant/home-assistant
|
homeassistant/components/smappee/sensor.py
|
2
|
12427
|
"""Support for monitoring a Smappee energy sensor."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import DEVICE_CLASS_POWER, ENERGY_WATT_HOUR, POWER_WATT, VOLT
from .const import DOMAIN
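# Each entry below is a positional attribute list; as read by SmappeeSensor.__init__
# the items are [name, icon, unit_of_measurement, sensor_id, device_class], with an
# optional sixth item used by async_setup_entry as a cloud/local availability flag.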
TREND_SENSORS = {
"total_power": [
"Total consumption - Active power",
None,
POWER_WATT,
"total_power",
DEVICE_CLASS_POWER,
True, # both cloud and local
],
"alwayson": [
"Always on - Active power",
None,
POWER_WATT,
"alwayson",
DEVICE_CLASS_POWER,
False, # cloud only
],
"power_today": [
"Total consumption - Today",
"mdi:power-plug",
ENERGY_WATT_HOUR,
"power_today",
None,
False, # cloud only
],
"power_current_hour": [
"Total consumption - Current hour",
"mdi:power-plug",
ENERGY_WATT_HOUR,
"power_current_hour",
None,
False, # cloud only
],
"power_last_5_minutes": [
"Total consumption - Last 5 minutes",
"mdi:power-plug",
ENERGY_WATT_HOUR,
"power_last_5_minutes",
None,
False, # cloud only
],
"alwayson_today": [
"Always on - Today",
"mdi:sleep",
ENERGY_WATT_HOUR,
"alwayson_today",
None,
False, # cloud only
],
}
REACTIVE_SENSORS = {
"total_reactive_power": [
"Total consumption - Reactive power",
None,
POWER_WATT,
"total_reactive_power",
DEVICE_CLASS_POWER,
]
}
SOLAR_SENSORS = {
"solar_power": [
"Total production - Active power",
None,
POWER_WATT,
"solar_power",
DEVICE_CLASS_POWER,
True, # both cloud and local
],
"solar_today": [
"Total production - Today",
"mdi:white-balance-sunny",
ENERGY_WATT_HOUR,
"solar_today",
None,
False, # cloud only
],
"solar_current_hour": [
"Total production - Current hour",
"mdi:white-balance-sunny",
ENERGY_WATT_HOUR,
"solar_current_hour",
None,
False, # cloud only
],
}
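# For VOLTAGE_SENSORS the sixth item is instead the list of phase types
# (matched against service_location.phase_type) for which the sensor is created.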
VOLTAGE_SENSORS = {
"phase_voltages_a": [
"Phase voltages - A",
"mdi:flash",
VOLT,
"phase_voltage_a",
None,
["ONE", "TWO", "THREE_STAR", "THREE_DELTA"],
],
"phase_voltages_b": [
"Phase voltages - B",
"mdi:flash",
VOLT,
"phase_voltage_b",
None,
["TWO", "THREE_STAR", "THREE_DELTA"],
],
"phase_voltages_c": [
"Phase voltages - C",
"mdi:flash",
VOLT,
"phase_voltage_c",
None,
["THREE_STAR"],
],
"line_voltages_a": [
"Line voltages - A",
"mdi:flash",
VOLT,
"line_voltage_a",
None,
["ONE", "TWO", "THREE_STAR", "THREE_DELTA"],
],
"line_voltages_b": [
"Line voltages - B",
"mdi:flash",
VOLT,
"line_voltage_b",
None,
["TWO", "THREE_STAR", "THREE_DELTA"],
],
"line_voltages_c": [
"Line voltages - C",
"mdi:flash",
VOLT,
"line_voltage_c",
None,
["THREE_STAR", "THREE_DELTA"],
],
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Smappee sensor."""
smappee_base = hass.data[DOMAIN][config_entry.entry_id]
entities = []
for service_location in smappee_base.smappee.service_locations.values():
# Add all basic sensors (realtime values and aggregators)
# Some are available in local only env
for sensor in TREND_SENSORS:
if not service_location.local_polling or TREND_SENSORS[sensor][5]:
entities.append(
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
sensor=sensor,
attributes=TREND_SENSORS[sensor],
)
)
if service_location.has_reactive_value:
for reactive_sensor in REACTIVE_SENSORS:
entities.append(
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
sensor=reactive_sensor,
attributes=REACTIVE_SENSORS[reactive_sensor],
)
)
# Add solar sensors (some are available in local only env)
if service_location.has_solar_production:
for sensor in SOLAR_SENSORS:
if not service_location.local_polling or SOLAR_SENSORS[sensor][5]:
entities.append(
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
sensor=sensor,
attributes=SOLAR_SENSORS[sensor],
)
)
# Add all CT measurements
for measurement_id, measurement in service_location.measurements.items():
entities.append(
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
sensor="load",
attributes=[
measurement.name,
None,
POWER_WATT,
measurement_id,
DEVICE_CLASS_POWER,
],
)
)
# Add phase- and line voltages if available
if service_location.has_voltage_values:
for sensor_name, sensor in VOLTAGE_SENSORS.items():
if service_location.phase_type in sensor[5]:
if (
sensor_name.startswith("line_")
and service_location.local_polling
):
continue
entities.append(
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
sensor=sensor_name,
attributes=sensor,
)
)
# Add Gas and Water sensors
for sensor_id, sensor in service_location.sensors.items():
for channel in sensor.channels:
gw_icon = "mdi:gas-cylinder"
if channel.get("type") == "water":
gw_icon = "mdi:water"
entities.append(
SmappeeSensor(
smappee_base=smappee_base,
service_location=service_location,
sensor="sensor",
attributes=[
channel.get("name"),
gw_icon,
channel.get("uom"),
f"{sensor_id}-{channel.get('channel')}",
None,
],
)
)
async_add_entities(entities, True)
class SmappeeSensor(SensorEntity):
"""Implementation of a Smappee sensor."""
def __init__(self, smappee_base, service_location, sensor, attributes):
"""Initialize the Smappee sensor."""
self._smappee_base = smappee_base
self._service_location = service_location
self._sensor = sensor
self.data = None
self._state = None
self._name = attributes[0]
self._icon = attributes[1]
self._unit_of_measurement = attributes[2]
self._sensor_id = attributes[3]
self._device_class = attributes[4]
@property
def name(self):
"""Return the name for this sensor."""
if self._sensor in ["sensor", "load"]:
return (
f"{self._service_location.service_location_name} - "
f"{self._sensor.title()} - {self._name}"
)
return f"{self._service_location.service_location_name} - {self._name}"
@property
def icon(self):
"""Icon to use in the frontend."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._device_class
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def unique_id(
self,
):
"""Return the unique ID for this sensor."""
if self._sensor in ["load", "sensor"]:
return (
f"{self._service_location.device_serial_number}-"
f"{self._service_location.service_location_id}-"
f"{self._sensor}-{self._sensor_id}"
)
return (
f"{self._service_location.device_serial_number}-"
f"{self._service_location.service_location_id}-"
f"{self._sensor}"
)
@property
def device_info(self):
"""Return the device info for this sensor."""
return {
"identifiers": {(DOMAIN, self._service_location.device_serial_number)},
"name": self._service_location.service_location_name,
"manufacturer": "Smappee",
"model": self._service_location.device_model,
"sw_version": self._service_location.firmware_version,
}
async def async_update(self):
"""Get the latest data from Smappee and update the state."""
await self._smappee_base.async_update()
if self._sensor == "total_power":
self._state = self._service_location.total_power
elif self._sensor == "total_reactive_power":
self._state = self._service_location.total_reactive_power
elif self._sensor == "solar_power":
self._state = self._service_location.solar_power
elif self._sensor == "alwayson":
self._state = self._service_location.alwayson
elif self._sensor in [
"phase_voltages_a",
"phase_voltages_b",
"phase_voltages_c",
]:
phase_voltages = self._service_location.phase_voltages
if phase_voltages is not None:
if self._sensor == "phase_voltages_a":
self._state = phase_voltages[0]
elif self._sensor == "phase_voltages_b":
self._state = phase_voltages[1]
elif self._sensor == "phase_voltages_c":
self._state = phase_voltages[2]
elif self._sensor in ["line_voltages_a", "line_voltages_b", "line_voltages_c"]:
line_voltages = self._service_location.line_voltages
if line_voltages is not None:
if self._sensor == "line_voltages_a":
self._state = line_voltages[0]
elif self._sensor == "line_voltages_b":
self._state = line_voltages[1]
elif self._sensor == "line_voltages_c":
self._state = line_voltages[2]
elif self._sensor in [
"power_today",
"power_current_hour",
"power_last_5_minutes",
"solar_today",
"solar_current_hour",
"alwayson_today",
]:
trend_value = self._service_location.aggregated_values.get(self._sensor)
self._state = round(trend_value) if trend_value is not None else None
elif self._sensor == "load":
self._state = self._service_location.measurements.get(
self._sensor_id
).active_total
elif self._sensor == "sensor":
sensor_id, channel_id = self._sensor_id.split("-")
sensor = self._service_location.sensors.get(int(sensor_id))
for channel in sensor.channels:
if channel.get("channel") == int(channel_id):
self._state = channel.get("value_today")
|
apache-2.0
|
j-mracek/ci-dnf-stack
|
dnf-docker-test/features/steps/gpg_steps.py
|
2
|
4886
|
from __future__ import absolute_import
from __future__ import unicode_literals
import jinja2
from whichcraft import which
from behave import given
from command_steps import step_i_successfully_run_command
import table_utils
GPGKEY_CONF_TMPL = """
%no-protection
%transient-key
Key-Type: {{ key_type|default("RSA") }}
Key-Length: {{ key_length|default("2048") }}
{%- if subkey_type is defined %}
Subkey-Type: {{ subkey_type }}
{%- endif %}
{%- if subkey_length is defined %}
Subkey-Length: {{ subkey_length }}
{%- endif %}
Name-Real: {{ name_real|default("DNFtest") }}
Name-Comment: {{ name_comment|default("No Comment") }}
Name-Email: {{ name_email|default("dnf@noreply") }}
Expire-Date: {{ expire_date|default("0") }}
%commit
"""
GPGKEY_FILEPATH_TMPL = "/root/{!s}.{!s}"
JINJA_ENV = jinja2.Environment(undefined=jinja2.StrictUndefined)
@given('GPG key "{signed_key}" signed by "{signing_key}"')
def step_gpg_key_signed_by(ctx, signed_key, signing_key):
"""
Signs one GPG key with another GPG key, producing a detached signature file.
Examples:
.. code-block:: gherkin
Feature: GPG key signing
Scenario: Sign one GPG with another
Given GPG key "James Bond"
And GPG key "M"
And GPG key "James Bond" signed by "M"
"""
signed_key_path = GPGKEY_FILEPATH_TMPL.format(signed_key, "pubkey")
gpgbin = which("gpg2")
cmd = "{!s} --detach-sig --armor --default-key '{!s}' '{!s}'".format(gpgbin, signing_key, signed_key_path)
step_i_successfully_run_command(ctx, cmd)
@given('GPG key "{name_real}"')
def step_gpg_key(ctx, name_real):
"""
Generates a GPG key for the root user with a given identity,
a.k.a. the Name-Real attribute.
GPG key attributes can be optionally specified using the table with
following headers:
======= =========
Tag Value
======= =========
Supported GPG key attributes are:
============= ===============
Tag Default value
============= ===============
Key-Type RSA
Key-Length 2048
Subkey-Type <not present>
Subkey-Length <not present>
Name-Comment No Comment
Name-Email dnf@noreply
Expire-Date 0
============= ===============
.. note::
GPG key configuration is saved in the file /root/${Name-Real}.keyconf;
the respective public key is exported to the file /root/${Name-Real}.pubkey.
Examples:
.. code-block:: gherkin
Feature: Package signatures
Scenario: Setup repository with signed packages
Given GPG key "James Bond"
And GPG key "James Bond" imported in rpm database
And repository "TestRepo" with packages signed by "James Bond"
| Package | Tag | Value |
| TestA | | |
"""
if ctx.table: # additional GPG key configuration listed in the table
GPGKEY_HEADINGS = ['Tag', 'Value']
GPGKEY_TAGS = ['Key-Type', 'Key-Length', 'Subkey-Type', 'Subkey-Length', 'Name-Comment', 'Name-Email', 'Expire-Date']
gpgkey_conf_table = table_utils.parse_kv_table(ctx, GPGKEY_HEADINGS, GPGKEY_TAGS)
else: # no table present
gpgkey_conf_table = {}
template = JINJA_ENV.from_string(GPGKEY_CONF_TMPL)
settings = {k.lower().replace('-', '_'): v for k, v in gpgkey_conf_table.items()}
gpgkey_conf = template.render(name_real=name_real, **settings)
# write gpgkey configuration to a file
fpath = GPGKEY_FILEPATH_TMPL.format(name_real, "keyconf")
with open(fpath, 'w') as fw:
fw.write(gpgkey_conf)
# generate the GPG key
gpgbin = which("gpg2")
cmd = "{!s} --batch --gen-key '{!s}'".format(gpgbin, fpath)
step_i_successfully_run_command(ctx, cmd)
# export the public key
cmd = "{!s} --export --armor '{!s}'".format(gpgbin, name_real)
step_i_successfully_run_command(ctx, cmd)
fpath = GPGKEY_FILEPATH_TMPL.format(name_real, "pubkey")
with open(fpath, 'w') as fw:
fw.write(ctx.cmd_result.stdout)
@given('GPG key "{name_real}" imported in rpm database')
def step_gpg_key_imported_in_rpm_database(ctx, name_real):
"""
Imports the public key for the previously generated GPG key into the rpm database.
Examples:
.. code-block:: gherkin
Feature: Package signatures
Scenario: Setup repository with signed packages
Given GPG key "James Bond"
And GPG key "James Bond" imported in rpm database
And repository "TestRepo" with packages signed by "James Bond"
| Package | Tag | Value |
| TestA | | |
"""
pubkey = GPGKEY_FILEPATH_TMPL.format(name_real, 'pubkey')
rpm = which("rpm")
cmd = "{!s} --import '{!s}'".format(rpm, pubkey)
step_i_successfully_run_command(ctx, cmd)
|
gpl-3.0
|
shizhai/wprobe
|
staging_dir/host/lib/scons-2.1.0/SCons/compat/_scons_hashlib.py
|
21
|
2524
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
hashlib backwards-compatibility module for older (pre-2.5) Python versions
This does not not NOT (repeat, *NOT*) provide complete hashlib
functionality. It only wraps the portions of MD5 functionality used
by SCons, in an interface that looks like hashlib (or enough for our
purposes, anyway). In fact, this module will raise an ImportError if
the underlying md5 module isn't available.
"""
__revision__ = "src/engine/SCons/compat/_scons_hashlib.py 5357 2011/09/09 21:31:03 bdeegan"
import md5
from string import hexdigits
class md5obj(object):
md5_module = md5
def __init__(self, name, string=''):
if not name in ('MD5', 'md5'):
raise ValueError("unsupported hash type")
self.name = 'md5'
self.m = self.md5_module.md5()
def __repr__(self):
return '<%s HASH object @ %#x>' % (self.name, id(self))
def copy(self):
import copy
result = copy.copy(self)
result.m = self.m.copy()
return result
def digest(self):
return self.m.digest()
def update(self, arg):
return self.m.update(arg)
def hexdigest(self):
return self.m.hexdigest()
new = md5obj
def md5(string=''):
return md5obj('md5', string)
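# Minimal usage sketch of the hashlib-style interface wrapped above:
#   m = new('md5')
#   m.update('some bytes')
#   print m.hexdigest()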
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-2.0
|
stefanfoulis/django-cms
|
cms/south_migrations/0001_initial.py
|
48
|
30500
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Placeholder'
db.create_table('cms_placeholder', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slot', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('default_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
))
db.send_create_signal('cms', ['Placeholder'])
# Adding model 'CMSPlugin'
db.create_table('cms_cmsplugin', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('placeholder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.CMSPlugin'], null=True, blank=True)),
('position', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('language', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('plugin_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('changed_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('cms', ['CMSPlugin'])
# Adding model 'Page'
db.create_table('cms_page', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_by', self.gf('django.db.models.fields.CharField')(max_length=70)),
('changed_by', self.gf('django.db.models.fields.CharField')(max_length=70)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['cms.Page'])),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('publication_date', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
('publication_end_date', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
('in_navigation', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
('soft_root', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('reverse_id', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=40, null=True, blank=True)),
('navigation_extenders', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=80, null=True, blank=True)),
('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
('template', self.gf('django.db.models.fields.CharField')(max_length=100)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('moderator_state', self.gf('django.db.models.fields.SmallIntegerField')(default=1, blank=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('login_required', self.gf('django.db.models.fields.BooleanField')(default=False)),
('limit_visibility_in_menu', self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, db_index=True, blank=True)),
('publisher_is_draft', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
('publisher_public', self.gf('django.db.models.fields.related.OneToOneField')(related_name='publisher_draft', unique=True, null=True, to=orm['cms.Page'])),
('publisher_state', self.gf('django.db.models.fields.SmallIntegerField')(default=0, db_index=True)),
))
db.send_create_signal('cms', ['Page'])
# Adding M2M table for field placeholders on 'Page'
db.create_table('cms_page_placeholders', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('page', models.ForeignKey(orm['cms.page'], null=False)),
('placeholder', models.ForeignKey(orm['cms.placeholder'], null=False))
))
db.create_unique('cms_page_placeholders', ['page_id', 'placeholder_id'])
# Adding model 'PageModerator'
db.create_table('cms_pagemoderator', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label])),
('moderate_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
('moderate_children', self.gf('django.db.models.fields.BooleanField')(default=False)),
('moderate_descendants', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('cms', ['PageModerator'])
# Adding model 'PageModeratorState'
db.create_table('cms_pagemoderatorstate', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('action', self.gf('django.db.models.fields.CharField')(max_length=3, null=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')(default='', max_length=1000, blank=True)),
))
db.send_create_signal('cms', ['PageModeratorState'])
# Adding model 'GlobalPagePermission'
db.create_table('cms_globalpagepermission', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True, blank=True)),
('can_change', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_add', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_delete', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_change_advanced_settings', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_publish', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_change_permissions', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_move_page', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_moderate', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_view', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_recover_page', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('cms', ['GlobalPagePermission'])
# Adding M2M table for field sites on 'GlobalPagePermission'
db.create_table('cms_globalpagepermission_sites', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('globalpagepermission', models.ForeignKey(orm['cms.globalpagepermission'], null=False)),
('site', models.ForeignKey(orm['sites.site'], null=False))
))
db.create_unique('cms_globalpagepermission_sites', ['globalpagepermission_id', 'site_id'])
# Adding model 'PagePermission'
db.create_table('cms_pagepermission', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True, blank=True)),
('can_change', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_add', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_delete', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_change_advanced_settings', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_publish', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_change_permissions', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_move_page', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_moderate', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_view', self.gf('django.db.models.fields.BooleanField')(default=False)),
('grant_on', self.gf('django.db.models.fields.IntegerField')(default=5)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'], null=True, blank=True)),
))
db.send_create_signal('cms', ['PagePermission'])
# Adding model 'PageUser'
db.create_table('cms_pageuser', (
(user_ptr_name, self.gf('django.db.models.fields.related.OneToOneField')(to=orm[user_orm_label], unique=True, primary_key=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_users', to=orm[user_orm_label])),
))
db.send_create_signal('cms', ['PageUser'])
# Adding model 'PageUserGroup'
db.create_table('cms_pageusergroup', (
('group_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.Group'], unique=True, primary_key=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_usergroups', to=orm[user_orm_label])),
))
db.send_create_signal('cms', ['PageUserGroup'])
# Adding model 'Title'
db.create_table('cms_title', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('language', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('menu_title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=255)),
('path', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('has_url_overwrite', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('application_urls', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=200, null=True, blank=True)),
('redirect', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('meta_description', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
('meta_keywords', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('page_title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(related_name='title_set', to=orm['cms.Page'])),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('cms', ['Title'])
# Adding unique constraint on 'Title', fields ['language', 'page']
db.create_unique('cms_title', ['language', 'page_id'])
def backwards(self, orm):
# Removing unique constraint on 'Title', fields ['language', 'page']
db.delete_unique('cms_title', ['language', 'page_id'])
# Deleting model 'Placeholder'
db.delete_table('cms_placeholder')
# Deleting model 'CMSPlugin'
db.delete_table('cms_cmsplugin')
# Deleting model 'Page'
db.delete_table('cms_page')
# Removing M2M table for field placeholders on 'Page'
db.delete_table('cms_page_placeholders')
# Deleting model 'PageModerator'
db.delete_table('cms_pagemoderator')
# Deleting model 'PageModeratorState'
db.delete_table('cms_pagemoderatorstate')
# Deleting model 'GlobalPagePermission'
db.delete_table('cms_globalpagepermission')
# Removing M2M table for field sites on 'GlobalPagePermission'
db.delete_table('cms_globalpagepermission_sites')
# Deleting model 'PagePermission'
db.delete_table('cms_pagepermission')
# Deleting model 'PageUser'
db.delete_table('cms_pageuser')
# Deleting model 'PageUserGroup'
db.delete_table('cms_pageusergroup')
# Deleting model 'Title'
db.delete_table('cms_title')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
bsd-3-clause
|
kevlar1818/dotfiles
|
bspwm/.config/polybar/package_update_checker.py
|
1
|
1205
|
#!/usr/bin/env python
import sys
from subprocess import run
def colorize_text(text, color):
return f'%{{F{color}}}{text}%{{F-}}'
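# For illustration (hypothetical arguments): colorize_text('ERR', '#cc6666')
# returns '%{F#cc6666}ERR%{F-}', i.e. the text wrapped in polybar
# foreground-color formatting tags.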
if __name__ == '__main__':
xrdb = run('xrdb -query', shell=True, capture_output=True, text=True, check=True)
raw_colors = (
line.replace('*', '').split(':')
for line in xrdb.stdout.split('\n')
if line.startswith('*')
)
x_colors = {
name: color.strip()
for name, color in raw_colors
}
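# Sketch of the transformation above, assuming a typical xrdb line such as
# '*color1:  #cc6666' (hypothetical value): replace('*', '') and split(':')
# yield ('color1', '  #cc6666'), and the strip() leaves {'color1': '#cc6666'}.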
checkupdates = run('checkupdates', capture_output=True, text=True)
# checkupdates returns non-zero with no output if there are no packages to
# upgrade
if checkupdates.returncode != 0 and checkupdates.stderr:
print(colorize_text('ERR', x_colors['color1']))
sys.exit(0)
stripped_lines = (
line.strip()
for line in checkupdates.stdout.split('\n')
)
packages = [
line.split(' ')[0]
for line in stripped_lines
if line
]
if 'linux' in packages:
color = x_colors['color3']
elif packages:
color = x_colors['foreground']
else:
color = x_colors['color8']
print(colorize_text(len(packages), color))
|
mit
|
djboersma/puppy
|
mhd_zpad.py
|
1
|
7768
|
#!/usr/bin/env python
import SimpleITK as sitk
import numpy as np
def zpad(img,nzbot=0,nztop=0):
"""
Return an augmented copy of an image object: the central part is the same
as the original, but we add nzbot (nztop) copies of the bottom (top) layer
to the bottom (top).
"""
if type(img) != sitk.SimpleITK.Image:
raise TypeError("first arg of zpad should be a SimpleITK image, I got a {}".format(type(img)))
if type(nzbot) != int or type(nztop) != int:
raise TypeError("wrong type for nzbot({}) and/or nztop({}), should both be ints".format(type(nzbot),type(nztop)))
if nzbot<0 or nztop<0 or nztop+nzbot==0:
raise ValueError("nzbot and nztop should be nonnegative and at least one should be positive");
origin = list(img.GetOrigin());
spacing = list(img.GetSpacing());
oldsize = list(img.GetSize());
if len(oldsize) != 3:
raise ValueError("this function should only be used with 3D images");
if type(img[0,0,0]) == tuple:
raise TypeError("for now, this function only works with scalar voxel values")
nx,ny,nz = oldsize
oldarray = sitk.GetArrayFromImage(img)
assert(oldarray.shape == (nz,ny,nx) )
newarray = oldarray.copy()
newarray.resize( (nz+nzbot+nztop, ny, nx) )
newarray[nzbot:nzbot+nz,:,:] = oldarray[0:nz,:,:]
for iz in range(nzbot):
newarray[iz,:,:] = oldarray[0,:,:]
for iz in range(nztop):
newarray[nzbot+nz+iz,:,:] = oldarray[-1,:,:]
padded_img = sitk.GetImageFromArray(newarray)
padded_img.SetSpacing(spacing)
origin[2] -= nzbot * spacing[2]
padded_img.SetOrigin(origin)
return padded_img
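# Minimal usage sketch (hypothetical file name, assuming a 3D scalar image):
#   img = sitk.ReadImage('ct.mhd')
#   padded = zpad(img, nzbot=2, nztop=3)
# The padded image has GetSize()[2] == img.GetSize()[2] + 5, and its origin is
# shifted down by 2 * spacing[2] so the original voxels keep their physical position.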
if __name__ == '__main__':
import argparse, os
parser = argparse.ArgumentParser(description='Python script to copy a 3D MHD image file with extra padding to the top and/or bottom.')
parser.add_argument('-I','--inputpath',type=str,dest='INPUTPATH',help='input mhd file name')
parser.add_argument('-O','--outputpath',type=str,dest='OUTPUTPATH',help='output mhd file name')
parser.add_argument('-t','--toppadding',type=int,default=0,dest='TOPPADDING',help='number of layers to add to the top')
parser.add_argument('-b','--bottompadding',type=int,default=0,dest='BOTTOMPADDING',help='number of layers to add to the bottom')
myargs = parser.parse_args()
#print("myargs = {}".format(myargs))
assert(myargs.INPUTPATH != myargs.OUTPUTPATH)
oldimg = sitk.ReadImage(myargs.INPUTPATH)
newimg = zpad(oldimg,myargs.BOTTOMPADDING,myargs.TOPPADDING)
sitk.WriteImage(newimg,myargs.OUTPUTPATH)
#######################################################################
# TESTING
#######################################################################
import unittest
class test_zpad(unittest.TestCase):
"""
What are we testing today:
- zpad throws an exception unless
- first arg is a SimpleITK image
- SimpleITK image is 3D
- SimpleITK image voxel values are scalar
- second and third arg are int
- second and third arg are not negative
- at least one of second and third arg must be positive
- zpad will add any number of layers (copies of bottom layer) to the bottom
- zpad will add any number of layers (copies of top layer) to the top
- zpad will do both of those in one call
- zpad will never change the input image
"""
def setUp(self):
self.a2 = np.random.randint(-1024,high=4096,size=(3,4))
self.img2 = sitk.GetImageFromArray(self.a2)
self.a3 = np.random.randint(-1024,high=4096,size=(3,4,5))
#print("3d image has {} voxels with value zero".format(np.sum(self.a3==0)))
self.img3 = sitk.GetImageFromArray(self.a3.swapaxes(0,2))
self.a4 = np.random.randint(-1024,high=4096,size=(3,4,5,6))
self.img4 = sitk.GetImageFromArray(self.a4)
def test_wrong_types(self):
with self.assertRaises(TypeError):
zpad(self.a3,1,2); # not an image
with self.assertRaises(TypeError):
zpad(self.img3,1.,2); # float instead of int
with self.assertRaises(TypeError):
zpad(self.img3,1,2.); # float instead of int
with self.assertRaises(TypeError):
zpad(self.img3,1.,2.); # float instead of int
with self.assertRaises(ValueError):
zpad(self.img3,0,0); # at least one padding argument should be larger than 0
with self.assertRaises(ValueError):
zpad(self.img3,-5,10); # both ints should be nonnegative
with self.assertRaises(ValueError):
zpad(self.img3,10,-5); # both ints should be nonnegative
with self.assertRaises(ValueError):
zpad(self.img2,1,2); # image should be 3D
with self.assertRaises(TypeError):
zpad(self.img4,1,2); # image should be 3D and have scalar voxel type
def test_bottom(self):
#print("bottom layer is {}".format(self.a3[:,:,0]))
for p in [1,5,15]:
img3 = sitk.Image(self.img3)
img3.MakeUnique()
size3 = img3.GetSize()
padded_img3 = zpad(img3,p,0)
bi3 = sitk.GetArrayFromImage(img3).swapaxes(0,2)
self.assertTrue((bi3 == self.a3).all()) # check that zpad did not alter the input image
psize3 = padded_img3.GetSize()
self.assertEqual(size3[0],psize3[0])
self.assertEqual(size3[1],psize3[1])
self.assertEqual(size3[2]+p,psize3[2])
api3 = sitk.GetArrayFromImage(padded_img3).swapaxes(0,2)
for iz in range(p):
#print("testing bottom iz={} for p={}".format(iz,p))
self.assertTrue((api3[:,:,iz]==self.a3[:,:,0]).all())
def test_top(self):
#print("top layer is {}".format(self.a3[:,:,-1]))
for q in [1,5,15]:
img3 = sitk.Image(self.img3)
img3.MakeUnique()
size3 = img3.GetSize()
padded_img3 = zpad(img3,0,q)
bi3 = sitk.GetArrayFromImage(img3).swapaxes(0,2)
self.assertTrue((bi3 == self.a3).all()) # check that zpad did not alter the input image
psize3 = padded_img3.GetSize()
self.assertEqual(size3[0],psize3[0])
self.assertEqual(size3[1],psize3[1])
self.assertEqual(size3[2]+q,psize3[2])
api3 = sitk.GetArrayFromImage(padded_img3).swapaxes(0,2)
for iz in range(q):
#print("testing top iz={} for q={}".format(iz,q))
self.assertTrue((api3[:,:,-iz-1]==self.a3[:,:,-1]).all())
def test_both(self):
for p in [0,1,5,15]:
for q in [0,4,16]:
if p==0 and q==0:
continue
img3 = sitk.Image(self.img3)
img3.MakeUnique()
size3 = img3.GetSize()
padded_img3 = zpad(img3,p,q)
bi3 = sitk.GetArrayFromImage(img3).swapaxes(0,2)
self.assertTrue((bi3 == self.a3).all()) # check that zpad did not alter the input image
psize3 = padded_img3.GetSize()
self.assertEqual(size3[0],psize3[0])
self.assertEqual(size3[1],psize3[1])
self.assertEqual(size3[2]+p+q,psize3[2])
api3 = sitk.GetArrayFromImage(padded_img3).swapaxes(0,2)
for iz in range(p):
#print("both: testing bottom iz={} for p={}".format(iz,q))
self.assertTrue((api3[:,:,iz]==self.a3[:,:,0]).all())
for iz in range(q):
#print("both: testing top iz={} for q={}".format(iz,q))
self.assertTrue((api3[:,:,-iz-1]==self.a3[:,:,-1]).all())
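# The test cases above can be run without going through the command line
# interface, e.g. (assuming this file is importable as mhd_zpad):
#   python -m unittest mhd_zpad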
|
gpl-3.0
|
nubark/odoo
|
addons/mrp/mrp.py
|
2
|
73120
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from collections import OrderedDict
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
from openerp import tools, SUPERUSER_ID
from openerp.exceptions import UserError, AccessError
class mrp_property_group(osv.osv):
"""
Group of mrp properties.
"""
_name = 'mrp.property.group'
_description = 'Property Group'
_columns = {
'name': fields.char('Property Group', required=True),
'description': fields.text('Description'),
}
class mrp_property(osv.osv):
"""
Properties of mrp.
"""
_name = 'mrp.property'
_description = 'Property'
_columns = {
'name': fields.char('Name', required=True),
'composition': fields.selection([('min','min'),('max','max'),('plus','plus')], 'Properties composition', required=True, help="Not used in computations, for information purpose only."),
'group_id': fields.many2one('mrp.property.group', 'Property Group', required=True),
'description': fields.text('Description'),
}
_defaults = {
'composition': lambda *a: 'min',
}
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class mrp_workcenter(osv.osv):
_name = 'mrp.workcenter'
_description = 'Work Center'
_inherits = {'resource.resource':"resource_id"}
_columns = {
'note': fields.text('Description', help="Description of the Work Center. Explain here what a cycle is for this Work Center."),
'capacity_per_cycle': fields.float('Capacity per Cycle', help="Number of operations this Work Center can do in parallel. If this Work Center represents a team of 5 workers, the capacity per cycle is 5."),
'time_cycle': fields.float('Time for 1 cycle (hour)', help="Time in hours for doing one cycle."),
'time_start': fields.float('Time before prod.', help="Time in hours for the setup."),
'time_stop': fields.float('Time after prod.', help="Time in hours for the cleaning."),
'costs_hour': fields.float('Cost per hour', help="Specify Cost of Work Center per hour."),
'costs_hour_account_id': fields.many2one('account.analytic.account', 'Hour Account',
help="Fill this only if you want automatic analytic accounting entries on production orders.", domain=[('account_type', '=', 'normal')]),
'costs_cycle': fields.float('Cost per cycle', help="Specify Cost of Work Center per cycle."),
'costs_cycle_account_id': fields.many2one('account.analytic.account', 'Cycle Account',
help="Fill this only if you want automatic analytic accounting entries on production orders.", domain=[('account_type', '=', 'normal')]),
'costs_general_account_id': fields.many2one('account.account', 'General Account', domain=[('deprecated', '=', False)]),
'resource_id': fields.many2one('resource.resource','Resource', ondelete='cascade', required=True),
'product_id': fields.many2one('product.product','Work Center Product', help="Fill this product to easily track your production costs in the analytic accounting."),
}
_defaults = {
'capacity_per_cycle': 1.0,
'resource_type': 'material',
}
def on_change_product_cost(self, cr, uid, ids, product_id, context=None):
value = {}
if product_id:
cost = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
value = {'costs_hour': cost.standard_price}
return {'value': value}
def _check_capacity_per_cycle(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.capacity_per_cycle <= 0.0:
return False
return True
_constraints = [
(_check_capacity_per_cycle, 'The capacity per cycle must be strictly positive.', ['capacity_per_cycle']),
]
class mrp_routing(osv.osv):
"""
For specifying the routings of Work Centers.
"""
_name = 'mrp.routing'
_description = 'Routings'
_columns = {
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the routing without removing it."),
'code': fields.char('Code', size=8),
'note': fields.text('Description'),
'workcenter_lines': fields.one2many('mrp.routing.workcenter', 'routing_id', 'Work Centers', copy=True),
'location_id': fields.many2one('stock.location', 'Production Location',
help="Keep empty if you produce at the location where the finished products are needed." \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations."
),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'active': lambda *a: 1,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.routing', context=context)
}
class mrp_routing_workcenter(osv.osv):
"""
Defines working cycles and hours of a Work Center using routings.
"""
_name = 'mrp.routing.workcenter'
_description = 'Work Center Usage'
_order = 'sequence, id'
_columns = {
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of routing Work Centers."),
'cycle_nbr': fields.float('Number of Cycles', required=True,
help="Number of iterations this work center has to do in the specified operation of the routing."),
'hour_nbr': fields.float('Number of Hours', required=True, help="Time in hours for this Work Center to achieve the operation of the specified routing."),
'routing_id': fields.many2one('mrp.routing', 'Parent Routing', select=True, ondelete='cascade',
help="Routings indicates all the Work Centers used, for how long and/or cycles." \
"If Routings is set then,the third tab of a production order (Work Centers) will be automatically pre-completed."),
'note': fields.text('Description'),
'company_id': fields.related('routing_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'cycle_nbr': lambda *a: 1.0,
'hour_nbr': lambda *a: 0.0,
'sequence': 100,
}
class mrp_bom(osv.osv):
"""
Defines bills of material for a product.
"""
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit = ['mail.thread']
_columns = {
'code': fields.char('Reference', size=16),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the bills of material without removing it."),
'type': fields.selection([('normal','Manufacture this product'),('phantom','Ship this product as a set of components (kit)')], 'BoM Type', required=True,
help= "Set: When processing a sales order for this product, the delivery order will contain the raw materials, instead of the finished product."),
'position': fields.char('Internal Reference', help="Reference to a position in an external plan."),
'product_tmpl_id': fields.many2one('product.template', 'Product', domain="[('type', 'in', ['product', 'consu'])]", required=True),
'product_id': fields.many2one('product.product', 'Product Variant',
domain="['&', ('product_tmpl_id','=',product_tmpl_id), ('type', 'in', ['product', 'consu'])]",
help="If a product variant is defined the BOM is available only for this product."),
'bom_line_ids': fields.one2many('mrp.bom.line', 'bom_id', 'BoM Lines', copy=True),
'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, help="Unit of Measure (UoM) is the unit of measurement for inventory control"),
'date_start': fields.date('Valid From', help="Validity of this BoM. Keep empty if it's always valid."),
'date_stop': fields.date('Valid Until', help="Validity of this BoM. Keep empty if it's always valid."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of bills of material."),
'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. "\
"The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% during the production process."),
'property_ids': fields.many2many('mrp.property', string='Properties'),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
_defaults = {
'active': lambda *a: 1,
'product_qty': lambda *a: 1.0,
'product_efficiency': lambda *a: 1.0,
'product_rounding': lambda *a: 0.0,
'type': lambda *a: 'normal',
'product_uom': _get_uom_id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.bom', context=c),
}
_order = "sequence"
def _bom_find(self, cr, uid, product_tmpl_id=None, product_id=None, properties=None, context=None):
""" Finds BoM for particular product and product uom.
@param product_tmpl_id: Selected product.
@param product_uom: Unit of measure of a product.
@param properties: List of related properties.
@return: False or BoM id.
"""
if not context:
context = {}
if properties is None:
properties = []
if product_id:
if not product_tmpl_id:
product_tmpl_id = self.pool['product.product'].browse(cr, uid, product_id, context=context).product_tmpl_id.id
domain = [
'|',
('product_id', '=', product_id),
'&',
('product_id', '=', False),
('product_tmpl_id', '=', product_tmpl_id)
]
elif product_tmpl_id:
domain = [('product_id', '=', False), ('product_tmpl_id', '=', product_tmpl_id)]
else:
# neither product nor template, makes no sense to search
return False
if context.get('company_id'):
domain = domain + [('company_id', '=', context['company_id'])]
domain = domain + [ '|', ('date_start', '=', False), ('date_start', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT)),
'|', ('date_stop', '=', False), ('date_stop', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT))]
# order to prioritize bom with product_id over the one without
ids = self.search(cr, uid, domain, order='sequence, product_id', context=context)
# Search for a BoM that has all the specified properties; if none is found,
# fall back to the BoM without any properties that has the smallest sequence
bom_empty_prop = False
for bom in self.pool.get('mrp.bom').browse(cr, uid, ids, context=context):
if not set(map(int, bom.property_ids or [])) - set(properties or []):
if not properties or bom.property_ids:
return bom.id
elif not bom_empty_prop:
bom_empty_prop = bom.id
return bom_empty_prop
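# Illustration of the property matching above (hypothetical ids): if
# properties=[1, 2] is requested, a BoM tagged with property 1 is returned
# immediately because its properties are a subset of the request, while a BoM
# carrying no properties at all is only remembered as bom_empty_prop and
# returned if no better match exists.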
def _skip_bom_line(self, cr, uid, line, product, context=None):
""" Control if a BoM line should be produce, can be inherited for add
custom control.
@param line: BoM line.
@param product: Selected product produced.
@return: True or False
"""
if line.date_start and line.date_start > time.strftime(DEFAULT_SERVER_DATE_FORMAT) or \
line.date_stop and line.date_stop < time.strftime(DEFAULT_SERVER_DATE_FORMAT):
return True
# all bom_line_id variant values must be in the product
if line.attribute_value_ids:
if not product or (set(map(int,line.attribute_value_ids or [])) - set(map(int,product.attribute_value_ids))):
return True
return False
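# For example (hypothetical data): a line whose date_start lies in the future,
# or whose attribute_value_ids contain a value the selected product variant
# does not have, is skipped and therefore not consumed by the production order.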
def _prepare_wc_line(self, cr, uid, bom, wc_use, level=0, factor=1, context=None):
wc = wc_use.workcenter_id
d, m = divmod(factor, wc_use.workcenter_id.capacity_per_cycle)
mult = (d + (m and 1.0 or 0.0))
cycle = mult * wc_use.cycle_nbr
return {
'name': tools.ustr(wc_use.name) + ' - ' + tools.ustr(bom.product_tmpl_id.name_get()[0][1]),
'workcenter_id': wc.id,
'sequence': level + (wc_use.sequence or 0),
'cycle': cycle,
'hour': float(wc_use.hour_nbr * mult + ((wc.time_start or 0.0) + (wc.time_stop or 0.0) + cycle * (wc.time_cycle or 0.0)) * (wc.time_efficiency or 1.0)),
}
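# Worked example with illustrative numbers: factor=7 and capacity_per_cycle=5
# give divmod(7, 5) == (1, 2), so mult == 2 and, with cycle_nbr=1, cycle == 2.
# With hour_nbr=1, time_start=0.25, time_stop=0.25, time_cycle=0.5 and
# time_efficiency=1.0, hour == 1*2 + (0.25 + 0.25 + 2*0.5) * 1.0 == 3.5.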
def _prepare_consume_line(self, cr, uid, bom_line_id, quantity, context=None):
return {
'name': bom_line_id.product_id.name,
'product_id': bom_line_id.product_id.id,
'product_qty': quantity,
'product_uom': bom_line_id.product_uom.id
}
def _bom_explode(self, cr, uid, bom, product, factor, properties=None, level=0, routing_id=False, previous_products=None, master_bom=None, context=None):
""" Finds Products and Work Centers for related BoM for manufacturing order.
@param bom: BoM of particular product template.
@param product: Select a particular variant of the BoM. If False, use the BoM without variants.
@param factor: Quantity to produce, expressed in the UoM of the BoM and taking into account the quantities produced by the BoM.
@param properties: A list of property ids.
@param level: Depth level at which to find BoM lines; starts from 10.
@param previous_products: List of products already used by the BoM explosion, to avoid recursion.
@param master_bom: On recursion, used to display the name of the master BoM.
@return: result: List of dictionaries containing product details.
result2: List of dictionaries containing Work Center details.
"""
uom_obj = self.pool.get("product.uom")
routing_obj = self.pool.get('mrp.routing')
master_bom = master_bom or bom
def _factor(factor, product_efficiency, product_rounding):
factor = factor / (product_efficiency or 1.0)
if product_rounding:
factor = tools.float_round(factor,
precision_rounding=product_rounding,
rounding_method='UP')
if factor < product_rounding:
factor = product_rounding
return factor
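# Illustrative numbers for _factor: factor=7 with product_efficiency=0.9 gives
# 7 / 0.9 ~= 7.78, and product_rounding=0.5 rounds it up to the next multiple
# of 0.5, i.e. 8.0; a result smaller than the rounding step is raised to it.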
factor = _factor(factor, bom.product_efficiency, bom.product_rounding)
result = []
result2 = []
routing = (routing_id and routing_obj.browse(cr, uid, routing_id)) or bom.routing_id or False
if routing:
for wc_use in routing.workcenter_lines:
result2.append(self._prepare_wc_line(
cr, uid, bom, wc_use, level=level, factor=factor,
context=context))
for bom_line_id in bom.bom_line_ids:
if self._skip_bom_line(cr, uid, bom_line_id, product, context=context):
continue
if set(map(int, bom_line_id.property_ids or [])) - set(properties or []):
continue
if previous_products and bom_line_id.product_id.product_tmpl_id.id in previous_products:
raise UserError(_('BoM "%s" contains a BoM line with a product recursion: "%s".') % (master_bom.code or "", bom_line_id.product_id.name_get()[0][1]))
quantity = _factor(bom_line_id.product_qty * factor, bom_line_id.product_efficiency, bom_line_id.product_rounding)
bom_id = self._bom_find(cr, uid, product_id=bom_line_id.product_id.id, properties=properties, context=context)
# If the BoM should not behave like a kit, just add the product; otherwise explode it further
if (not bom_id) or (self.browse(cr, uid, bom_id, context=context).type != "phantom"):
result.append(self._prepare_consume_line(
cr, uid, bom_line_id, quantity, context=context))
else:
all_prod = [bom.product_tmpl_id.id] + (previous_products or [])
bom2 = self.browse(cr, uid, bom_id, context=context)
# We need to convert to units/UoM of chosen BoM
factor2 = uom_obj._compute_qty(cr, uid, bom_line_id.product_uom.id, quantity, bom2.product_uom.id)
quantity2 = factor2 / bom2.product_qty
res = self._bom_explode(cr, uid, bom2, bom_line_id.product_id, quantity2,
properties=properties, level=level + 10, previous_products=all_prod, master_bom=master_bom, context=context)
result = result + res[0]
result2 = result2 + res[1]
return result, result2
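# Example of the phantom branch with illustrative figures: a line that needs
# 2 dozen of a kit whose own BoM is defined for 6 units gives factor2 == 24
# units after the UoM conversion, hence quantity2 == 24 / 6 == 4 and the kit
# BoM is exploded four times.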
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
bom_data = self.read(cr, uid, id, [], context=context)
default.update(name=_("%s (copy)") % (bom_data['display_name']))
return super(mrp_bom, self).copy_data(cr, uid, id, default, context=context)
def onchange_uom(self, cr, uid, ids, product_tmpl_id, product_uom, context=None):
res = {'value': {}}
if not product_uom or not product_tmpl_id:
return res
product = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def unlink(self, cr, uid, ids, context=None):
if self.pool['mrp.production'].search(cr, uid, [('bom_id', 'in', ids), ('state', 'not in', ['done', 'cancel'])], context=context):
raise UserError(_('You can not delete a Bill of Material with running manufacturing orders.\nPlease close or cancel it first.'))
return super(mrp_bom, self).unlink(cr, uid, ids, context=context)
def onchange_product_tmpl_id(self, cr, uid, ids, product_tmpl_id, product_qty=0, context=None):
""" Changes UoM and name if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
res = {}
if product_tmpl_id:
prod = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
res['value'] = {
'product_uom': prod.uom_id.id,
}
return res
def name_get(self, cr, uid, ids, context=None):
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.product_tmpl_id.name
if record.code:
name = '[%s] %s' % (record.code, name)
res.append((record.id, name))
return res
class mrp_bom_line(osv.osv):
_name = 'mrp.bom.line'
_order = "sequence"
_rec_name = "product_id"
def _get_child_bom_lines(self, cr, uid, ids, field_name, arg, context=None):
"""If the BOM line refers to a BOM, return the ids of the child BOM lines"""
bom_obj = self.pool['mrp.bom']
res = {}
for bom_line in self.browse(cr, uid, ids, context=context):
bom_id = bom_obj._bom_find(cr, uid,
product_tmpl_id=bom_line.product_id.product_tmpl_id.id,
product_id=bom_line.product_id.id, context=context)
if bom_id:
child_bom = bom_obj.browse(cr, uid, bom_id, context=context)
res[bom_line.id] = [x.id for x in child_bom.bom_line_ids]
else:
res[bom_line.id] = False
return res
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True,
help="Unit of Measure (UoM) is the unit of measurement for inventory control"),
'date_start': fields.date('Valid From', help="Validity of component. Keep empty if it's always valid."),
'date_stop': fields.date('Valid Until', help="Validity of component. Keep empty if it's always valid."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying."),
'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% within the production process."),
'property_ids': fields.many2many('mrp.property', string='Properties'), #Not used
'bom_id': fields.many2one('mrp.bom', 'Parent BoM', ondelete='cascade', select=True, required=True),
'attribute_value_ids': fields.many2many('product.attribute.value', string='Variants', help="BOM Product Variants needed to apply this line."),
'child_line_ids': fields.function(_get_child_bom_lines, relation="mrp.bom.line", string="BOM lines of the referred bom", type="one2many")
}
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
_defaults = {
'product_qty': lambda *a: 1.0,
'product_efficiency': lambda *a: 1.0,
'product_rounding': lambda *a: 0.0,
'product_uom': _get_uom_id,
'sequence': 1,
}
_sql_constraints = [
('bom_qty_zero', 'CHECK (product_qty>0)', 'All product quantities must be greater than 0.\n' \
'You should install the mrp_byproduct module if you want to manage extra products on BoMs !'),
]
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
product_obj = self.pool.get('product.product')
if 'product_id' in values and not 'product_uom' in values:
values['product_uom'] = product_obj.browse(cr, uid, values.get('product_id'), context=context).uom_id.id
return super(mrp_bom_line, self).create(cr, uid, values, context=context)
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value': {}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def onchange_product_id(self, cr, uid, ids, product_id, product_qty=0, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
res = {}
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['value'] = {
'product_uom': prod.uom_id.id,
}
return res
class mrp_production(osv.osv):
"""
Production Orders / Manufacturing Orders
"""
_name = 'mrp.production'
_description = 'Manufacturing Order'
_date_name = 'date_planned'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _production_calc(self, cr, uid, ids, prop, unknow_none, context=None):
""" Calculates total hours and total no. of cycles for a production order.
@param prop: Name of field.
@param unknow_none:
@return: Dictionary of values.
"""
result = {}
for prod in self.browse(cr, uid, ids, context=context):
result[prod.id] = {
'hour_total': 0.0,
'cycle_total': 0.0,
}
for wc in prod.workcenter_lines:
result[prod.id]['hour_total'] += wc.hour
result[prod.id]['cycle_total'] += wc.cycle
return result
def _get_workcenter_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool['mrp.production.workcenter.line'].browse(cr, uid, ids, context=context):
result[line.production_id.id] = True
return result.keys()
def _src_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (AccessError, ValueError):
location_id = False
return location_id
def _dest_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (AccessError, ValueError):
location_id = False
return location_id
def _get_progress(self, cr, uid, ids, name, arg, context=None):
""" Return product quantity percentage """
result = dict.fromkeys(ids, 100)
for mrp_production in self.browse(cr, uid, ids, context=context):
if mrp_production.product_qty:
done = 0.0
for move in mrp_production.move_created_ids2:
if not move.scrapped and move.product_id == mrp_production.product_id:
done += move.product_qty
result[mrp_production.id] = done / mrp_production.product_qty * 100
return result
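# For instance (illustrative quantities): with product_qty == 10 and two
# non-scrapped produced moves of 3 and 4 for the same product, the reported
# progress is (3 + 4) / 10 * 100 == 70.0.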
def _moves_assigned(self, cr, uid, ids, name, arg, context=None):
""" Test whether all the consume lines are assigned """
res = {}
for production in self.browse(cr, uid, ids, context=context):
res[production.id] = True
states = [x.state != 'assigned' for x in production.move_lines if x]
if any(states) or len(states) == 0: #When no moves, ready_production will be False, but test_ready will pass
res[production.id] = False
return res
def _mrp_from_move(self, cr, uid, ids, context=None):
""" Return mrp"""
res = []
for move in self.browse(cr, uid, ids, context=context):
res += self.pool.get("mrp.production").search(cr, uid, [('move_lines', 'in', move.id)], context=context)
return res
_columns = {
'name': fields.char('Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'origin': fields.char('Source Document', readonly=True, states={'draft': [('readonly', False)]},
help="Reference of the document that generated this production order request.", copy=False),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')], 'Priority',
select=True, readonly=True, states=dict.fromkeys(['draft', 'confirmed'], [('readonly', False)])),
'product_id': fields.many2one('product.product', 'Product', required=True, readonly=True, states={'draft': [('readonly', False)]},
domain=[('type', 'in', ['product', 'consu'])]),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'progress': fields.function(_get_progress, type='float',
string='Production progress'),
'location_src_id': fields.many2one('stock.location', 'Raw Materials Location', required=True,
readonly=True, states={'draft': [('readonly', False)]},
help="Location where the system will look for components."),
'location_dest_id': fields.many2one('stock.location', 'Finished Products Location', required=True,
readonly=True, states={'draft': [('readonly', False)]},
help="Location where the system will stock the finished products."),
'date_planned': fields.datetime('Scheduled Date', required=True, select=1, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'date_start': fields.datetime('Start Date', select=True, readonly=True, copy=False),
'date_finished': fields.datetime('End Date', select=True, readonly=True, copy=False),
'bom_id': fields.many2one('mrp.bom', 'Bill of Material', readonly=True, states={'draft': [('readonly', False)]},
help="Bill of Materials allow you to define the list of required raw materials to make a finished product."),
'routing_id': fields.many2one('mrp.routing', string='Routing', on_delete='set null', readonly=True, states={'draft': [('readonly', False)]},
help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production plannification."),
'move_prod_id': fields.many2one('stock.move', 'Product Move', readonly=True, copy=False),
'move_lines': fields.one2many('stock.move', 'raw_material_production_id', 'Products to Consume',
domain=[('state', 'not in', ('done', 'cancel'))], readonly=True, states={'draft': [('readonly', False)]}),
'move_lines2': fields.one2many('stock.move', 'raw_material_production_id', 'Consumed Products',
domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
'move_created_ids': fields.one2many('stock.move', 'production_id', 'Products to Produce',
domain=[('state', 'not in', ('done', 'cancel'))], readonly=True),
'move_created_ids2': fields.one2many('stock.move', 'production_id', 'Produced Products',
domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
'product_lines': fields.one2many('mrp.production.product.line', 'production_id', 'Scheduled goods',
readonly=True),
'workcenter_lines': fields.one2many('mrp.production.workcenter.line', 'production_id', 'Work Centers Utilisation',
readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('confirmed', 'Awaiting Raw Materials'),
('ready', 'Ready to Produce'), ('in_production', 'Production Started'), ('done', 'Done')],
string='Status', readonly=True,
track_visibility='onchange', copy=False,
help="When the production order is created the status is set to 'Draft'.\n"
"If the order is confirmed the status is set to 'Waiting Goods.\n"
"If any exceptions are there, the status is set to 'Picking Exception.\n"
"If the stock is available then the status is set to 'Ready to Produce.\n"
"When the production gets started then the status is set to 'In Production.\n"
"When the production is over, the status is set to 'Done'."),
'hour_total': fields.function(_production_calc, type='float', string='Total Hours', multi='workorder', store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['workcenter_lines'], 40),
'mrp.production.workcenter.line': (_get_workcenter_line, ['hour', 'cycle'], 40),
}),
'cycle_total': fields.function(_production_calc, type='float', string='Total Cycles', multi='workorder', store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['workcenter_lines'], 40),
'mrp.production.workcenter.line': (_get_workcenter_line, ['hour', 'cycle'], 40),
}),
'user_id': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'ready_production': fields.function(_moves_assigned, type='boolean', string="Ready for production", store={'stock.move': (_mrp_from_move, ['state'], 10)}),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product'),
}
_defaults = {
'priority': lambda *a: '1',
'state': lambda *a: 'draft',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'product_qty': lambda *a: 1.0,
'user_id': lambda self, cr, uid, c: uid,
'name': lambda self, cr, uid, context: self.pool['ir.sequence'].next_by_code(cr, uid, 'mrp.production', context=context) or '/',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.production', context=c),
'location_src_id': _src_id_default,
'location_dest_id': _dest_id_default
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
_order = 'priority desc, date_planned asc'
def _check_qty(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
if order.product_qty <= 0:
return False
return True
_constraints = [
(_check_qty, 'Order quantity cannot be negative or zero!', ['product_qty']),
]
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
product_obj = self.pool.get('product.product')
if 'product_id' in values and 'product_uom' not in values:
values['product_uom'] = product_obj.browse(cr, uid, values.get('product_id'), context=context).uom_id.id
return super(mrp_production, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
for production in self.browse(cr, uid, ids, context=context):
if production.state not in ('draft', 'cancel'):
state_label = dict(production.fields_get(['state'])['state']['selection']).get(production.state)
raise UserError(_('Cannot delete a manufacturing order in state \'%s\'.') % state_label)
return super(mrp_production, self).unlink(cr, uid, ids, context=context)
def location_id_change(self, cr, uid, ids, src, dest, context=None):
""" Changes destination location if source location is changed.
@param src: Source location id.
@param dest: Destination location id.
@return: Dictionary of values.
"""
if dest:
return {}
if src:
return {'value': {'location_dest_id': src}}
return {}
def product_id_change(self, cr, uid, ids, product_id, product_qty=0, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
result = {}
if not product_id:
return {'value': {
'product_uom': False,
'bom_id': False,
'routing_id': False,
'product_tmpl_id': False
}}
bom_obj = self.pool.get('mrp.bom')
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
bom_id = bom_obj._bom_find(cr, uid, product_id=product.id, properties=[], context=context)
routing_id = False
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
product_uom_id = product.uom_id and product.uom_id.id or False
result['value'] = {'product_uom': product_uom_id, 'bom_id': bom_id, 'routing_id': routing_id, 'product_tmpl_id': product.product_tmpl_id}
return result
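# A sketch of the onchange return value built above (ids are hypothetical
# example values, not taken from real data):
#   {'value': {'product_uom': 3, 'bom_id': 12, 'routing_id': False,
#              'product_tmpl_id': <product.template of the product>}}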
def bom_id_change(self, cr, uid, ids, bom_id, context=None):
""" Finds routing for changed BoM.
@param bom_id: Id of the BoM.
@return: Dictionary of values.
"""
if not bom_id:
return {'value': {
'routing_id': False
}}
bom_point = self.pool.get('mrp.bom').browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
result = {
'routing_id': routing_id
}
return {'value': result}
def _prepare_lines(self, cr, uid, production, properties=None, context=None):
# search BoM structure and route
bom_obj = self.pool.get('mrp.bom')
uom_obj = self.pool.get('product.uom')
bom_point = production.bom_id
bom_id = production.bom_id.id
if not bom_point:
bom_id = bom_obj._bom_find(cr, uid, product_id=production.product_id.id, properties=properties, context=context)
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id)
routing_id = bom_point.routing_id.id or False
self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id})
if not bom_id:
raise UserError(_("Cannot find a bill of material for this product."))
# get components and workcenter_lines from BoM structure
factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id)
# product_lines, workcenter_lines
return bom_obj._bom_explode(cr, uid, bom_point, production.product_id, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id, context=context)
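# Worked example for the factor above (hypothetical numbers, assuming the order
# UoM equals the BoM UoM): an order for 10.0 units against a BoM defined for
# 2.0 units gives factor / bom_point.product_qty = 10.0 / 2.0 = 5.0, so every
# BoM line is exploded with its quantity multiplied by 5.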
def _action_compute_lines(self, cr, uid, ids, properties=None, context=None):
""" Compute product_lines and workcenter_lines from BoM structure
@return: product_lines
"""
if properties is None:
properties = []
results = []
prod_line_obj = self.pool.get('mrp.production.product.line')
workcenter_line_obj = self.pool.get('mrp.production.workcenter.line')
for production in self.browse(cr, uid, ids, context=context):
#unlink product_lines
prod_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.product_lines], context=context)
#unlink workcenter_lines
workcenter_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.workcenter_lines], context=context)
res = self._prepare_lines(cr, uid, production, properties=properties, context=context)
results = res[0] # product_lines
results2 = res[1] # workcenter_lines
# reset product_lines in production order
for line in results:
line['production_id'] = production.id
prod_line_obj.create(cr, uid, line)
#reset workcenter_lines in production order
for line in results2:
line['production_id'] = production.id
workcenter_line_obj.create(cr, uid, line, context)
return results
def action_compute(self, cr, uid, ids, properties=None, context=None):
""" Computes bills of material of a product.
@param properties: List containing dictionaries of properties.
@return: Number of product lines computed.
"""
return len(self._action_compute_lines(cr, uid, ids, properties=properties, context=context))
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the production order and related stock moves.
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
proc_obj = self.pool.get('procurement.order')
for production in self.browse(cr, uid, ids, context=context):
if production.move_created_ids:
move_obj.action_cancel(cr, uid, [x.id for x in production.move_created_ids], context=context)
procs = proc_obj.search(cr, uid, [('move_dest_id', 'in', [x.id for x in production.move_lines])], context=context)
if procs:
proc_obj.cancel(cr, uid, procs, context=context)
move_obj.action_cancel(cr, uid, [x.id for x in production.move_lines], context=context)
self.write(cr, uid, ids, {'state': 'cancel'})
# Put related procurements in exception
proc_obj = self.pool.get("procurement.order")
procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context)
if procs:
for proc in procs:
proc_obj.message_post(cr, uid, proc, body=_('Manufacturing order cancelled.'), context=context)
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def action_ready(self, cr, uid, ids, context=None):
""" Changes the production state to Ready and location id of stock move.
@return: True
"""
move_obj = self.pool.get('stock.move')
self.write(cr, uid, ids, {'state': 'ready'})
for production in self.browse(cr, uid, ids, context=context):
if not production.move_created_ids:
self._make_production_produce_line(cr, uid, production, context=context)
if production.move_prod_id and production.move_prod_id.location_id.id != production.location_dest_id.id:
move_obj.write(cr, uid, [production.move_prod_id.id],
{'location_id': production.location_dest_id.id})
return True
def _compute_costs_from_production(self, cr, uid, ids, context=None):
""" Generate workcenter costs in analytic accounts"""
for production in self.browse(cr, uid, ids):
total_cost = self._costs_generate(cr, uid, production)
def action_production_end(self, cr, uid, ids, context=None):
""" Changes production state to Finish and writes finished date.
@return: True
"""
self._compute_costs_from_production(cr, uid, ids, context)
write_res = self.write(cr, uid, ids, {'state': 'done', 'date_finished': time.strftime('%Y-%m-%d %H:%M:%S')})
# Check related procurements
proc_obj = self.pool.get("procurement.order")
procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context)
proc_obj.check(cr, uid, procs, context=context)
return write_res
def test_production_done(self, cr, uid, ids):
""" Tests whether production is done or not.
@return: True or False
"""
res = True
for production in self.browse(cr, uid, ids):
if production.move_lines:
res = False
if production.move_created_ids:
res = False
return res
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
""" Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but if the
module mrp_subproduct is installed, then we must use the move_id to identify the product to produce
and its quantity.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Will be used in mrp_subproduct.
:return: The factor to apply to the quantity that we should produce for the given production order.
"""
return 1
def _get_produced_qty(self, cr, uid, production, context=None):
''' returns the produced quantity of product 'production.product_id' for the given production, in the product UoM
'''
produced_qty = 0
for produced_product in production.move_created_ids2:
if (produced_product.scrapped) or (produced_product.product_id.id != production.product_id.id):
continue
produced_qty += produced_product.product_qty
return produced_qty
def _get_consumed_data(self, cr, uid, production, context=None):
''' returns a dictionary containing for each raw material of the given production, its quantity already consumed (in the raw material UoM)
'''
consumed_data = {}
# Calculate already consumed qtys
for consumed in production.move_lines2:
if consumed.scrapped:
continue
if not consumed_data.get(consumed.product_id.id, False):
consumed_data[consumed.product_id.id] = 0
consumed_data[consumed.product_id.id] += consumed.product_qty
return consumed_data
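# The mapping returned above is keyed by product id with consumed quantities
# summed per product, e.g. (hypothetical ids and quantities): {42: 5.0, 43: 1.5}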
def _calculate_qty(self, cr, uid, production, product_qty=0.0, context=None):
"""
Calculates the quantity still needed to produce an extra number of products.
product_qty is in the UoM of the product.
"""
quant_obj = self.pool.get("stock.quant")
uom_obj = self.pool.get("product.uom")
produced_qty = self._get_produced_qty(cr, uid, production, context=context)
consumed_data = self._get_consumed_data(cr, uid, production, context=context)
#In case no product_qty is given, take the remaining qty to produce for the given production
if not product_qty:
product_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.product_id.uom_id.id) - produced_qty
production_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.product_id.uom_id.id)
scheduled_qty = OrderedDict()
for scheduled in production.product_lines:
if scheduled.product_id.type not in ['product', 'consu']:
continue
qty = uom_obj._compute_qty(cr, uid, scheduled.product_uom.id, scheduled.product_qty, scheduled.product_id.uom_id.id)
if scheduled_qty.get(scheduled.product_id.id):
scheduled_qty[scheduled.product_id.id] += qty
else:
scheduled_qty[scheduled.product_id.id] = qty
dicts = OrderedDict()
# Find product qty to be consumed and consume it
for product_id in scheduled_qty.keys():
consumed_qty = consumed_data.get(product_id, 0.0)
# qty available for consume and produce
sched_product_qty = scheduled_qty[product_id]
qty_avail = sched_product_qty - consumed_qty
if qty_avail <= 0.0:
# there will be nothing to consume for this raw material
continue
if not dicts.get(product_id):
dicts[product_id] = {}
# total qty of consumed product we need after this consumption
if product_qty + produced_qty <= production_qty:
total_consume = ((product_qty + produced_qty) * sched_product_qty / production_qty)
else:
total_consume = sched_product_qty
qty = total_consume - consumed_qty
# Search for quants related to this related move
for move in production.move_lines:
if qty <= 0.0:
break
if move.product_id.id != product_id:
continue
q = min(move.product_qty, qty)
quants = quant_obj.quants_get_preferred_domain(cr, uid, q, move, domain=[('qty', '>', 0.0)],
preferred_domain_list=[[('reservation_id', '=', move.id)]], context=context)
for quant, quant_qty in quants:
if quant:
lot_id = quant.lot_id.id
if not product_id in dicts.keys():
dicts[product_id] = {lot_id: quant_qty}
elif lot_id in dicts[product_id].keys():
dicts[product_id][lot_id] += quant_qty
else:
dicts[product_id][lot_id] = quant_qty
qty -= quant_qty
if float_compare(qty, 0, self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')) == 1:
if dicts[product_id].get(False):
dicts[product_id][False] += qty
else:
dicts[product_id][False] = qty
consume_lines = []
for prod in dicts.keys():
for lot, qty in dicts[prod].items():
consume_lines.append({'product_id': prod, 'product_qty': qty, 'lot_id': lot})
return consume_lines
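# consume_lines is a list of dicts, one per (product, lot) pair, e.g.
# (hypothetical ids and quantities):
#   [{'product_id': 42, 'product_qty': 3.0, 'lot_id': 7},
#    {'product_id': 42, 'product_qty': 1.0, 'lot_id': False}]
# where lot_id is False for the part of the quantity that could not be matched
# to a reserved quant/lot.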
def _calculate_total_cost(self, cr, uid, total_consume_moves, context=None):
total_cost = 0
for consumed_move in self.pool['stock.move'].browse(cr, uid, total_consume_moves, context=context):
total_cost += sum([x.inventory_value for x in consumed_move.quant_ids if x.qty > 0])
return total_cost
def _calculate_workcenter_cost(self, cr, uid, production_id, context=None):
""" Compute the planned production cost from the workcenters """
production = self.browse(cr, uid, production_id, context=context)
total_cost = 0.0
for wc_line in production.workcenter_lines:
wc = wc_line.workcenter_id
total_cost += wc_line.hour*wc.costs_hour + wc_line.cycle*wc.costs_cycle
return total_cost
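# Worked example for the cost above (hypothetical rates): a work order of
# 2.0 hours on a work center costing 10.0/hour plus 1.0 cycle at 5.0/cycle
# gives 2.0 * 10.0 + 1.0 * 5.0 = 25.0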
def action_produce(self, cr, uid, production_id, production_qty, production_mode, wiz=False, context=None):
""" To produce final product based on production mode (consume/consume&produce).
If Production mode is consume, all stock move lines of raw materials will be done/consumed.
If Production mode is consume & produce, all stock move lines of raw materials will be done/consumed
and stock move lines of the final product will also be done/produced.
@param production_id: the ID of mrp.production object
@param production_qty: specify qty to produce in the uom of the production order
@param production_mode: specify production mode (consume/consume&produce).
@param wiz: the mrp produce product wizard, which will tell the amount of consumed products needed
@return: True
"""
stock_mov_obj = self.pool.get('stock.move')
uom_obj = self.pool.get("product.uom")
production = self.browse(cr, uid, production_id, context=context)
production_qty_uom = uom_obj._compute_qty(cr, uid, production.product_uom.id, production_qty, production.product_id.uom_id.id)
precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')
main_production_move = False
if production_mode == 'consume_produce':
for produce_product in production.move_created_ids:
if produce_product.product_id.id == production.product_id.id:
main_production_move = produce_product.id
total_consume_moves = set()
if production_mode in ['consume', 'consume_produce']:
if wiz:
consume_lines = []
for cons in wiz.consume_lines:
consume_lines.append({'product_id': cons.product_id.id, 'lot_id': cons.lot_id.id, 'product_qty': cons.product_qty})
else:
consume_lines = self._calculate_qty(cr, uid, production, production_qty_uom, context=context)
for consume in consume_lines:
remaining_qty = consume['product_qty']
for raw_material_line in production.move_lines:
if raw_material_line.state in ('done', 'cancel'):
continue
if remaining_qty <= 0:
break
if consume['product_id'] != raw_material_line.product_id.id:
continue
consumed_qty = min(remaining_qty, raw_material_line.product_qty)
stock_mov_obj.action_consume(cr, uid, [raw_material_line.id], consumed_qty, raw_material_line.location_id.id,
restrict_lot_id=consume['lot_id'], consumed_for=main_production_move, context=context)
total_consume_moves.add(raw_material_line.id)
remaining_qty -= consumed_qty
if not float_is_zero(remaining_qty, precision_digits=precision):
#consumed more in wizard than previously planned
product = self.pool.get('product.product').browse(cr, uid, consume['product_id'], context=context)
extra_move_id = self._make_consume_line_from_data(cr, uid, production, product, product.uom_id.id, remaining_qty, context=context)
stock_mov_obj.write(cr, uid, [extra_move_id], {'restrict_lot_id': consume['lot_id'],
'consumed_for': main_production_move}, context=context)
stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context)
total_consume_moves.add(extra_move_id)
if production_mode == 'consume_produce':
# add production lines that have already been consumed since the last 'consume & produce'
last_production_date = production.move_created_ids2 and max(production.move_created_ids2.mapped('date')) or False
already_consumed_lines = production.move_lines2.filtered(lambda l: l.date > last_production_date)
total_consume_moves = total_consume_moves.union(already_consumed_lines.ids)
price_unit = 0
for produce_product in production.move_created_ids:
is_main_product = (produce_product.product_id.id == production.product_id.id) and production.product_id.cost_method=='real'
if is_main_product:
total_cost = self._calculate_total_cost(cr, uid, list(total_consume_moves), context=context)
production_cost = self._calculate_workcenter_cost(cr, uid, production_id, context=context)
price_unit = (total_cost + production_cost) / production_qty_uom
subproduct_factor = self._get_subproduct_factor(cr, uid, production.id, produce_product.id, context=context)
lot_id = False
if wiz:
lot_id = wiz.lot_id.id
qty = min(subproduct_factor * production_qty_uom, produce_product.product_qty) #Needed when producing more than maximum quantity
if is_main_product and price_unit:
stock_mov_obj.write(cr, uid, [produce_product.id], {'price_unit': price_unit}, context=context)
new_moves = stock_mov_obj.action_consume(cr, uid, [produce_product.id], qty,
location_id=produce_product.location_id.id, restrict_lot_id=lot_id, context=context)
stock_mov_obj.write(cr, uid, new_moves, {'production_id': production_id}, context=context)
remaining_qty = subproduct_factor * production_qty_uom - qty
if not float_is_zero(remaining_qty, precision_digits=precision):
# produced more than planned: create an extra move for the surplus quantity
extra_move_id = stock_mov_obj.copy(cr, uid, produce_product.id, default={'product_uom_qty': remaining_qty,
'production_id': production_id}, context=context)
if is_main_product:
stock_mov_obj.write(cr, uid, [extra_move_id], {'price_unit': price_unit}, context=context)
stock_mov_obj.action_confirm(cr, uid, [extra_move_id], context=context)
stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context)
self.message_post(cr, uid, production_id, body=_("%s produced") % self._description, context=context)
# Remove remaining products to consume if no more products to produce
if not production.move_created_ids and production.move_lines:
stock_mov_obj.action_cancel(cr, uid, [x.id for x in production.move_lines], context=context)
self.signal_workflow(cr, uid, [production_id], 'button_produce_done')
return True
def _costs_generate(self, cr, uid, production):
""" Calculates total costs at the end of the production.
@param production: Id of production order.
@return: Calculated amount.
"""
amount = 0.0
analytic_line_obj = self.pool.get('account.analytic.line')
for wc_line in production.workcenter_lines:
wc = wc_line.workcenter_id
if wc.costs_general_account_id:
# Cost per hour
value = wc_line.hour * wc.costs_hour
account = wc.costs_hour_account_id.id
if value and account:
amount += value
# we use SUPERUSER_ID as we do not guarantee an mrp user
# has access to account analytic lines but still should be
# able to produce orders
analytic_line_obj.create(cr, SUPERUSER_ID, {
'name': wc_line.name + ' (H)',
'amount': value,
'account_id': account,
'general_account_id': wc.costs_general_account_id.id,
'ref': wc.code,
'product_id': wc.product_id.id,
'unit_amount': wc_line.hour,
'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
})
# Cost per cycle
value = wc_line.cycle * wc.costs_cycle
account = wc.costs_cycle_account_id.id
if value and account:
amount += value
analytic_line_obj.create(cr, SUPERUSER_ID, {
'name': wc_line.name + ' (C)',
'amount': value,
'account_id': account,
'general_account_id': wc.costs_general_account_id.id,
'ref': wc.code,
'product_id': wc.product_id.id,
'unit_amount': wc_line.cycle,
'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
})
return amount
def action_in_production(self, cr, uid, ids, context=None):
""" Changes state to In Production and writes starting date.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'in_production', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')})
def consume_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
res += [x.id for x in order.move_lines]
return res
def test_ready(self, cr, uid, ids):
res = True
for production in self.browse(cr, uid, ids):
if production.move_lines and not production.ready_production:
res = False
return res
def _make_production_produce_line(self, cr, uid, production, context=None):
stock_move = self.pool.get('stock.move')
proc_obj = self.pool.get('procurement.order')
source_location_id = production.product_id.property_stock_production.id
destination_location_id = production.location_dest_id.id
procs = proc_obj.search(cr, uid, [('production_id', '=', production.id)], context=context)
procurement = procs and\
proc_obj.browse(cr, uid, procs[0], context=context) or False
data = {
'name': production.name,
'date': production.date_planned,
'date_expected': production.date_planned,
'product_id': production.product_id.id,
'product_uom': production.product_uom.id,
'product_uom_qty': production.product_qty,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'move_dest_id': production.move_prod_id.id,
'procurement_id': procurement and procurement.id,
'company_id': production.company_id.id,
'production_id': production.id,
'origin': production.name,
'group_id': procurement and procurement.group_id.id,
}
move_id = stock_move.create(cr, uid, data, context=context)
return stock_move.action_confirm(cr, uid, [move_id], context=context)[0]
def _get_raw_material_procure_method(self, cr, uid, product, location_id=False, location_dest_id=False, context=None):
'''This method returns the procure_method to use when creating the stock move for the production raw materials.
Besides the standard configuration of checking whether the product or product category has the MTO route,
you can also define a rule e.g. from Stock to Production (which might be used in the future like the sale orders)
'''
warehouse_obj = self.pool['stock.warehouse']
routes = product.route_ids + product.categ_id.total_route_ids
if location_id and location_dest_id:
pull_obj = self.pool['procurement.rule']
pulls = pull_obj.search(cr, uid, [('route_id', 'in', [x.id for x in routes]),
('location_id', '=', location_dest_id),
('location_src_id', '=', location_id)], limit=1, context=context)
if pulls:
return pull_obj.browse(cr, uid, pulls[0], context=context).procure_method
try:
mto_route = warehouse_obj._get_mto_route(cr, uid, context=context)
except:
return "make_to_stock"
if mto_route in [x.id for x in routes]:
return "make_to_order"
return "make_to_stock"
def _create_previous_move(self, cr, uid, move_id, product, source_location_id, dest_location_id, context=None):
'''
When the routing gives a different location than the raw material location of the production order,
we should create an extra move from the raw material location to the location of the routing, which
precedes the consumption line (chained). The picking type depends on the warehouse in which this happens
and the type of locations.
'''
loc_obj = self.pool.get("stock.location")
stock_move = self.pool.get('stock.move')
type_obj = self.pool.get('stock.picking.type')
# Need to search for a picking type
move = stock_move.browse(cr, uid, move_id, context=context)
src_loc = loc_obj.browse(cr, uid, source_location_id, context=context)
dest_loc = loc_obj.browse(cr, uid, dest_location_id, context=context)
code = stock_move.get_code_from_locs(cr, uid, move, src_loc, dest_loc, context=context)
if code == 'outgoing':
check_loc = src_loc
else:
check_loc = dest_loc
wh = loc_obj.get_warehouse(cr, uid, check_loc, context=context)
domain = [('code', '=', code)]
if wh:
domain += [('warehouse_id', '=', wh)]
types = type_obj.search(cr, uid, domain, context=context)
move = stock_move.copy(cr, uid, move_id, default = {
'location_id': source_location_id,
'location_dest_id': dest_location_id,
'procure_method': self._get_raw_material_procure_method(cr, uid, product, location_id=source_location_id,
location_dest_id=dest_location_id, context=context),
'raw_material_production_id': False,
'move_dest_id': move_id,
'picking_type_id': types and types[0] or False,
}, context=context)
return move
def _make_consume_line_from_data(self, cr, uid, production, product, uom_id, qty, context=None):
stock_move = self.pool.get('stock.move')
loc_obj = self.pool.get('stock.location')
# Internal shipment is created for Stockable and Consumable Products
if product.type not in ('product', 'consu'):
return False
# Take routing location as a Source Location.
source_location_id = production.location_src_id.id
prod_location_id = source_location_id
prev_move = False
if production.routing_id:
routing = production.routing_id
else:
routing = production.bom_id.routing_id
if routing and routing.location_id and routing.location_id.id != source_location_id:
source_location_id = routing.location_id.id
prev_move = True
destination_location_id = production.product_id.property_stock_production.id
move_id = stock_move.create(cr, uid, {
'name': production.name,
'date': production.date_planned,
'date_expected': production.date_planned,
'product_id': product.id,
'product_uom_qty': qty,
'product_uom': uom_id,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'company_id': production.company_id.id,
'procure_method': prev_move and 'make_to_stock' or self._get_raw_material_procure_method(cr, uid, product, location_id=source_location_id,
location_dest_id=destination_location_id, context=context), #Make_to_stock avoids creating procurement
'raw_material_production_id': production.id,
#this saves us a browse in create()
'price_unit': product.standard_price,
'origin': production.name,
'warehouse_id': loc_obj.get_warehouse(cr, uid, production.location_src_id, context=context),
'group_id': production.move_prod_id.group_id.id,
}, context=context)
if prev_move:
prev_move = self._create_previous_move(cr, uid, move_id, product, prod_location_id, source_location_id, context=context)
stock_move.action_confirm(cr, uid, [prev_move], context=context)
return move_id
def _make_production_consume_line(self, cr, uid, line, context=None):
return self._make_consume_line_from_data(cr, uid, line.production_id, line.product_id, line.product_uom.id, line.product_qty, context=context)
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order.
@return: 0 once the order is confirmed and its stock moves are created.
"""
user_lang = self.pool.get('res.users').browse(cr, uid, [uid]).partner_id.lang
context = dict(context, lang=user_lang)
uncompute_ids = filter(lambda x: x, [not x.product_lines and x.id or False for x in self.browse(cr, uid, ids, context=context)])
self.action_compute(cr, uid, uncompute_ids, context=context)
for production in self.browse(cr, uid, ids, context=context):
self._make_production_produce_line(cr, uid, production, context=context)
stock_moves = []
for line in production.product_lines:
if line.product_id.type in ['product', 'consu']:
stock_move_id = self._make_production_consume_line(cr, uid, line, context=context)
stock_moves.append(stock_move_id)
if stock_moves:
self.pool.get('stock.move').action_confirm(cr, uid, stock_moves, context=context)
production.write({'state': 'confirmed'})
return 0
def action_assign(self, cr, uid, ids, context=None):
"""
Checks the availability on the consume lines of the production order
"""
from openerp import workflow
move_obj = self.pool.get("stock.move")
for production in self.browse(cr, uid, ids, context=context):
move_obj.action_assign(cr, uid, [x.id for x in production.move_lines], context=context)
if self.pool.get('mrp.production').test_ready(cr, uid, [production.id]):
workflow.trg_validate(uid, 'mrp.production', production.id, 'moves_ready', cr)
def force_production(self, cr, uid, ids, *args):
""" Assigns products.
@param *args: Arguments
@return: True
"""
from openerp import workflow
move_obj = self.pool.get('stock.move')
for order in self.browse(cr, uid, ids):
move_obj.force_assign(cr, uid, [x.id for x in order.move_lines])
if self.pool.get('mrp.production').test_ready(cr, uid, [order.id]):
workflow.trg_validate(uid, 'mrp.production', order.id, 'moves_ready', cr)
return True
class mrp_production_workcenter_line(osv.osv):
_name = 'mrp.production.workcenter.line'
_description = 'Work Order'
_order = 'sequence'
_inherit = ['mail.thread']
_columns = {
'name': fields.char('Work Order', required=True),
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'cycle': fields.float('Number of Cycles', digits=(16, 2)),
'hour': fields.float('Number of Hours', digits=(16, 2)),
'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when displaying a list of work orders."),
'production_id': fields.many2one('mrp.production', 'Manufacturing Order',
track_visibility='onchange', select=True, ondelete='cascade', required=True),
}
_defaults = {
'sequence': lambda *a: 1,
'hour': lambda *a: 0,
'cycle': lambda *a: 0,
}
class mrp_production_product_line(osv.osv):
_name = 'mrp.production.product.line'
_description = 'Production Scheduled Product'
_columns = {
'name': fields.char('Name', required=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'production_id': fields.many2one('mrp.production', 'Production Order', select=True),
}
|
gpl-3.0
|
jabesq/home-assistant
|
homeassistant/components/file/notify.py
|
7
|
1796
|
"""Support for file notification."""
import logging
import os
import voluptuous as vol
from homeassistant.const import CONF_FILENAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService)
CONF_TIMESTAMP = 'timestamp'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_FILENAME): cv.string,
vol.Optional(CONF_TIMESTAMP, default=False): cv.boolean,
})
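# A minimal configuration sketch that this schema would accept (the filename is
# a hypothetical example value):
#
#   notify:
#     - platform: file
#       filename: notifications.txt
#       timestamp: false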
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the file notification service."""
filename = config[CONF_FILENAME]
timestamp = config[CONF_TIMESTAMP]
return FileNotificationService(hass, filename, timestamp)
class FileNotificationService(BaseNotificationService):
"""Implement the notification service for the File service."""
def __init__(self, hass, filename, add_timestamp):
"""Initialize the service."""
self.filepath = os.path.join(hass.config.config_dir, filename)
self.add_timestamp = add_timestamp
def send_message(self, message="", **kwargs):
"""Send a message to a file."""
with open(self.filepath, 'a') as file:
if os.stat(self.filepath).st_size == 0:
title = '{} notifications (Log started: {})\n{}\n'.format(
kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
dt_util.utcnow().isoformat(),
'-' * 80)
file.write(title)
if self.add_timestamp:
text = '{} {}\n'.format(dt_util.utcnow().isoformat(), message)
else:
text = '{}\n'.format(message)
file.write(text)
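# With add_timestamp enabled, each appended line looks roughly like
# (hypothetical timestamp and message):
#   2019-01-01T12:00:00+00:00 Front door opened
# and a title header plus a row of 80 dashes is written once, when the file is
# still empty.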
|
apache-2.0
|
bsipocz/statsmodels
|
statsmodels/graphics/tests/test_mosaicplot.py
|
17
|
18878
|
from __future__ import division
from statsmodels.compat.python import iterkeys, zip, lrange, iteritems, range
from numpy.testing import assert_, assert_raises, dec
from numpy.testing import run_module_suite
# utilities for the tests
from statsmodels.compat.collections import OrderedDict
from statsmodels.api import datasets
import numpy as np
from itertools import product
try:
import matplotlib.pyplot as pylab
have_matplotlib = True
except ImportError:
have_matplotlib = False
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# the main drawing function
from statsmodels.graphics.mosaicplot import mosaic
# other functions to be tested for accuracy
from statsmodels.graphics.mosaicplot import _hierarchical_split
from statsmodels.graphics.mosaicplot import _reduce_dict
from statsmodels.graphics.mosaicplot import _key_splitting
from statsmodels.graphics.mosaicplot import _normalize_split
from statsmodels.graphics.mosaicplot import _split_rect
@dec.skipif(not have_matplotlib or pandas_old)
def test_data_conversion():
# It will not reorder the elements
# so the dictionary will look odd
# as its key order has the c and b
# keys swapped
import pandas
fig, ax = pylab.subplots(4, 4)
data = {'ax': 1, 'bx': 2, 'cx': 3}
mosaic(data, ax=ax[0, 0], title='basic dict', axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[0, 1], title='basic series', axes_label=False)
data = [1, 2, 3]
mosaic(data, ax=ax[0, 2], title='basic list', axes_label=False)
data = np.asarray(data)
mosaic(data, ax=ax[0, 3], title='basic array', axes_label=False)
data = {('ax', 'cx'): 1, ('bx', 'cx'): 2, ('ax', 'dx'): 3, ('bx', 'dx'): 4}
mosaic(data, ax=ax[1, 0], title='compound dict', axes_label=False)
mosaic(data, ax=ax[2, 0], title='inverted keys dict', index=[1, 0], axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[1, 1], title='compound series', axes_label=False)
mosaic(data, ax=ax[2, 1], title='inverted keys series', index=[1, 0])
data = [[1, 2], [3, 4]]
mosaic(data, ax=ax[1, 2], title='compound list', axes_label=False)
mosaic(data, ax=ax[2, 2], title='inverted keys list', index=[1, 0])
data = np.array([[1, 2], [3, 4]])
mosaic(data, ax=ax[1, 3], title='compound array', axes_label=False)
mosaic(data, ax=ax[2, 3], title='inverted keys array', index=[1, 0], axes_label=False)
gender = ['male', 'male', 'male', 'female', 'female', 'female']
pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
data = pandas.DataFrame({'gender': gender, 'pet': pet})
mosaic(data, ['gender'], ax=ax[3, 0], title='dataframe by key 1', axes_label=False)
mosaic(data, ['pet'], ax=ax[3, 1], title='dataframe by key 2', axes_label=False)
mosaic(data, ['gender', 'pet'], ax=ax[3, 2], title='both keys', axes_label=False)
mosaic(data, ['pet', 'gender'], ax=ax[3, 3], title='keys inverted', axes_label=False)
pylab.suptitle('testing data conversion (plot 1 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_simple():
# display a simple plot of 4 categories of data, split into four
# levels with increasing size for each group
# creation of the levels
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['healty', 'ill'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
# which colours should I use for the various categories?
# put it into a dict
props = {}
#males and females in blue and red
props[('male',)] = {'color': 'b'}
props[('female',)] = {'color': 'r'}
# all the groups corresponding to ill groups have a different color
for key in keys:
if 'ill' in key:
if 'male' in key:
props[key] = {'color': 'BlueViolet', 'hatch': '+'}
else:
props[key] = {'color': 'Crimson', 'hatch': '+'}
# mosaic of the data, with given gaps and colors
mosaic(data, gap=0.05, properties=props, axes_label=False)
pylab.suptitle('syntetic data, 4 categories (plot 2 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic():
# make the same analysis on a known dataset
# load the data and clean it a bit
affairs = datasets.fair.load_pandas()
datas = affairs.exog
# any time greater than 0 is cheating
datas['cheated'] = affairs.endog > 0
# sort by the marriage quality and give meaningful name
# [rate_marriage, age, yrs_married, children,
# religious, educ, occupation, occupation_husb]
datas = datas.sort(['rate_marriage', 'religious'])
num_to_desc = {1: 'awful', 2: 'bad', 3: 'intermediate',
4: 'good', 5: 'wonderful'}
datas['rate_marriage'] = datas['rate_marriage'].map(num_to_desc)
num_to_faith = {1: 'non religious', 2: 'poorly religious', 3: 'religious',
4: 'very religious'}
datas['religious'] = datas['religious'].map(num_to_faith)
num_to_cheat = {False: 'faithful', True: 'cheated'}
datas['cheated'] = datas['cheated'].map(num_to_cheat)
# finished cleaning
fig, ax = pylab.subplots(2, 2)
mosaic(datas, ['rate_marriage', 'cheated'], ax=ax[0, 0],
title='by marriage happiness')
mosaic(datas, ['religious', 'cheated'], ax=ax[0, 1],
title='by religiosity')
mosaic(datas, ['rate_marriage', 'religious', 'cheated'], ax=ax[1, 0],
title='by both', labelizer=lambda k:'')
ax[1, 0].set_xlabel('marriage rating')
ax[1, 0].set_ylabel('religion status')
mosaic(datas, ['religious', 'rate_marriage'], ax=ax[1, 1],
title='inter-dependence', axes_label=False)
pylab.suptitle("extramarital affairs (plot 3 of 4)")
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_very_complex():
# make a scattermatrix of mosaic plots to show the correlations between
# each pair of variables in a dataset. Could easily be converted into a
# new function that does this automatically based on the type of data
key_name = ['gender', 'age', 'health', 'work']
key_base = (['male', 'female'], ['old', 'young'],
['healty', 'ill'], ['work', 'unemployed'])
keys = list(product(*key_base))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
props = {}
props[('male', 'old')] = {'color': 'r'}
props[('female',)] = {'color': 'pink'}
L = len(key_base)
fig, axes = pylab.subplots(L, L)
for i in range(L):
for j in range(L):
m = set(range(L)).difference(set((i, j)))
if i == j:
axes[i, i].text(0.5, 0.5, key_name[i],
ha='center', va='center')
axes[i, i].set_xticks([])
axes[i, i].set_xticklabels([])
axes[i, i].set_yticks([])
axes[i, i].set_yticklabels([])
else:
ji = max(i, j)
ij = min(i, j)
temp_data = OrderedDict([((k[ij], k[ji]) + tuple(k[r] for r in m), v)
for k, v in iteritems(data)])
keys = list(iterkeys(temp_data))
for k in keys:
value = _reduce_dict(temp_data, k[:2])
temp_data[k[:2]] = value
del temp_data[k]
mosaic(temp_data, ax=axes[i, j], axes_label=False,
properties=props, gap=0.05, horizontal=i > j)
pylab.suptitle('old males should look bright red, (plot 4 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_axes_labeling():
from numpy.random import rand
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['yes', 'no'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, rand(len(keys))))
lab = lambda k: ''.join(s[0] for s in k)
fig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(16, 8))
mosaic(data, ax=ax1, labelizer=lab, horizontal=True, label_rotation=45)
mosaic(data, ax=ax2, labelizer=lab, horizontal=False,
label_rotation=[0, 45, 90, 0])
#fig.tight_layout()
fig.suptitle("correct alignment of the axes labels")
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic_empty_cells():
# SMOKE test see #2286
import pandas as pd
mydata = pd.DataFrame({'id2': {64: 'Angelica',
65: 'DXW_UID', 66: 'casuid01',
67: 'casuid01', 68: 'EC93_uid',
69: 'EC93_uid', 70: 'EC93_uid',
60: 'DXW_UID', 61: 'AtmosFox',
62: 'DXW_UID', 63: 'DXW_UID'},
'id1': {64: 'TGP',
65: 'Retention01', 66: 'default',
67: 'default', 68: 'Musa_EC_9_3',
69: 'Musa_EC_9_3', 70: 'Musa_EC_9_3',
60: 'default', 61: 'default',
62: 'default', 63: 'default'}})
ct = pd.crosstab(mydata.id1, mydata.id2)
fig, vals = mosaic(ct.T.unstack())
fig, vals = mosaic(mydata, ['id1','id2'])
eq = lambda x, y: assert_(np.allclose(x, y))
def test_recursive_split():
keys = list(product('mf'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m',)] = (0.0, 0.0, 0.5, 1.0)
res[('f',)] = (0.5, 0.0, 0.5, 1.0)
keys = list(product('mf', 'yao'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m', 'y')] = (0.0, 0.0, 0.5, 1 / 3)
res[('m', 'a')] = (0.0, 1 / 3, 0.5, 1 / 3)
res[('m', 'o')] = (0.0, 2 / 3, 0.5, 1 / 3)
res[('f', 'y')] = (0.5, 0.0, 0.5, 1 / 3)
res[('f', 'a')] = (0.5, 1 / 3, 0.5, 1 / 3)
res[('f', 'o')] = (0.5, 2 / 3, 0.5, 1 / 3)
def test__reduce_dict():
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
eq(_reduce_dict(data, ('m',)), 4)
eq(_reduce_dict(data, ('m', 'o')), 2)
eq(_reduce_dict(data, ('m', 'o', 'w')), 1)
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
eq(_reduce_dict(data, ('m',)), 6)
eq(_reduce_dict(data, ('m', 'o')), 1)
eq(_reduce_dict(data, ('m', 'o', 'w')), 0)
def test__key_splitting():
# subdivide starting with an empty tuple
base_rect = {tuple(): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 1], tuple(), True, 0)
assert_(list(iterkeys(res)) == [('a',), ('b',)])
eq(res[('a',)], (0, 0, 0.5, 1))
eq(res[('b',)], (0.5, 0, 0.5, 1))
# subdivide a in two sublevel
res_bis = _key_splitting(res, ['c', 'd'], [1, 1], ('a',), False, 0)
assert_(list(iterkeys(res_bis)) == [('a', 'c'), ('a', 'd'), ('b',)])
eq(res_bis[('a', 'c')], (0.0, 0.0, 0.5, 0.5))
eq(res_bis[('a', 'd')], (0.0, 0.5, 0.5, 0.5))
eq(res_bis[('b',)], (0.5, 0, 0.5, 1))
# starting with a non empty tuple and uneven distribution
base_rect = {('total',): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 2], ('total',), True, 0)
assert_(list(iterkeys(res)) == [('total',) + (e,) for e in ['a', 'b']])
eq(res[('total', 'a')], (0, 0, 1 / 3, 1))
eq(res[('total', 'b')], (1 / 3, 0, 2 / 3, 1))
def test_proportion_normalization():
# extreme scalar values should give back the whole interval,
# and so should 0
eq(_normalize_split(0.), [0.0, 0.0, 1.0])
eq(_normalize_split(1.), [0.0, 1.0, 1.0])
eq(_normalize_split(2.), [0.0, 1.0, 1.0])
# negative values should raise ValueError
assert_raises(ValueError, _normalize_split, -1)
assert_raises(ValueError, _normalize_split, [1., -1])
assert_raises(ValueError, _normalize_split, [1., -1, 0.])
# if everything is zero it will complain
assert_raises(ValueError, _normalize_split, [0.])
assert_raises(ValueError, _normalize_split, [0., 0.])
# one-element array should return the whole interval
eq(_normalize_split([0.5]), [0.0, 1.0])
eq(_normalize_split([1.]), [0.0, 1.0])
eq(_normalize_split([2.]), [0.0, 1.0])
# simple division should give two pieces
for x in [0.3, 0.5, 0.9]:
eq(_normalize_split(x), [0., x, 1.0])
# multiple division should split as the sum of the components
for x, y in [(0.25, 0.5), (0.1, 0.8), (10., 30.)]:
eq(_normalize_split([x, y]), [0., x / (x + y), 1.0])
for x, y, z in [(1., 1., 1.), (0.1, 0.5, 0.7), (10., 30., 40)]:
eq(_normalize_split(
[x, y, z]), [0., x / (x + y + z), (x + y) / (x + y + z), 1.0])
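# Worked example of the multi-way case above (hypothetical numbers):
# _normalize_split([1., 3.]) should give [0., 1 / 4, 1.0], i.e. the cumulative
# proportions of the components normalized to the unit interval.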
def test_false_split():
# if you ask it to be divided in only one piece, just return the original
# one
pure_square = [0., 0., 1., 1.]
conf_h = dict(proportion=[1], gap=0.0, horizontal=True)
conf_v = dict(proportion=[1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
conf_h = dict(proportion=[1], gap=0.5, horizontal=True)
conf_v = dict(proportion=[1], gap=0.5, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
# identity on a void rectangle should not give anything strange
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
conf = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
# splitting a negative rectangle should raise error
neg_square = [0., 0., -1., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
def test_rect_pure_split():
pure_square = [0., 0., 1., 1.]
# division in two equal pieces from the perfect square
h_2split = [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 0.5), (0.0, 0.5, 1.0, 0.5)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 2 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 2 / 3)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 1 / 3, 1.0), (2 / 3, 0.0,
1 / 3, 1.0)]
conf_h = dict(proportion=[1, 1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 1 / 3), (0.0, 2 / 3,
1.0, 1 / 3)]
conf_v = dict(proportion=[1, 1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 4, 1.0), (1 / 4, 0.0, 1 / 2, 1.0), (3 / 4, 0.0,
1 / 4, 1.0)]
conf_h = dict(proportion=[1, 2, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 4), (0.0, 1 / 4, 1.0, 1 / 2), (0.0, 3 / 4,
1.0, 1 / 4)]
conf_v = dict(proportion=[1, 2, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# splitting on a void rectangle should give multiple void
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
conf = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
def test_rect_deformed_split():
non_pure_square = [1., -1., 1., 0.5]
# division in two equal pieces from the perfect square
h_2split = [(1.0, -1.0, 0.5, 0.5), (1.5, -1.0, 0.5, 0.5)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 0.25), (1.0, -0.75, 1.0, 0.25)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(1.0, -1.0, 1 / 3, 0.5), (1 + 1 / 3, -1.0, 2 / 3, 0.5)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 1 / 6), (1.0, 1 / 6 - 1, 1.0, 2 / 6)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
def test_gap_split():
pure_square = [0., 0., 1., 1.]
# null split
conf_h = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), pure_square)
# equal split
h_2split = [(0.0, 0.0, 0.25, 1.0), (0.75, 0.0, 0.25, 1.0)]
conf_h = dict(proportion=[1, 1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
# disequal split
h_2split = [(0.0, 0.0, 1 / 6, 1.0), (0.5 + 1 / 6, 0.0, 1 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
def test_default_arg_index():
# 2116
import pandas as pd
df = pd.DataFrame({'size' : ['small', 'large', 'large', 'small', 'large',
'small'],
'length' : ['long', 'short', 'short', 'long', 'long',
'short']})
assert_raises(ValueError, mosaic, data=df, title='foobar')
if __name__ == '__main__':
run_module_suite()
|
bsd-3-clause
|
RJVB/audacity
|
lib-src/lv2/lv2/waflib/Build.py
|
265
|
20971
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,errno,re,shutil
try:
import cPickle
except ImportError:
import pickle as cPickle
from waflib import Runner,TaskGen,Utils,ConfigSet,Task,Logs,Options,Context,Errors
import waflib.Node
CACHE_DIR='c4che'
CACHE_SUFFIX='_cache.py'
INSTALL=1337
UNINSTALL=-1337
SAVED_ATTRS='root node_deps raw_deps task_sigs'.split()
CFG_FILES='cfg_files'
POST_AT_ONCE=0
POST_LAZY=1
POST_BOTH=2
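# These flags select when task generators are posted (see get_build_iterator
# below): POST_AT_ONCE posts every group before any task runs, POST_LAZY posts
# each group only when the build reaches it, and POST_BOTH does both passes.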
class BuildContext(Context.Context):
'''executes the build'''
cmd='build'
variant=''
def __init__(self,**kw):
super(BuildContext,self).__init__(**kw)
self.is_install=0
self.top_dir=kw.get('top_dir',Context.top_dir)
self.run_dir=kw.get('run_dir',Context.run_dir)
self.post_mode=POST_AT_ONCE
self.out_dir=kw.get('out_dir',Context.out_dir)
self.cache_dir=kw.get('cache_dir',None)
if not self.cache_dir:
self.cache_dir=self.out_dir+os.sep+CACHE_DIR
self.all_envs={}
self.task_sigs={}
self.node_deps={}
self.raw_deps={}
self.cache_dir_contents={}
self.task_gen_cache_names={}
self.launch_dir=Context.launch_dir
self.jobs=Options.options.jobs
self.targets=Options.options.targets
self.keep=Options.options.keep
self.cache_global=Options.cache_global
self.nocache=Options.options.nocache
self.progress_bar=Options.options.progress_bar
self.deps_man=Utils.defaultdict(list)
self.current_group=0
self.groups=[]
self.group_names={}
def get_variant_dir(self):
if not self.variant:
return self.out_dir
return os.path.join(self.out_dir,self.variant)
variant_dir=property(get_variant_dir,None)
def __call__(self,*k,**kw):
kw['bld']=self
ret=TaskGen.task_gen(*k,**kw)
self.task_gen_cache_names={}
self.add_to_group(ret,group=kw.get('group',None))
return ret
def rule(self,*k,**kw):
def f(rule):
ret=self(*k,**kw)
ret.rule=rule
return ret
return f
def __copy__(self):
raise Errors.WafError('build contexts are not supposed to be copied')
def install_files(self,*k,**kw):
pass
def install_as(self,*k,**kw):
pass
def symlink_as(self,*k,**kw):
pass
def load_envs(self):
node=self.root.find_node(self.cache_dir)
if not node:
raise Errors.WafError('The project was not configured: run "waf configure" first!')
lst=node.ant_glob('**/*%s'%CACHE_SUFFIX,quiet=True)
if not lst:
raise Errors.WafError('The cache directory is empty: reconfigure the project')
for x in lst:
name=x.path_from(node).replace(CACHE_SUFFIX,'').replace('\\','/')
env=ConfigSet.ConfigSet(x.abspath())
self.all_envs[name]=env
for f in env[CFG_FILES]:
newnode=self.root.find_resource(f)
try:
h=Utils.h_file(newnode.abspath())
except(IOError,AttributeError):
Logs.error('cannot find %r'%f)
h=Utils.SIG_NIL
newnode.sig=h
def init_dirs(self):
if not(os.path.isabs(self.top_dir)and os.path.isabs(self.out_dir)):
raise Errors.WafError('The project was not configured: run "waf configure" first!')
self.path=self.srcnode=self.root.find_dir(self.top_dir)
self.bldnode=self.root.make_node(self.variant_dir)
self.bldnode.mkdir()
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.execute_build()
def execute_build(self):
Logs.info("Waf: Entering directory `%s'"%self.variant_dir)
self.recurse([self.run_dir])
self.pre_build()
self.timer=Utils.Timer()
if self.progress_bar:
sys.stderr.write(Logs.colors.cursor_off)
try:
self.compile()
finally:
if self.progress_bar==1:
c=len(self.returned_tasks)or 1
self.to_log(self.progress_line(c,c,Logs.colors.BLUE,Logs.colors.NORMAL))
print('')
sys.stdout.flush()
sys.stderr.write(Logs.colors.cursor_on)
Logs.info("Waf: Leaving directory `%s'"%self.variant_dir)
self.post_build()
def restore(self):
try:
env=ConfigSet.ConfigSet(os.path.join(self.cache_dir,'build.config.py'))
except(IOError,OSError):
pass
else:
if env['version']<Context.HEXVERSION:
raise Errors.WafError('Version mismatch! reconfigure the project')
for t in env['tools']:
self.setup(**t)
dbfn=os.path.join(self.variant_dir,Context.DBFILE)
try:
data=Utils.readf(dbfn,'rb')
except(IOError,EOFError):
Logs.debug('build: Could not load the build cache %s (missing)'%dbfn)
else:
try:
waflib.Node.pickle_lock.acquire()
waflib.Node.Nod3=self.node_class
try:
data=cPickle.loads(data)
except Exception ,e:
Logs.debug('build: Could not pickle the build cache %s: %r'%(dbfn,e))
else:
for x in SAVED_ATTRS:
setattr(self,x,data[x])
finally:
waflib.Node.pickle_lock.release()
self.init_dirs()
def store(self):
data={}
for x in SAVED_ATTRS:
data[x]=getattr(self,x)
db=os.path.join(self.variant_dir,Context.DBFILE)
try:
waflib.Node.pickle_lock.acquire()
waflib.Node.Nod3=self.node_class
x=cPickle.dumps(data,-1)
finally:
waflib.Node.pickle_lock.release()
Utils.writef(db+'.tmp',x,m='wb')
try:
st=os.stat(db)
os.remove(db)
if not Utils.is_win32:
os.chown(db+'.tmp',st.st_uid,st.st_gid)
except(AttributeError,OSError):
pass
os.rename(db+'.tmp',db)
def compile(self):
Logs.debug('build: compile()')
self.producer=Runner.Parallel(self,self.jobs)
self.producer.biter=self.get_build_iterator()
self.returned_tasks=[]
try:
self.producer.start()
except KeyboardInterrupt:
self.store()
raise
else:
if self.producer.dirty:
self.store()
if self.producer.error:
raise Errors.BuildError(self.producer.error)
def setup(self,tool,tooldir=None,funs=None):
if isinstance(tool,list):
for i in tool:self.setup(i,tooldir)
return
module=Context.load_tool(tool,tooldir)
if hasattr(module,"setup"):module.setup(self)
def get_env(self):
try:
return self.all_envs[self.variant]
except KeyError:
return self.all_envs['']
def set_env(self,val):
self.all_envs[self.variant]=val
env=property(get_env,set_env)
def add_manual_dependency(self,path,value):
if path is None:
raise ValueError('Invalid input')
if isinstance(path,waflib.Node.Node):
node=path
elif os.path.isabs(path):
node=self.root.find_resource(path)
else:
node=self.path.find_resource(path)
if isinstance(value,list):
self.deps_man[id(node)].extend(value)
else:
self.deps_man[id(node)].append(value)
def launch_node(self):
try:
return self.p_ln
except AttributeError:
self.p_ln=self.root.find_dir(self.launch_dir)
return self.p_ln
def hash_env_vars(self,env,vars_lst):
if not env.table:
env=env.parent
if not env:
return Utils.SIG_NIL
idx=str(id(env))+str(vars_lst)
try:
cache=self.cache_env
except AttributeError:
cache=self.cache_env={}
else:
try:
return self.cache_env[idx]
except KeyError:
pass
lst=[env[a]for a in vars_lst]
ret=Utils.h_list(lst)
Logs.debug('envhash: %s %r',Utils.to_hex(ret),lst)
cache[idx]=ret
return ret
def get_tgen_by_name(self,name):
cache=self.task_gen_cache_names
if not cache:
for g in self.groups:
for tg in g:
try:
cache[tg.name]=tg
except AttributeError:
pass
try:
return cache[name]
except KeyError:
raise Errors.WafError('Could not find a task generator for the name %r'%name)
def progress_line(self,state,total,col1,col2):
n=len(str(total))
Utils.rot_idx+=1
ind=Utils.rot_chr[Utils.rot_idx%4]
pc=(100.*state)/total
eta=str(self.timer)
fs="[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s]["%(n,n,ind)
left=fs%(state,total,col1,pc,col2)
right='][%s%s%s]'%(col1,eta,col2)
cols=Logs.get_term_cols()-len(left)-len(right)+2*len(col1)+2*len(col2)
if cols<7:cols=7
ratio=((cols*state)//total)-1
bar=('='*ratio+'>').ljust(cols)
msg=Utils.indicator%(left,bar,right)
return msg
def declare_chain(self,*k,**kw):
return TaskGen.declare_chain(*k,**kw)
def pre_build(self):
for m in getattr(self,'pre_funs',[]):
m(self)
def post_build(self):
for m in getattr(self,'post_funs',[]):
m(self)
def add_pre_fun(self,meth):
try:
self.pre_funs.append(meth)
except AttributeError:
self.pre_funs=[meth]
def add_post_fun(self,meth):
try:
self.post_funs.append(meth)
except AttributeError:
self.post_funs=[meth]
def get_group(self,x):
if not self.groups:
self.add_group()
if x is None:
return self.groups[self.current_group]
if x in self.group_names:
return self.group_names[x]
return self.groups[x]
def add_to_group(self,tgen,group=None):
assert(isinstance(tgen,TaskGen.task_gen)or isinstance(tgen,Task.TaskBase))
tgen.bld=self
self.get_group(group).append(tgen)
def get_group_name(self,g):
if not isinstance(g,list):
g=self.groups[g]
for x in self.group_names:
if id(self.group_names[x])==id(g):
return x
return''
def get_group_idx(self,tg):
se=id(tg)
for i in range(len(self.groups)):
for t in self.groups[i]:
if id(t)==se:
return i
return None
def add_group(self,name=None,move=True):
if name and name in self.group_names:
Logs.error('add_group: name %s already present'%name)
g=[]
self.group_names[name]=g
self.groups.append(g)
if move:
self.current_group=len(self.groups)-1
def set_group(self,idx):
if isinstance(idx,str):
g=self.group_names[idx]
for i in range(len(self.groups)):
if id(g)==id(self.groups[i]):
self.current_group=i
else:
self.current_group=idx
def total(self):
total=0
for group in self.groups:
for tg in group:
try:
total+=len(tg.tasks)
except AttributeError:
total+=1
return total
def get_targets(self):
to_post=[]
min_grp=0
for name in self.targets.split(','):
tg=self.get_tgen_by_name(name)
if not tg:
raise Errors.WafError('target %r does not exist'%name)
m=self.get_group_idx(tg)
if m>min_grp:
min_grp=m
to_post=[tg]
elif m==min_grp:
to_post.append(tg)
return(min_grp,to_post)
def get_all_task_gen(self):
lst=[]
for g in self.groups:
lst.extend(g)
return lst
def post_group(self):
if self.targets=='*':
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
f()
elif self.targets:
if self.cur<self._min_grp:
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
f()
else:
for tg in self._exact_tg:
tg.post()
else:
ln=self.launch_node()
if ln.is_child_of(self.bldnode):
Logs.warn('Building from the build directory, forcing --targets=*')
ln=self.srcnode
elif not ln.is_child_of(self.srcnode):
Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)'%(ln.abspath(),self.srcnode.abspath()))
ln=self.srcnode
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
if tg.path.is_child_of(ln):
f()
def get_tasks_group(self,idx):
tasks=[]
for tg in self.groups[idx]:
try:
tasks.extend(tg.tasks)
except AttributeError:
tasks.append(tg)
return tasks
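	# Generator driving the build: task generators are posted up front or
	# lazily depending on self.post_mode, then the tasks of each build group
	# are yielded in turn; once all groups are exhausted, empty lists are
	# yielded forever.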
def get_build_iterator(self):
self.cur=0
if self.targets and self.targets!='*':
(self._min_grp,self._exact_tg)=self.get_targets()
global lazy_post
if self.post_mode!=POST_LAZY:
while self.cur<len(self.groups):
self.post_group()
self.cur+=1
self.cur=0
while self.cur<len(self.groups):
if self.post_mode!=POST_AT_ONCE:
self.post_group()
tasks=self.get_tasks_group(self.cur)
Task.set_file_constraints(tasks)
Task.set_precedence_constraints(tasks)
self.cur_tasks=tasks
self.cur+=1
if not tasks:
continue
yield tasks
while 1:
yield[]
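# Installation task created by install_files/install_as/symlink_as below; it
# copies files or creates symlinks at install time (UninstallContext overrides
# do_install/do_link so the same tasks remove them on uninstall).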
class inst(Task.Task):
color='CYAN'
def uid(self):
lst=[self.dest,self.path]+self.source
return Utils.h_list(repr(lst))
def post(self):
buf=[]
for x in self.source:
if isinstance(x,waflib.Node.Node):
y=x
else:
y=self.path.find_resource(x)
if not y:
if Logs.verbose:
Logs.warn('Could not find %s immediately (may cause broken builds)'%x)
idx=self.generator.bld.get_group_idx(self)
for tg in self.generator.bld.groups[idx]:
if not isinstance(tg,inst)and id(tg)!=id(self):
tg.post()
y=self.path.find_resource(x)
if y:
break
else:
raise Errors.WafError('Could not find %r in %r'%(x,self.path))
buf.append(y)
self.inputs=buf
def runnable_status(self):
ret=super(inst,self).runnable_status()
if ret==Task.SKIP_ME:
return Task.RUN_ME
return ret
def __str__(self):
return''
def run(self):
return self.generator.exec_task()
def get_install_path(self,destdir=True):
dest=Utils.subst_vars(self.dest,self.env)
dest=dest.replace('/',os.sep)
if destdir and Options.options.destdir:
dest=os.path.join(Options.options.destdir,os.path.splitdrive(dest)[1].lstrip(os.sep))
return dest
def exec_install_files(self):
destpath=self.get_install_path()
if not destpath:
raise Errors.WafError('unknown installation path %r'%self.generator)
for x,y in zip(self.source,self.inputs):
if self.relative_trick:
destfile=os.path.join(destpath,y.path_from(self.path))
else:
destfile=os.path.join(destpath,y.name)
self.generator.bld.do_install(y.abspath(),destfile,self.chmod)
def exec_install_as(self):
destfile=self.get_install_path()
self.generator.bld.do_install(self.inputs[0].abspath(),destfile,self.chmod)
def exec_symlink_as(self):
destfile=self.get_install_path()
src=self.link
if self.relative_trick:
src=os.path.relpath(src,os.path.dirname(destfile))
self.generator.bld.do_link(src,destfile)
class InstallContext(BuildContext):
'''installs the targets on the system'''
cmd='install'
def __init__(self,**kw):
super(InstallContext,self).__init__(**kw)
self.uninstall=[]
self.is_install=INSTALL
def do_install(self,src,tgt,chmod=Utils.O644):
d,_=os.path.split(tgt)
if not d:
raise Errors.WafError('Invalid installation given %r->%r'%(src,tgt))
Utils.check_dir(d)
srclbl=src.replace(self.srcnode.abspath()+os.sep,'')
if not Options.options.force:
try:
st1=os.stat(tgt)
st2=os.stat(src)
except OSError:
pass
else:
if st1.st_mtime+2>=st2.st_mtime and st1.st_size==st2.st_size:
if not self.progress_bar:
Logs.info('- install %s (from %s)'%(tgt,srclbl))
return False
if not self.progress_bar:
Logs.info('+ install %s (from %s)'%(tgt,srclbl))
try:
os.remove(tgt)
except OSError:
pass
try:
shutil.copy2(src,tgt)
os.chmod(tgt,chmod)
except IOError:
try:
os.stat(src)
except(OSError,IOError):
Logs.error('File %r does not exist'%src)
raise Errors.WafError('Could not install the file %r'%tgt)
def do_link(self,src,tgt):
d,_=os.path.split(tgt)
Utils.check_dir(d)
link=False
if not os.path.islink(tgt):
link=True
elif os.readlink(tgt)!=src:
link=True
if link:
try:os.remove(tgt)
except OSError:pass
if not self.progress_bar:
Logs.info('+ symlink %s (to %s)'%(tgt,src))
os.symlink(src,tgt)
else:
if not self.progress_bar:
Logs.info('- symlink %s (to %s)'%(tgt,src))
def run_task_now(self,tsk,postpone):
tsk.post()
if not postpone:
if tsk.runnable_status()==Task.ASK_LATER:
raise self.WafError('cannot post the task %r'%tsk)
tsk.run()
def install_files(self,dest,files,env=None,chmod=Utils.O644,relative_trick=False,cwd=None,add=True,postpone=True):
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.path=cwd or self.path
tsk.chmod=chmod
if isinstance(files,waflib.Node.Node):
tsk.source=[files]
else:
tsk.source=Utils.to_list(files)
tsk.dest=dest
tsk.exec_task=tsk.exec_install_files
tsk.relative_trick=relative_trick
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
def install_as(self,dest,srcfile,env=None,chmod=Utils.O644,cwd=None,add=True,postpone=True):
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.path=cwd or self.path
tsk.chmod=chmod
tsk.source=[srcfile]
tsk.dest=dest
tsk.exec_task=tsk.exec_install_as
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
def symlink_as(self,dest,src,env=None,cwd=None,add=True,postpone=True,relative_trick=False):
if Utils.is_win32:
return
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.dest=dest
tsk.path=cwd or self.path
tsk.source=[]
tsk.link=src
tsk.relative_trick=relative_trick
tsk.exec_task=tsk.exec_symlink_as
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
class UninstallContext(InstallContext):
'''removes the targets installed'''
cmd='uninstall'
def __init__(self,**kw):
super(UninstallContext,self).__init__(**kw)
self.is_install=UNINSTALL
def do_install(self,src,tgt,chmod=Utils.O644):
if not self.progress_bar:
Logs.info('- remove %s'%tgt)
self.uninstall.append(tgt)
try:
os.remove(tgt)
except OSError ,e:
if e.errno!=errno.ENOENT:
if not getattr(self,'uninstall_error',None):
self.uninstall_error=True
Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
if Logs.verbose>1:
Logs.warn('Could not remove %s (error code %r)'%(e.filename,e.errno))
while tgt:
tgt=os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def do_link(self,src,tgt):
try:
if not self.progress_bar:
Logs.info('- remove %s'%tgt)
os.remove(tgt)
except OSError:
pass
while tgt:
tgt=os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def execute(self):
try:
def runnable_status(self):
return Task.SKIP_ME
setattr(Task.Task,'runnable_status_back',Task.Task.runnable_status)
setattr(Task.Task,'runnable_status',runnable_status)
super(UninstallContext,self).execute()
finally:
setattr(Task.Task,'runnable_status',Task.Task.runnable_status_back)
class CleanContext(BuildContext):
'''cleans the project'''
cmd='clean'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
try:
self.clean()
finally:
self.store()
def clean(self):
Logs.debug('build: clean called')
if self.bldnode!=self.srcnode:
lst=[]
for e in self.all_envs.values():
lst.extend(self.root.find_or_declare(f)for f in e[CFG_FILES])
for n in self.bldnode.ant_glob('**/*',excl='.lock* *conf_check_*/** config.log c4che/*',quiet=True):
if n in lst:
continue
n.delete()
self.root.children={}
for v in'node_deps task_sigs raw_deps'.split():
setattr(self,v,{})
class ListContext(BuildContext):
'''lists the targets to execute'''
cmd='list'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
self.pre_build()
self.timer=Utils.Timer()
for g in self.groups:
for tg in g:
try:
f=tg.post
except AttributeError:
pass
else:
f()
try:
self.get_tgen_by_name('')
except Exception:
pass
lst=list(self.task_gen_cache_names.keys())
lst.sort()
for k in lst:
Logs.pprint('GREEN',k)
class StepContext(BuildContext):
'''executes tasks in a step-by-step fashion, for debugging'''
cmd='step'
def __init__(self,**kw):
super(StepContext,self).__init__(**kw)
self.files=Options.options.files
def compile(self):
if not self.files:
Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
BuildContext.compile(self)
return
targets=None
if self.targets and self.targets!='*':
targets=self.targets.split(',')
for g in self.groups:
for tg in g:
if targets and tg.name not in targets:
continue
try:
f=tg.post
except AttributeError:
pass
else:
f()
for pat in self.files.split(','):
matcher=self.get_matcher(pat)
for tg in g:
if isinstance(tg,Task.TaskBase):
lst=[tg]
else:
lst=tg.tasks
for tsk in lst:
do_exec=False
for node in getattr(tsk,'inputs',[]):
if matcher(node,output=False):
do_exec=True
break
for node in getattr(tsk,'outputs',[]):
if matcher(node,output=True):
do_exec=True
break
if do_exec:
ret=tsk.run()
Logs.info('%s -> exit %r'%(str(tsk),ret))
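	# Build the predicate used by "waf step --files=...": a pattern may be
	# prefixed with "in:" or "out:" to match only task inputs or outputs; if
	# it does not name an existing node it is treated as a regular expression
	# on node paths (anchored with "^.+?" and "$" when missing).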
def get_matcher(self,pat):
inn=True
out=True
if pat.startswith('in:'):
out=False
pat=pat.replace('in:','')
elif pat.startswith('out:'):
inn=False
pat=pat.replace('out:','')
anode=self.root.find_node(pat)
pattern=None
if not anode:
if not pat.startswith('^'):
pat='^.+?%s'%pat
if not pat.endswith('$'):
pat='%s$'%pat
pattern=re.compile(pat)
def match(node,output):
if output==True and not out:
return False
if output==False and not inn:
return False
if anode:
return anode==node
else:
return pattern.match(node.abspath())
return match
BuildContext.store=Utils.nogc(BuildContext.store)
BuildContext.restore=Utils.nogc(BuildContext.restore)
|
gpl-2.0
|
aselle/tensorflow
|
tensorflow/contrib/tensorrt/test/test_tftrt.py
|
13
|
9983
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import six as _six
# Normally we would do "import tensorflow as tf" and then use tf.placeholder,
# tf.constant, tf.nn.conv2d, etc., but internal builds don't seem to like
# that, so every module is imported individually.
from tensorflow.contrib import tensorrt as trt
from tensorflow.core.protobuf import config_pb2 as cpb2
from tensorflow.core.protobuf import rewriter_config_pb2 as rwpb2
from tensorflow.python.client import session as csess
from tensorflow.python.framework import constant_op as cop
from tensorflow.python.framework import dtypes as dtypes
from tensorflow.python.framework import importer as importer
from tensorflow.python.framework import ops as ops
from tensorflow.python.ops import array_ops as aops
from tensorflow.python.ops import math_ops as mops
from tensorflow.python.ops import nn as nn
from tensorflow.python.ops import nn_ops as nn_ops
def py2bytes(inp):
return inp
def py3bytes(inp):
return inp.encode("utf-8", errors="surrogateescape")
def py2string(inp):
return inp
def py3string(inp):
return inp.decode("utf-8")
if _six.PY2:
to_bytes = py2bytes
to_string = py2string
else:
to_bytes = py3bytes
to_string = py3string
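# For example (illustrative): on Python 3, to_bytes("FP32") returns b"FP32" and
# to_string(b"FP32") returns "FP32"; on Python 2 both functions are identities.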
def get_multi_engine_graph_def(mode="FP32"):
"""Create a simple graph and return its graph_def."""
dtype = dtypes.float32
if mode.upper() == "FP16":
dtype = dtypes.float16
else:
pass
g = ops.Graph()
with g.as_default():
x = aops.placeholder(shape=[None, 3, 7, 5], name="input", dtype=dtype)
with g.name_scope("Global_scope"):
with g.name_scope("first_scope"):
e = cop.constant(
np.random.randn(3, 2, 3, 4), name="weights", dtype=dtype)
conv = nn.conv2d(
input=x,
filter=e,
data_format="NCHW",
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias1", dtype=dtype)
t = conv * b
b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias2", dtype=dtype)
q = conv / b
edge = mops.sin(q)
edge1 = mops.cos(conv)
with g.name_scope("test_scope"):
de = edge + edge1
t -= edge1
q *= edge
t += q
t -= de
k = aops.squeeze(t, name="output")
print(k.dtype)
return g.as_graph_def()
def get_simple_graph_def():
"""Create a simple graph and return its graph_def."""
g = ops.Graph()
with g.as_default():
a = aops.placeholder(
dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
e = cop.constant(
[[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
name="weights",
dtype=dtypes.float32)
conv = nn.conv2d(
input=a, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
b = cop.constant(
[4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtypes.float32)
t = nn.bias_add(conv, b, name="biasAdd")
relu = nn.relu(t, "relu")
idty = aops.identity(relu, "ID")
v = nn_ops.max_pool(
idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
aops.squeeze(v, name="output")
return g.as_graph_def()
def execute_graph(gdef, dumm_inp):
"""Run given graphdef once."""
print("executing")
gpu_options = None
if trt.trt_convert.get_linked_tensorrt_version()[0] == 3:
gpu_options = cpb2.GPUOptions(per_process_gpu_memory_fraction=0.50)
sessconfig = cpb2.ConfigProto(gpu_options=gpu_options)
ops.reset_default_graph()
g = ops.Graph()
with g.as_default():
inp, out = importer.import_graph_def(
graph_def=gdef, return_elements=["input", "output"])
inp = inp.outputs[0]
out = out.outputs[0]
with csess.Session(config=sessconfig, graph=g) as sess:
val = sess.run(out, {inp: dumm_inp})
return val
# Use real data that is representative of the inference dataset
# for calibration. For this test script it is random data.
def execute_calibration(gdef, dumm_inp):
"""Run given calibration graph multiple times."""
gpu_options = None
if trt.trt_convert.get_linked_tensorrt_version()[0] == 3:
gpu_options = cpb2.GPUOptions(per_process_gpu_memory_fraction=0.50)
ops.reset_default_graph()
g = ops.Graph()
with g.as_default():
inp, out = importer.import_graph_def(
graph_def=gdef, return_elements=["input", "output"])
inp = inp.outputs[0]
out = out.outputs[0]
with csess.Session(
config=cpb2.ConfigProto(gpu_options=gpu_options), graph=g) as sess:
    # Run over real calibration data here; we are mimicking a calibration set
    # of 30 different batches. Use as much calibration data as you want.
for _ in range(30):
val = sess.run(out, {inp: dumm_inp})
return val
def user(multi_engine,
run_graph=execute_graph,
run_calibration=execute_calibration):
"""Example function that converts a graph to TFTRT graph."""
if multi_engine:
inp_dims = (2, 3, 7, 5)
orig_graph = get_multi_engine_graph_def()
else:
inp_dims = (100, 24, 24, 2)
orig_graph = get_simple_graph_def() # use a frozen graph for inference
dummy_input = np.random.random_sample(inp_dims)
# Get optimized graph
trt_graph = trt.create_inference_graph(
input_graph_def=orig_graph,
outputs=["output"],
max_batch_size=inp_dims[0],
max_workspace_size_bytes=1 << 25,
precision_mode="FP32", # TRT Engine precision "FP32","FP16" or "INT8"
minimum_segment_size=2, # minimum number of nodes in an engine
is_dynamic_op=False,
maximum_cached_engines=1,
cached_engine_batches=[])
o1 = run_graph(orig_graph, dummy_input)
o2 = run_graph(trt_graph, dummy_input)
o3 = run_graph(trt_graph, dummy_input)
assert np.array_equal(o1, o2)
assert np.array_equal(o3, o2) # sanity check
fp16_graph = trt.create_inference_graph(
input_graph_def=orig_graph,
outputs=["output"],
max_batch_size=inp_dims[0],
max_workspace_size_bytes=1 << 25,
precision_mode="FP16", # TRT Engine precision "FP32","FP16" or "INT8"
minimum_segment_size=2, # minimum number of nodes in an engine
is_dynamic_op=False,
maximum_cached_engines=1,
cached_engine_batches=[])
int8_calib_gdef = trt.create_inference_graph(
input_graph_def=orig_graph,
outputs=["output"],
max_batch_size=inp_dims[0],
max_workspace_size_bytes=1 << 25,
precision_mode="INT8", # TRT Engine precision "FP32","FP16" or "INT8"
minimum_segment_size=2, # minimum number of nodes in an engine
is_dynamic_op=False,
maximum_cached_engines=1,
cached_engine_batches=[])
o4 = run_graph(fp16_graph, dummy_input)
_ = run_calibration(int8_calib_gdef, dummy_input)
int8_graph = trt.calib_graph_to_infer_graph(int8_calib_gdef)
o5 = run_graph(int8_graph, dummy_input)
print("Is FP32 == FP16? %s (False is possible)" % np.allclose(o1, o4))
print("Is FP32 == INT8? %s (False is possible)" % np.allclose(o1, o5))
print("Pass")
def auto(multi_engine):
"""Run the conversion as an optimization pass."""
if multi_engine:
inp_dims = (2, 3, 7, 5)
orig_graph = get_multi_engine_graph_def()
else:
inp_dims = (100, 24, 24, 2)
orig_graph = get_simple_graph_def() # use a frozen graph for inference
dummy_input = np.random.random_sample(inp_dims)
opt_config = rwpb2.RewriterConfig()
opt_config.meta_optimizer_iterations = opt_config.ONE
opt_config.optimizers.extend(["constfold", "layout"])
custom_op = opt_config.custom_optimizers.add()
custom_op.name = "TensorRTOptimizer"
custom_op.parameter_map["minimum_segment_size"].i = 3
custom_op.parameter_map["precision_mode"].s = to_bytes("FP32")
custom_op.parameter_map["max_batch_size"].i = inp_dims[0]
custom_op.parameter_map["max_workspace_size_bytes"].i = 1 << 25
print(custom_op)
gpu_options = None
if trt.trt_convert.get_linked_tensorrt_version()[0] == 3:
gpu_options = cpb2.GPUOptions(per_process_gpu_memory_fraction=0.50)
graph_options = cpb2.GraphOptions(rewrite_options=opt_config)
sessconfig = cpb2.ConfigProto(
gpu_options=gpu_options, graph_options=graph_options)
print(sessconfig)
g = ops.Graph()
ops.reset_default_graph()
with g.as_default():
inp, out = importer.import_graph_def(
graph_def=orig_graph, return_elements=["input", "output"], name="")
inp = inp.outputs[0]
out = out.outputs[0]
with csess.Session(config=sessconfig, graph=g) as sess:
val = sess.run(out, {inp: dummy_input})
print(val.shape)
if "__main__" in __name__:
P = argparse.ArgumentParser(
prog="tftrt_test",
description="Example utilization of TensorFlow-TensorRT integration")
P.add_argument(
"--automatic",
"-a",
action="store_true",
help="Do TRT conversion automatically",
default=False)
P.add_argument(
"--multi-engine",
"-m",
action="store_true",
help="Use a graph that will result in 2 engines",
default=False)
flags, unparsed = P.parse_known_args()
if flags.automatic:
auto(flags.multi_engine)
else:
user(flags.multi_engine)
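# Example invocations (illustrative; the flags are the ones defined by the
# argparse parser above):
#   python test_tftrt.py                 # manual conversion via user()
#   python test_tftrt.py --automatic     # conversion as a rewriter pass via auto()
#   python test_tftrt.py -a -m           # automatic conversion, two-engine graph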
|
apache-2.0
|
Ragowit/fireplace
|
tests/test_tgt.py
|
2
|
19665
|
from utils import *
def test_anubarak():
game = prepare_empty_game()
anubarak = game.player1.give("AT_036")
anubarak.play()
game.player1.discard_hand()
assert len(game.player1.field) == 1
assert len(game.player1.hand) == 0
anubarak.destroy()
assert len(game.player1.field) == 1
assert len(game.player1.hand) == 1
token = game.player1.field[0]
assert token.id == "AT_036t"
anubarak = game.player1.hand[0]
assert anubarak.id == "AT_036"
game.end_turn(); game.end_turn()
# Test for issue #283: play Anub'arak again
anubarak.play()
assert len(game.player1.field) == 2
assert anubarak in game.player1.field
assert token in game.player1.field
assert len(game.player1.hand) == 0
def test_astral_communion():
game = prepare_game(game_class=Game)
game.player1.discard_hand()
astral = game.player1.give("AT_043")
game.player1.give(INNERVATE).play()
game.player1.give(INNERVATE).play()
for i in range(5):
game.player1.give(WISP)
assert game.player1.max_mana == 1
assert game.player1.mana == 5
astral.play()
assert not game.player1.hand
assert game.player1.mana == game.player1.max_mana == 10
def test_astral_communion_full_mana():
game = prepare_game()
assert game.player1.mana == 10
astral = game.player1.give("AT_043")
for i in range(5):
game.player1.give(WISP)
astral.play()
assert len(game.player1.hand) == 1
assert game.player1.hand[0].id == "CS2_013t"
assert game.player1.max_mana == 10
assert game.player1.mana == 6
def test_aviana():
game = prepare_game()
aviana = game.player1.give("AT_045")
wisp1 = game.player1.give(WISP)
assert wisp1.cost == 0
deathwing = game.player1.give("NEW1_030")
assert deathwing.cost == 10
molten = game.player1.give("EX1_620")
assert molten.cost == 25
game.player1.give(MOONFIRE).play(game.player1.hero)
assert molten.cost == 25 - 1
aviana.play()
for minion in (wisp1, deathwing, molten):
assert minion.cost == 1
wisp2 = game.player2.give(WISP)
assert wisp2.cost == 0
aviana.destroy()
assert wisp1.cost == 0
assert deathwing.cost == 10
assert molten.cost == 25 - 1
def test_beneath_the_grounds():
game = prepare_empty_game()
game.player2.discard_hand()
assert len(game.player2.deck) == 0
grounds = game.player1.give("AT_035")
grounds.play()
assert len(game.player2.deck) == 3
assert len(game.player2.hand) == 0
game.end_turn()
assert len(game.player2.hand) == 0
assert len(game.player1.field) == 3
for minion in game.player1.field:
assert minion.id == "AT_036t"
def test_bolf_ramshield():
game = prepare_game()
bolf = game.player1.give("AT_124")
bolf.play()
game.player1.give(MOONFIRE).play(target=game.player1.hero)
assert game.player1.hero.health == 30
assert bolf.damage == 1
game.player1.give(DAMAGE_5).play(target=game.player1.hero)
assert game.player1.hero.health == 30
assert bolf.damage == 1 + 5
game.player1.give(DAMAGE_5).play(target=game.player1.hero)
assert bolf.dead
assert game.player1.hero.health == 30
def test_burgle():
game = prepare_empty_game()
burgle = game.player1.give("AT_033")
burgle.play()
assert len(game.player1.hand) == 2
assert game.player1.hand[0].card_class == game.player2.hero.card_class
assert game.player1.hand[0].type != CardType.HERO
assert game.player1.hand[1].card_class == game.player2.hero.card_class
assert game.player1.hand[1].type != CardType.HERO
def test_dalaran_aspirant():
game = prepare_game(ROGUE, ROGUE)
aspirant = game.player1.give("AT_006")
aspirant.play()
assert aspirant.spellpower == game.player1.spellpower == 0
game.player1.hero.power.use()
assert aspirant.spellpower == game.player1.spellpower == 1
game.end_turn(); game.end_turn()
game.player1.hero.power.use()
assert aspirant.spellpower == game.player1.spellpower == 2
game.player1.give(MOONFIRE).play(target=game.player2.hero)
assert game.player2.hero.health == 30 - 3
game.end_turn()
# Steal the aspirant (HearthSim/hs-bugs#412)
game.player2.give(MIND_CONTROL).play(target=aspirant)
assert game.player1.spellpower == 0
assert aspirant.spellpower == game.player2.spellpower == 2
game.player2.give(MOONFIRE).play(target=game.player1.hero)
assert game.player1.hero.health == 30 - 3
def test_dark_bargain():
game = prepare_game()
for i in range(3):
game.player2.summon(WISP)
assert len(game.player2.field) == 3
assert len(game.player1.hand) == 4
bargain = game.player1.give("AT_025")
bargain.play()
assert len(game.player2.field) == 1
assert len(game.player1.hand) == 2
def test_demonfuse():
game = prepare_game()
game.player2.max_mana = 9
demonfuse = game.player1.give("AT_024")
game.player2.summon(WISP)
imp = game.player1.give(IMP)
imp.play()
game.player2.summon(IMP)
assert len(demonfuse.targets) == 2
assert imp.atk == imp.health == 1
demonfuse.play(target=imp)
assert imp.atk == imp.health == 4
assert game.player2.max_mana == 10
def test_demonfuse_sense_demons():
# https://github.com/HearthSim/hs-bugs/issues/111
game = prepare_empty_game()
demonfuse1 = game.player1.give("AT_024")
demonfuse1.shuffle_into_deck()
demonfuse2 = game.player1.give("AT_024")
demonfuse2.shuffle_into_deck()
sense = game.player1.give("EX1_317")
sense.play()
assert demonfuse1.zone == Zone.DECK
assert demonfuse2.zone == Zone.DECK
def test_dragonhawk_rider():
game = prepare_game(WARRIOR, WARRIOR)
rider = game.player1.give("AT_083")
game.player1.hero.power.use()
rider.play()
assert not rider.windfury
game.end_turn()
# do not trigger on enemy hero power
game.player2.hero.power.use()
assert not rider.windfury
game.end_turn()
# should gain windfury on inspire for single turn
game.player1.hero.power.use()
assert rider.windfury
rider.attack(game.player2.hero)
rider.attack(game.player2.hero)
game.end_turn()
assert not rider.windfury
game.end_turn()
# should lose windfury and effect when silenced
game.player1.hero.power.use()
assert rider.windfury
rider.attack(game.player2.hero)
assert rider.can_attack()
game.player1.give(SILENCE).play(target=rider)
assert not rider.windfury
assert not rider.can_attack()
game.end_turn(); game.end_turn()
game.player1.hero.power.use()
assert not rider.windfury
def test_dreadsteed():
game = prepare_game()
dreadsteed = game.player1.give("AT_019")
dreadsteed.play()
assert len(game.player1.field) == 1
game.player1.give(MOONFIRE).play(target=dreadsteed)
assert dreadsteed.dead
assert len(game.player1.field) == 1
def test_enter_the_coliseum():
game = prepare_game()
game.player1.give("AT_078").play()
assert len(game.player1.field) == 0
assert len(game.player2.field) == 0
game.end_turn()
game.end_turn()
game.player1.give(WISP).play()
game.player1.give(WISP).play()
game.player1.give(WISP).play()
game.player1.give(WISP).play()
game.player1.give(WISP).play()
game.player1.give(ANIMATED_STATUE).play()
game.player1.give(ANIMATED_STATUE).play()
game.end_turn()
game.player2.give(WISP).play()
game.player2.give(WISP).play()
game.player2.give(ANIMATED_STATUE).play()
game.player2.give("AT_078").play()
assert len(game.player1.field) == 1
assert game.player1.field[0].id == ANIMATED_STATUE
assert len(game.player2.field) == 1
assert game.player2.field[0].id == ANIMATED_STATUE
def test_fencing_coach():
game = prepare_game(WARRIOR, WARRIOR)
coach = game.player1.give("AT_115")
assert game.player1.hero.power.cost == 2
coach.play()
assert game.player1.hero.power.cost == 0
game.end_turn(); game.end_turn()
assert game.player1.hero.power.cost == 0
game.player1.hero.power.activate()
assert game.player1.hero.power.cost == 2
def test_fist_of_jaraxxus():
game = prepare_empty_game()
fist1 = game.player1.give("AT_022")
assert game.player2.hero.health == 30
game.player1.give(SOULFIRE).play(target=game.player1.hero)
assert game.player2.hero.health == 30 - 4
assert fist1.zone == Zone.DISCARD
fist2 = game.player1.give("AT_022")
fist2.play()
assert game.player2.hero.health == 30 - 4 - 4
def test_garrison_commander():
game = prepare_game(HUNTER, HUNTER)
heropower = game.player1.hero.power
heropower.use()
assert not heropower.is_usable()
commander = game.player1.give("AT_080")
commander.play()
assert heropower.additional_activations == 1
assert heropower.is_usable()
heropower.use()
assert not heropower.is_usable()
game.end_turn(); game.end_turn()
assert heropower.is_usable()
heropower.use()
assert heropower.is_usable()
commander.destroy()
assert not heropower.is_usable()
def test_gormok_the_impaler():
game = prepare_game()
yeti = game.player1.give("CS2_182")
dummy1 = game.player1.give(TARGET_DUMMY)
yeti.play()
dummy1.play()
game.end_turn()
gormok1 = game.player2.give("AT_122")
assert not gormok1.has_target()
gormok1.play()
assert game.player1.hero.health == game.player1.hero.max_health
assert yeti.health == 5
assert dummy1.health == 2
game.player2.discard_hand()
gormok2 = game.player2.give("AT_122")
wisp1 = game.player2.give(WISP)
wisp2 = game.player2.give(WISP)
dummy2 = game.player2.give(TARGET_DUMMY)
wisp1.play()
wisp2.play()
dummy2.play()
assert len(game.player2.field) == 4
assert gormok2.has_target()
assert game.player1.hero in gormok2.targets
assert game.player2.hero in gormok2.targets
assert yeti in gormok2.targets
assert dummy1 in gormok2.targets
assert gormok1 in gormok2.targets
assert wisp1 in gormok2.targets
assert wisp2 in gormok2.targets
assert dummy2 in gormok2.targets
gormok2.play(target=yeti)
assert yeti.health == 1
assert gormok2.atk == 4 == gormok2.health == 4
def test_grand_crusader():
game = prepare_game()
game.player1.discard_hand()
crusader = game.player1.give("AT_118")
assert len(game.player1.hand) == 1
crusader.play()
assert len(game.player1.hand) == 1
card = game.player1.hand[0]
assert card.card_class == CardClass.PALADIN
assert card.data.collectible
assert card.type != CardType.HERO
def test_icehowl():
game = prepare_game()
icehowl = game.player1.give("AT_125")
icehowl.play()
assert icehowl.charge
assert icehowl.cannot_attack_heroes
assert not icehowl.can_attack()
assert not icehowl.can_attack(game.player2.hero)
assert not icehowl.can_attack(game.player1.hero)
game.end_turn()
wisp = game.player2.give(WISP)
wisp.play()
game.end_turn()
assert icehowl.can_attack()
assert len(icehowl.attack_targets) == 1
assert game.player2.hero not in icehowl.attack_targets
game.player1.give(SILENCE).play(target=icehowl)
assert not icehowl.cannot_attack_heroes
assert len(icehowl.attack_targets) == 2
assert game.player2.hero in icehowl.attack_targets
def test_kings_elekk():
game = prepare_empty_game()
wisp = game.player1.give(WISP)
wisp.shuffle_into_deck()
elekk = game.player1.give("AT_058")
elekk.play()
assert wisp in game.player1.hand
deathwing = game.player2.give("NEW1_030")
deathwing.shuffle_into_deck()
wisp2 = game.player1.give(WISP)
wisp2.shuffle_into_deck()
elekk2 = game.player1.give("AT_058")
elekk2.play()
assert wisp2 in game.player1.deck
assert deathwing in game.player2.deck
def test_lance_carrier():
game = prepare_game()
wisp = game.player2.summon(WISP)
carrier1 = game.player1.give("AT_084")
assert len(carrier1.targets) == 0
carrier1.play()
game.end_turn()
carrier2 = game.player2.give("AT_084")
assert wisp.atk == 1
carrier2.play(target=wisp)
assert wisp.atk == 3
game.end_turn(); game.end_turn()
assert wisp.atk == 3
def test_lock_and_load():
game = prepare_empty_game()
lockandload = game.player1.give("AT_061")
game.player1.give(THE_COIN).play()
assert len(game.player1.hand) == 1
lockandload.play()
assert game.player1.hero.buffs
assert len(game.player1.hand) == 0
game.player1.give(THE_COIN).play()
assert len(game.player1.hand) == 1
card = game.player1.hand[0]
assert card.card_class == CardClass.HUNTER
assert card.data.collectible
assert card.type != CardType.HERO
def test_lowly_squire():
game = prepare_game(HUNTER, HUNTER)
squire = game.player1.give("AT_082")
squire.play()
assert squire.atk == 1
game.player1.hero.power.use()
assert squire.atk == 2
game.end_turn(); game.end_turn()
assert squire.atk == 2
game.player1.hero.power.use()
assert squire.atk == 3
def test_master_of_ceremonies():
game = prepare_game()
master = game.player1.give("AT_117")
assert not master.powered_up
master.play()
assert master.atk == 4
assert master.health == 2
kobold = game.player1.give(KOBOLD_GEOMANCER)
kobold.play()
master2 = game.player1.give("AT_117")
assert master2.powered_up
master2.play()
assert master2.atk == 4 + 2
assert master2.health == 2 + 2
def test_master_of_ceremonies_friendly_jungle_moonkin():
game = prepare_game()
moonkin = game.player1.give("LOE_051")
moonkin.play()
master = game.player1.give("AT_117")
assert master.powered_up
master.play()
assert master.atk == 4 + 2
assert master.health == 2 + 2
def test_master_of_ceremonies_enemy_jungle_moonkin():
# Test for https://github.com/HearthSim/hs-bugs/issues/337
game = prepare_game()
moonkin = game.player1.give("LOE_051")
moonkin.play()
game.end_turn()
master = game.player2.give("AT_117")
assert not master.powered_up
master.play()
assert master.atk == 4
assert master.health == 2
def test_power_word_glory():
game = prepare_game()
wisp1 = game.player1.give(WISP)
wisp1.play()
glory1 = game.player1.give("AT_013")
glory1.play(target=wisp1)
game.end_turn(); game.end_turn()
assert game.player1.hero.health == 30
wisp1.attack(game.player2.hero)
assert game.player1.hero.health == 30
game.end_turn(); game.end_turn()
game.player1.hero.set_current_health(15)
assert game.player1.hero.health == 15
wisp1.attack(game.player2.hero)
assert game.player1.hero.health == 15 + 4
wisp2 = game.player2.summon(WISP)
glory2 = game.player1.give("AT_013")
glory2.play(target=wisp2)
game.end_turn()
assert game.player1.hero.health == 15 + 4
wisp2.attack(wisp1)
assert game.player1.hero.health == 15 + 4 + 4
def test_saboteur():
game = prepare_game()
assert game.player1.hero.power.cost == 2
assert game.player2.hero.power.cost == 2
saboteur = game.player1.give("AT_086")
saboteur.play()
assert game.player1.hero.power.cost == 2
assert game.player2.hero.power.cost == 2
game.end_turn()
assert game.player1.hero.power.cost == 2
assert game.player2.hero.power.cost == 2 + 5
saboteur.destroy()
assert game.player1.hero.power.cost == 2
assert game.player2.hero.power.cost == 2 + 5
game.end_turn()
assert game.player1.hero.power.cost == 2
assert game.player2.hero.power.cost == 2
def test_seal_of_champions():
game = prepare_game()
wisp = game.player1.give(WISP)
wisp.play()
assert wisp.atk == 1
seal1 = game.player1.give("AT_074")
seal1.play(target=wisp)
assert wisp.divine_shield
assert wisp.atk == 1 + 3
game.end_turn(); game.end_turn()
assert wisp.divine_shield
assert wisp.atk == 1 + 3
game.player1.give(MOONFIRE).play(target=wisp)
assert not wisp.divine_shield
assert wisp.atk == 1 + 3
seal2 = game.player1.give("AT_074")
seal2.play(target=wisp)
assert wisp.atk == 1 + 3 + 3
assert wisp.divine_shield
game.player1.give(SILENCE).play(target=wisp)
assert wisp.atk == 1
assert not wisp.divine_shield
def test_seal_of_champions_shrinkmeister():
game = prepare_game()
wisp = game.player1.give(WISP)
wisp.play()
assert wisp.atk == 1
seal = game.player1.give("AT_074")
seal.play(target=wisp)
assert wisp.atk == 1 + 3
shrinkmeister = game.player1.give("GVG_011")
shrinkmeister.play(target=wisp)
assert wisp.atk == 1 + 3 - 2
game.end_turn()
assert wisp.atk == 1 + 3
def test_silver_hand_regent():
game = prepare_game(HUNTER, HUNTER)
regent = game.player1.give("AT_100")
regent.play()
assert len(game.player1.field) == 1
game.player1.hero.power.use()
assert len(game.player1.field) == 2
assert game.player1.field[1].id == "CS2_101t"
def test_skycapn_kragg():
game = prepare_game()
kragg = game.player1.give("AT_070")
wisp = game.player1.give(WISP)
wisp.play()
assert kragg.cost == 7
game.end_turn()
pirate = game.player2.give("CS2_146")
pirate.play()
assert kragg.cost == 7
game.end_turn()
game.player1.give("CS2_146").play()
assert kragg.cost == 7 - 1
game.player1.field[-1].destroy()
assert kragg.cost == 7
game.end_turn(); game.end_turn()
game.player1.summon("CS2_146")
assert kragg.cost == 7 - 1
assert game.player1.mana == 10
kragg.play()
assert game.player1.mana == 10 - 6
def test_the_skeleton_knight():
game = prepare_empty_game()
sk = game.player1.give("AT_128")
sk.play()
# prepare joust
deathwing = game.player1.give("NEW1_030")
deathwing.shuffle_into_deck()
wisp = game.player2.give(WISP)
wisp.shuffle_into_deck()
# Joust deathwing vs wisp
sk.destroy()
assert len(game.player1.field) == 0
assert game.player1.hand.contains("AT_128")
def test_the_skeleton_knight_full_hand():
game = prepare_empty_game()
sk = game.player1.give("AT_128")
sk.play()
# prepare joust
deathwing = game.player1.give("NEW1_030")
deathwing.shuffle_into_deck()
wisp = game.player2.give(WISP)
wisp.shuffle_into_deck()
for i in range(10):
game.player1.give(WISP)
assert len(game.player1.hand) == 10
sk.destroy()
assert len(game.player1.field) == 0
assert not game.player1.hand.contains("AT_128")
def test_tiny_knight_of_evil():
game = prepare_empty_game()
knight = game.player1.give("AT_021")
knight.play()
assert len(game.player1.hand) == 0
game.player1.give(SOULFIRE).play(target=game.player2.hero)
assert not knight.buffs
assert knight.atk == 3
assert knight.health == 2
game.player1.give(WISP)
game.player1.give(SOULFIRE).play(target=game.player2.hero)
assert knight.buffs
assert knight.atk == 3 + 1
assert knight.health == 2 + 1
def test_varian_wrynn():
game = prepare_empty_game()
wisp1 = game.player1.give(WISP)
wisp1.shuffle_into_deck()
wisp2 = game.player1.give(WISP)
wisp2.shuffle_into_deck()
moonfire = game.player1.give(MOONFIRE)
moonfire.shuffle_into_deck()
wrynn = game.player1.give("AT_072")
wrynn.play()
assert len(game.player1.hand) == 1
assert moonfire in game.player1.hand
assert wrynn in game.player1.field
assert wisp1 in game.player1.field
assert wisp2 in game.player1.field
def test_void_crusher():
game = prepare_game(WARLOCK, WARLOCK)
for i in range(3):
game.player2.summon(WISP)
crusher = game.player1.give("AT_023")
crusher.play()
assert len(game.player1.field) == 1
assert len(game.player2.field) == 3
game.player1.hero.power.use()
assert crusher.dead
assert len(game.player1.field) == 0
assert len(game.player2.field) == 2
def test_wilfred_fizzlebang():
game = prepare_empty_game(WARLOCK, WARLOCK)
game.player1.discard_hand()
fizzlebang = game.player1.give("AT_027")
fizzlebang.play()
game.player1.give("CS2_029").shuffle_into_deck()
game.player1.give("CS2_029").shuffle_into_deck()
assert len(game.player1.deck) == 2
assert len(game.player1.hand) == 0
game.player1.hero.power.use()
assert len(game.player1.hand) == 1
fireball1 = game.player1.hand[0]
assert fireball1.cost == 0
fireball1.discard()
game.end_turn(); game.end_turn()
fireball2 = game.player1.hand[0]
assert fireball2.cost == 4
def test_wrathguard():
game = prepare_game()
wrathguard = game.player1.give("AT_026")
wrathguard.play()
assert game.player1.hero.health == 30
game.player1.give(MOONFIRE).play(target=game.player2.hero)
assert game.player1.hero.health == 30
game.player1.give(MOONFIRE).play(target=wrathguard)
assert game.player1.hero.health == 30 - 1
game.player1.give(CIRCLE_OF_HEALING)
assert game.player1.hero.health == 29
game.end_turn()
wargolem = game.player2.give("CS2_186")
wargolem.play()
game.end_turn()
wrathguard.attack(target=wargolem)
assert wrathguard.dead
assert game.player1.hero.health == 29 - 7
|
agpl-3.0
|
mattsch/Sickbeard
|
cherrypy/tutorial/tut04_complex_site.py
|
2
|
2889
|
"""
Tutorial - Multiple objects
This tutorial shows you how to create a site structure using multiple,
possibly nested, request handler objects.
"""
import cherrypy
class HomePage:
def index(self):
return '''
<p>Hi, this is the home page! Check out the other
fun stuff on this site:</p>
<ul>
<li><a href="/joke/">A silly joke</a></li>
<li><a href="/links/">Useful links</a></li>
</ul>'''
index.exposed = True
class JokePage:
def index(self):
return '''
<p>"In Python, how do you create a string of random
characters?" -- "Read a Perl file!"</p>
<p>[<a href="../">Return</a>]</p>'''
index.exposed = True
class LinksPage:
def __init__(self):
# Request handler objects can create their own nested request
# handler objects. Simply create them inside their __init__
# methods!
self.extra = ExtraLinksPage()
def index(self):
# Note the way we link to the extra links page (and back).
# As you can see, this object doesn't really care about its
# absolute position in the site tree, since we use relative
# links exclusively.
return '''
<p>Here are some useful links:</p>
<ul>
<li><a href="http://www.cherrypy.org">The CherryPy Homepage</a></li>
<li><a href="http://www.python.org">The Python Homepage</a></li>
</ul>
<p>You can check out some extra useful
links <a href="./extra/">here</a>.</p>
<p>[<a href="../">Return</a>]</p>
'''
index.exposed = True
class ExtraLinksPage:
def index(self):
# Note the relative link back to the Links page!
return '''
<p>Here are some extra useful links:</p>
<ul>
<li><a href="http://del.icio.us">del.icio.us</a></li>
<li><a href="http://www.mornography.de">Hendrik's weblog</a></li>
</ul>
<p>[<a href="../">Return to links page</a>]</p>'''
index.exposed = True
# Of course we can also mount request handler objects right here!
root = HomePage()
root.joke = JokePage()
root.links = LinksPage()
cherrypy.tree.mount(root)
# Remember, we don't need to mount ExtraLinksPage here, because
# LinksPage does that itself on initialization. In fact, there is
# no reason why you shouldn't let your root object take care of
# creating all contained request handler objects.
if __name__ == '__main__':
import os.path
thisdir = os.path.dirname(__file__)
cherrypy.quickstart(config=os.path.join(thisdir, 'tutorial.conf'))
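# With the tree mounted as above, the pages are served at (illustrative):
#   /             -> HomePage.index
#   /joke/        -> JokePage.index
#   /links/       -> LinksPage.index
#   /links/extra/ -> ExtraLinksPage.index (created in LinksPage.__init__)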
|
gpl-3.0
|
ZHAW-INES/rioxo-uClinux-dist
|
user/python/python-2.4.4/Lib/xdrlib.py
|
5
|
7353
|
"""Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
"""Exception class for this module. Use:
except xdrlib.Error, var:
# var has the Error instance for the exception
Public ivars:
msg -- contains the message
"""
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return repr(self.msg)
def __str__(self):
return str(self.msg)
class ConversionError(Error):
pass
class Packer:
"""Pack various data representations into a buffer."""
def __init__(self):
self.reset()
def reset(self):
self.__buf = _StringIO()
def get_buffer(self):
return self.__buf.getvalue()
# backwards compatibility
get_buf = get_buffer
def pack_uint(self, x):
self.__buf.write(struct.pack('>L', x))
pack_int = pack_uint
pack_enum = pack_int
def pack_bool(self, x):
if x: self.__buf.write('\0\0\0\1')
else: self.__buf.write('\0\0\0\0')
def pack_uhyper(self, x):
self.pack_uint(x>>32 & 0xffffffffL)
self.pack_uint(x & 0xffffffffL)
pack_hyper = pack_uhyper
def pack_float(self, x):
try: self.__buf.write(struct.pack('>f', x))
except struct.error, msg:
raise ConversionError, msg
def pack_double(self, x):
try: self.__buf.write(struct.pack('>d', x))
except struct.error, msg:
raise ConversionError, msg
def pack_fstring(self, n, s):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
data = s[:n]
n = ((n+3)/4)*4
data = data + (n - len(data)) * '\0'
self.__buf.write(data)
pack_fopaque = pack_fstring
def pack_string(self, s):
n = len(s)
self.pack_uint(n)
self.pack_fstring(n, s)
pack_opaque = pack_string
pack_bytes = pack_string
def pack_list(self, list, pack_item):
for item in list:
self.pack_uint(1)
pack_item(item)
self.pack_uint(0)
def pack_farray(self, n, list, pack_item):
if len(list) != n:
raise ValueError, 'wrong array size'
for item in list:
pack_item(item)
def pack_array(self, list, pack_item):
n = len(list)
self.pack_uint(n)
self.pack_farray(n, list, pack_item)
class Unpacker:
"""Unpacks various data representations from the given buffer."""
def __init__(self, data):
self.reset(data)
def reset(self, data):
self.__buf = data
self.__pos = 0
def get_position(self):
return self.__pos
def set_position(self, position):
self.__pos = position
def get_buffer(self):
return self.__buf
def done(self):
if self.__pos < len(self.__buf):
raise Error('unextracted data remains')
def unpack_uint(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
x = struct.unpack('>L', data)[0]
try:
return int(x)
except OverflowError:
return x
def unpack_int(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>l', data)[0]
unpack_enum = unpack_int
unpack_bool = unpack_int
def unpack_uhyper(self):
hi = self.unpack_uint()
lo = self.unpack_uint()
return long(hi)<<32 | lo
def unpack_hyper(self):
x = self.unpack_uhyper()
if x >= 0x8000000000000000L:
x = x - 0x10000000000000000L
return x
def unpack_float(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>f', data)[0]
def unpack_double(self):
i = self.__pos
self.__pos = j = i+8
data = self.__buf[i:j]
if len(data) < 8:
raise EOFError
return struct.unpack('>d', data)[0]
def unpack_fstring(self, n):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
i = self.__pos
j = i + (n+3)/4*4
if j > len(self.__buf):
raise EOFError
self.__pos = j
return self.__buf[i:i+n]
unpack_fopaque = unpack_fstring
def unpack_string(self):
n = self.unpack_uint()
return self.unpack_fstring(n)
unpack_opaque = unpack_string
unpack_bytes = unpack_string
def unpack_list(self, unpack_item):
list = []
while 1:
x = self.unpack_uint()
if x == 0: break
if x != 1:
raise ConversionError, '0 or 1 expected, got %r' % (x,)
item = unpack_item()
list.append(item)
return list
def unpack_farray(self, n, unpack_item):
list = []
for i in range(n):
list.append(unpack_item())
return list
def unpack_array(self, unpack_item):
n = self.unpack_uint()
return self.unpack_farray(n, unpack_item)
# test suite
def _test():
p = Packer()
packtest = [
(p.pack_uint, (9,)),
(p.pack_bool, (None,)),
(p.pack_bool, ('hello',)),
(p.pack_uhyper, (45L,)),
(p.pack_float, (1.9,)),
(p.pack_double, (1.9,)),
(p.pack_string, ('hello world',)),
(p.pack_list, (range(5), p.pack_uint)),
(p.pack_array, (['what', 'is', 'hapnin', 'doctor'], p.pack_string)),
]
succeedlist = [1] * len(packtest)
count = 0
for method, args in packtest:
print 'pack test', count,
try:
method(*args)
print 'succeeded'
except ConversionError, var:
print 'ConversionError:', var.msg
succeedlist[count] = 0
count = count + 1
data = p.get_buffer()
# now verify
up = Unpacker(data)
unpacktest = [
(up.unpack_uint, (), lambda x: x == 9),
(up.unpack_bool, (), lambda x: not x),
(up.unpack_bool, (), lambda x: x),
(up.unpack_uhyper, (), lambda x: x == 45L),
(up.unpack_float, (), lambda x: 1.89 < x < 1.91),
(up.unpack_double, (), lambda x: 1.89 < x < 1.91),
(up.unpack_string, (), lambda x: x == 'hello world'),
(up.unpack_list, (up.unpack_uint,), lambda x: x == range(5)),
(up.unpack_array, (up.unpack_string,),
lambda x: x == ['what', 'is', 'hapnin', 'doctor']),
]
count = 0
for method, args, pred in unpacktest:
print 'unpack test', count,
try:
if succeedlist[count]:
x = method(*args)
print pred(x) and 'succeeded' or 'failed', ':', x
else:
print 'skipping'
except ConversionError, var:
print 'ConversionError:', var.msg
count = count + 1
if __name__ == '__main__':
_test()
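# Minimal usage sketch (illustrative; kept as a comment so importing the module
# stays free of side effects). It only uses the Packer/Unpacker API above:
#
#   p = Packer()
#   p.pack_string('hello world')
#   p.pack_uint(9)
#   u = Unpacker(p.get_buffer())
#   assert u.unpack_string() == 'hello world'
#   assert u.unpack_uint() == 9
#   u.done()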
|
gpl-2.0
|
imsparsh/python-social-auth
|
social/backends/exacttarget.py
|
12
|
3066
|
"""
ExactTarget OAuth support.
Supports authentication from IMH using a JWT token and a pre-shared key.
Requires the pyjwt package.
"""
from datetime import timedelta, datetime
import jwt
from social.exceptions import AuthFailed, AuthCanceled
from social.backends.oauth import BaseOAuth2
class ExactTargetOAuth2(BaseOAuth2):
name = 'exacttarget'
def get_user_details(self, response):
"""Use the email address of the user, suffixed by _et"""
user = response.get('token', {})\
.get('request', {})\
.get('user', {})
if 'email' in user:
user['username'] = user['email']
return user
def uses_redirect(self):
return False
def auth_url(self):
return None
def process_error(self, data):
if data.get('error'):
error = self.data.get('error_description') or self.data['error']
raise AuthFailed(self, error)
def do_auth(self, token, *args, **kwargs):
dummy, secret = self.get_key_and_secret()
try: # Decode the token, using the Application Signature from settings
decoded = jwt.decode(token, secret)
except jwt.DecodeError: # Wrong signature, fail authentication
raise AuthCanceled(self)
kwargs.update({'response': {'token': decoded}, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
token = self.data.get('jwt', {})
if not token:
raise AuthFailed(self, 'Authentication Failed')
return self.do_auth(token, *args, **kwargs)
def extra_data(self, user, uid, response, details):
"""Load extra details from the JWT token"""
data = {
'id': details.get('id'),
'email': details.get('email'),
# OAuth token, for use with legacy SOAP API calls:
# http://bit.ly/13pRHfo
'internalOauthToken': details.get('internalOauthToken'),
# Token for use with the Application ClientID for the FUEL API
'oauthToken': details.get('oauthToken'),
# If the token has expired, use the FUEL API to get a new token see
# http://bit.ly/10v1K5l and http://bit.ly/11IbI6F - set legacy=1
'refreshToken': details.get('refreshToken'),
}
# The expiresIn value determines how long the tokens are valid for.
# Take a bit off, then convert to an int timestamp
expiresSeconds = details.get('expiresIn', 0) - 30
expires = datetime.utcnow() + timedelta(seconds=expiresSeconds)
data['expires'] = (expires - datetime(1970, 1, 1)).total_seconds()
if response.get('token'):
token = response['token']
org = token.get('request', {}).get('organization')
if org:
data['stack'] = org.get('stackKey')
data['enterpriseId'] = org.get('enterpriseId')
return data
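# Rough flow (editorial sketch, not part of the original backend): ExactTarget
# posts a signed JWT to the application; auth_complete() reads it from
# self.data['jwt'] and do_auth() verifies it with jwt.decode(token, secret),
# where the secret is the application signature configured for this backend,
# e.g. (hypothetical setting names following the usual convention):
#
#   SOCIAL_AUTH_EXACTTARGET_KEY = '<application client id>'
#   SOCIAL_AUTH_EXACTTARGET_SECRET = '<application signature used to verify the JWT>'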
|
bsd-3-clause
|
Russell-IO/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_networks_facts.py
|
73
|
3447
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_networks_facts
short_description: Retrieve facts about one or more oVirt/RHV networks
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV networks."
notes:
- "This module creates a new top-level C(ovirt_networks) fact, which
contains a list of networks."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search network starting with string vlan1 use: name=vlan1*"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all networks which names start with C(vlan1):
- ovirt_networks_facts:
pattern: name=vlan1*
- debug:
var: ovirt_networks
'''
RETURN = '''
ovirt_networks:
description: "List of dictionaries describing the networks. Network attribues are mapped to dictionary keys,
all networks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
networks_service = connection.system_service().networks_service()
networks = networks_service.list(search=module.params['pattern'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_networks=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in networks
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
gpl-3.0
|
camilonova/django-pipeline
|
pipeline/manifest.py
|
18
|
2084
|
from __future__ import unicode_literals
import os
from django.contrib.staticfiles.finders import get_finders
from pipeline.conf import settings
from manifesto import Manifest
from pipeline.packager import Packager
class PipelineManifest(Manifest):
def __init__(self):
self.packager = Packager()
self.packages = self.collect_packages()
self.finders = get_finders()
self.package_files = []
def collect_packages(self):
packages = []
for package_name in self.packager.packages['css']:
package = self.packager.package_for('css', package_name)
if package.manifest:
packages.append(package)
for package_name in self.packager.packages['js']:
package = self.packager.package_for('js', package_name)
if package.manifest:
packages.append(package)
return packages
def cache(self):
ignore_patterns = getattr(settings, "STATICFILES_IGNORE_PATTERNS", None)
if settings.PIPELINE_ENABLED:
for package in self.packages:
self.package_files.append(package.output_filename)
yield str(self.packager.individual_url(package.output_filename))
else:
for package in self.packages:
for path in self.packager.compile(package.paths):
self.package_files.append(path)
yield str(self.packager.individual_url(path))
for finder in self.finders:
for path, storage in finder.list(ignore_patterns):
# Prefix the relative path if the source storage contains it
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
                # Don't add any duplicates
if prefixed_path not in self.package_files:
self.package_files.append(prefixed_path)
yield str(self.packager.individual_url(prefixed_path))
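# Editorial note: cache() is a generator consumed by the manifesto package; it
# yields the URLs of the packaged (or individually compiled) pipeline files and
# of every other collected static file, presumably so they can be listed in an
# HTML5 application-cache manifest.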
|
mit
|
freevo/freevo1
|
src/plugins/idlebar/cdstatus.py
|
1
|
3044
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# cdstatus.py - IdleBarplugin for monitoring the cdstatus
# -----------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2003 Krister Lagerstrom, et al.
# Please see the file freevo/Docs/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
# python modules
import os
# freevo modules
from plugins.idlebar import IdleBarPlugin
import plugin, config
class PluginInterface(IdleBarPlugin):
"""
Show the status of all rom drives.
Activate with:
| plugin.activate('idlebar.cdstatus')
"""
def __init__(self):
IdleBarPlugin.__init__(self)
self.plugin_name = 'idlebar.cdstatus'
icondir = os.path.join(config.ICON_DIR, 'status')
self.cdimages = {}
self.cdimages ['empty_cdrom'] = os.path.join(icondir, 'cd_inactive.png')
self.cdimages ['audiocd'] = os.path.join(icondir, 'cd_audio.png')
self.cdimages ['audio'] = os.path.join(icondir, 'cd_audio.png')
self.cdimages ['images'] = os.path.join(icondir, 'cd_photo.png')
self.cdimages ['video'] = os.path.join(icondir, 'cd_video.png')
self.cdimages ['dvd'] = os.path.join(icondir, 'cd_video.png')
self.cdimages ['burn'] = os.path.join(icondir, 'cd_burn.png')
self.cdimages ['cdrip'] = os.path.join(icondir, 'cd_rip.png')
self.cdimages ['mixed'] = os.path.join(icondir, 'cd_mixed.png')
def draw(self, (type, object), x, osd):
image = self.cdimages['empty_cdrom']
width = 0
for media in config.REMOVABLE_MEDIA:
image = self.cdimages['empty_cdrom']
if media.type == 'empty_cdrom':
image = self.cdimages['empty_cdrom']
if media.type and self.cdimages.has_key(media.type):
image = self.cdimages[media.type]
else:
image = self.cdimages['mixed']
width += osd.draw_image(image, (x+width, osd.y + 10, -1, -1))[0] + 10
if width:
width -= 10
return width
|
gpl-2.0
|
proxysh/Safejumper-for-Desktop
|
buildlinux/env64/lib/python2.7/site-packages/twisted/internet/iocpreactor/tcp.py
|
13
|
19849
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
TCP support for IOCP reactor
"""
import socket, operator, errno, struct
from zope.interface import implementer, classImplements
from twisted.internet import interfaces, error, address, main, defer
from twisted.internet.protocol import Protocol
from twisted.internet.abstract import _LogOwner, isIPv6Address
from twisted.internet.tcp import _SocketCloser, Connector as TCPConnector
from twisted.internet.tcp import _AbortingMixin, _BaseBaseClient, _BaseTCPClient
from twisted.python import log, failure, reflect
from twisted.python.compat import _PY3, nativeString
from twisted.internet.iocpreactor import iocpsupport as _iocp, abstract
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.internet.iocpreactor.const import ERROR_IO_PENDING
from twisted.internet.iocpreactor.const import SO_UPDATE_CONNECT_CONTEXT
from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
from twisted.internet.iocpreactor.const import ERROR_CONNECTION_REFUSED
from twisted.internet.iocpreactor.const import ERROR_NETWORK_UNREACHABLE
try:
from twisted.internet._newtls import startTLS as _startTLS
except ImportError:
_startTLS = None
# ConnectEx returns these. XXX: find out what it does for timeout
connectExErrors = {
ERROR_CONNECTION_REFUSED: errno.WSAECONNREFUSED,
ERROR_NETWORK_UNREACHABLE: errno.WSAENETUNREACH,
}
@implementer(IReadWriteHandle, interfaces.ITCPTransport,
interfaces.ISystemHandle)
class Connection(abstract.FileHandle, _SocketCloser, _AbortingMixin):
"""
@ivar TLS: C{False} to indicate the connection is in normal TCP mode,
C{True} to indicate that TLS has been started and that operations must
be routed through the L{TLSMemoryBIOProtocol} instance.
"""
TLS = False
def __init__(self, sock, proto, reactor=None):
abstract.FileHandle.__init__(self, reactor)
self.socket = sock
self.getFileHandle = sock.fileno
self.protocol = proto
def getHandle(self):
return self.socket
def dataReceived(self, rbuffer):
"""
@param rbuffer: Data received.
@type rbuffer: L{bytes} or L{bytearray}
"""
if isinstance(rbuffer, bytes):
pass
elif isinstance(rbuffer, bytearray):
# XXX: some day, we'll have protocols that can handle raw buffers
rbuffer = bytes(rbuffer)
else:
raise TypeError("data must be bytes or bytearray, not " +
type(rbuffer))
self.protocol.dataReceived(rbuffer)
def readFromHandle(self, bufflist, evt):
return _iocp.recv(self.getFileHandle(), bufflist, evt)
def writeToHandle(self, buff, evt):
"""
Send C{buff} to current file handle using C{_iocp.send}. The buffer
sent is limited to a size of C{self.SEND_LIMIT}.
"""
writeView = memoryview(buff)
return _iocp.send(self.getFileHandle(),
writeView[0:self.SEND_LIMIT].tobytes(), evt)
def _closeWriteConnection(self):
try:
self.socket.shutdown(1)
except socket.error:
pass
p = interfaces.IHalfCloseableProtocol(self.protocol, None)
if p:
try:
p.writeConnectionLost()
except:
f = failure.Failure()
log.err()
self.connectionLost(f)
def readConnectionLost(self, reason):
p = interfaces.IHalfCloseableProtocol(self.protocol, None)
if p:
try:
p.readConnectionLost()
except:
log.err()
self.connectionLost(failure.Failure())
else:
self.connectionLost(reason)
def connectionLost(self, reason):
if self.disconnected:
return
abstract.FileHandle.connectionLost(self, reason)
isClean = (reason is None or
not reason.check(error.ConnectionAborted))
self._closeSocket(isClean)
protocol = self.protocol
del self.protocol
del self.socket
del self.getFileHandle
protocol.connectionLost(reason)
def logPrefix(self):
"""
Return the prefix to log with when I own the logging thread.
"""
return self.logstr
def getTcpNoDelay(self):
return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY))
def setTcpNoDelay(self, enabled):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)
def getTcpKeepAlive(self):
return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE))
def setTcpKeepAlive(self, enabled):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
if _startTLS is not None:
def startTLS(self, contextFactory, normal=True):
"""
@see: L{ITLSTransport.startTLS}
"""
_startTLS(self, contextFactory, normal, abstract.FileHandle)
def write(self, data):
"""
Write some data, either directly to the underlying handle or, if TLS
has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
send.
@see: L{twisted.internet.interfaces.ITransport.write}
"""
if self.disconnected:
return
if self.TLS:
self.protocol.write(data)
else:
abstract.FileHandle.write(self, data)
def writeSequence(self, iovec):
"""
Write some data, either directly to the underlying handle or, if TLS
has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
send.
@see: L{twisted.internet.interfaces.ITransport.writeSequence}
"""
if self.disconnected:
return
if self.TLS:
self.protocol.writeSequence(iovec)
else:
abstract.FileHandle.writeSequence(self, iovec)
def loseConnection(self, reason=None):
"""
Close the underlying handle or, if TLS has been started, first shut it
down.
@see: L{twisted.internet.interfaces.ITransport.loseConnection}
"""
if self.TLS:
if self.connected and not self.disconnecting:
self.protocol.loseConnection()
else:
abstract.FileHandle.loseConnection(self, reason)
def registerProducer(self, producer, streaming):
"""
Register a producer.
If TLS is enabled, the TLS connection handles this.
"""
if self.TLS:
# Registering a producer before we're connected shouldn't be a
# problem. If we end up with a write(), that's already handled in
# the write() code above, and there are no other potential
# side-effects.
self.protocol.registerProducer(producer, streaming)
else:
abstract.FileHandle.registerProducer(self, producer, streaming)
def unregisterProducer(self):
"""
Unregister a producer.
If TLS is enabled, the TLS connection handles this.
"""
if self.TLS:
self.protocol.unregisterProducer()
else:
abstract.FileHandle.unregisterProducer(self)
if _startTLS is not None:
classImplements(Connection, interfaces.ITLSTransport)
class Client(_BaseBaseClient, _BaseTCPClient, Connection):
"""
@ivar _tlsClientDefault: Always C{True}, indicating that this is a client
connection, and by default when TLS is negotiated this class will act as
a TLS client.
"""
addressFamily = socket.AF_INET
socketType = socket.SOCK_STREAM
_tlsClientDefault = True
_commonConnection = Connection
def __init__(self, host, port, bindAddress, connector, reactor):
# ConnectEx documentation says socket _has_ to be bound
if bindAddress is None:
bindAddress = ('', 0)
self.reactor = reactor # createInternetSocket needs this
_BaseTCPClient.__init__(self, host, port, bindAddress, connector,
reactor)
def createInternetSocket(self):
"""
Create a socket registered with the IOCP reactor.
@see: L{_BaseTCPClient}
"""
return self.reactor.createSocket(self.addressFamily, self.socketType)
def _collectSocketDetails(self):
"""
Clean up potentially circular references to the socket and to its
C{getFileHandle} method.
@see: L{_BaseBaseClient}
"""
del self.socket, self.getFileHandle
def _stopReadingAndWriting(self):
"""
Remove the active handle from the reactor.
@see: L{_BaseBaseClient}
"""
self.reactor.removeActiveHandle(self)
def cbConnect(self, rc, data, evt):
if rc:
rc = connectExErrors.get(rc, rc)
self.failIfNotConnected(error.getConnectError((rc,
errno.errorcode.get(rc, 'Unknown error'))))
else:
self.socket.setsockopt(
socket.SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT,
struct.pack('P', self.socket.fileno()))
self.protocol = self.connector.buildProtocol(self.getPeer())
self.connected = True
logPrefix = self._getLogPrefix(self.protocol)
self.logstr = logPrefix + ",client"
if self.protocol is None:
# Factory.buildProtocol is allowed to return None. In that
# case, make up a protocol to satisfy the rest of the
# implementation; connectionLost is going to be called on
# something, for example. This is easier than adding special
# case support for a None protocol throughout the rest of the
# transport implementation.
self.protocol = Protocol()
# But dispose of the connection quickly.
self.loseConnection()
else:
self.protocol.makeConnection(self)
self.startReading()
def doConnect(self):
if not hasattr(self, "connector"):
# this happens if we connector.stopConnecting in
# factory.startedConnecting
return
assert _iocp.have_connectex
self.reactor.addActiveHandle(self)
evt = _iocp.Event(self.cbConnect, self)
rc = _iocp.connect(self.socket.fileno(), self.realAddress, evt)
if rc and rc != ERROR_IO_PENDING:
self.cbConnect(rc, 0, evt)
class Server(Connection):
"""
Serverside socket-stream connection class.
I am a serverside network connection transport; a socket which came from an
accept() on a server.
@ivar _tlsClientDefault: Always C{False}, indicating that this is a server
connection, and by default when TLS is negotiated this class will act as
a TLS server.
"""
_tlsClientDefault = False
def __init__(self, sock, protocol, clientAddr, serverAddr, sessionno, reactor):
"""
Server(sock, protocol, client, server, sessionno)
Initialize me with a socket, a protocol, a descriptor for my peer (a
tuple of host, port describing the other end of the connection), an
instance of Port, and a session number.
"""
Connection.__init__(self, sock, protocol, reactor)
self.serverAddr = serverAddr
self.clientAddr = clientAddr
self.sessionno = sessionno
logPrefix = self._getLogPrefix(self.protocol)
self.logstr = "%s,%s,%s" % (logPrefix, sessionno, self.clientAddr.host)
self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__,
self.sessionno, self.serverAddr.port)
self.connected = True
self.startReading()
def __repr__(self):
"""
A string representation of this connection.
"""
return self.repstr
def getHost(self):
"""
Returns an IPv4Address.
This indicates the server's address.
"""
return self.serverAddr
def getPeer(self):
"""
Returns an IPv4Address.
This indicates the client's address.
"""
return self.clientAddr
class Connector(TCPConnector):
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self,
self.reactor)
@implementer(interfaces.IListeningPort)
class Port(_SocketCloser, _LogOwner):
connected = False
disconnected = False
disconnecting = False
addressFamily = socket.AF_INET
socketType = socket.SOCK_STREAM
_addressType = address.IPv4Address
sessionno = 0
# Actual port number being listened on, only set to a non-None
# value when we are actually listening.
_realPortNumber = None
# A string describing the connections which will be created by this port.
# Normally this is C{"TCP"}, since this is a TCP port, but when the TLS
# implementation re-uses this class it overrides the value with C{"TLS"}.
# Only used for logging.
_type = 'TCP'
def __init__(self, port, factory, backlog=50, interface='', reactor=None):
self.port = port
self.factory = factory
self.backlog = backlog
self.interface = interface
self.reactor = reactor
if isIPv6Address(interface):
self.addressFamily = socket.AF_INET6
self._addressType = address.IPv6Address
def __repr__(self):
if self._realPortNumber is not None:
return "<%s of %s on %s>" % (self.__class__,
self.factory.__class__,
self._realPortNumber)
else:
return "<%s of %s (not listening)>" % (self.__class__,
self.factory.__class__)
def startListening(self):
try:
skt = self.reactor.createSocket(self.addressFamily,
self.socketType)
# TODO: resolve self.interface if necessary
if self.addressFamily == socket.AF_INET6:
addr = socket.getaddrinfo(self.interface, self.port)[0][4]
else:
addr = (self.interface, self.port)
skt.bind(addr)
except socket.error as le:
raise error.CannotListenError(self.interface, self.port, le)
self.addrLen = _iocp.maxAddrLen(skt.fileno())
# Make sure that if we listened on port 0, we update that to
# reflect what the OS actually assigned us.
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s" % (self._getLogPrefix(self.factory),
self._realPortNumber))
self.factory.doStart()
skt.listen(self.backlog)
self.connected = True
self.disconnected = False
self.reactor.addActiveHandle(self)
self.socket = skt
self.getFileHandle = self.socket.fileno
self.doAccept()
def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
"""
Stop accepting connections on this port.
This will shut down my socket and call self.connectionLost().
It returns a deferred which will fire successfully when the
port is actually closed.
"""
self.disconnecting = True
if self.connected:
self.deferred = defer.Deferred()
self.reactor.callLater(0, self.connectionLost, connDone)
return self.deferred
stopListening = loseConnection
def _logConnectionLostMsg(self):
"""
Log message for closing port
"""
log.msg('(%s Port %s Closed)' % (self._type, self._realPortNumber))
def connectionLost(self, reason):
"""
Cleans up the socket.
"""
self._logConnectionLostMsg()
self._realPortNumber = None
d = None
if hasattr(self, "deferred"):
d = self.deferred
del self.deferred
self.disconnected = True
self.reactor.removeActiveHandle(self)
self.connected = False
self._closeSocket(True)
del self.socket
del self.getFileHandle
try:
self.factory.doStop()
except:
self.disconnecting = False
if d is not None:
d.errback(failure.Failure())
else:
raise
else:
self.disconnecting = False
if d is not None:
d.callback(None)
def logPrefix(self):
"""
Returns the name of my class, to prefix log entries with.
"""
return reflect.qual(self.factory.__class__)
def getHost(self):
"""
Returns an IPv4Address.
This indicates the server's address.
"""
host, port = self.socket.getsockname()[:2]
return self._addressType('TCP', host, port)
def cbAccept(self, rc, data, evt):
self.handleAccept(rc, evt)
if not (self.disconnecting or self.disconnected):
self.doAccept()
def handleAccept(self, rc, evt):
if self.disconnecting or self.disconnected:
return False
# possible errors:
# (WSAEMFILE, WSAENOBUFS, WSAENFILE, WSAENOMEM, WSAECONNABORTED)
if rc:
log.msg("Could not accept new connection -- %s (%s)" %
(errno.errorcode.get(rc, 'unknown error'), rc))
return False
else:
evt.newskt.setsockopt(
socket.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
struct.pack('P', self.socket.fileno()))
family, lAddr, rAddr = _iocp.get_accept_addrs(evt.newskt.fileno(),
evt.buff)
if not _PY3:
# In _makesockaddr(), we use the Win32 API which
# gives us an address of the form: (unicode host, port).
# Only on Python 2 do we need to convert it to a
# non-unicode str.
# On Python 3, we leave it alone as unicode.
lAddr = (nativeString(lAddr[0]), lAddr[1])
rAddr = (nativeString(rAddr[0]), rAddr[1])
assert family == self.addressFamily
protocol = self.factory.buildProtocol(
self._addressType('TCP', rAddr[0], rAddr[1]))
if protocol is None:
evt.newskt.close()
else:
s = self.sessionno
self.sessionno = s+1
transport = Server(evt.newskt, protocol,
self._addressType('TCP', rAddr[0], rAddr[1]),
self._addressType('TCP', lAddr[0], lAddr[1]),
s, self.reactor)
protocol.makeConnection(transport)
return True
def doAccept(self):
evt = _iocp.Event(self.cbAccept, self)
# see AcceptEx documentation
evt.buff = buff = bytearray(2 * (self.addrLen + 16))
evt.newskt = newskt = self.reactor.createSocket(self.addressFamily,
self.socketType)
rc = _iocp.accept(self.socket.fileno(), newskt.fileno(), buff, evt)
if rc and rc != ERROR_IO_PENDING:
self.handleAccept(rc, evt)
|
gpl-2.0
|
rgfernandes/libesedb
|
setup.py
|
3
|
7773
|
#!/usr/bin/env python
#
# Script to build and install Python-bindings.
# Version: 20150725
import glob
import platform
import os
import shutil
import subprocess
import sys
from distutils import sysconfig
from distutils import util
from distutils.ccompiler import new_compiler
from distutils.command.build_ext import build_ext
from distutils.command.bdist import bdist
from distutils.command.sdist import sdist
from distutils.core import Command, Extension, setup
class custom_bdist_rpm(bdist):
"""Custom handler for the bdist_rpm command."""
def run(self):
print("'setup.py bdist_rpm' command not supported use 'rpmbuild' instead.")
sys.exit(1)
class custom_build_ext(build_ext):
"""Custom handler for the build_ext command."""
def build_extensions(self):
# TODO: move build customization here?
build_ext.build_extensions(self)
def run(self):
compiler = new_compiler(compiler=self.compiler)
if compiler.compiler_type == "msvc":
self.define = [
("UNICODE", ""),
]
else:
# Using "sh" here to make sure that configure works on mingw32
# with the standard python.org binaries.
command = "sh configure"
exit_code = subprocess.call(command, shell=True)
if exit_code != 0:
raise RuntimeError("Running: {0:s} failed.".format(command))
self.define = [
("HAVE_CONFIG_H", ""),
("LOCALEDIR", "\"/usr/share/locale\""),
]
build_ext.run(self)
class custom_sdist(sdist):
"""Custom handler for the sdist command."""
def run(self):
if self.formats != ["gztar"]:
print("'setup.py bdist_sdist' unsupported format.")
sys.exit(1)
if glob.glob("*.tar.gz"):
print("'setup.py bdist_sdist' remove existing *.tar.gz files from source directory.")
sys.exit(1)
command = "make dist"
exit_code = subprocess.call(command, shell=True)
if exit_code != 0:
raise RuntimeError("Running: {0:s} failed.".format(command))
if not os.path.exists("dist"):
os.mkdir("dist")
source_package_file = glob.glob("*.tar.gz")[0]
python_module_file = "py{0:s}".format(source_package_file[3:])
python_module_file = os.path.join("dist", python_module_file)
os.rename(source_package_file, python_module_file)
# Inform distutils what files were created.
dist_files = getattr(self.distribution, "dist_files", [])
dist_files.append(("sdist", "", python_module_file))
class ProjectInformation(object):
"""Class to define the project information."""
def __init__(self):
"""Initializes a project information object."""
super(ProjectInformation, self).__init__()
self.include_directories = []
self.library_name = None
self.library_names = []
self.library_version = None
self._ReadConfigureAc()
self._ReadMakefileAm()
@property
def dll_filename(self):
"""The DLL filename."""
return "{0:s}.dll".format(self.library_name)
@property
def module_name(self):
"""The Python module name."""
return "py{0:s}".format(self.library_name[3:])
@property
def project_description(self):
"""The project description."""
return "Python bindings module for {0:s}".format(self.library_name)
@property
def project_url(self):
"""The project URL."""
return "https://github.com/libyal/{0:s}/".format(self.library_name)
def _ReadConfigureAc(self):
"""Reads configure.ac to initialize the project information."""
file_object = open("configure.ac", "rb")
if not file_object:
raise IOError("Unable to open: configure.ac")
found_ac_init = False
found_library_name = False
for line in file_object.readlines():
line = line.strip()
if found_library_name:
self.library_version = line[1:-2]
break
elif found_ac_init:
self.library_name = line[1:-2]
found_library_name = True
elif line.startswith("AC_INIT"):
found_ac_init = True
file_object.close()
if not self.library_name or not self.library_version:
raise RuntimeError(
"Unable to find library name and version in: configure.ac")
def _ReadMakefileAm(self):
"""Reads Makefile.am to initialize the project information."""
if not self.library_name:
raise RuntimeError("Missing library name")
file_object = open("Makefile.am", "rb")
if not file_object:
raise IOError("Unable to open: Makefile.am")
found_subdirs = False
for line in file_object.readlines():
line = line.strip()
if found_subdirs:
library_name, _, _ = line.partition(" ")
self.include_directories.append(library_name)
if library_name.startswith("lib"):
self.library_names.append(library_name)
if library_name == self.library_name:
break
elif line.startswith("SUBDIRS"):
found_subdirs = True
file_object.close()
if not self.include_directories or not self.library_names:
raise RuntimeError(
"Unable to find include directories and library names in: "
"Makefile.am")
def GetPythonLibraryDirectoryPath():
"""Retrieves the Python library directory path."""
path = sysconfig.get_python_lib(True)
_, _, path = path.rpartition(sysconfig.PREFIX)
if path.startswith(os.sep):
path = path[1:]
return path
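# For example, on a typical Linux install with a sys prefix of "/usr", where
# sysconfig.get_python_lib(True) returns "/usr/lib/python2.7/site-packages",
# the rpartition() above leaves "lib/python2.7/site-packages"; the exact value
# depends on the Python version and installation layout.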
project_information = ProjectInformation()
MODULE_VERSION = project_information.library_version
if "bdist_msi" in sys.argv:
# bdist_msi does not support the library version so we add ".1"
# as a work around.
MODULE_VERSION = "{0:s}.1".format(MODULE_VERSION)
PYTHON_LIBRARY_DIRECTORY = GetPythonLibraryDirectoryPath()
SOURCES = []
# TODO: replace by detection of MSC
DEFINE_MACROS = []
if platform.system() == "Windows":
# TODO: determine how to handle third party DLLs.
for library_name in project_information.library_names:
if library_name != project_information.library_name:
definition = "HAVE_LOCAL_{0:s}".format(library_name.upper())
DEFINE_MACROS.append((definition, ""))
# Put everything inside the Python module to prevent issues with finding
# shared libraries since pip does not integrate well with the system package
# management.
for library_name in project_information.library_names:
source_files = glob.glob(os.path.join(library_name, "*.c"))
SOURCES.extend(source_files)
source_files = glob.glob(os.path.join(project_information.module_name, "*.c"))
SOURCES.extend(source_files)
# Add the LICENSE file to the distribution.
copying_file = os.path.join("COPYING")
license_file = "LICENSE.{0:s}".format(project_information.module_name)
shutil.copyfile(copying_file, license_file)
LIBRARY_DATA_FILES = [license_file]
# TODO: find a way to detect missing python.h
# e.g. on Ubuntu python-dev is not installed by python-pip
# TODO: what about description and platform in egg file
setup(
name=project_information.module_name,
url=project_information.project_url,
version=MODULE_VERSION,
description=project_information.project_description,
long_description=project_information.project_description,
author="Joachim Metz",
author_email="[email protected]",
license="GNU Lesser General Public License v3 or later (LGPLv3+)",
cmdclass={
"build_ext": custom_build_ext,
"bdist_rpm": custom_bdist_rpm,
"sdist": custom_sdist,
},
ext_modules=[
Extension(
project_information.module_name,
define_macros=DEFINE_MACROS,
include_dirs=project_information.include_directories,
libraries=[],
library_dirs=[],
sources=SOURCES,
),
],
data_files=[(PYTHON_LIBRARY_DIRECTORY, LIBRARY_DATA_FILES)],
)
os.remove(license_file)
|
lgpl-3.0
|
himleyb85/django
|
django/contrib/gis/db/backends/spatialite/operations.py
|
24
|
10581
|
"""
SQL functions reference lists:
http://www.gaia-gis.it/spatialite-3.0.0-BETA/spatialite-sql-3.0.0.html
https://web.archive.org/web/20130407175746/http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.0.0.html
http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.2.1.html
"""
import re
import sys
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.operations import DatabaseOperations
from django.utils import six
from django.utils.functional import cached_property
class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations):
name = 'spatialite'
spatialite = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
Adapter = SpatiaLiteAdapter
area = 'Area'
centroid = 'Centroid'
collect = 'Collect'
contained = 'MbrWithin'
difference = 'Difference'
distance = 'Distance'
envelope = 'Envelope'
extent = 'Extent'
geojson = 'AsGeoJSON'
gml = 'AsGML'
intersection = 'Intersection'
kml = 'AsKML'
length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
makeline = 'MakeLine'
num_geom = 'NumGeometries'
num_points = 'NumPoints'
point_on_surface = 'PointOnSurface'
scale = 'ScaleCoords'
svg = 'AsSVG'
sym_difference = 'SymDifference'
transform = 'Transform'
translate = 'ShiftCoords'
union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
unionagg = 'GUnion'
from_text = 'GeomFromText'
from_wkb = 'GeomFromWKB'
select = 'AsText(%s)'
gis_operators = {
'equals': SpatialOperator(func='Equals'),
'disjoint': SpatialOperator(func='Disjoint'),
'touches': SpatialOperator(func='Touches'),
'crosses': SpatialOperator(func='Crosses'),
'within': SpatialOperator(func='Within'),
'overlaps': SpatialOperator(func='Overlaps'),
'contains': SpatialOperator(func='Contains'),
'intersects': SpatialOperator(func='Intersects'),
'relate': SpatialOperator(func='Relate'),
# Returns true if B's bounding box completely contains A's bounding box.
'contained': SpatialOperator(func='MbrWithin'),
# Returns true if A's bounding box completely contains B's bounding box.
'bbcontains': SpatialOperator(func='MbrContains'),
# Returns true if A's bounding box overlaps B's bounding box.
'bboverlaps': SpatialOperator(func='MbrOverlaps'),
# These are implemented here as synonyms for Equals
'same_as': SpatialOperator(func='Equals'),
'exact': SpatialOperator(func='Equals'),
'distance_gt': SpatialOperator(func='Distance', op='>'),
'distance_gte': SpatialOperator(func='Distance', op='>='),
'distance_lt': SpatialOperator(func='Distance', op='<'),
'distance_lte': SpatialOperator(func='Distance', op='<='),
}
disallowed_aggregates = (aggregates.Extent3D,)
@cached_property
def function_names(self):
return {
'Length': 'ST_Length',
'Reverse': 'ST_Reverse',
'Scale': 'ScaleCoords',
'Translate': 'ST_Translate' if self.spatial_version >= (3, 1, 0) else 'ShiftCoords',
'Union': 'ST_Union',
}
@cached_property
def unsupported_functions(self):
unsupported = {'BoundingCircle', 'ForceRHR', 'MemSize'}
if self.spatial_version < (3, 1, 0):
unsupported.add('SnapToGrid')
if self.spatial_version < (4, 0, 0):
unsupported.update({'Perimeter', 'Reverse'})
elif not self.lwgeom_version():
unsupported.add('GeoHash')
return unsupported
@cached_property
def spatial_version(self):
"""Determine the version of the SpatiaLite library."""
try:
version = self.spatialite_version_tuple()[1:]
except Exception as msg:
new_msg = (
'Cannot determine the SpatiaLite version for the "%s" '
'database (error was "%s"). Was the SpatiaLite initialization '
'SQL loaded on this database?') % (self.connection.settings_dict['NAME'], msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
if version < (3, 0, 0):
raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions 3.0.0 and above.')
return version
def convert_extent(self, box, srid):
"""
Convert the polygon data received from Spatialite to min/max values.
"""
if box is None:
return None
shell = Geometry(box, srid).shell
xmin, ymin = shell[0][:2]
xmax, ymax = shell[2][:2]
return (xmin, ymin, xmax, ymax)
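# For example, assuming the usual Extent() ring order
# (xmin ymin, xmax ymin, xmax ymax, xmin ymax, xmin ymin), the box polygon
# 'POLYGON((0 0, 2 0, 2 1, 0 1, 0 0))' converts to (0.0, 0.0, 2.0, 1.0).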
def convert_geom(self, wkt, geo_field):
"""
Converts geometry WKT returned from a SpatiaLite aggregate.
"""
if wkt:
return Geometry(wkt, geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Returns None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type, **kwargs):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type. SpatiaLite only supports regular
cartesian-based queries (no spheroid/sphere calculations for point
geometries like PostGIS).
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError('SpatiaLite does not support distance queries on '
'geometry fields with a geodetic coordinate system. '
'Distance objects are not supported; use a numeric value of your '
'distance in degrees instead.')
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
Transform() and GeomFromText() function call(s).
"""
def transform_value(value, srid):
return not (value is None or value.srid == srid)
if hasattr(value, 'as_sql'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
sql, _ = compiler.compile(value)
return placeholder % sql
else:
if transform_value(value, f.srid):
# Adding Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
else:
return '%s(%%s,%s)' % (self.from_text, f.srid)
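# For example, a plain geometry value with SRID 4326 destined for a field with
# SRID 3857 produces the placeholder 'Transform(GeomFromText(%s,4326), 3857)',
# while a value already in the field's SRID produces just 'GeomFromText(%s,3857)'.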
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
Any error occurring in this method should be handled by the caller.
"""
cursor = self.connection._cursor()
try:
cursor.execute('SELECT %s' % func)
row = cursor.fetchone()
finally:
cursor.close()
return row[0]
def geos_version(self):
"Returns the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func('geos_version()')
def proj4_version(self):
"Returns the version of the PROJ.4 library used by SpatiaLite."
return self._get_spatialite_func('proj4_version()')
def lwgeom_version(self):
"""Return the version of LWGEOM library used by SpatiaLite."""
return self._get_spatialite_func('lwgeom_version()')
def spatialite_version(self):
"Returns the SpatiaLite library version as a string."
return self._get_spatialite_func('spatialite_version()')
def spatialite_version_tuple(self):
"""
Returns the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
version = self.spatialite_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse SpatiaLite version string: %s' % version)
return (version, major, minor1, minor2)
def spatial_aggregate_name(self, agg_name):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteGeometryColumns
return SpatialiteGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys
return SpatialiteSpatialRefSys
def get_db_converters(self, expression):
converters = super(SpatiaLiteOperations, self).get_db_converters(expression)
if hasattr(expression.output_field, 'geom_type'):
converters.append(self.convert_geometry)
return converters
def convert_geometry(self, value, expression, connection, context):
if value:
value = Geometry(value)
if 'transformed_srid' in context:
value.srid = context['transformed_srid']
return value
|
bsd-3-clause
|
mlc0202/vitess
|
test/queryservice_tests/cache_tests.py
|
9
|
11106
|
from vtdb import dbexceptions
from vtdb import field_types
import framework
import cache_cases1
import cache_cases2
import cases_framework
class TestWillNotBeCached(framework.TestCase):
def setUp(self):
self.env.log.reset()
def tearDown(self):
self.env.execute("drop table vtocc_nocache")
def test_nocache(self):
self.env.execute("create table vtocc_nocache(eid int, primary key (eid)) comment 'vtocc_nocache'")
self.assertContains(self.env.log.read(), "Will not be cached")
def test_nopk(self):
self.env.execute("create table vtocc_nocache(eid int)")
self.assertContains(self.env.log.read(), "Will not be cached")
def test_charcol(self):
self.env.execute("create table vtocc_nocache(eid varchar(10), primary key (eid))")
self.assertContains(self.env.log.read(), "Will not be cached")
class TestCache(framework.TestCase):
def test_num_str(self):
try:
self.env.execute("select bid, eid from vtocc_cached2 where eid = 1 and bid = 1")
except dbexceptions.DatabaseError as e:
self.assertContains(str(e), "error: type mismatch")
else:
self.fail("Did not receive exception")
def test_cache_list_arg(self):
cu = self.env.execute("select * from vtocc_cached1 where eid in ::list", {"list": field_types.List([3, 4, 32768])})
self.assertEqual(cu.rowcount, 2)
cu = self.env.execute("select * from vtocc_cached1 where eid in ::list", {"list": field_types.List([3, 4])})
self.assertEqual(cu.rowcount, 2)
cu = self.env.execute("select * from vtocc_cached1 where eid in ::list", {"list": field_types.List([3])})
self.assertEqual(cu.rowcount, 1)
with self.assertRaises(dbexceptions.DatabaseError):
cu = self.env.execute("select * from vtocc_cached1 where eid in ::list", {"list": field_types.List()})
def test_uncache(self):
try:
# Verify row cache is working
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached2"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached2"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
# disable
self.env.execute("alter table vtocc_cached2 comment 'vtocc_nocache'")
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
try:
tstart = self.env.table_stats()["vtocc_cached2"]
except KeyError:
pass
else:
self.fail("Did not receive exception")
finally:
self.env.execute("alter table vtocc_cached2 comment ''")
# Verify row cache is working again
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached2"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached2"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
def test_bad_limit(self):
try:
with self.assertRaises(dbexceptions.DatabaseError):
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo' limit :a", {"a": -1})
finally:
self.env.execute("alter table vtocc_cached2 comment ''")
def test_rename(self):
try:
# Verify row cache is working
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached2"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached2"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
# rename
self.env.execute("alter table vtocc_cached2 rename to vtocc_renamed")
try:
tstart = self.env.table_stats()["vtocc_cached2"]
except KeyError:
pass
else:
self.fail("Did not receive exception")
# Verify row cache is working
self.env.execute("select * from vtocc_renamed where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_renamed"]
self.env.execute("select * from vtocc_renamed where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_renamed"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
finally:
# alter table so there's no hash collision when renamed
self.env.execute("alter table vtocc_renamed comment 'renamed'")
self.env.execute("rename table vtocc_renamed to vtocc_cached2")
# Verify row cache is working again
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstart = self.env.table_stats()["vtocc_cached2"]
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tend = self.env.table_stats()["vtocc_cached2"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
def test_overrides(self):
tstart = self.env.table_stats()["vtocc_view"]
self.env.querylog.reset()
cu = self.env.execute("select * from vtocc_view where key2 = 1")
self.assertEqual(cu.fetchone(), (1L, 10L, 1L, 3L))
tend = self.env.table_stats()["vtocc_view"]
self.assertEqual(tstart["Misses"]+1, tend["Misses"])
log = self.env.querylog.tailer.read()
self.assertContains(log, "select * from vtocc_view where 1 != 1")
self.assertContains(log, "select key2, key1, data1, data2 from vtocc_view where key2 in (1)")
tstart = self.env.table_stats()["vtocc_view"]
cu = self.env.execute("select * from vtocc_view where key2 = 1")
self.assertEqual(cu.fetchone(), (1L, 10L, 1L, 3L))
tend = self.env.table_stats()["vtocc_view"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
tstart = self.env.table_stats()["vtocc_view"]
self.env.conn.begin()
self.env.querylog.reset()
self.env.execute("update vtocc_part1 set data1 = 2 where key2 = 1")
log = self.env.querylog.tailer.read()
self.env.conn.commit()
self.assertContains(log, "update vtocc_part1 set data1 = 2 where key2 in (1) /* _stream vtocc_part1 (key2 ) (1 ); */")
self.env.querylog.reset()
cu = self.env.execute("select * from vtocc_view where key2 = 1")
self.assertEqual(cu.fetchone(), (1L, 10L, 2L, 3L))
tend = self.env.table_stats()["vtocc_view"]
self.assertEqual(tstart["Misses"]+1, tend["Misses"])
log = self.env.querylog.tailer.read()
self.assertContains(log, "select key2, key1, data1, data2 from vtocc_view where key2 in (1)")
tstart = self.env.table_stats()["vtocc_view"]
cu = self.env.execute("select * from vtocc_view where key2 = 1")
self.assertEqual(cu.fetchone(), (1L, 10L, 2L, 3L))
tend = self.env.table_stats()["vtocc_view"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
tstart = self.env.table_stats()["vtocc_view"]
self.env.conn.begin()
self.env.execute("update vtocc_part2 set data2 = 2 where key3 = 1")
self.env.conn.commit()
self.env.querylog.reset()
cu = self.env.execute("select * from vtocc_view where key2 = 1")
self.assertEqual(cu.fetchone(), (1L, 10L, 2L, 2L))
tend = self.env.table_stats()["vtocc_view"]
self.assertEqual(tstart["Misses"]+1, tend["Misses"])
log = self.env.querylog.tailer.read()
self.assertContains(log, "select key2, key1, data1, data2 from vtocc_view where key2 in (1)")
tstart = self.env.table_stats()["vtocc_view"]
cu = self.env.execute("select * from vtocc_view where key2 = 1")
self.assertEqual(cu.fetchone(), (1L, 10L, 2L, 2L))
tend = self.env.table_stats()["vtocc_view"]
self.assertEqual(tstart["Hits"]+1, tend["Hits"])
def test_nodata(self):
# This should not fail
cu = self.env.execute("select * from vtocc_cached2 where eid = 6 and name = 'bar'")
self.assertEqual(cu.rowcount, 0)
def test_types(self):
self._verify_mismatch("select * from vtocc_cached2 where eid = 'str' and bid = 'str'")
self._verify_mismatch("select * from vtocc_cached2 where eid = :str and bid = :str", {"str": "str"})
self._verify_mismatch("select * from vtocc_cached2 where eid = 1 and bid = 1")
self._verify_mismatch("select * from vtocc_cached2 where eid = :id and bid = :id", {"id": 1})
self._verify_mismatch("select * from vtocc_cached2 where eid = 1.2 and bid = 1.2")
self._verify_mismatch("select * from vtocc_cached2 where eid = :fl and bid = :fl", {"fl": 1.2})
def test_stats(self):
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tstartHits = self._get_vars_table_stats(self.env.debug_vars()["RowcacheStats"], "vtocc_cached2", "Hits")
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
tendHits = self._get_vars_table_stats(self.env.debug_vars()["RowcacheStats"], "vtocc_cached2", "Hits")
self.assertEqual(tstartHits+1, tendHits)
tstartMisses = self._get_vars_table_stats(self.env.debug_vars()["RowcacheStats"], "vtocc_view", "Misses")
self.env.conn.begin()
self.env.execute("update vtocc_part2 set data2 = 2 where key3 = 1")
self.env.conn.commit()
self.env.execute("select * from vtocc_view where key2 = 1")
tendMisses = self._get_vars_table_stats(self.env.debug_vars()["RowcacheStats"], "vtocc_view", "Misses")
self.assertEqual(tstartMisses+1, tendMisses)
def test_spot_check(self):
vstart = self.env.debug_vars()
self.assertEqual(vstart["RowcacheSpotCheckRatio"], 0)
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
self.assertEqual(vstart["RowcacheSpotCheckCount"], self.env.debug_vars()["RowcacheSpotCheckCount"])
self.env.execute("set vt_spot_check_ratio=1")
self.assertEqual(self.env.debug_vars()["RowcacheSpotCheckRatio"], 1)
self.env.execute("select * from vtocc_cached2 where eid = 2 and bid = 'foo'")
self.assertEqual(vstart["RowcacheSpotCheckCount"]+1, self.env.debug_vars()["RowcacheSpotCheckCount"])
vstart = self.env.debug_vars()
self.env.execute("select * from vtocc_cached1 where eid in (9)")
self.assertEqual(vstart["RowcacheSpotCheckCount"], self.env.debug_vars()["RowcacheSpotCheckCount"])
self.env.execute("select * from vtocc_cached1 where eid in (9)")
self.assertEqual(vstart["RowcacheSpotCheckCount"]+1, self.env.debug_vars()["RowcacheSpotCheckCount"])
self.env.execute("set vt_spot_check_ratio=0")
self.assertEqual(self.env.debug_vars()["RowcacheSpotCheckRatio"], 0)
def _verify_mismatch(self, query, bindvars=None):
try:
self.env.execute(query, bindvars)
except dbexceptions.DatabaseError as e:
self.assertContains(str(e), "error: type mismatch")
else:
self.fail("Did not receive exception")
def test_cache1_sqls(self):
error_count = self.env.run_cases(cache_cases1.cases)
if error_count != 0:
self.fail("test_cache1_sqls errors: %d" % error_count)
def test_cache2_sqls(self):
error_count = self.env.run_cases(cache_cases2.cases)
if error_count != 0:
self.fail("test_cache2_sqls errors: %d" % error_count)
def _get_vars_table_stats(self, table_stats, table, stats):
return table_stats[table + "." + stats]
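# For example, _get_vars_table_stats(stats, "vtocc_cached2", "Hits") simply
# reads stats["vtocc_cached2.Hits"] from the flattened RowcacheStats mapping.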
|
bsd-3-clause
|
jundongl/scikit-feast
|
skfeature/example/test_SPEC.py
|
3
|
1385
|
import scipy.io
from skfeature.function.similarity_based import SPEC
from skfeature.utility import unsupervised_evaluation
def main():
# load data
mat = scipy.io.loadmat('../data/COIL20.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
# specify the second ranking function which uses all except the 1st eigenvalue
kwargs = {'style': 0}
# obtain the scores of features
score = SPEC.spec(X, **kwargs)
# sort the features in descending order according to their scores
idx = SPEC.feature_ranking(score, **kwargs)
# perform evaluation on clustering task
num_fea = 100 # number of selected features
num_cluster = 20 # number of clusters, it is usually set as the number of classes in the ground truth
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# perform kmeans clustering based on the selected features and repeats 20 times
nmi_total = 0
acc_total = 0
for i in range(0, 20):
nmi, acc = unsupervised_evaluation.evaluation(X_selected=selected_features, n_clusters=num_cluster, y=y)
nmi_total += nmi
acc_total += acc
# output the average NMI and average ACC
print 'NMI:', float(nmi_total)/20
print 'ACC:', float(acc_total)/20
if __name__ == '__main__':
main()
|
gpl-2.0
|
playm2mboy/edx-platform
|
common/test/acceptance/pages/studio/component_editor.py
|
73
|
5069
|
from bok_choy.page_object import PageObject
from selenium.webdriver.common.keys import Keys
from ..common.utils import click_css
from selenium.webdriver.support.ui import Select
class BaseComponentEditorView(PageObject):
"""
A base :class:`.PageObject` for the component and visibility editors.
This class assumes that the editor is our default editor as displayed for xmodules.
"""
BODY_SELECTOR = '.xblock-editor'
def __init__(self, browser, locator):
"""
Args:
browser (selenium.webdriver): The Selenium-controlled browser that this page is loaded in.
locator (str): The locator that identifies which xblock this :class:`.xblock-editor` relates to.
"""
super(BaseComponentEditorView, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `ComponentEditorView` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
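# For example, with a hypothetical locator of "block-v1:demo+html+1",
# self._bounded_selector('.setting-label') would yield
# '.xblock-editor[data-locator="block-v1:demo+html+1"] .setting-label'.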
def url(self):
"""
Returns None because this is not directly accessible via URL.
"""
return None
def save(self):
"""
Clicks save button.
"""
click_css(self, 'a.action-save')
def cancel(self):
"""
Clicks cancel button.
"""
click_css(self, 'a.action-cancel', require_notification=False)
class ComponentEditorView(BaseComponentEditorView):
"""
A :class:`.PageObject` representing the rendered view of a component editor.
"""
def get_setting_element(self, label):
"""
Returns the setting element with the given label (display name) within the Settings modal.
"""
settings_button = self.q(css='.edit-xblock-modal .editor-modes .settings-button')
if settings_button.is_present():
settings_button.click()
setting_labels = self.q(css=self._bounded_selector('.metadata_edit .wrapper-comp-setting .setting-label'))
for index, setting in enumerate(setting_labels):
if setting.text == label:
return self.q(css=self._bounded_selector('.metadata_edit div.wrapper-comp-setting .setting-input'))[index]
return None
def set_field_value_and_save(self, label, value):
"""
Sets the text field with given label (display name) to the specified value, and presses Save.
"""
elem = self.get_setting_element(label)
# Clear the current value, set the new one, then
# Tab to move to the next field (so change event is triggered).
elem.clear()
elem.send_keys(value)
elem.send_keys(Keys.TAB)
self.save()
def set_select_value_and_save(self, label, value):
"""
Sets the select with given label (display name) to the specified value, and presses Save.
"""
elem = self.get_setting_element(label)
select = Select(elem)
select.select_by_value(value)
self.save()
def get_selected_option_text(self, label):
"""
Returns the text of the first selected option for the select with given label (display name).
"""
elem = self.get_setting_element(label)
if elem:
select = Select(elem)
return select.first_selected_option.text
else:
return None
class ComponentVisibilityEditorView(BaseComponentEditorView):
"""
A :class:`.PageObject` representing the rendered view of a component visibility editor.
"""
OPTION_SELECTOR = '.modal-section-content .field'
@property
def all_options(self):
"""
Return all visibility options.
"""
return self.q(css=self._bounded_selector(self.OPTION_SELECTOR)).results
@property
def selected_options(self):
"""
Return all selected visibility options.
"""
results = []
for option in self.all_options:
button = option.find_element_by_css_selector('input.input')
if button.is_selected():
results.append(option)
return results
def select_option(self, label_text, save=True):
"""
Click the first option which has a label matching `label_text`.
Arguments:
label_text (str): Text of a label accompanying the input
which should be clicked.
save (boolean): Whether the "save" button should be clicked
afterwards.
Returns:
bool: Whether the label was found and clicked.
"""
for option in self.all_options:
if label_text in option.text:
option.click()
if save:
self.save()
return True
return False
|
agpl-3.0
|
basicthinker/Sexain-MemController
|
gem5-stable/src/mem/slicc/ast/CheckAllocateStatementAST.py
|
91
|
2239
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
class CheckAllocateStatementAST(StatementAST):
def __init__(self, slicc, variable):
super(StatementAST, self).__init__(slicc)
self.variable = variable
def __repr__(self):
return "[CheckAllocateStatementAst: %r]" % self.variable
def generate(self, code, return_type):
# FIXME - check the type of the variable
# Make sure the variable is valid
self.variable.var
def findResources(self, resources):
var = self.variable.var
res_count = int(resources.get(var, 0))
resources[var] = str(res_count + 1)
|
apache-2.0
|
memtoko/django
|
docs/conf.py
|
6
|
11881
|
# -*- coding: utf-8 -*-
#
# Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import sys
from os.path import abspath, dirname, join
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"ticket_role",
]
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep386ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep386ver:
return pep386ver + '.dev'
return pep386ver
release = django_release()
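# For instance, if VERSION were (1, 9, 0, 'alpha', 0) and get_version() returned
# plain '1.9', django_release() would report '1.9.dev' to flag the pre-release.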
# The "development version" of Django
django_next_version = '1.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('http://sphinx-doc.org/', None),
'six': ('http://pythonhosted.org/six/', None),
'formtools': ('http://django-formtools.readthedocs.org/en/latest/', None),
'psycopg2': ('http://initd.org/psycopg/docs/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# HTML translator class for the builder
html_translator_class = "djangodocs.DjangoHTMLTranslator"
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': ('\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}')
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('contents', 'django', 'Django Documentation', ['Django Software Foundation'], 1)
]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# -- ticket options ------------------------------------------------------------
ticket_url = 'https://code.djangoproject.com/ticket/%s'
|
bsd-3-clause
|
im-infamou5/volatility
|
volatility/timefmt.py
|
54
|
4522
|
# Volatility
#
# Authors:
# Mike Auty <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import os, time, calendar
import datetime
import volatility.conf as conf
import volatility.debug as debug
try:
import pytz
tz_pytz = True
except ImportError:
tz_pytz = False
config = conf.ConfObject()
class OffsetTzInfo(datetime.tzinfo):
"""Timezone implementation that allows offsets specified in seconds"""
def __init__(self, offset = None, *args, **kwargs):
"""Accepts offset in seconds"""
self.offset = offset
datetime.tzinfo.__init__(self, *args, **kwargs)
def set_offset(self, offset):
"""Simple setter for offset"""
self.offset = offset
def utcoffset(self, dt):
"""Returns the offset from UTC"""
if self.offset is None:
return None
return datetime.timedelta(seconds = self.offset) + self.dst(dt)
def dst(self, _dt):
"""We almost certainly can't know about DST, so we say it's always off"""
# FIXME: Maybe we can know or make guesses about DST?
return datetime.timedelta(0)
def tzname(self, _dt):
"""Return a useful timezone name"""
if self.offset is None:
return "UNKNOWN"
return ""
class UTC(datetime.tzinfo):
"""Concrete instance of the UTC timezone"""
def utcoffset(self, _dt):
"""Returns an offset from UTC of 0"""
return datetime.timedelta(0)
def dst(self, _dt):
"""Returns no daylight savings offset"""
return datetime.timedelta(0)
def tzname(self, _dt):
"""Returns the timezone name"""
return "UTC"
def display_datetime(dt, custom_tz = None):
"""Returns a string from a datetime according to the display TZ (or a custom one"""
timeformat = "%Y-%m-%d %H:%M:%S %Z%z"
if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:
if custom_tz is not None:
dt = dt.astimezone(custom_tz)
elif config.TZ is not None:
if isinstance(config.TZ, str):
secs = calendar.timegm(dt.timetuple())
os.environ['TZ'] = config.TZ
time.tzset()
# Remove the %z which appears not to work
timeformat = timeformat[:-2]
return time.strftime(timeformat, time.localtime(secs))
else:
dt = dt.astimezone(config.tz)
return ("{0:" + timeformat + "}").format(dt)
def tz_from_string(_option, _opt_str, value, parser):
"""Stores a tzinfo object from a string"""
if value is not None:
if value[0] in ['+', '-']:
# Handed a numeric offset, create an OffsetTzInfo
valarray = [value[i:i + 2] for i in range(1, len(value), 2)]
multipliers = [3600, 60]
offset = 0
for i in range(min(len(valarray), len(multipliers))):
offset += int(valarray[i]) * multipliers[i]
if value[0] == '-':
offset = -offset
timezone = OffsetTzInfo(offset = offset)
else:
# Value is a lookup, choose pytz over time.tzset
if tz_pytz:
try:
timezone = pytz.timezone(value)
except pytz.UnknownTimeZoneError:
debug.error("Unknown display timezone specified")
else:
if not hasattr(time, 'tzset'):
debug.error("This operating system doesn't support tzset, please either specify an offset (eg. +1000) or install pytz")
timezone = value
parser.values.tz = timezone
config.add_option("TZ", action = "callback", callback = tz_from_string,
cache_invalidator = False,
help = "Sets the timezone for displaying timestamps",
default = None, nargs = 1, type = str)
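# A minimal illustrative sketch (not part of the original module): the numeric
# branch of tz_from_string() above splits a string such as "+0530" into
# two-character chunks and weights them by 3600 and 60 seconds. The helper
# below is hypothetical and simply restates that arithmetic.
def _example_offset_seconds(value):
    """Return the UTC offset in seconds for strings like '+0530' or '-0800'."""
    chunks = [value[i:i + 2] for i in range(1, len(value), 2)]
    offset = sum(int(c) * m for c, m in zip(chunks, [3600, 60]))
    return -offset if value[0] == '-' else offset
# _example_offset_seconds('+0530') == 19800, so OffsetTzInfo(offset = 19800)
# renders timestamps 5 hours 30 minutes ahead of UTC.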
|
gpl-2.0
|
dcroc16/skunk_works
|
google_appengine/lib/django-1.2/django/core/handlers/modpython.py
|
71
|
9160
|
import os
from pprint import pformat
from django import http
from django.core import signals
from django.core.handlers.base import BaseHandler
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, smart_str, iri_to_uri
# NOTE: do *not* import settings (or any module which eventually imports
# settings) until after ModPythonHandler has been called; otherwise os.environ
# won't be set up correctly (with respect to settings).
class ModPythonRequest(http.HttpRequest):
def __init__(self, req):
self._req = req
# FIXME: This isn't ideal. The request URI may be encoded (it's
# non-normalized) slightly differently to the "real" SCRIPT_NAME
# and PATH_INFO values. This causes problems when we compute path_info,
# below. For now, don't use script names that will be subject to
# encoding/decoding.
self.path = force_unicode(req.uri)
root = req.get_options().get('django.root', '')
self.django_root = root
# req.path_info isn't necessarily computed correctly in all
# circumstances (it's out of mod_python's control a bit), so we use
# req.uri and some string manipulations to get the right value.
if root and req.uri.startswith(root):
self.path_info = force_unicode(req.uri[len(root):])
else:
self.path_info = self.path
if not self.path_info:
# Django prefers empty paths to be '/', rather than '', to give us
# a common start character for URL patterns. So this is a little
# naughty, but also pretty harmless.
self.path_info = u'/'
self._post_parse_error = False
def __repr__(self):
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = pformat(self.GET)
except:
get = '<could not parse>'
if self._post_parse_error:
post = '<could not parse>'
else:
try:
post = pformat(self.POST)
except:
post = '<could not parse>'
try:
cookies = pformat(self.COOKIES)
except:
cookies = '<could not parse>'
try:
meta = pformat(self.META)
except:
meta = '<could not parse>'
return smart_str(u'<ModPythonRequest\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
(self.path, unicode(get), unicode(post),
unicode(cookies), unicode(meta)))
def get_full_path(self):
# RFC 3986 requires self._req.args to be in the ASCII range, but this
# doesn't always happen, so rather than crash, we defensively encode it.
return '%s%s' % (self.path, self._req.args and ('?' + iri_to_uri(self._req.args)) or '')
def is_secure(self):
try:
return self._req.is_https()
except AttributeError:
# mod_python < 3.2.10 doesn't have req.is_https().
return self._req.subprocess_env.get('HTTPS', '').lower() in ('on', '1')
def _load_post_and_files(self):
"Populates self._post and self._files"
if self.method != 'POST':
self._post, self._files = http.QueryDict('', encoding=self._encoding), datastructures.MultiValueDict()
return
if 'content-type' in self._req.headers_in and self._req.headers_in['content-type'].startswith('multipart'):
self._raw_post_data = ''
try:
self._post, self._files = self.parse_file_upload(self.META, self._req)
except:
# See django.core.handlers.wsgi.WSGIHandler for an explanation
# of what's going on here.
self._post = http.QueryDict('')
self._files = datastructures.MultiValueDict()
self._post_parse_error = True
raise
else:
self._post, self._files = http.QueryDict(self.raw_post_data, encoding=self._encoding), datastructures.MultiValueDict()
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
self._get = http.QueryDict(self._req.args, encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self._req.headers_in.get('cookie', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
def _get_meta(self):
"Lazy loader that returns self.META dictionary"
if not hasattr(self, '_meta'):
self._meta = {
'AUTH_TYPE': self._req.ap_auth_type,
'CONTENT_LENGTH': self._req.headers_in.get('content-length', 0),
'CONTENT_TYPE': self._req.headers_in.get('content-type'),
'GATEWAY_INTERFACE': 'CGI/1.1',
'PATH_INFO': self.path_info,
'PATH_TRANSLATED': None, # Not supported
'QUERY_STRING': self._req.args,
'REMOTE_ADDR': self._req.connection.remote_ip,
'REMOTE_HOST': None, # DNS lookups not supported
'REMOTE_IDENT': self._req.connection.remote_logname,
'REMOTE_USER': self._req.user,
'REQUEST_METHOD': self._req.method,
'SCRIPT_NAME': self.django_root,
'SERVER_NAME': self._req.server.server_hostname,
'SERVER_PORT': self._req.connection.local_addr[1],
'SERVER_PROTOCOL': self._req.protocol,
'SERVER_SOFTWARE': 'mod_python'
}
for key, value in self._req.headers_in.items():
key = 'HTTP_' + key.upper().replace('-', '_')
self._meta[key] = value
return self._meta
def _get_raw_post_data(self):
try:
return self._raw_post_data
except AttributeError:
self._raw_post_data = self._req.read()
return self._raw_post_data
def _get_method(self):
return self.META['REQUEST_METHOD'].upper()
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
META = property(_get_meta)
REQUEST = property(_get_request)
raw_post_data = property(_get_raw_post_data)
method = property(_get_method)
class ModPythonHandler(BaseHandler):
request_class = ModPythonRequest
def __call__(self, req):
# mod_python fakes the environ, and thus doesn't process SetEnv. This fixes that
os.environ.update(req.subprocess_env)
        # Now that the environ is set up we can see the correct settings, so
        # imports that rely on settings will work.
from django.conf import settings
        # If middleware still needs to be set up, we can do it now that settings are available.
if self._request_middleware is None:
self.load_middleware()
set_script_prefix(req.get_options().get('django.root', ''))
signals.request_started.send(sender=self.__class__)
try:
try:
request = self.request_class(req)
except UnicodeDecodeError:
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
# Apply response middleware
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
finally:
signals.request_finished.send(sender=self.__class__)
# Convert our custom HttpResponse object back into the mod_python req.
req.content_type = response['Content-Type']
for key, value in response.items():
if key != 'content-type':
req.headers_out[str(key)] = str(value)
for c in response.cookies.values():
req.headers_out.add('Set-Cookie', c.output(header=''))
req.status = response.status_code
try:
for chunk in response:
req.write(chunk)
finally:
response.close()
return 0 # mod_python.apache.OK
def handler(req):
# mod_python hooks into this function.
return ModPythonHandler()(req)
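# Illustrative sketch (not part of the original file): Apache is pointed at
# handler() above through mod_python directives roughly like the hypothetical
# snippet below; 'django.root' feeds req.get_options() in ModPythonRequest and
# DJANGO_SETTINGS_MODULE reaches os.environ via req.subprocess_env.
#
#     <Location "/mysite">
#         SetHandler python-program
#         PythonHandler django.core.handlers.modpython
#         SetEnv DJANGO_SETTINGS_MODULE mysite.settings
#         PythonOption django.root /mysite
#     </Location>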
|
mit
|
elopio/snapcraft
|
snapcraft/plugins/jdk.py
|
13
|
1395
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import snapcraft
class JdkPlugin(snapcraft.BasePlugin):
def __init__(self, name, options, project):
super().__init__(name, options, project)
self.stage_packages.append('default-jdk')
def env(self, root):
return ['JAVA_HOME=%s/usr/lib/jvm/default-java' % root,
'PATH=%s/usr/lib/jvm/default-java/bin:'
'%s/usr/lib/jvm/default-java/jre/bin:$PATH' % (root, root)]
def snap_fileset(self):
# Cut out jdk bits (jre bits are in default-java/jre)
return (['-usr/lib/jvm/default-java/bin',
'-usr/lib/jvm/default-java/include',
'-usr/lib/jvm/default-java/lib',
'-usr/share/doc',
])
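# Illustrative sketch (not part of the original plugin): for a snap mounted at
# $SNAP, the env() method above would yield entries along these (hypothetical)
# lines, putting the bundled JDK first on the PATH:
#
#     JAVA_HOME=$SNAP/usr/lib/jvm/default-java
#     PATH=$SNAP/usr/lib/jvm/default-java/bin:$SNAP/usr/lib/jvm/default-java/jre/bin:$PATH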
|
gpl-3.0
|
pamapa/callblocker
|
usr/var/www/callblocker/python-fcgi/api.py
|
1
|
2252
|
#!/usr/bin/env python3
# callblocker - blocking unwanted calls from your home phone
# Copyright (C) 2015-2020 Patrick Ammann <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# dojo json exchange format see:
# http://dojotoolkit.org/reference-guide/1.10/dojo/data/ItemFileReadStore.html#input-data-format
import urllib.parse
import settings
import logs
def application(environ, start_response):
#print("environ='%s'\n" % environ, file=sys.stderr)
path = environ.get('PATH_INFO', '')
params = dict(urllib.parse.parse_qsl(environ.get('QUERY_STRING', '')))
#print("params='%s'\n" % params, file=sys.stderr)
if path == "/phones":
return settings.handle_phones(environ, start_response, params)
if path == "/online_credentials":
return settings.handle_online_credentials(environ, start_response, params)
if path == "/get_list":
return settings.handle_get_list(environ, start_response, params)
if path == "/get_lists":
return settings.handle_get_lists(environ, start_response, params)
if path == "/get_online_scripts":
return settings.handle_get_online_scripts(environ, start_response, params)
if path == "/callerlog":
return logs.handle_callerlog(environ, start_response, params)
if path == "/journal":
return logs.handle_journal(environ, start_response, params)
# return error
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
    return [b'Not Found']
if __name__ == '__main__':
from flipflop import WSGIServer
WSGIServer(application).run()
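# Illustrative sketch (not part of the original file): application() is a plain
# WSGI callable, so for local testing it could also be served without FastCGI,
# e.g. with the standard library (hypothetical usage; settings/logs must be
# importable):
#
#     from wsgiref.simple_server import make_server
#     make_server('127.0.0.1', 8080, application).serve_forever()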
|
gpl-2.0
|
aimas/TuniErp-8.0
|
addons/account_followup/__init__.py
|
436
|
1098
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_followup
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ogenstad/ansible
|
lib/ansible/modules/network/avi/avi_sslprofile.py
|
23
|
8496
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
    - This module is used to configure the SSLProfile object.
    - More examples at U(https://github.com/avinetworks/devops).
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
accepted_ciphers:
description:
            - Cipher suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Enum options - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256, tls_ecdhe_ecdsa_with_aes_256_gcm_sha384, tls_ecdhe_rsa_with_aes_128_gcm_sha256,
- tls_ecdhe_rsa_with_aes_256_gcm_sha384, tls_ecdhe_ecdsa_with_aes_128_cbc_sha256, tls_ecdhe_ecdsa_with_aes_256_cbc_sha384,
- tls_ecdhe_rsa_with_aes_128_cbc_sha256, tls_ecdhe_rsa_with_aes_256_cbc_sha384, tls_rsa_with_aes_128_gcm_sha256, tls_rsa_with_aes_256_gcm_sha384,
- tls_rsa_with_aes_128_cbc_sha256, tls_rsa_with_aes_256_cbc_sha256, tls_ecdhe_ecdsa_with_aes_128_cbc_sha, tls_ecdhe_ecdsa_with_aes_256_cbc_sha,
- tls_ecdhe_rsa_with_aes_128_cbc_sha, tls_ecdhe_rsa_with_aes_256_cbc_sha, tls_rsa_with_aes_128_cbc_sha, tls_rsa_with_aes_256_cbc_sha,
- tls_rsa_with_3des_ede_cbc_sha, tls_rsa_with_rc4_128_sha.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
- Units(SEC).
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Ssl profile type.
- Enum options - SSL_PROFILE_TYPE_APPLICATION, SSL_PROFILE_TYPE_SYSTEM.
- Field introduced in 17.2.8.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_PROFILE_TYPE_APPLICATION.
version_added: "2.6"
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
"""
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
accepted_ciphers=dict(type='str',),
accepted_versions=dict(type='list',),
cipher_enums=dict(type='list',),
description=dict(type='str',),
dhparam=dict(type='str',),
enable_ssl_session_reuse=dict(type='bool',),
name=dict(type='str', required=True),
prefer_client_cipher_ordering=dict(type='bool',),
send_close_notify=dict(type='bool',),
ssl_rating=dict(type='dict',),
ssl_session_timeout=dict(type='int',),
tags=dict(type='list',),
tenant_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslprofile',
set([]))
if __name__ == '__main__':
main()
|
gpl-3.0
|
liorvh/golismero
|
thirdparty_libs/yaml/resolver.py
|
474
|
8972
|
__all__ = ['BaseResolver', 'Resolver']
from error import *
from nodes import *
import re
class ResolverError(YAMLError):
pass
class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
add_implicit_resolver = classmethod(add_implicit_resolver)
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
        # `path` is a pattern that is matched against the path from the
        # root to the node that is being considered. The `path` elements are
        # tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
        # a mapping value that corresponds to a scalar key whose content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, basestring) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (basestring, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
add_path_resolver = classmethod(add_path_resolver)
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
current_node, current_index):
node_check, index_check = path[depth-1]
if isinstance(node_check, basestring):
if current_node.tag != node_check:
return
elif node_check is not None:
if not isinstance(current_node, node_check):
return
if index_check is True and current_index is not None:
return
if (index_check is False or index_check is None) \
and current_index is None:
return
if isinstance(index_check, basestring):
if not (isinstance(current_index, ScalarNode)
and index_check == current_index.value):
return
elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return
return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == u'':
resolvers = self.yaml_implicit_resolvers.get(u'', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
pass
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:bool',
re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
|\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:int',
re.compile(ur'''^(?:[-+]?0b[0-1_]+
|[-+]?0[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list(u'-+0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:merge',
re.compile(ur'^(?:<<)$'),
[u'<'])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:null',
re.compile(ur'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u''])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:timestamp',
re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
(?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:value',
re.compile(ur'^(?:=)$'),
[u'='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:yaml',
re.compile(ur'^(?:!|&|\*)$'),
list(u'!&*'))
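# Illustrative sketch (not part of the original file), following the comment in
# BaseResolver.add_path_resolver() above: a hypothetical subclass that resolves
# the value of any top-level 'version' key as a plain string scalar.
class _ExamplePathResolver(Resolver):
    pass
_ExamplePathResolver.add_path_resolver(
    u'tag:yaml.org,2002:str',        # tag applied when the path matches
    [(MappingNode, u'version')],     # parent is a mapping whose key content is 'version'
    kind=ScalarNode)                 # only scalar values are affected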
|
gpl-2.0
|
Sorsly/subtle
|
google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/emr/test_emr_responses.py
|
98
|
17266
|
# Copyright (c) 2010 Jeremy Thurgood <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# NOTE: These tests only cover the very simple cases I needed to test
# for the InstanceGroup fix.
import xml.sax
from boto import handler
from boto.emr import emrobject
from boto.resultset import ResultSet
from tests.compat import unittest
JOB_FLOW_EXAMPLE = b"""
<DescribeJobFlowsResponse
xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-01-15">
<DescribeJobFlowsResult>
<JobFlows>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime>
<StartDateTime>2009-01-28T21:49:16Z</StartDateTime>
<State>STARTING</State>
</ExecutionStatusDetail>
<BootstrapActions>
<member>
<BootstrapActionConfig>
<ScriptBootstrapAction>
<Args/>
<Path>s3://elasticmapreduce/libs/hue/install-hue</Path>
</ScriptBootstrapAction>
<Name>Install Hue</Name>
</BootstrapActionConfig>
</member>
</BootstrapActions>
<VisibleToAllUsers>true</VisibleToAllUsers>
<SupportedProducts>
<member>Hue</member>
</SupportedProducts>
<Name>MyJobFlowName</Name>
<LogUri>mybucket/subdir/</LogUri>
<Steps>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime>
<State>PENDING</State>
</ExecutionStatusDetail>
<StepConfig>
<HadoopJarStep>
<Jar>MyJarFile</Jar>
<MainClass>MyMailClass</MainClass>
<Args>
<member>arg1</member>
<member>arg2</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>MyStepName</Name>
<ActionOnFailure>CONTINUE</ActionOnFailure>
</StepConfig>
</member>
</Steps>
<JobFlowId>j-3UN6WX5RRO2AG</JobFlowId>
<Instances>
<Placement>
<AvailabilityZone>us-east-1a</AvailabilityZone>
</Placement>
<SlaveInstanceType>m1.small</SlaveInstanceType>
<MasterInstanceType>m1.small</MasterInstanceType>
<Ec2KeyName>myec2keyname</Ec2KeyName>
<InstanceCount>4</InstanceCount>
<KeepJobFlowAliveWhenNoSteps>true</KeepJobFlowAliveWhenNoSteps>
</Instances>
</member>
</JobFlows>
</DescribeJobFlowsResult>
<ResponseMetadata>
<RequestId>9cea3229-ed85-11dd-9877-6fad448a8419</RequestId>
</ResponseMetadata>
</DescribeJobFlowsResponse>
"""
JOB_FLOW_COMPLETED = b"""
<DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
<DescribeJobFlowsResult>
<JobFlows>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<LastStateChangeReason>Steps completed</LastStateChangeReason>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
</ExecutionStatusDetail>
<BootstrapActions/>
<Name>RealJobFlowName</Name>
<LogUri>s3n://example.emrtest.scripts/jobflow_logs/</LogUri>
<Steps>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>s3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar</Jar>
<Args>
<member>s3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>Setup Hadoop Debugging</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:04:22Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialMapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialReducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/20/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/19/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/18/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/17/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/16/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/15/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/14/*</member>
<member>-output</member>
<member>s3://example.emrtest.crunched/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_Initial</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:04:22Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:36:18Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.crunched/*</member>
<member>-output</member>
<member>s3://example.emrtest.step1/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step1</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:36:18Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:37:51Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.crunched/*</member>
<member>-output</member>
<member>s3://example.emrtest.step2/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step2</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:37:51Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:39:32Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.step1/*</member>
<member>-output</member>
<member>s3://example.emrtest.step3/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step3</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:39:32Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:41:22Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.step1/*</member>
<member>-output</member>
<member>s3://example.emrtest.step4/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step4</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:41:22Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:43:03Z</EndDateTime>
</ExecutionStatusDetail>
</member>
</Steps>
<JobFlowId>j-3H3Q13JPFLU22</JobFlowId>
<Instances>
<SlaveInstanceType>m1.large</SlaveInstanceType>
<MasterInstanceId>i-64c21609</MasterInstanceId>
<Placement>
<AvailabilityZone>us-east-1b</AvailabilityZone>
</Placement>
<InstanceGroups>
<member>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<InstanceRunningCount>0</InstanceRunningCount>
<StartDateTime>2010-10-21T01:02:09Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:03Z</ReadyDateTime>
<State>ENDED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
<InstanceRequestCount>1</InstanceRequestCount>
<InstanceType>m1.large</InstanceType>
<Market>ON_DEMAND</Market>
<LastStateChangeReason>Job flow terminated</LastStateChangeReason>
<InstanceRole>MASTER</InstanceRole>
<InstanceGroupId>ig-EVMHOZJ2SCO8</InstanceGroupId>
<Name>master</Name>
</member>
<member>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<InstanceRunningCount>0</InstanceRunningCount>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime>
<State>ENDED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
<InstanceRequestCount>9</InstanceRequestCount>
<InstanceType>m1.large</InstanceType>
<Market>ON_DEMAND</Market>
<LastStateChangeReason>Job flow terminated</LastStateChangeReason>
<InstanceRole>CORE</InstanceRole>
<InstanceGroupId>ig-YZHDYVITVHKB</InstanceGroupId>
<Name>slave</Name>
</member>
</InstanceGroups>
<NormalizedInstanceHours>40</NormalizedInstanceHours>
<HadoopVersion>0.20</HadoopVersion>
<MasterInstanceType>m1.large</MasterInstanceType>
<MasterPublicDnsName>ec2-184-72-153-139.compute-1.amazonaws.com</MasterPublicDnsName>
<Ec2KeyName>myubersecurekey</Ec2KeyName>
<InstanceCount>10</InstanceCount>
<KeepJobFlowAliveWhenNoSteps>false</KeepJobFlowAliveWhenNoSteps>
</Instances>
</member>
</JobFlows>
</DescribeJobFlowsResult>
<ResponseMetadata>
<RequestId>c31e701d-dcb4-11df-b5d9-337fc7fe4773</RequestId>
</ResponseMetadata>
</DescribeJobFlowsResponse>
"""
class TestEMRResponses(unittest.TestCase):
def _parse_xml(self, body, markers):
rs = ResultSet(markers)
h = handler.XmlHandler(rs, None)
xml.sax.parseString(body, h)
return rs
def _assert_fields(self, response, **fields):
for field, expected in fields.items():
actual = getattr(response, field)
self.assertEquals(expected, actual,
"Field %s: %r != %r" % (field, expected, actual))
def test_JobFlows_example(self):
[jobflow] = self._parse_xml(JOB_FLOW_EXAMPLE,
[('member', emrobject.JobFlow)])
self._assert_fields(jobflow,
creationdatetime='2009-01-28T21:49:16Z',
startdatetime='2009-01-28T21:49:16Z',
state='STARTING',
instancecount='4',
jobflowid='j-3UN6WX5RRO2AG',
loguri='mybucket/subdir/',
name='MyJobFlowName',
availabilityzone='us-east-1a',
slaveinstancetype='m1.small',
masterinstancetype='m1.small',
ec2keyname='myec2keyname',
keepjobflowalivewhennosteps='true')
def test_JobFlows_completed(self):
[jobflow] = self._parse_xml(JOB_FLOW_COMPLETED,
[('member', emrobject.JobFlow)])
self._assert_fields(jobflow,
creationdatetime='2010-10-21T01:00:25Z',
startdatetime='2010-10-21T01:03:59Z',
enddatetime='2010-10-21T01:44:18Z',
state='COMPLETED',
instancecount='10',
jobflowid='j-3H3Q13JPFLU22',
loguri='s3n://example.emrtest.scripts/jobflow_logs/',
name='RealJobFlowName',
availabilityzone='us-east-1b',
slaveinstancetype='m1.large',
masterinstancetype='m1.large',
ec2keyname='myubersecurekey',
keepjobflowalivewhennosteps='false')
self.assertEquals(6, len(jobflow.steps))
self.assertEquals(2, len(jobflow.instancegroups))
|
mit
|
aselle/tensorflow
|
tensorflow/examples/speech_commands/freeze_test.py
|
5
|
3289
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.examples.speech_commands import freeze
from tensorflow.python.platform import test
class FreezeTest(test.TestCase):
def testCreateInferenceGraphWithMfcc(self):
with self.test_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='mfcc')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(1, ops.count('Mfcc'))
def testCreateInferenceGraphWithoutMfcc(self):
with self.test_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='average')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(0, ops.count('Mfcc'))
def testFeatureBinCount(self):
with self.test_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=80,
model_architecture='conv',
preprocess='average')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(0, ops.count('Mfcc'))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
Russell-IO/ansible
|
lib/ansible/modules/commands/telnet.py
|
33
|
2576
|
# this is a virtual module that is entirely implemented server side
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: telnet
short_description: Executes a low-down and dirty telnet command
version_added: 2.4
description:
- Executes a low-down and dirty telnet command, not going through the module subsystem.
- This is mostly to be used for enabling ssh on devices that only have telnet enabled by default.
options:
command:
description:
- List of commands to be executed in the telnet session.
required: True
aliases: ['commands']
host:
description:
- The host/target on which to execute the command
required: False
default: remote_addr
user:
description:
- The user for login
required: False
default: remote_user
password:
description:
- The password for login
port:
description:
- Remote port to use
default: 23
timeout:
description:
- timeout for remote operations
default: 120
prompts:
description:
- List of prompts expected before sending next command
required: False
default: ['$']
login_prompt:
description:
- Login or username prompt to expect
required: False
default: 'login: '
password_prompt:
description:
      - Password prompt to expect
required: False
default: 'Password: '
pause:
description:
- Seconds to pause between each command issued
required: False
default: 1
notes:
- The C(environment) keyword does not work with this task
author:
- Ansible Core Team
'''
EXAMPLES = '''
- name: send configuration commands to IOS
telnet:
user: cisco
password: cisco
login_prompt: "Username: "
prompts:
- "[>|#]"
command:
- terminal length 0
- configure terminal
- hostname ios01
- name: run show commands
telnet:
user: cisco
password: cisco
login_prompt: "Username: "
prompts:
- "[>|#]"
command:
- terminal length 0
- show version
'''
RETURN = '''
output:
description: output of each command is an element in this list
type: list
returned: always
sample: [ 'success', 'success', '', 'warning .. something' ]
'''
|
gpl-3.0
|
rigid/jobpip3
|
tests/test_record.py
|
1
|
1172
|
import unittest
from jobpip3 import Record
class TestRecord(unittest.TestCase):
"""Record() tests"""
def test_dict(self):
"""a Record() mimics a dict. Test that here"""
# create a record
a = Record()
        # __setitem__
a['foo'] = 2.5
self.assertEqual(a['foo'], 2.5)
        # __getitem__
self.assertEqual(a['foo'], 2.5)
# keys
self.assertTrue('foo' in a)
def test_parse(self):
"""a Record() can be created by passing a dict. Test that here."""
# create a record
a = Record({ 'foo' : 5.0, 'bar': 'string' })
self.assertEqual(a['foo'], 5.0)
self.assertEqual(a['bar'], 'string')
def test_serialization(self):
"""a Record() can be serialized to a string. Test that here."""
# create record
a = Record({ 'foo' : 5.0 })
# serialize to string
a_string = a.dump()
# create record from that string (shortcut for Record().parse(string))
b = Record(a_string)
# serialize again
b_string = b.dump()
# compare strings
self.assertEqual(a_string, b_string)
|
gpl-3.0
|
karanvivekbhargava/robot-butler-enpm808x
|
vendor/googletest/googletest/test/gtest_test_utils.py
|
344
|
10823
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
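# Illustrative sketch (not part of the original file): GetFlag() resolves each
# flag from its default, then the SOURCE_DIR/BUILD_DIR environment variable,
# then a command-line override, in increasing order of precedence, e.g.
# (hypothetical invocation):
#
#     BUILD_DIR=/tmp/out ./some_gtest_test.py --build_dir=/tmp/override
#     GetFlag('build_dir')  # -> '/tmp/override'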
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary "%s". Please make sure to provide\n'
'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.' % path)
sys.stdout.write(message)
sys.exit(1)
return path
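# Illustrative sketch (hypothetical binary name): '.exe' is appended automatically
# on Windows and Cygwin, and the program aborts with a message if the binary is
# missing.
def _ExampleGetTestExecutablePath():
  return GetTestExecutablePath('sample_unittest')  # hypothetical test binary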
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
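# Illustrative sketch, not part of the original utilities (assumes a POSIX 'false'
# command is available): GetExitStatus() turns the raw os.system() result into the
# argument that the child passed to exit().
def _ExampleGetExitStatus():
  status = os.system('false')   # a command that calls exit(1)
  return GetExitStatus(status)  # -> 1 on Unix; on Windows the code is returned as-is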
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
      An object that represents the outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
      signal                 Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a (stdout, stderr) tuple; the first element holds
      # the child's output (stderr is merged into it when capture_stderr is set).
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
|
mit
|
plilja/project-euler
|
problem_60/prime_set.py
|
1
|
1038
|
import sys
from ast import literal_eval
import os
infile = open(os.path.join(os.path.dirname(__file__), 'prime_pairs.txt'))
PRIME_PAIRS = literal_eval(infile.read())
for key in PRIME_PAIRS.keys():
PRIME_PAIRS[key] = set(PRIME_PAIRS[key]) # literal_eval doesn't support sets, hence we convert here
infile.close()
def prime_set(wanted_size):
candidate_queue = [([], set(PRIME_PAIRS.keys()))]
best_sum, best_val = sys.maxint, None
while candidate_queue:
verified, candidates = candidate_queue.pop()
if sum(verified) >= best_sum:
continue
if len(verified) == wanted_size:
best_sum = sum(verified)
best_val = verified
continue
if len(candidates) + len(verified) < wanted_size:
continue
candidate = candidates.pop()
candidate_queue = [(verified + [candidate], PRIME_PAIRS[candidate] & candidates)] + candidate_queue
candidate_queue = [(verified, candidates)] + candidate_queue
return sorted(best_val)
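if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original solution (assumes Python 2,
    # since sys.maxint is used above, and that prime_pairs.txt sits next to this file):
    # print the best 5-element set found (Project Euler problem 60).
    print prime_set(5)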
|
apache-2.0
|
vjmac15/Lyilis
|
lib/youtube_dl/extractor/skysports (VJ Washington's conflicted copy 2017-08-29).py
|
34
|
1156
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import strip_or_none
class SkySportsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?skysports\.com/watch/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.skysports.com/watch/video/10328419/bale-its-our-time-to-shine',
'md5': '77d59166cddc8d3cb7b13e35eaf0f5ec',
'info_dict': {
'id': '10328419',
'ext': 'mp4',
'title': 'Bale: It\'s our time to shine',
'description': 'md5:e88bda94ae15f7720c5cb467e777bb6d',
},
'add_ie': ['Ooyala'],
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
return {
'_type': 'url_transparent',
'id': video_id,
'url': 'ooyala:%s' % self._search_regex(
r'data-video-id="([^"]+)"', webpage, 'ooyala id'),
'title': self._og_search_title(webpage),
'description': strip_or_none(self._og_search_description(webpage)),
'ie_key': 'Ooyala',
}
|
gpl-3.0
|
piyush0609/scipy
|
scipy/weave/tests/test_numpy_scalar_spec.py
|
91
|
4001
|
from __future__ import absolute_import, print_function
import os
import sys
import tempfile
import numpy
from numpy.testing import TestCase, assert_, run_module_suite
from scipy.weave import inline_tools, ext_tools
from scipy.weave.build_tools import msvc_exists, gcc_exists
from scipy.weave.catalog import unique_file
from scipy.weave.numpy_scalar_spec import numpy_complex_scalar_converter
from weave_test_utils import dec
def unique_mod(d,file_name):
f = os.path.basename(unique_file(d,file_name))
m = os.path.splitext(f)[0]
return m
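# Illustrative sketch (hypothetical file name): unique_mod() picks a module name
# that does not clash with files already present in the given directory.
def _example_unique_mod():
    return unique_mod(tempfile.gettempdir(), 'example_ext_module.py')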
#----------------------------------------------------------------------------
# Scalar conversion test classes
# int, float, complex
#----------------------------------------------------------------------------
class NumpyComplexScalarConverter(TestCase):
compiler = ''
def setUp(self):
self.converter = numpy_complex_scalar_converter()
@dec.slow
def test_type_match_string(self):
assert_(not self.converter.type_match('string'))
@dec.slow
def test_type_match_int(self):
assert_(not self.converter.type_match(5))
@dec.slow
def test_type_match_float(self):
assert_(not self.converter.type_match(5.))
@dec.slow
def test_type_match_complex128(self):
assert_(self.converter.type_match(numpy.complex128(5.+1j)))
@dec.slow
def test_complex_var_in(self):
mod_name = sys._getframe().f_code.co_name + self.compiler
mod_name = unique_mod(test_dir,mod_name)
mod = ext_tools.ext_module(mod_name)
a = numpy.complex(1.+1j)
code = "a=std::complex<double>(2.,2.);"
test = ext_tools.ext_function('test',code,['a'])
mod.add_function(test)
mod.compile(location=test_dir, compiler=self.compiler)
exec('from ' + mod_name + ' import test')
b = numpy.complex128(1.+1j)
test(b)
try:
b = 1.
test(b)
except TypeError:
pass
try:
b = 'abc'
test(b)
except TypeError:
pass
@dec.slow
def test_complex_return(self):
mod_name = sys._getframe().f_code.co_name + self.compiler
mod_name = unique_mod(test_dir,mod_name)
mod = ext_tools.ext_module(mod_name)
a = 1.+1j
code = """
a= a + std::complex<double>(2.,2.);
return_val = PyComplex_FromDoubles(a.real(),a.imag());
"""
test = ext_tools.ext_function('test',code,['a'])
mod.add_function(test)
mod.compile(location=test_dir, compiler=self.compiler)
exec('from ' + mod_name + ' import test')
b = 1.+1j
c = test(b)
assert_(c == 3.+3j)
@dec.slow
def test_inline(self):
a = numpy.complex128(1+1j)
result = inline_tools.inline("return_val=1.0/a;",['a'])
assert_(result == .5-.5j)
for _n in dir():
if _n[-9:] == 'Converter':
if msvc_exists():
exec("class Test%sMsvc(%s):\n compiler = 'msvc'" % (_n,_n))
else:
exec("class Test%sUnix(%s):\n compiler = ''" % (_n,_n))
if gcc_exists():
exec("class Test%sGcc(%s):\n compiler = 'gcc'" % (_n,_n))
def setup_test_location():
test_dir = tempfile.mkdtemp()
sys.path.insert(0,test_dir)
return test_dir
test_dir = setup_test_location()
def teardown_test_location():
import tempfile
test_dir = os.path.join(tempfile.gettempdir(),'test_files')
if sys.path[0] == test_dir:
sys.path = sys.path[1:]
return test_dir
if not msvc_exists():
for _n in dir():
if _n[:8] == 'TestMsvc':
exec('del '+_n)
else:
for _n in dir():
if _n[:8] == 'TestUnix':
exec('del '+_n)
if not (gcc_exists() and msvc_exists() and sys.platform == 'win32'):
for _n in dir():
if _n[:7] == 'TestGcc':
exec('del '+_n)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
weblabdeusto/weblabdeusto
|
server/src/test/unit/weblab/translator/test_stores_everything_except_for_files_translator.py
|
3
|
1892
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Jaime Irurzun <[email protected]>
#
from __future__ import print_function, unicode_literals
from weblab.translator.translators import StoresEverythingExceptForFilesTranslator
import test.unit.configuration as configuration_module
import unittest
import voodoo.configuration as ConfigurationManager
class StoresEverythingExceptForFilesTranslatorTestCase(unittest.TestCase):
def setUp(self):
self._cfg_manager = ConfigurationManager.ConfigurationManager()
self._cfg_manager.append_module(configuration_module)
self.translator = StoresEverythingExceptForFilesTranslator(None, None, self._cfg_manager)
def test(self):
self.assertEquals(
None,
self.translator.do_on_start('session_id')
)
self.assertEquals(
'command',
self.translator.do_before_send_command('session_id', 'command')
)
self.assertEquals(
'response',
self.translator.do_after_send_command('session_id', 'response')
)
self.assertEquals(
None,
self.translator.do_before_send_file('session_id', 'file')
)
self.assertEquals(
None,
self.translator.do_after_send_file('session_id', 'response')
)
self.assertEquals(
None,
self.translator.do_on_finish('session_id')
)
def suite():
return unittest.makeSuite(StoresEverythingExceptForFilesTranslatorTestCase)
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
|
srthurman/transitland-python-client
|
transitland/route.py
|
2
|
1704
|
"""Route Entity."""
import geom
import util
import errors
from entity import Entity
class Route(Entity):
"""Transitland Route Entity."""
onestop_type = 'r'
def geohash(self):
"""Return 10 characters of geohash."""
return geom.geohash_features(self.stops())
def add_tags_gtfs(self, gtfs_entity):
keys = [
'route_long_name',
'route_desc',
'route_url',
'route_color',
'route_text_color'
]
tags = gtfs_entity.data._asdict()
for key in keys:
if key in tags:
self.set_tag(key, tags[key])
self.set_tag('vehicle_type', gtfs_entity.vehicle())
# Load / dump
def json(self):
return {
'type': 'Feature',
'properties': {},
'geometry': self.geometry(),
'onestopId': self.onestop(),
'name': self.name(),
'tags': self.tags(),
'operatedBy': self.operatedBy(),
'identifiers': sorted(self.identifiers()),
'serves': sorted(self.serves()),
}
# Graph
def serves(self):
ret = set([i.onestop() for i in self.stops()])
ret |= set(self.data.get('serves', []))
return ret
def operatedBy(self):
"""Return the first operator."""
ret = set(i.onestop() for i in self.operators())
ret |= set(self.data.get('operatedBy', []))
return sorted(ret)[0]
def operators(self):
return set(self.parents) # copy
def operator(self, onestop_id):
"""Return a single operator by Onestop ID."""
return util.filtfirst(self.operators(), onestop=onestop_id)
def stops(self):
return set(self.children) # copy
def stop(self, onestop_id):
"""Return a single stop by Onestop ID."""
return util.filtfirst(self.stops(), onestop=onestop_id)
|
mit
|
meisterpeeps/grpc
|
src/python/src/grpc/_cython/adapter_low.py
|
3
|
3285
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Adapter from grpc._cython.types to the surface expected by
# grpc._adapter._intermediary_low.
#
# TODO(atash): Once this is plugged into grpc._adapter._intermediary_low, remove
# both grpc._adapter._intermediary_low and this file. The fore and rear links in
# grpc._adapter should be able to use grpc._cython.types directly.
from grpc._adapter import _types as type_interfaces
from grpc._cython import cygrpc
class ClientCredentials(object):
def __init__(self):
raise NotImplementedError()
@staticmethod
def google_default():
raise NotImplementedError()
@staticmethod
def ssl():
raise NotImplementedError()
@staticmethod
def composite():
raise NotImplementedError()
@staticmethod
def compute_engine():
raise NotImplementedError()
@staticmethod
def service_account():
raise NotImplementedError()
@staticmethod
def jwt():
raise NotImplementedError()
@staticmethod
def refresh_token():
raise NotImplementedError()
@staticmethod
def fake_transport_security():
raise NotImplementedError()
@staticmethod
def iam():
raise NotImplementedError()
class ServerCredentials(object):
def __init__(self):
raise NotImplementedError()
@staticmethod
def ssl():
raise NotImplementedError()
@staticmethod
def fake_transport_security():
raise NotImplementedError()
class CompletionQueue(type_interfaces.CompletionQueue):
def __init__(self):
raise NotImplementedError()
class Call(type_interfaces.Call):
def __init__(self):
raise NotImplementedError()
class Channel(type_interfaces.Channel):
def __init__(self):
raise NotImplementedError()
class Server(type_interfaces.Server):
def __init__(self):
raise NotImplementedError()
|
bsd-3-clause
|
gerco/p2pool
|
p2pool/test/bitcoin/test_getwork.py
|
275
|
4273
|
import unittest
from p2pool.bitcoin import getwork, data as bitcoin_data
class Test(unittest.TestCase):
def test_all(self):
cases = [
{
'target': '0000000000000000000000000000000000000000000000f2b944000000000000',
'midstate': '5982f893102dec03e374b472647c4f19b1b6d21ae4b2ac624f3d2f41b9719404',
'hash1': '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'data': '0000000163930d52a5ffca79b29b95a659a302cd4e1654194780499000002274000000002e133d9e51f45bc0886d05252038e421e82bff18b67dc14b90d9c3c2f422cd5c4dd4598e1a44b9f200000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000'
},
{
'midstate' : 'f4a9b048c0cb9791bc94b13ee0eec21e713963d524fd140b58bb754dd7b0955f',
'data' : '000000019a1d7342fb62090bda686b22d90f9f73d0f5c418b9c980cd0000011a00000000680b07c8a2f97ecd831f951806857e09f98a3b81cdef1fa71982934fef8dc3444e18585d1a0abbcf00000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1' : '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target' : '0000000000000000000000000000000000000000000000cfbb0a000000000000',
'extrathing': 'hi!',
},
{
'data' : '000000019a1d7342fb62090bda686b22d90f9f73d0f5c418b9c980cd0000011a00000000680b07c8a2f97ecd831f951806857e09f98a3b81cdef1fa71982934fef8dc3444e18585d1a0abbcf00000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1' : '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target' : '0000000000000000000000000000000000000000000000cfbb0a000000000000',
'extrathing': 'hi!',
},
]
for case in cases:
ba = getwork.BlockAttempt.from_getwork(case)
extra = dict(case)
del extra['data'], extra['hash1'], extra['target']
extra.pop('midstate', None)
getwork_check = ba.getwork(**extra)
assert getwork_check == case or dict((k, v) for k, v in getwork_check.iteritems() if k != 'midstate') == case
case2s = [
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
0x44b9f20000000000000000000000000000000000000000000000,
),
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
432*2**230,
),
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
7*2**240,
)
]
for case2 in case2s:
assert getwork.BlockAttempt.from_getwork(case2.getwork()) == case2
assert getwork.BlockAttempt.from_getwork(case2.getwork(ident='hi')) == case2
case2 = case2.update(previous_block=case2.previous_block - 10)
assert getwork.BlockAttempt.from_getwork(case2.getwork()) == case2
assert getwork.BlockAttempt.from_getwork(case2.getwork(ident='hi')) == case2
|
gpl-3.0
|
alexandrucoman/vbox-nova-driver
|
nova/tests/functional/v3/test_availability_zone.py
|
23
|
2082
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('manager', 'nova.cells.opts', group='cells')
class AvailabilityZoneJsonTest(test_servers.ServersSampleBase):
ADMIN_API = True
extension_name = "os-availability-zone"
def _setup_services(self):
self.conductor = self.start_service('conductor',
host='conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute', host='compute')
self.cert = self.start_service('cert', host='cert')
self.consoleauth = self.start_service('consoleauth',
host='consoleauth')
self.network = self.start_service('network', host='network')
self.scheduler = self.start_service('scheduler', host='scheduler')
self.cells = self.start_service('cells', host='cells',
manager=CONF.cells.manager)
def test_availability_zone_list(self):
response = self._do_get('os-availability-zone')
self._verify_response('availability-zone-list-resp', {}, response, 200)
def test_availability_zone_detail(self):
response = self._do_get('os-availability-zone/detail')
subs = self._get_regexes()
self._verify_response('availability-zone-detail-resp', subs, response,
200)
def test_availability_zone_post(self):
self._post_server()
|
apache-2.0
|
Carmezim/tensorflow
|
tensorflow/contrib/slim/python/slim/nets/inception_v2_test.py
|
111
|
11157
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim import model_analyzer
from tensorflow.contrib.slim.python.slim.nets import inception_v2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InceptionV2Test(test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
mixed_5c, end_points = inception_v2.inception_v2_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = [
'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',
'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3'
]
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = [
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c'
]
for index, endpoint in enumerate(endpoints):
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception_v2.inception_v2_base(
inputs, final_endpoint=endpoint)
self.assertTrue(
out_tensor.op.name.startswith('InceptionV2/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2_base(
inputs, final_endpoint='Mixed_5c')
endpoints_shapes = {
'Mixed_3b': [batch_size, 28, 28, 256],
'Mixed_3c': [batch_size, 28, 28, 320],
'Mixed_4a': [batch_size, 14, 14, 576],
'Mixed_4b': [batch_size, 14, 14, 576],
'Mixed_4c': [batch_size, 14, 14, 576],
'Mixed_4d': [batch_size, 14, 14, 576],
'Mixed_4e': [batch_size, 14, 14, 576],
'Mixed_5a': [batch_size, 7, 7, 1024],
'Mixed_5b': [batch_size, 7, 7, 1024],
'Mixed_5c': [batch_size, 7, 7, 1024],
'Conv2d_1a_7x7': [batch_size, 112, 112, 64],
'MaxPool_2a_3x3': [batch_size, 56, 56, 64],
'Conv2d_2b_1x1': [batch_size, 56, 56, 64],
'Conv2d_2c_3x3': [batch_size, 56, 56, 192],
'MaxPool_3a_3x3': [batch_size, 28, 28, 192]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with arg_scope(inception_v2.inception_v2_arg_scope()):
inception_v2.inception_v2_base(inputs)
total_params, _ = model_analyzer.analyze_vars(
variables_lib.get_model_variables())
self.assertAlmostEqual(10173112, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v2.inception_v2(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v2.inception_v2(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception_v2.inception_v2(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception_v2.inception_v2(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
ops.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
variables.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
logits, _ = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
images = random_ops.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = inception_v2.inception_v2(
eval_inputs, num_classes, is_training=False)
predictions = math_ops.argmax(logits, 1)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = random_ops.random_uniform(
(train_batch_size, height, width, 3))
inception_v2.inception_v2(train_inputs, num_classes)
eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception_v2.inception_v2(eval_inputs, num_classes, reuse=True)
predictions = math_ops.argmax(logits, 1)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = random_ops.random_uniform([1, 224, 224, 3])
logits, _ = inception_v2.inception_v2(
images, num_classes=num_classes, spatial_squeeze=False)
with self.test_session() as sess:
variables.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
betoesquivel/fil2014
|
build/django/build/lib.linux-x86_64-2.7/django/contrib/gis/gdal/prototypes/geom.py
|
219
|
4737
|
from ctypes import c_char_p, c_double, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import (const_string_output,
double_output, geom_output, int_output, srs_output, string_output, void_output)
### Generation routines specific to this module ###
def env_func(f, argtypes):
"For getting OGREnvelopes."
f.argtypes = argtypes
f.restype = None
f.errcheck = check_envelope
return f
def pnt_func(f):
"For accessing point information."
return double_output(f, [c_void_p, c_int])
def topology_func(f):
f.argtypes = [c_void_p, c_void_p]
f.restype = c_int
    f.errcheck = check_bool
return f
### OGR_G ctypes function prototypes ###
# GeoJSON routines.
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True, decoding='ascii')
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True, decoding='ascii')
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True, decoding='ascii')
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p], decoding='ascii')
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
|
mit
|
yongshengwang/hue
|
build/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py
|
2040
|
8935
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
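# Minimal usage sketch, not part of the backport: insertion order is preserved and
# popitem() removes entries LIFO by default.
if __name__ == '__main__':
    d = OrderedDict([('a', 1), ('b', 2)])
    d['c'] = 3
    assert d.keys() == ['a', 'b', 'c']
    assert d.popitem() == ('c', 3)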
|
apache-2.0
|
ishay2b/tensorflow
|
tensorflow/contrib/learn/python/learn/dataframe/transforms/sum.py
|
102
|
1987
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Transform` that computes the sum of two `Series`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
@series.Series.register_binary_op("__add__")
class Sum(transform.TensorFlowTransform):
"""Adds two `Series`."""
def __init__(self):
super(Sum, self).__init__()
@property
def name(self):
return "sum"
@property
def input_valency(self):
return 2
@property
def _output_names(self):
return "output",
def _apply_transform(self, input_tensors, **kwargs):
pair_sparsity = (isinstance(input_tensors[0], sparse_tensor.SparseTensor),
isinstance(input_tensors[1], sparse_tensor.SparseTensor))
if pair_sparsity == (False, False):
result = input_tensors[0] + input_tensors[1]
# note tf.sparse_add accepts the mixed cases,
# so long as at least one input is sparse.
else:
result = sparse_ops.sparse_add(input_tensors[0], input_tensors[1])
# pylint: disable=not-callable
return self.return_type(result)
|
apache-2.0
|
geier/alot
|
alot/commands/thread.py
|
1
|
46406
|
# Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
import argparse
import logging
import mailcap
import os
import re
import subprocess
import tempfile
from email.utils import getaddresses, parseaddr, formataddr
from email.message import Message
from twisted.internet.defer import inlineCallbacks
from io import BytesIO
from . import Command, registerCommand
from .globals import ExternalCommand
from .globals import FlushCommand
from .globals import ComposeCommand
from .globals import MoveCommand
from .globals import CommandCanceled
from .envelope import SendCommand
from ..completion import ContactsCompleter, PathCompleter
from ..db.utils import decode_header
from ..db.utils import encode_header
from ..db.utils import extract_headers
from ..db.utils import extract_body
from ..db.envelope import Envelope
from ..db.attachment import Attachment
from ..db.errors import DatabaseROError
from ..settings.const import settings
from ..helper import parse_mailcap_nametemplate
from ..helper import split_commandstring
from ..helper import email_as_string
from ..utils import argparse as cargparse
from ..widgets.globals import AttachmentWidget
MODE = 'thread'
def determine_sender(mail, action='reply'):
"""
Inspect a given mail to reply/forward/bounce and find the most appropriate
account to act from and construct a suitable From-Header to use.
:param mail: the email to inspect
:type mail: `email.message.Message`
:param action: intended use case: one of "reply", "forward" or "bounce"
:type action: str
"""
assert action in ['reply', 'forward', 'bounce']
# get accounts
my_accounts = settings.get_accounts()
assert my_accounts, 'no accounts set!'
# extract list of addresses to check for my address
# X-Envelope-To and Envelope-To are used to store the recipient address
# if not included in other fields
# Process the headers in order of importance: if a mail was sent with
# account X, with account Y in e.g. CC or delivered-to, make sure that
# account X is the one selected and not account Y.
candidate_headers = settings.get("reply_account_header_priority")
for candidate_header in candidate_headers:
candidate_addresses = getaddresses(mail.get_all(candidate_header, []))
logging.debug('candidate addresses: %s', candidate_addresses)
# pick the most important account that has an address in candidates
# and use that accounts realname and the address found here
for account in my_accounts:
acc_addresses = [re.escape(unicode(a)) for a in account.get_addresses()]
if account.alias_regexp is not None:
acc_addresses.append(account.alias_regexp)
for alias in acc_addresses:
regex = re.compile(
u'^' + unicode(alias) + u'$',
flags=re.IGNORECASE if not account.address.case_sensitive else 0)
for seen_name, seen_address in candidate_addresses:
if regex.match(seen_address):
logging.debug("match!: '%s' '%s'", seen_address, alias)
if settings.get(action + '_force_realname'):
realname = account.realname
else:
realname = seen_name
if settings.get(action + '_force_address'):
address = account.address
else:
address = seen_address
logging.debug('using realname: "%s"', realname)
logging.debug('using address: %s', address)
from_value = formataddr((realname, address))
return from_value, account
# revert to default account if nothing found
account = my_accounts[0]
realname = account.realname
address = account.address
logging.debug('using realname: "%s"', realname)
logging.debug('using address: %s', address)
from_value = formataddr((realname, address))
return from_value, account
@registerCommand(MODE, 'reply', arguments=[
(['--all'], {'action': 'store_true', 'help': 'reply to all'}),
(['--list'], {'action': cargparse.BooleanAction, 'default': None,
'dest': 'listreply', 'help': 'reply to list'}),
(['--spawn'], {'action': cargparse.BooleanAction, 'default': None,
'help': 'open editor in new window'})])
class ReplyCommand(Command):
"""reply to message"""
repeatable = True
def __init__(self, message=None, all=False, listreply=None, spawn=None,
**kwargs):
"""
:param message: message to reply to (defaults to selected message)
:type message: `alot.db.message.Message`
:param all: group reply; copies recipients from Bcc/Cc/To to the reply
:type all: bool
:param listreply: reply to list; autodetect if unset and enabled in
config
:type listreply: bool
:param spawn: force spawning of editor in a new terminal
:type spawn: bool
"""
self.message = message
self.groupreply = all
self.listreply = listreply
self.force_spawn = spawn
Command.__init__(self, **kwargs)
def apply(self, ui):
# get message to reply to if not given in constructor
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
# set body text
name, address = self.message.get_author()
timestamp = self.message.get_date()
qf = settings.get_hook('reply_prefix')
if qf:
quotestring = qf(name, address, timestamp, ui=ui, dbm=ui.dbman)
else:
quotestring = 'Quoting %s (%s)\n' % (name or address, timestamp)
mailcontent = quotestring
quotehook = settings.get_hook('text_quote')
if quotehook:
mailcontent += quotehook(self.message.accumulate_body())
else:
quote_prefix = settings.get('quote_prefix')
for line in self.message.accumulate_body().splitlines():
mailcontent += quote_prefix + line + '\n'
envelope = Envelope(bodytext=mailcontent)
# copy subject
subject = decode_header(mail.get('Subject', ''))
reply_subject_hook = settings.get_hook('reply_subject')
if reply_subject_hook:
subject = reply_subject_hook(subject)
else:
rsp = settings.get('reply_subject_prefix')
if not subject.lower().startswith(('re:', rsp.lower())):
subject = rsp + subject
envelope.add('Subject', subject)
# Auto-detect ML
auto_replyto_mailinglist = settings.get('auto_replyto_mailinglist')
if mail['List-Id'] and self.listreply is None:
            # mail['List-Id'] is needed to enable reply-to-list
self.listreply = auto_replyto_mailinglist
elif mail['List-Id'] and self.listreply is True:
self.listreply = True
elif self.listreply is False:
# In this case we only need the sender
self.listreply = False
# set From-header and sending account
try:
from_header, _ = determine_sender(mail, 'reply')
except AssertionError as e:
ui.notify(str(e), priority='error')
return
envelope.add('From', from_header)
# set To
sender = mail['Reply-To'] or mail['From']
my_addresses = settings.get_addresses()
sender_address = parseaddr(sender)[1]
cc = []
# check if reply is to self sent message
if sender_address in my_addresses:
recipients = mail.get_all('To', [])
emsg = 'Replying to own message, set recipients to: %s' \
% recipients
logging.debug(emsg)
else:
recipients = self.clear_my_address([], [sender])
if self.groupreply:
# make sure that our own address is not included
# if the message was self-sent, then our address is not included
MFT = mail.get_all('Mail-Followup-To', [])
followupto = self.clear_my_address(my_addresses, MFT)
if followupto and settings.get('honor_followup_to'):
logging.debug('honor followup to: %s', ', '.join(followupto))
recipients = followupto
# since Mail-Followup-To was set, ignore the Cc header
else:
if sender != mail['From']:
recipients.append(mail['From'])
# append To addresses if not replying to self sent message
if sender_address not in my_addresses:
cleared = self.clear_my_address(
my_addresses, mail.get_all('To', []))
recipients.extend(cleared)
# copy cc for group-replies
if 'Cc' in mail:
cc = self.clear_my_address(
my_addresses, mail.get_all('Cc', []))
envelope.add('Cc', decode_header(', '.join(cc)))
to = ', '.join(self.ensure_unique_address(recipients))
logging.debug('reply to: %s', to)
if self.listreply:
            # Choose the target of the reply for --list:
            # Reply-To is the standard reply target (RFC 2822, RFC 1036 2.2.1),
            # X-BeenThere is needed by SourceForge mailing lists and also winehq,
            # X-Mailing-List is also standard and is used by git-send-email.
to = mail['Reply-To'] or mail['X-BeenThere'] or mail['X-Mailing-List']
            # Some mail servers (gmail) will not resend your own mail, so you
            # have to deal with the copy in the sent folder
if to is None:
to = mail['To']
logging.debug('mail list reply to: %s', to)
# Cleaning the 'To' in this case
if envelope.get('To') is not None:
envelope.__delitem__('To')
# Finally setup the 'To' header
envelope.add('To', decode_header(to))
# if any of the recipients is a mailinglist that we are subscribed to,
# set Mail-Followup-To header so that duplicates are avoided
if settings.get('followup_to'):
# to and cc are already cleared of our own address
allrecipients = [to] + cc
lists = settings.get('mailinglists')
# check if any recipient address matches a known mailing list
if any(addr in lists for n, addr in getaddresses(allrecipients)):
followupto = ', '.join(allrecipients)
logging.debug('mail followup to: %s', followupto)
envelope.add('Mail-Followup-To', decode_header(followupto))
# set In-Reply-To header
envelope.add('In-Reply-To', '<%s>' % self.message.get_message_id())
# set References header
old_references = mail.get('References', '')
if old_references:
old_references = old_references.split()
references = old_references[-8:]
if len(old_references) > 8:
references = old_references[:1] + references
references.append('<%s>' % self.message.get_message_id())
envelope.add('References', ' '.join(references))
else:
envelope.add('References', '<%s>' % self.message.get_message_id())
# continue to compose
encrypt = mail.get_content_subtype() == 'encrypted'
ui.apply_command(ComposeCommand(envelope=envelope,
spawn=self.force_spawn,
encrypt=encrypt))
@staticmethod
def clear_my_address(my_addresses, value):
"""return recipient header without the addresses in my_addresses
:param my_addresses: a list of my email addresses (no real name part)
:type my_addresses: list(str)
:param value: a list of recipient or sender strings (with or without
real names as taken from email headers)
:type value: list(str)
        :returns: a new, potentially shortened list
:rtype: list(str)
"""
new_value = []
for name, address in getaddresses(value):
if address not in my_addresses:
new_value.append(formataddr((name, address)))
return new_value
@staticmethod
def ensure_unique_address(recipients):
"""
clean up a list of name,address pairs so that
no address appears multiple times.
"""
res = dict()
for name, address in getaddresses(recipients):
res[address] = name
urecipients = [formataddr((n, a)) for a, n in res.iteritems()]
return sorted(urecipients)
@registerCommand(MODE, 'forward', arguments=[
(['--attach'], {'action': 'store_true', 'help': 'attach original mail'}),
(['--spawn'], {'action': cargparse.BooleanAction, 'default': None,
'help': 'open editor in new window'})])
class ForwardCommand(Command):
"""forward message"""
repeatable = True
def __init__(self, message=None, attach=True, spawn=None, **kwargs):
"""
:param message: message to forward (defaults to selected message)
:type message: `alot.db.message.Message`
:param attach: attach original mail instead of inline quoting its body
:type attach: bool
:param spawn: force spawning of editor in a new terminal
:type spawn: bool
"""
self.message = message
self.inline = not attach
self.force_spawn = spawn
Command.__init__(self, **kwargs)
def apply(self, ui):
# get message to forward if not given in constructor
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
envelope = Envelope()
if self.inline: # inline mode
# set body text
name, address = self.message.get_author()
timestamp = self.message.get_date()
qf = settings.get_hook('forward_prefix')
if qf:
quote = qf(name, address, timestamp, ui=ui, dbm=ui.dbman)
else:
quote = 'Forwarded message from %s (%s):\n' % (
name or address, timestamp)
mailcontent = quote
quotehook = settings.get_hook('text_quote')
if quotehook:
mailcontent += quotehook(self.message.accumulate_body())
else:
quote_prefix = settings.get('quote_prefix')
for line in self.message.accumulate_body().splitlines():
mailcontent += quote_prefix + line + '\n'
envelope.body = mailcontent
for a in self.message.get_attachments():
envelope.attach(a)
else: # attach original mode
# attach original msg
original_mail = Message()
original_mail.set_type('message/rfc822')
original_mail['Content-Disposition'] = 'attachment'
original_mail.set_payload(email_as_string(mail))
envelope.attach(Attachment(original_mail))
# copy subject
subject = decode_header(mail.get('Subject', ''))
subject = 'Fwd: ' + subject
forward_subject_hook = settings.get_hook('forward_subject')
if forward_subject_hook:
subject = forward_subject_hook(subject)
else:
fsp = settings.get('forward_subject_prefix')
if not subject.startswith(('Fwd:', fsp)):
subject = fsp + subject
envelope.add('Subject', subject)
# set From-header and sending account
try:
from_header, _ = determine_sender(mail, 'reply')
except AssertionError as e:
ui.notify(str(e), priority='error')
return
envelope.add('From', from_header)
# continue to compose
ui.apply_command(ComposeCommand(envelope=envelope,
spawn=self.force_spawn))
@registerCommand(MODE, 'bounce')
class BounceMailCommand(Command):
"""directly re-send selected message"""
repeatable = True
def __init__(self, message=None, **kwargs):
"""
:param message: message to bounce (defaults to selected message)
:type message: `alot.db.message.Message`
"""
self.message = message
Command.__init__(self, **kwargs)
@inlineCallbacks
def apply(self, ui):
# get mail to bounce
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
# look if this makes sense: do we have any accounts set up?
my_accounts = settings.get_accounts()
if not my_accounts:
ui.notify('no accounts set', priority='error')
return
# remove "Resent-*" headers if already present
del mail['Resent-From']
del mail['Resent-To']
del mail['Resent-Cc']
del mail['Resent-Date']
del mail['Resent-Message-ID']
# set Resent-From-header and sending account
try:
resent_from_header, account = determine_sender(mail, 'bounce')
except AssertionError as e:
ui.notify(str(e), priority='error')
return
mail['Resent-From'] = resent_from_header
# set Reset-To
allbooks = not settings.get('complete_matching_abook_only')
logging.debug('allbooks: %s', allbooks)
if account is not None:
abooks = settings.get_addressbooks(order=[account],
append_remaining=allbooks)
logging.debug(abooks)
completer = ContactsCompleter(abooks)
else:
completer = None
to = yield ui.prompt('To', completer=completer,
history=ui.recipienthistory)
if to is None:
raise CommandCanceled()
mail['Resent-To'] = to.strip(' \t\n,')
logging.debug("bouncing mail")
logging.debug(mail.__class__)
ui.apply_command(SendCommand(mail=mail))
@registerCommand(MODE, 'editnew', arguments=[
(['--spawn'], {'action': cargparse.BooleanAction, 'default': None,
'help': 'open editor in new window'})])
class EditNewCommand(Command):
"""edit message in as new"""
def __init__(self, message=None, spawn=None, **kwargs):
"""
:param message: message to edit as new (defaults to selected message)
:type message: `alot.db.message.Message`
:param spawn: force spawning of editor in a new terminal
:type spawn: bool
"""
self.message = message
self.force_spawn = spawn
Command.__init__(self, **kwargs)
def apply(self, ui):
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
# copy most tags to the envelope
tags = set(self.message.get_tags())
tags.difference_update({'inbox', 'sent', 'draft', 'killed', 'replied',
'signed', 'encrypted', 'unread', 'attachment'})
tags = list(tags)
# set body text
mailcontent = self.message.accumulate_body()
envelope = Envelope(bodytext=mailcontent, tags=tags)
# copy selected headers
to_copy = ['Subject', 'From', 'To', 'Cc', 'Bcc', 'In-Reply-To',
'References']
for key in to_copy:
value = decode_header(mail.get(key, ''))
if value:
envelope.add(key, value)
# copy attachments
for b in self.message.get_attachments():
envelope.attach(b)
ui.apply_command(ComposeCommand(envelope=envelope,
spawn=self.force_spawn,
omit_signature=True))
@registerCommand(
MODE, 'fold', help='fold message(s)', forced={'visible': False},
arguments=[(['query'], {'help': 'query used to filter messages to affect',
'nargs': '*'})])
@registerCommand(
MODE, 'unfold', help='unfold message(s)', forced={'visible': True},
arguments=[(['query'], {'help': 'query used to filter messages to affect',
'nargs': '*'})])
@registerCommand(
MODE, 'togglesource', help='display message source',
forced={'raw': 'toggle'},
arguments=[(['query'], {'help': 'query used to filter messages to affect',
'nargs': '*'})])
@registerCommand(
MODE, 'toggleheaders', help='display all headers',
forced={'all_headers': 'toggle'},
arguments=[(['query'], {'help': 'query used to filter messages to affect',
'nargs': '*'})])
class ChangeDisplaymodeCommand(Command):
"""fold or unfold messages"""
repeatable = True
def __init__(self, query=None, visible=None, raw=None, all_headers=None,
**kwargs):
"""
:param query: notmuch query string used to filter messages to affect
:type query: str
:param visible: unfold if `True`, fold if `False`, ignore if `None`
:type visible: True, False, 'toggle' or None
:param raw: display raw message text.
:type raw: True, False, 'toggle' or None
:param all_headers: show all headers (only visible if not in raw mode)
:type all_headers: True, False, 'toggle' or None
"""
self.query = None
if query:
self.query = ' '.join(query)
self.visible = visible
self.raw = raw
self.all_headers = all_headers
Command.__init__(self, **kwargs)
def apply(self, ui):
tbuffer = ui.current_buffer
logging.debug('matching lines %s...', self.query)
if self.query is None:
messagetrees = [tbuffer.get_selected_messagetree()]
else:
messagetrees = tbuffer.messagetrees()
if self.query != '*':
def matches(msgt):
msg = msgt.get_message()
return msg.matches(self.query)
messagetrees = [m for m in messagetrees if matches(m)]
for mt in messagetrees:
# determine new display values for this message
if self.visible == 'toggle':
visible = mt.is_collapsed(mt.root)
else:
visible = self.visible
if self.raw == 'toggle':
tbuffer.focus_selected_message()
raw = not mt.display_source if self.raw == 'toggle' else self.raw
all_headers = not mt.display_all_headers \
if self.all_headers == 'toggle' else self.all_headers
# collapse/expand depending on new 'visible' value
if visible is False:
mt.collapse(mt.root)
elif visible is True: # could be None
mt.expand(mt.root)
tbuffer.focus_selected_message()
# set new values in messagetree obj
if raw is not None:
mt.display_source = raw
if all_headers is not None:
mt.display_all_headers = all_headers
mt.debug()
# let the messagetree reassemble itself
mt.reassemble()
# refresh the buffer (clears Tree caches etc)
tbuffer.refresh()
@registerCommand(MODE, 'pipeto', arguments=[
(['cmd'], {'help': 'shellcommand to pipe to', 'nargs': '+'}),
(['--all'], {'action': 'store_true', 'help': 'pass all messages'}),
(['--format'], {'help': 'output format', 'default': 'raw',
'choices': ['raw', 'decoded', 'id', 'filepath']}),
(['--separately'], {'action': 'store_true',
'help': 'call command once for each message'}),
(['--background'], {'action': 'store_true',
'help': 'don\'t stop the interface'}),
(['--add_tags'], {'action': 'store_true',
'help': 'add \'Tags\' header to the message'}),
(['--shell'], {'action': 'store_true',
'help': 'let the shell interpret the command'}),
(['--notify_stdout'], {'action': 'store_true',
'help': 'display cmd\'s stdout as notification'}),
(['--field_key'], {'help': 'mailcap field key for decoding',
'default': 'copiousoutput'}),
])
class PipeCommand(Command):
"""pipe message(s) to stdin of a shellcommand"""
repeatable = True
def __init__(self, cmd, all=False, separately=False, background=False,
shell=False, notify_stdout=False, format='raw',
add_tags=False, noop_msg='no command specified',
confirm_msg='', done_msg=None, field_key='copiousoutput',
**kwargs):
"""
:param cmd: shellcommand to open
:type cmd: str or list of str
:param all: pipe all, not only selected message
:type all: bool
:param separately: call command once per message
:type separately: bool
:param background: do not suspend the interface
:type background: bool
:param notify_stdout: display command\'s stdout as notification message
:type notify_stdout: bool
:param shell: let the shell interpret the command
:type shell: bool
:param format: what to pipe to the command's stdin; one of:
'raw': message content as is,
'decoded': message content, decoded quoted printable,
'id': message ids, separated by newlines,
'filepath': paths to message files on disk
:type format: str
:param add_tags: add 'Tags' header to the message
:type add_tags: bool
:param noop_msg: error notification to show if `cmd` is empty
:type noop_msg: str
:param confirm_msg: confirmation question to ask (continues directly if
unset)
:type confirm_msg: str
:param done_msg: notification message to show upon success
:type done_msg: str
:param field_key: mailcap field key for decoding
:type field_key: str
"""
Command.__init__(self, **kwargs)
if isinstance(cmd, unicode):
cmd = split_commandstring(cmd)
self.cmd = cmd
self.whole_thread = all
self.separately = separately
self.background = background
self.shell = shell
self.notify_stdout = notify_stdout
self.output_format = format
self.add_tags = add_tags
self.noop_msg = noop_msg
self.confirm_msg = confirm_msg
self.done_msg = done_msg
self.field_key = field_key
@inlineCallbacks
def apply(self, ui):
# abort if command unset
if not self.cmd:
ui.notify(self.noop_msg, priority='error')
return
# get messages to pipe
if self.whole_thread:
thread = ui.current_buffer.get_selected_thread()
if not thread:
return
to_print = thread.get_messages().keys()
else:
to_print = [ui.current_buffer.get_selected_message()]
# ask for confirmation if needed
if self.confirm_msg:
if (yield ui.choice(self.confirm_msg, select='yes',
cancel='no')) == 'no':
return
# prepare message sources
pipestrings = []
separator = '\n\n'
logging.debug('PIPETO format')
logging.debug(self.output_format)
if self.output_format == 'id':
pipestrings = [e.get_message_id() for e in to_print]
separator = '\n'
elif self.output_format == 'filepath':
pipestrings = [e.get_filename() for e in to_print]
separator = '\n'
else:
for msg in to_print:
mail = msg.get_email()
if self.add_tags:
mail['Tags'] = encode_header('Tags',
', '.join(msg.get_tags()))
if self.output_format == 'raw':
pipestrings.append(mail.as_string())
elif self.output_format == 'decoded':
headertext = extract_headers(mail)
bodytext = extract_body(mail, field_key=self.field_key)
msgtext = '%s\n\n%s' % (headertext, bodytext)
pipestrings.append(msgtext.encode('utf-8'))
if not self.separately:
pipestrings = [separator.join(pipestrings)]
if self.shell:
self.cmd = [' '.join(self.cmd)]
# do the monkey
for mail in pipestrings:
if self.background:
logging.debug('call in background: %s', self.cmd)
proc = subprocess.Popen(self.cmd,
shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate(mail)
if self.notify_stdout:
ui.notify(out)
else:
logging.debug('stop urwid screen')
ui.mainloop.screen.stop()
logging.debug('call: %s', self.cmd)
# if proc.stdout is defined later calls to communicate
# seem to be non-blocking!
proc = subprocess.Popen(self.cmd, shell=True,
stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate(mail)
logging.debug('start urwid screen')
ui.mainloop.screen.start()
if err:
ui.notify(err, priority='error')
return
# display 'done' message
if self.done_msg:
ui.notify(self.done_msg)
@registerCommand(MODE, 'remove', arguments=[
(['--all'], {'action': 'store_true', 'help': 'remove whole thread'})])
class RemoveCommand(Command):
"""remove message(s) from the index"""
repeatable = True
def __init__(self, all=False, **kwargs):
"""
:param all: remove all messages from thread, not just selected one
:type all: bool
"""
Command.__init__(self, **kwargs)
self.all = all
@inlineCallbacks
def apply(self, ui):
threadbuffer = ui.current_buffer
# get messages and notification strings
if self.all:
thread = threadbuffer.get_selected_thread()
tid = thread.get_thread_id()
messages = thread.get_messages().keys()
confirm_msg = 'remove all messages in thread?'
ok_msg = 'removed all messages in thread: %s' % tid
else:
msg = threadbuffer.get_selected_message()
messages = [msg]
confirm_msg = 'remove selected message?'
ok_msg = 'removed message: %s' % msg.get_message_id()
# ask for confirmation
if (yield ui.choice(confirm_msg, select='yes', cancel='no')) == 'no':
return
# notify callback
def callback():
threadbuffer.rebuild()
ui.notify(ok_msg)
# remove messages
for m in messages:
ui.dbman.remove_message(m, afterwards=callback)
ui.apply_command(FlushCommand())
@registerCommand(MODE, 'print', arguments=[
(['--all'], {'action': 'store_true', 'help': 'print all messages'}),
(['--raw'], {'action': 'store_true', 'help': 'pass raw mail string'}),
(['--separately'], {'action': 'store_true',
'help': 'call print command once for each message'}),
(['--add_tags'], {'action': 'store_true',
'help': 'add \'Tags\' header to the message'}),
])
class PrintCommand(PipeCommand):
"""print message(s)"""
repeatable = True
def __init__(self, all=False, separately=False, raw=False, add_tags=False,
**kwargs):
"""
:param all: print all, not only selected messages
:type all: bool
:param separately: call print command once per message
:type separately: bool
:param raw: pipe raw message string to print command
:type raw: bool
:param add_tags: add 'Tags' header to the message
:type add_tags: bool
"""
# get print command
cmd = settings.get('print_cmd') or ''
# set up notification strings
if all:
confirm_msg = 'print all messages in thread?'
ok_msg = 'printed thread using %s' % cmd
else:
confirm_msg = 'print selected message?'
ok_msg = 'printed message using %s' % cmd
# no print cmd set
noop_msg = 'no print command specified. Set "print_cmd" in the '\
'global section.'
PipeCommand.__init__(self, [cmd], all=all, separately=separately,
background=True,
shell=False,
format='raw' if raw else 'decoded',
add_tags=add_tags,
noop_msg=noop_msg, confirm_msg=confirm_msg,
done_msg=ok_msg, **kwargs)
@registerCommand(MODE, 'save', arguments=[
(['--all'], {'action': 'store_true', 'help': 'save all attachments'}),
(['path'], {'nargs': '?', 'help': 'path to save to'})])
class SaveAttachmentCommand(Command):
"""save attachment(s)"""
def __init__(self, all=False, path=None, **kwargs):
"""
:param all: save all, not only selected attachment
:type all: bool
:param path: path to write to. if `all` is set, this must be a
directory.
:type path: str
"""
Command.__init__(self, **kwargs)
self.all = all
self.path = path
@inlineCallbacks
def apply(self, ui):
pcomplete = PathCompleter()
savedir = settings.get('attachment_prefix', '~')
if self.all:
msg = ui.current_buffer.get_selected_message()
if not self.path:
self.path = yield ui.prompt('save attachments to',
text=os.path.join(savedir, ''),
completer=pcomplete)
if self.path:
if os.path.isdir(os.path.expanduser(self.path)):
for a in msg.get_attachments():
dest = a.save(self.path)
name = a.get_filename()
if name:
ui.notify('saved %s as: %s' % (name, dest))
else:
ui.notify('saved attachment as: %s' % dest)
else:
ui.notify('not a directory: %s' % self.path,
priority='error')
else:
raise CommandCanceled()
else: # save focussed attachment
focus = ui.get_deep_focus()
if isinstance(focus, AttachmentWidget):
attachment = focus.get_attachment()
filename = attachment.get_filename()
if not self.path:
msg = 'save attachment (%s) to ' % filename
initialtext = os.path.join(savedir, filename)
self.path = yield ui.prompt(msg,
completer=pcomplete,
text=initialtext)
if self.path:
try:
dest = attachment.save(self.path)
ui.notify('saved attachment as: %s' % dest)
except (IOError, OSError) as e:
ui.notify(str(e), priority='error')
else:
raise CommandCanceled()
class OpenAttachmentCommand(Command):
"""displays an attachment according to mailcap"""
def __init__(self, attachment, **kwargs):
"""
:param attachment: attachment to open
:type attachment: :class:`~alot.db.attachment.Attachment`
"""
Command.__init__(self, **kwargs)
self.attachment = attachment
def apply(self, ui):
logging.info('open attachment')
mimetype = self.attachment.get_content_type()
# returns pair of preliminary command string and entry dict containing
# more info. We only use the dict and construct the command ourselves
_, entry = settings.mailcap_find_match(mimetype)
if entry:
afterwards = None # callback, will rm tempfile if used
handler_stdin = None
tempfile_name = None
handler_raw_commandstring = entry['view']
# read parameter
part = self.attachment.get_mime_representation()
parms = tuple('='.join(p) for p in part.get_params())
# in case the mailcap-defined command contains no '%s',
# we pipe the file's content to the handling command via stdin
if '%s' in handler_raw_commandstring:
nametemplate = entry.get('nametemplate', '%s')
prefix, suffix = parse_mailcap_nametemplate(nametemplate)
fn_hook = settings.get_hook('sanitize_attachment_filename')
if fn_hook:
# get filename
filename = self.attachment.get_filename()
prefix, suffix = fn_hook(filename, prefix, suffix)
with tempfile.NamedTemporaryFile(delete=False, prefix=prefix,
suffix=suffix) as tmpfile:
tempfile_name = tmpfile.name
self.attachment.write(tmpfile)
def afterwards():
os.unlink(tempfile_name)
else:
handler_stdin = BytesIO()
self.attachment.write(handler_stdin)
# create handler command list
handler_cmd = mailcap.subst(handler_raw_commandstring, mimetype,
filename=tempfile_name, plist=parms)
handler_cmdlist = split_commandstring(handler_cmd)
# 'needsterminal' makes handler overtake the terminal
# XXX: could this be replaced with "'needsterminal' not in entry"?
overtakes = entry.get('needsterminal') is None
ui.apply_command(ExternalCommand(handler_cmdlist,
stdin=handler_stdin,
on_success=afterwards,
thread=overtakes))
else:
ui.notify('unknown mime type')
@registerCommand(
MODE, 'move', help='move focus in current buffer',
arguments=[
(['movement'],
{'nargs': argparse.REMAINDER,
'help': '''up, down, [half]page up, [half]page down, first, last, \
parent, first reply, last reply, \
next sibling, previous sibling, next, previous, \
next unfolded, previous unfolded, \
next NOTMUCH_QUERY, previous NOTMUCH_QUERY'''})])
class MoveFocusCommand(MoveCommand):
def apply(self, ui):
logging.debug(self.movement)
tbuffer = ui.current_buffer
if self.movement == 'parent':
tbuffer.focus_parent()
elif self.movement == 'first reply':
tbuffer.focus_first_reply()
elif self.movement == 'last reply':
tbuffer.focus_last_reply()
elif self.movement == 'next sibling':
tbuffer.focus_next_sibling()
elif self.movement == 'previous sibling':
tbuffer.focus_prev_sibling()
elif self.movement == 'next':
tbuffer.focus_next()
elif self.movement == 'previous':
tbuffer.focus_prev()
elif self.movement == 'next unfolded':
tbuffer.focus_next_unfolded()
elif self.movement == 'previous unfolded':
tbuffer.focus_prev_unfolded()
elif self.movement.startswith('next '):
query = self.movement[5:].strip()
tbuffer.focus_next_matching(query)
elif self.movement.startswith('previous '):
query = self.movement[9:].strip()
tbuffer.focus_prev_matching(query)
else:
MoveCommand.apply(self, ui)
# TODO add 'next matching' if threadbuffer stores the original query
# TODO: add next by date..
tbuffer.body.refresh()
@registerCommand(MODE, 'select')
class ThreadSelectCommand(Command):
"""select focussed element. The fired action depends on the focus:
- if message summary, this toggles visibility of the message,
- if attachment line, this opens the attachment"""
def apply(self, ui):
focus = ui.get_deep_focus()
if isinstance(focus, AttachmentWidget):
logging.info('open attachment')
ui.apply_command(OpenAttachmentCommand(focus.get_attachment()))
else:
ui.apply_command(ChangeDisplaymodeCommand(visible='toggle'))
@registerCommand(
MODE, 'tag', forced={'action': 'add'},
arguments=[
(['--all'], {'action': 'store_true',
'help': 'tag all messages in thread'}),
(['--no-flush'], {'action': 'store_false', 'dest': 'flush',
'help': 'postpone a writeout to the index'}),
(['tags'], {'help': 'comma separated list of tags'})],
help='add tags to message(s)',
)
@registerCommand(
MODE, 'retag', forced={'action': 'set'},
arguments=[
(['--all'], {'action': 'store_true',
'help': 'tag all messages in thread'}),
(['--no-flush'], {'action': 'store_false', 'dest': 'flush',
'help': 'postpone a writeout to the index'}),
(['tags'], {'help': 'comma separated list of tags'})],
help='set message(s) tags.',
)
@registerCommand(
MODE, 'untag', forced={'action': 'remove'},
arguments=[
(['--all'], {'action': 'store_true',
'help': 'tag all messages in thread'}),
(['--no-flush'], {'action': 'store_false', 'dest': 'flush',
'help': 'postpone a writeout to the index'}),
(['tags'], {'help': 'comma separated list of tags'})],
help='remove tags from message(s)',
)
@registerCommand(
MODE, 'toggletags', forced={'action': 'toggle'},
arguments=[
(['--all'], {'action': 'store_true',
'help': 'tag all messages in thread'}),
(['--no-flush'], {'action': 'store_false', 'dest': 'flush',
'help': 'postpone a writeout to the index'}),
(['tags'], {'help': 'comma separated list of tags'})],
help='flip presence of tags on message(s)',
)
class TagCommand(Command):
"""manipulate message tags"""
repeatable = True
def __init__(self, tags=u'', action='add', all=False, flush=True,
**kwargs):
"""
:param tags: comma separated list of tagstrings to set
:type tags: str
:param action: adds tags if 'add', removes them if 'remove', adds tags
and removes all other if 'set' or toggle individually if
'toggle'
:type action: str
:param all: tag all messages in thread
:type all: bool
:param flush: immediately write out to the index
:type flush: bool
"""
self.tagsstring = tags
self.all = all
self.action = action
self.flush = flush
Command.__init__(self, **kwargs)
def apply(self, ui):
tbuffer = ui.current_buffer
if self.all:
messagetrees = tbuffer.messagetrees()
else:
messagetrees = [tbuffer.get_selected_messagetree()]
def refresh_widgets():
for mt in messagetrees:
mt.refresh()
# put currently selected message id on a block list for the
# auto-remove-unread feature. This makes sure that explicit
# tag-unread commands for the current message are not undone on the
# next keypress (triggering the autorm again)...
mid = tbuffer.get_selected_mid()
tbuffer._auto_unread_dont_touch_mids.add(mid)
tbuffer.refresh()
tags = [t for t in self.tagsstring.split(',') if t]
try:
for mt in messagetrees:
m = mt.get_message()
if self.action == 'add':
m.add_tags(tags, afterwards=refresh_widgets)
elif self.action == 'set':
m.add_tags(tags, afterwards=refresh_widgets,
remove_rest=True)
elif self.action == 'remove':
m.remove_tags(tags, afterwards=refresh_widgets)
elif self.action == 'toggle':
to_remove = []
to_add = []
for t in tags:
if t in m.get_tags():
to_remove.append(t)
else:
to_add.append(t)
m.remove_tags(to_remove)
m.add_tags(to_add, afterwards=refresh_widgets)
except DatabaseROError:
ui.notify('index in read-only mode', priority='error')
return
# flush index
if self.flush:
ui.apply_command(FlushCommand())
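# A minimal usage sketch (not part of the original source), based on the
# argument specs registered above; the tag names are hypothetical:
#
#     :tag --all flagged
#     :untag --no-flush todo
#     :toggletags todo,later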
|
gpl-3.0
|
raxenak/borg
|
src/borg/platform/base.py
|
2
|
6272
|
import errno
import os
from borg.helpers import truncate_and_unlink
"""
platform base module
====================
Contains platform API implementations based on what Python itself provides. More specific
APIs are stubs in this module.
When functions in this module use platform APIs themselves they access the public
platform API: that way platform APIs provided by the platform-specific support module
are correctly composed into the base functionality.
"""
API_VERSION = '1.1_01'
fdatasync = getattr(os, 'fdatasync', os.fsync)
def acl_get(path, item, st, numeric_owner=False):
"""
Saves ACL Entries
If `numeric_owner` is True the user/group field is not preserved, only uid/gid
"""
def acl_set(path, item, numeric_owner=False):
"""
Restore ACL Entries
If `numeric_owner` is True the stored uid/gid is used instead
of the user/group names
"""
try:
from os import lchflags
def set_flags(path, bsd_flags, fd=None):
lchflags(path, bsd_flags)
except ImportError:
def set_flags(path, bsd_flags, fd=None):
pass
def get_flags(path, st):
"""Return BSD-style file flags for path or stat without following symlinks."""
return getattr(st, 'st_flags', 0)
def sync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
except OSError as os_error:
# Some network filesystems don't support this and fail with EINVAL.
# Other error codes (e.g. EIO) shouldn't be silenced.
if os_error.errno != errno.EINVAL:
raise
finally:
os.close(fd)
def safe_fadvise(fd, offset, len, advice):
if hasattr(os, 'posix_fadvise'):
advice = getattr(os, 'POSIX_FADV_' + advice)
try:
os.posix_fadvise(fd, offset, len, advice)
except OSError:
# usually, posix_fadvise can't fail for us, but there seem to
# be failures when running borg under docker on ARM, likely due
# to a bug outside of borg.
# also, there is a python wrapper bug, always giving errno = 0.
# https://github.com/borgbackup/borg/issues/2095
# as this call is not critical for correct function (just to
# optimize cache usage), we ignore these errors.
pass
class SyncFile:
"""
A file class that is supposed to enable write ordering (one way or another) and data durability after close().
The degree to which either is possible varies with operating system, file system and hardware.
This fallback implements a naive and slow way of doing this. On some operating systems it can't actually
guarantee any of the above, since fsync() doesn't guarantee it. Furthermore it may not be possible at all
to satisfy the above guarantees on some hardware or operating systems. In these cases we hope that the thorough
checksumming implemented catches any corrupted data due to misordered, delayed or partial writes.
Note that POSIX doesn't specify *anything* about power failures (or similar failures). A system that
routinely loses files or corrupts files on power loss is POSIX compliant.
TODO: Use F_FULLSYNC on OSX.
TODO: A Windows implementation should use CreateFile with FILE_FLAG_WRITE_THROUGH.
"""
def __init__(self, path, binary=False):
mode = 'xb' if binary else 'x'
self.fd = open(path, mode)
self.fileno = self.fd.fileno()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def write(self, data):
self.fd.write(data)
def sync(self):
"""
Synchronize file contents. Everything written prior to sync() must become durable before anything written
after sync().
"""
from .. import platform
self.fd.flush()
platform.fdatasync(self.fileno)
# tell the OS that it does not need to cache what we just wrote,
# avoids spoiling the cache for the OS and other processes.
safe_fadvise(self.fileno, 0, 0, 'DONTNEED')
def close(self):
"""sync() and close."""
from .. import platform
dirname = None
try:
dirname = os.path.dirname(self.fd.name)
self.sync()
finally:
self.fd.close()
if dirname:
platform.sync_dir(dirname)
class SaveFile:
"""
Update file contents atomically.
Must be used as a context manager (defining the scope of the transaction).
On a journaling file system the file contents are always updated
atomically and won't become corrupted, even on power failures or
crashes (for caveats see SyncFile).
"""
SUFFIX = '.tmp'
def __init__(self, path, binary=False):
self.binary = binary
self.path = path
self.tmppath = self.path + self.SUFFIX
def __enter__(self):
from .. import platform
try:
truncate_and_unlink(self.tmppath)
except FileNotFoundError:
pass
self.fd = platform.SyncFile(self.tmppath, self.binary)
return self.fd
def __exit__(self, exc_type, exc_val, exc_tb):
from .. import platform
self.fd.close()
if exc_type is not None:
truncate_and_unlink(self.tmppath)
return
os.replace(self.tmppath, self.path)
platform.sync_dir(os.path.dirname(self.path))
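# A minimal usage sketch (not part of the original module); the path is
# hypothetical. SaveFile writes to '<path>.tmp' through SyncFile and, on a
# clean exit, renames it over the target with os.replace():
#
#     with SaveFile('/tmp/borg-example.cfg') as fd:
#         fd.write('key = value\n')
#
# If the with-block raises, the temporary file is removed instead.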
def swidth(s):
"""terminal output width of string <s>
For western scripts, this is just len(s), but for cjk glyphs, 2 cells are used.
"""
return len(s)
def umount(mountpoint):
"""un-mount the FUSE filesystem mounted at <mountpoint>"""
return 0 # dummy, see also posix module
def get_process_id():
"""
Return identification tuple (hostname, pid, thread_id) for 'us'. If this is a FUSE process, then the PID will be
that of the parent, not the forked FUSE child.
"""
raise NotImplementedError
def process_alive(host, pid, thread):
"""
Check if the (host, pid, thread_id) combination corresponds to a potentially alive process.
"""
raise NotImplementedError
def local_pid_alive(pid):
"""Return whether *pid* is alive."""
raise NotImplementedError
|
bsd-3-clause
|
CorySimon/pyIAST
|
test/python_scripts/Test IAST for Langmuir case.py
|
2
|
7330
|
# coding: utf-8
# # Test pyIAST for match with competitive Langmuir model
# In the case that the pure-component isotherms $N_{i,pure}(P)$ follow the Langmuir model with the same saturation loading $M$:
#
# $N_{i,pure} = M \frac{K_iP}{1+K_iP},$
#
# The mixed gas adsorption isotherm follows the competitive Langmuir isotherm:
#
# $N_i = M \frac{K_i p_i}{1 + \sum_j K_jp_j},$
#
# where $p_i$ is the partial pressure of component $i$. Here, we generate synthetic pure-component adsorption isotherm data and confirm that pyIAST agrees with the competitive Langmuir isotherm for 3 components.
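# As a quick sanity check (not in the original notebook): with $M=1$ and $K_0=2$,
# a pure-component pressure of $P=0.5$ bar gives
# $N_{0,pure} = \frac{2 \cdot 0.5}{1 + 2 \cdot 0.5} = 0.5$, i.e. half the saturation loading.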
# In[1]:
from __future__ import absolute_import
import numpy as np
import pyiast
import pandas as pd
import matplotlib.pyplot as plt
from six.moves import range
plt.style.use('fivethirtyeight')
colors = ['b', 'g', 'r'] # for representing each component
component_names = {0: 'A', 1: 'B', 2: 'C'}
# ## Generate synthetic pure-component isotherm data, fit Langmuir models to them.
# Model parameters ($M$, $\{K_i\}$)
# In[2]:
M = 1.0
langmuirKs = [2.0, 10.0, 20.0] # K_i
# Generate data according to Langmuir model, store in list of Pandas DataFrames
# In[3]:
pressure = np.logspace(-3, np.log10(10), 20)
dfs = [
pd.DataFrame({
'P':
pressure,
'L':
M * langmuirKs[i] * pressure / (1.0 + langmuirKs[i] * pressure)
}) for i in range(3)
]
# Use pyIAST to fit Langmuir models to the data, then plot fits
# In[4]:
isotherms = [
pyiast.ModelIsotherm(
dfs[i], pressure_key='P', loading_key='L', model='Langmuir')
for i in range(3)
]
for i in range(len(isotherms)):
isotherms[i].print_params()
pyiast.plot_isotherm(isotherms[i])
# Plot synthetic data all in one plot for paper
# In[5]:
p_plot = np.logspace(-3, np.log10(11)) # for plotting
fig = plt.figure(facecolor='w')
for i in range(len(isotherms)):
plt.scatter(dfs[i]['P'], dfs[i]['L'], color=colors[i], s=50, label=None)
plt.plot(
p_plot,
M * langmuirKs[i] * p_plot / (1.0 + langmuirKs[i] * p_plot),
color=colors[i],
linewidth=2,
label=r'$N_%s(P) = \frac{%d P}{1+%dP}$' %
(component_names[i], langmuirKs[i], langmuirKs[i]))
plt.xlim([-.05 * 10, 1.05 * 10])
plt.ylim([-.05 * M, M * 1.05])
plt.xlabel('Pressure (bar)')
plt.ylabel('Gas uptake (mmol/g)')
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig(
'pure_component_Langmuir.png',
format='png',
dpi=300,
facecolor=fig.get_facecolor())
plt.show()
# ## Compare pyIAST predicted component loadings to that of competitive Langmuir
# Let us consider a ternary mixture of components 0, 1, and 2 above at a total pressure of `total_pressure` bar.
# In[6]:
total_pressure = 1.0
# We will explore gas phase composition space ($\{y_i\}$) by generating random compositions and checking that they are within the triangle. We do not want to get too close to a pure phase boundary because of numerical instability, so we keep a distance `dx` away from pure phases. We will perform `num_tests` tests.
# In[15]:
dx = 0.0001
num_tests = 100
# Generate the compositions and store in list `compositions`
# In[16]:
compositions = []
test_no = 0
while test_no < num_tests:
# generate random compositions
y1 = np.random.uniform(dx, 1.0 - dx)
y2 = np.random.uniform(dx, 1.0 - dx)
y3 = 1.0 - y2 - y1
# check that composition is within the triangle
if y3 < dx:
continue
# viable composition
compositions.append([y1, y2, y3])
# keep generating until we have num_tests
test_no += 1
# Next, we assert that pyIAST gives the same result as the competitive Langmuir isotherm for each of these compositions.
# Function to compute loading according to competitive Langmuir
# In[17]:
def competitive_langmuir_loading(partial_pressures, i):
"""
Calculate loading of component i according to competitive Langmuir
"""
return M * langmuirKs[i] * partial_pressures[i] / (
1.0 + np.dot(langmuirKs, partial_pressures))
# Function to compute loading according to pyIAST
# In[10]:
def iast_loading(partial_pressures, i):
"""
Calculate loading of component i according to IAST
partial_pressures: Array, partial pressures of each component
i: component in the mixture
"""
component_loadings = pyiast.iast(partial_pressures, isotherms)
return component_loadings[i]
# Loop over compositions, assert pyIAST agrees with competitive Langmuir for each component. If this runs, then there is agreement!
# In[14]:
for i in range(num_tests):
partial_pressure = np.array(compositions[i]) * total_pressure
# for each component...
for c in range(len(langmuirKs)):
np.testing.assert_almost_equal(
competitive_langmuir_loading(partial_pressure, c),
iast_loading(partial_pressure, c),
decimal=4)
# ### This is using a custom library to plot the phase diagrams for the paper.
# Use ternary to plot phase diagram
# https://github.com/marcharper/python-ternary
# In[19]:
import ternary
scale = 10 # resolution in triangle
axis_colors = {'l': colors[1], 'r': colors[0], 'b': colors[2]}
cmaps = ["Blues", "Greens", "Reds"]
iast_or_lang = 'lang'
for c in range(3):
if iast_or_lang == 'lang':
f = lambda p: competitive_langmuir_loading(p, c)
else:
f = lambda p: iast_loading(p, c)
# loop over component
fig, ax = plt.subplots(facecolor='w')
ax.axis("off")
figure, tax = ternary.figure(ax=ax, scale=scale)
tax.heatmapf(
f,
boundary=False,
style="hexagonal",
cmap=plt.cm.get_cmap(cmaps[c]),
vmax=M,
vmin=0.0,
cbarlabel="%s uptake (mmol/g)" % component_names[c])
tax.boundary(linewidth=2.0, color_dict=axis_colors)
tax.left_axis_label("$p_1$ (bar)", color=axis_colors['l'], offset=0.16)
tax.right_axis_label("$p_0$ (bar)", color=axis_colors['r'], offset=0.16)
tax.bottom_axis_label("$p_2$ (bar)", color=axis_colors['b'], offset=-0.06)
tax.gridlines(
color="blue",
multiple=1,
linewidth=2,
horizontal_kwargs={'color': axis_colors['b']},
left_kwargs={'color': axis_colors['l']},
right_kwargs={'color': axis_colors['r']},
alpha=0.7)  # gridline every 1 unit; 'multiple' can be a float
tax.ticks(
axis='rlb',
linewidth=1,
locations=np.arange(scale + 1),
clockwise=True,
color_dict=axis_colors,
ticks=["%.1f" % (1.0 - 1.0 * i / scale) for i in range(scale + 1)],
offset=0.03)
tax.clear_matplotlib_ticks()
tax._redraw_labels()
# if iast_or_lang == 'iast':
# tax.set_title("IAST uptake, component %d" % c, y=1.08, fontsize=14)
# if iast_or_lang == 'lang':
# tax.set_title("Competitive Langmuir uptake, component %d" % c, y=1.08, fontsize=14)
plt.tight_layout()
if iast_or_lang == 'iast':
plt.savefig(
"Tertiary_diagram_IAST_component_%d.png" % c,
format='png',
dpi=300,
facecolor=fig.get_facecolor())
if iast_or_lang == 'lang':
plt.savefig(
"Tertiary_diagram_Langmuir_component_%d.png" % c,
format='png',
dpi=300,
facecolor=fig.get_facecolor())
tax.show()
# In[ ]:
|
mit
|
stasiek/robotframework
|
src/robot/running/timeouts/jython.py
|
26
|
1627
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from java.lang import Thread, Runnable
from robot.errors import TimeoutError
class Timeout(object):
def __init__(self, timeout, error):
self._timeout = timeout
self._error = error
def execute(self, runnable):
runner = Runner(runnable)
thread = Thread(runner, name='RobotFrameworkTimeoutThread')
thread.setDaemon(True)
thread.start()
thread.join(int(self._timeout * 1000))
if thread.isAlive():
thread.stop()
raise TimeoutError(self._error)
return runner.get_result()
class Runner(Runnable):
def __init__(self, runnable):
self._runnable = runnable
self._result = None
self._error = None
def run(self):
try:
self._result = self._runnable()
except:
self._error = sys.exc_info()
def get_result(self):
if not self._error:
return self._result
raise self._error[0], self._error[1], self._error[2]
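# A minimal usage sketch (not part of the original module); it runs only under
# Jython because of the java.lang imports above:
#
#     timeout = Timeout(5.0, 'Keyword timed out after 5 seconds.')
#     result = timeout.execute(lambda: 1 + 1)  # -> 2, or raises TimeoutError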
|
apache-2.0
|
jmmartinez84/yowsup
|
yowsup/layers/protocol_contacts/protocolentities/notification_contact_remove.py
|
68
|
1295
|
from yowsup.structs import ProtocolTreeNode
from .notification_contact import ContactNotificationProtocolEntity
class RemoveContactNotificationProtocolEntity(ContactNotificationProtocolEntity):
'''
<notification offline="0" id="{{NOTIFICATION_ID}}" notify="{{NOTIFY_NAME}}" type="contacts"
t="{{TIMESTAMP}}" from="{{SENDER_JID}}">
<remove jid="{{SET_JID}}"> </remove>
</notification>
'''
def __init__(self, _id, _from, timestamp, notify, offline, contactJid):
super(RemoveContactNotificationProtocolEntity, self).__init__(_id, _from, timestamp, notify, offline)
self.setData(contactJid)
def setData(self, jid):
self.contactJid = jid
def toProtocolTreeNode(self):
node = super(RemoveContactNotificationProtocolEntity, self).toProtocolTreeNode()
removeNode = ProtocolTreeNode("remove", {"jid": self.contactJid}, None, None)
node.addChild(removeNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = ContactNotificationProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = RemoveContactNotificationProtocolEntity
removeNode = node.getChild("remove")
entity.setData(removeNode.getAttributeValue("jid"))
return entity
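# A minimal construction sketch (not part of the original module); all argument
# values are hypothetical:
#
#     entity = RemoveContactNotificationProtocolEntity(
#         "12345", "[email protected]", "1487744000", "Alice", "0",
#         "[email protected]")
#     node = entity.toProtocolTreeNode()  # <notification type="contacts"> with a <remove jid="..."/> child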
|
gpl-3.0
|
tachylyte/HydroGeoPy
|
conversion.py
|
1
|
1110
|
# Useful unit conversions
def secsToDays(s):
'''Assume s is time in seconds and a positive integer or float.
Return time in days'''
days = s / (60 * 60 * 24)
return days
def daysToSecs(d):
'''Assume d is time in days and a positive integer or float.
Return time in seconds'''
secs = (60 * 60 * 24) * d
return secs
def dayToYear(d):
'''Assume d is time in days and a positive integer or float.
Return time in years'''
year = d / 365.25
return year
def yearToDay(y):
'''Assume y is time in years and a positive integer or float.
Return time in days'''
day = y * 365.25
return day
def mPerS(mPerD):
'''Assume mPerD is a velocity in metres per day and a positive integer or float.
Return the velocity in metres per second'''
mPerS = mPerD / (60 * 60 * 24)
return mPerS
def mPerD(mPerS):
'''Assume mPerS is a velocity in metres per second and a positive integer or float.
Return the velocity in metres per day'''
mPerD = (60 * 60 * 24) * mPerS
return mPerD
def kgPerM3(mgl): # Convert mg/l to kg/m^3
kgPerM3 = mgl /1000
return kgPerM3
def mgPerL(kgPerM3): # Convert kg/m^3 to g/m^3 (same as mg/l)
mgl = 1000 * kgPerM3
return mgl
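# A minimal usage sketch (not part of the original module):
#
#     daysToSecs(1)        # -> 86400
#     secsToDays(86400.0)  # -> 1.0
#     yearToDay(2)         # -> 730.5
#     kgPerM3(1000.0)      # -> 1.0  (1000 mg/l is 1 kg/m^3)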
|
bsd-2-clause
|
mhbu50/frappe
|
frappe/desk/doctype/desktop_icon/test_desktop_icon.py
|
10
|
2904
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.desk.doctype.desktop_icon.desktop_icon import (get_desktop_icons, add_user_icon,
set_hidden_list, set_order, clear_desktop_icons_cache)
# test_records = frappe.get_test_records('Desktop Icon')
class TestDesktopIcon(unittest.TestCase):
def setUp(self):
frappe.set_user('[email protected]')
frappe.db.sql('delete from `tabDesktop Icon` where standard=0')
frappe.db.sql('delete from `tabBlock Module`')
frappe.db.sql('update `tabDesktop Icon` set hidden=0, blocked=0')
def tearDown(self):
frappe.set_user('Administrator')
def get_icon(self, module_name):
for i in get_desktop_icons():
if i.module_name == module_name:
return i
return None
def test_get_standard_desktop_icon_for_user(self):
self.assertEqual(self.get_icon('Desk').standard, 1)
def test_add_desktop_icon(self):
self.assertEqual(self.get_icon('User'), None)
add_user_icon('User')
icon = self.get_icon('User')
self.assertEqual(icon.custom, 1)
self.assertEqual(icon.standard, 0)
def test_hide_desktop_icon(self):
set_hidden_list(["Desk"], '[email protected]')
icon = self.get_icon('Desk')
self.assertEqual(icon.hidden, 1)
self.assertEqual(icon.standard, 0)
def test_remove_custom_desktop_icon_on_hidden(self):
self.test_add_desktop_icon()
set_hidden_list(['User'], '[email protected]')
icon = self.get_icon('User')
self.assertEqual(icon, None)
def test_show_desktop_icon(self):
self.test_hide_desktop_icon()
set_hidden_list([], '[email protected]')
icon = self.get_icon('Desk')
self.assertEqual(icon.hidden, 0)
self.assertEqual(icon.standard, 0)
def test_globally_hidden_desktop_icon(self):
set_hidden_list(["Desk"])
icon = self.get_icon('Desk')
self.assertEqual(icon.hidden, 1)
frappe.set_user('[email protected]')
icon = self.get_icon('Desk')
self.assertEqual(icon.hidden, 1)
def test_re_order_desktop_icons(self):
icons = [d.module_name for d in get_desktop_icons()]
m0, m1 = icons[0], icons[1]
set_order([m1, m0] + icons[2:], frappe.session.user)
# reload
icons = [d.module_name for d in get_desktop_icons()]
# check switched order
self.assertEqual(icons[0], m1)
self.assertEqual(icons[1], m0)
def test_block_desktop_icons_for_user(self):
def test_unblock():
user = frappe.get_doc('User', '[email protected]')
user.block_modules = []
user.save(ignore_permissions = 1)
icon = self.get_icon('Desk')
self.assertEqual(icon.hidden, 0)
test_unblock()
user = frappe.get_doc('User', '[email protected]')
user.append('block_modules', {'module': 'Desk'})
user.save(ignore_permissions = 1)
clear_desktop_icons_cache(user.name)
icon = self.get_icon('Desk')
self.assertEqual(icon.hidden, 1)
test_unblock()
|
mit
|
jjmachan/activityPointsApp
|
activitypoints/lib/python3.5/site-packages/django/contrib/gis/db/backends/oracle/adapter.py
|
273
|
1866
|
from cx_Oracle import CLOB
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
from django.utils.six.moves import range
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB
def __init__(self, geom):
"""
Oracle requires that polygon rings are in proper orientation. This
affects spatial operations and an invalid orientation may cause
failures. Correct orientations are:
* Outer ring - counter clockwise
* Inner ring(s) - clockwise
"""
if isinstance(geom, Polygon):
self._fix_polygon(geom)
elif isinstance(geom, GeometryCollection):
self._fix_geometry_collection(geom)
self.wkt = geom.wkt
self.srid = geom.srid
def _fix_polygon(self, poly):
# Fix single polygon orientation as described in __init__()
if self._isClockwise(poly.exterior_ring):
poly.exterior_ring = list(reversed(poly.exterior_ring))
for i in range(1, len(poly)):
if not self._isClockwise(poly[i]):
poly[i] = list(reversed(poly[i]))
return poly
def _fix_geometry_collection(self, coll):
# Fix polygon orientations in geometry collections as described in
# __init__()
for i, geom in enumerate(coll):
if isinstance(geom, Polygon):
coll[i] = self._fix_polygon(geom)
def _isClockwise(self, coords):
# A modified shoelace algorithm to determine polygon orientation.
# See https://en.wikipedia.org/wiki/Shoelace_formula
n = len(coords)
area = 0.0
for i in range(n):
j = (i + 1) % n
area += coords[i][0] * coords[j][1]
area -= coords[j][0] * coords[i][1]
return area < 0.0
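# A small worked example (not part of the original module) of the shoelace test
# above: for the counter-clockwise ring (0,0) -> (1,0) -> (1,1) -> (0,1) the
# accumulated terms are 0 + 1 + 1 + 0 = 2 > 0, so _isClockwise() returns False
# and _fix_polygon() leaves such an exterior ring untouched.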
|
mit
|
fxfitz/ansible
|
lib/ansible/modules/cloud/amazon/rds_instance_facts.py
|
5
|
12095
|
#!/usr/bin/python
# Copyright (c) 2017, 2018 Michael De La Rue
# Copyright (c) 2017, 2018 Will Thames
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: rds_instance_facts
version_added: "2.6"
short_description: obtain facts about one or more RDS instances
description:
- obtain facts about one or more RDS instances
options:
db_instance_identifier:
description:
- The RDS instance's unique identifier.
required: false
aliases:
- id
filters:
description:
- A filter that specifies one or more DB instances to describe.
See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html)
requirements:
- "python >= 2.7"
- "boto3"
author:
- "Will Thames (@willthames)"
- "Michael De La Rue (@mikedlr)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Get facts about an instance
- rds_instance_facts:
name: new-database
register: new_database_facts
# Get all RDS instances
- rds_instance_facts:
'''
RETURN = '''
instances:
description: List of RDS instances
returned: always
type: complex
contains:
allocated_storage:
description: Gigabytes of storage allocated to the database
returned: always
type: int
sample: 10
auto_minor_version_upgrade:
description: Whether minor version upgrades happen automatically
returned: always
type: bool
sample: true
availability_zone:
description: Availability Zone in which the database resides
returned: always
type: string
sample: us-west-2b
backup_retention_period:
description: Days for which backups are retained
returned: always
type: int
sample: 7
ca_certificate_identifier:
description: ID for the CA certificate
returned: always
type: string
sample: rds-ca-2015
copy_tags_to_snapshot:
description: Whether DB tags should be copied to the snapshot
returned: always
type: bool
sample: false
db_instance_arn:
description: ARN of the database instance
returned: always
type: string
sample: arn:aws:rds:us-west-2:111111111111:db:helloworld-rds
db_instance_class:
description: Instance class of the database instance
returned: always
type: string
sample: db.t2.small
db_instance_identifier:
description: Database instance identifier
returned: always
type: string
sample: helloworld-rds
db_instance_port:
description: Port used by the database instance
returned: always
type: int
sample: 0
db_instance_status:
description: Status of the database instance
returned: always
type: string
sample: available
db_name:
description: Name of the database
returned: always
type: string
sample: management
db_parameter_groups:
description: List of database parameter groups
returned: always
type: complex
contains:
db_parameter_group_name:
description: Name of the database parameter group
returned: always
type: string
sample: psql-pg-helloworld
parameter_apply_status:
description: Whether the parameter group has been applied
returned: always
type: string
sample: in-sync
db_security_groups:
description: List of security groups used by the database instance
returned: always
type: list
sample: []
db_subnet_group:
description: list of subnet groups
returned: always
type: complex
contains:
db_subnet_group_description:
description: Description of the DB subnet group
returned: always
type: string
sample: My database subnet group
db_subnet_group_name:
description: Name of the database subnet group
returned: always
type: string
sample: my-subnet-group
subnet_group_status:
description: Subnet group status
returned: always
type: string
sample: Complete
subnets:
description: List of subnets in the subnet group
returned: always
type: complex
contains:
subnet_availability_zone:
description: Availability zone of the subnet
returned: always
type: complex
contains:
name:
description: Name of the availability zone
returned: always
type: string
sample: us-west-2c
subnet_identifier:
description: Subnet ID
returned: always
type: string
sample: subnet-abcd1234
subnet_status:
description: Subnet status
returned: always
type: string
sample: Active
vpc_id:
description: VPC id of the subnet group
returned: always
type: string
sample: vpc-abcd1234
dbi_resource_id:
description: AWS Region-unique, immutable identifier for the DB instance
returned: always
type: string
sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA
domain_memberships:
description: List of domain memberships
returned: always
type: list
sample: []
endpoint:
description: Database endpoint
returned: always
type: complex
contains:
address:
description: Database endpoint address
returned: always
type: string
sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com
hosted_zone_id:
description: Route53 hosted zone ID
returned: always
type: string
sample: Z1PABCD0000000
port:
description: Database endpoint port
returned: always
type: int
sample: 5432
engine:
description: Database engine
returned: always
type: string
sample: postgres
engine_version:
description: Database engine version
returned: always
type: string
sample: 9.5.10
iam_database_authentication_enabled:
description: Whether database authentication through IAM is enabled
returned: always
type: bool
sample: false
instance_create_time:
description: Date and time the instance was created
returned: always
type: string
sample: '2017-10-10T04:00:07.434000+00:00'
kms_key_id:
description: KMS Key ID
returned: always
type: string
sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-0000-abcd-1111-0123456789ab
latest_restorable_time:
description: Latest time to which a database can be restored with point-in-time restore
returned: always
type: string
sample: '2018-05-17T00:03:56+00:00'
license_model:
description: License model
returned: always
type: string
sample: postgresql-license
master_username:
description: Database master username
returned: always
type: string
sample: dbadmin
monitoring_interval:
description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance
returned: always
type: int
sample: 0
multi_az:
description: Whether Multi-AZ is on
returned: always
type: bool
sample: false
option_group_memberships:
description: List of option groups
returned: always
type: complex
contains:
option_group_name:
description: Option group name
returned: always
type: string
sample: default:postgres-9-5
status:
description: Status of option group
returned: always
type: string
sample: in-sync
pending_modified_values:
description: Modified values pending application
returned: always
type: complex
contains: {}
performance_insights_enabled:
description: Whether performance insights are enabled
returned: always
type: bool
sample: false
preferred_backup_window:
description: Preferred backup window
returned: always
type: string
sample: 04:00-05:00
preferred_maintenance_window:
description: Preferred maintenance window
returned: always
type: string
sample: mon:05:00-mon:05:30
publicly_accessible:
description: Whether the DB is publicly accessible
returned: always
type: bool
sample: false
read_replica_db_instance_identifiers:
description: List of database instance read replicas
returned: always
type: list
sample: []
storage_encrypted:
description: Whether the storage is encrypted
returned: always
type: bool
sample: true
storage_type:
description: Storage type of the Database instance
returned: always
type: string
sample: gp2
tags:
description: Tags used by the database instance
returned: always
type: complex
contains: {}
vpc_security_groups:
description: List of VPC security groups
returned: always
type: complex
contains:
status:
description: Status of the VPC security group
returned: always
type: string
sample: active
vpc_security_group_id:
description: VPC Security Group ID
returned: always
type: string
sample: sg-abcd1234
'''
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, AWSRetry, camel_dict_to_snake_dict
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
def instance_facts(module, conn):
instance_name = module.params.get('db_instance_identifier')
filters = module.params.get('filters')
params = dict()
if instance_name:
params['DBInstanceIdentifier'] = instance_name
if filters:
params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
paginator = conn.get_paginator('describe_db_instances')
try:
results = paginator.paginate(**params).build_full_result()['DBInstances']
except is_boto3_error_code('DBInstanceNotFound'):
results = []
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, "Couldn't get instance information")
for instance in results:
try:
instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'],
aws_retry=True)['TagList'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])
return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
def main():
argument_spec = dict(
db_instance_identifier=dict(aliases=['id']),
filters=dict(type='dict')
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
module.exit_json(**instance_facts(module, conn))
if __name__ == '__main__':
main()
|
gpl-3.0
|
davidsims9t/itis-graphql
|
venv/lib/python3.4/site-packages/setuptools/namespaces.py
|
54
|
2648
|
import os
from distutils import log
import itertools
from setuptools.extern.six.moves import map
flatten = itertools.chain.from_iterable
class Installer:
nspkg_ext = '-nspkg.pth'
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp:
return
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
self.outputs.append(filename)
log.info("Installing %s", filename)
lines = map(self._gen_nspkg_line, nsp)
if self.dry_run:
# always generate the lines, even in dry run
list(lines)
return
with open(filename, 'wt') as f:
f.writelines(lines)
def _get_target(self):
return self.target
_nspkg_tmpl = (
"import sys, types, os",
"pep420 = sys.version_info > (3, 3)",
"p = os.path.join(%(root)s, *%(pth)r)",
"ie = os.path.exists(os.path.join(p,'__init__.py'))",
"m = not ie and not pep420 and "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
"mp = (m or []) and m.__dict__.setdefault('__path__',[])",
"(p not in mp) and mp.append(p)",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
def _get_root(self):
return "sys._getframe(1).f_locals['sitedir']"
def _gen_nspkg_line(self, pkg):
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
root = self._get_root()
tmpl_lines = self._nspkg_tmpl
parent, sep, child = pkg.rpartition('.')
if parent:
tmpl_lines += self._nspkg_tmpl_multi
return ';'.join(tmpl_lines) % locals() + '\n'
def _get_all_ns_packages(self):
"""Return sorted list of all package namespaces"""
pkgs = self.distribution.namespace_packages or []
return sorted(flatten(map(self._pkg_names, pkgs)))
@staticmethod
def _pkg_names(pkg):
"""
Given a namespace package, yield the components of that
package.
>>> names = Installer._pkg_names('a.b.c')
>>> set(names) == set(['a', 'a.b', 'a.b.c'])
True
"""
parts = pkg.split('.')
while parts:
yield '.'.join(parts)
parts.pop()
class DevelopInstaller(Installer):
def _get_root(self):
return repr(str(self.egg_path))
def _get_target(self):
return self.egg_link
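# Illustrative sketch (not part of the original file): what _get_all_ns_packages
# produces for a hypothetical distribution declaring one dotted namespace
# package; FakeDist is a stand-in for the real distribution object.
#
#   >>> class FakeDist(object):
#   ...     namespace_packages = ['zope.interface']
#   >>> inst = Installer()
#   >>> inst.distribution = FakeDist()
#   >>> inst._get_all_ns_packages()
#   ['zope', 'zope.interface']
#
# Each resulting name then becomes one semicolon-joined line of the generated
# *-nspkg.pth file via _gen_nspkg_line().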
|
gpl-3.0
|
jthomm/game-center-db
|
gcdb/__init__.py
|
1
|
18930
|
__version__ = '0.0.1'
"""Reading and executing SQL"""
from os import path
class Table(object):
"""Given a table name and a `sqlite3.Cursor`, execute `CREATE TABLE` and
`INSERT INTO`.
"""
SQL_PATH = path.abspath(path.join(path.dirname(__file__), 'sql'))
def __init__(self, name):
self.name = name
def _sql(self, operation):
"""A helper that finds and reads the SQL script for the given operation
"""
if operation not in ('create', 'insert',):
raise Exception("Invalid operation: '{0}'".format(operation))
file_path = path.join(self.SQL_PATH, self.name, operation + '.sql')
        with open(file_path, 'rb') as file_handle:
            return file_handle.read()
@property
def create_sql(self):
return self._sql('create')
@property
def insert_sql(self):
return self._sql('insert')
def insert(self, cursor, values):
cursor.execute(self.insert_sql, values)
return cursor.lastrowid
def create(self, cursor):
cursor.execute(self.create_sql)
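# Illustrative usage sketch (not part of the original file): it assumes the
# bundled sql/<table>/create.sql and sql/<table>/insert.sql scripts are present
# on disk, and the insert values tuple is hypothetical.
#
#   import sqlite3
#   connection = sqlite3.connect(':memory:')
#   cursor = connection.cursor()
#   gc_table = Table('gc')
#   gc_table.create(cursor)                             # runs sql/gc/create.sql
#   row_id = gc_table.insert(cursor, ('2014092800',))   # runs sql/gc/insert.sql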
"""Traversing the NFL Game Center JSON object"""
import abc
class InserterABC(object):
__metaclass__ = abc.ABCMeta
# This has to be an instance of `Table`
table = None
def __init__(self, cursor, _parent_id):
self.cursor = cursor
# `parent_id` is the foreign key to the "parent" table
self._parent_id = _parent_id
@abc.abstractmethod
def __call__(self, *args):
pass
class GcCurDrivePlayEvent(InserterABC):
table = Table('gc_cur_drive_play_event')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['playerName'],
dct['clubcode'],
dct['yards'],
dct['statId'],
dct['sequence'],
self._parent_id,))
class GcCurDrivePlay(InserterABC):
table = Table('gc_cur_drive_play')
def __call__(self, play_id, dct):
_id = self.table.insert(self.cursor, (
play_id,
dct['desc'],
dct['posteam'],
dct['qtr'],
dct['time'],
dct['down'],
dct['ydstogo'],
dct['yrdln'],
dct['ydsnet'],
dct['note'],
dct['sp'],
self._parent_id,))
# Insert play events
gc_cur_drive_play_event = GcCurDrivePlayEvent(self.cursor, _id)
for player_id, event_dcts in dct['players'].iteritems():
for event_dct in event_dcts:
gc_cur_drive_play_event(player_id, event_dct)
class GcCurDrive(InserterABC):
table = Table('gc_cur_drive')
def __call__(self, drive_num, dct):
_id = self.table.insert(self.cursor, (
drive_num,
None if 'redzone' not in dct else int(dct['redzone']),
dct['postime'],
dct['fds'],
dct['result'],
dct['numplays'],
dct.get('qtr'),
dct['penyds'],
dct['posteam'],
dct['ydsgained'],
dct['start']['yrdln'],
dct['start']['team'],
dct['start']['qtr'],
dct['start']['time'],
dct['end']['yrdln'],
dct['end']['team'],
dct['end']['qtr'],
dct['end']['time'],
self._parent_id,))
# Insert plays
gc_cur_drive_play = GcCurDrivePlay(self.cursor, _id)
for play_id, play_dct in dct['plays'].iteritems():
gc_cur_drive_play(play_id, play_dct)
class GcCurTeamScore(InserterABC):
table = Table('gc_cur_team_score')
def __call__(self, qtr, points):
self.table.insert(self.cursor, (
qtr,
points,
self._parent_id,))
class GcCurTeamStatsTeam(InserterABC):
table = Table('gc_cur_team_stats_team')
def __call__(self, dct):
self.table.insert(self.cursor, (
dct['totfd'],
dct['trnovr'],
dct['pyds'],
dct['ryds'],
dct['totyds'],
dct['pt'],
dct['ptyds'],
dct['ptavg'],
dct['pen'],
dct['penyds'],
dct['top'],
self._parent_id,))
class GcCurTeamStatsKickret(InserterABC):
table = Table('gc_cur_team_stats_kickret')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['name'],
dct['ret'],
dct['avg'],
dct['lng'],
dct['tds'],
self._parent_id,))
class GcCurTeamStatsPuntret(InserterABC):
table = Table('gc_cur_team_stats_puntret')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['name'],
dct['ret'],
dct['avg'],
dct['lng'],
dct['tds'],
self._parent_id,))
class GcCurTeamStatsDefense(InserterABC):
table = Table('gc_cur_team_stats_defense')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['name'],
dct['tkl'],
dct['ast'],
dct['sk'],
dct['ffum'],
dct['int'],
self._parent_id,))
class GcCurTeamStatsFumbles(InserterABC):
table = Table('gc_cur_team_stats_fumbles')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['name'],
dct['tot'],
dct['yds'],
dct['lost'],
dct['rcv'],
dct['trcv'],
self._parent_id,))
class GcCurTeamStatsKicking(InserterABC):
table = Table('gc_cur_team_stats_kicking')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['name'],
dct['totpfg'],
dct['fga'],
dct['fgm'],
dct['fgyds'],
dct['xptot'],
dct['xpa'],
dct['xpmade'],
dct['xpmissed'],
dct['xpb'],
self._parent_id,))
class GcCurTeamStatsPunting(InserterABC):
table = Table('gc_cur_team_stats_punting')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['name'],
dct['pts'],
dct['yds'],
dct['avg'],
dct['lng'],
dct['i20'],
self._parent_id,))
class GcCurTeamStatsPassing(InserterABC):
table = Table('gc_cur_team_stats_passing')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['name'],
dct['att'],
dct['cmp'],
dct['yds'],
dct['tds'],
dct['ints'],
dct['twopta'],
dct['twoptm'],
self._parent_id,))
class GcCurTeamStatsReceiving(InserterABC):
table = Table('gc_cur_team_stats_receiving')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct.get('name'),
dct.get('rec'),
dct.get('yds'),
dct.get('tds'),
dct.get('lng'),
dct.get('lngtd'),
dct.get('twopta'),
dct.get('twoptm'),
self._parent_id,))
class GcCurTeamStatsRushing(InserterABC):
table = Table('gc_cur_team_stats_rushing')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct.get('name'),
dct.get('att'),
dct.get('yds'),
dct.get('tds'),
dct.get('lng'),
dct.get('lngtd'),
dct.get('twopta'),
dct.get('twoptm'),
self._parent_id,))
class GcCurTeamPlayer(InserterABC):
table = Table('gc_cur_team_player')
def __call__(self, player_id, dct):
self.table.insert(self.cursor, (
player_id,
dct['esbid'],
dct['fn'],
dct['ln'],
dct['hometown'],
dct['bdate'],
dct['age'],
dct['exp'],
dct['pos'],
dct['ht'],
dct['wt'],
dct['college'],
dct['team'],
dct['uniformNumber'],
self._parent_id,))
class GcCurTeam(InserterABC):
table = Table('gc_cur_team')
def __call__(self, ah, dct):
_id = self.table.insert(self.cursor, (
ah,
dct['teamCode'],
dct['abbr'],
dct['fullName'],
dct['comUrl'],
dct['clubUrl'],
dct['smsCode'],
dct['color'],
dct['teamColor'],
dct['phone'],
dct['standing'],
dct['to'],
self._parent_id,))
# Insert score
gc_cur_team_score = GcCurTeamScore(self.cursor, _id)
for qtr, points in dct['score'].iteritems():
gc_cur_team_score(qtr, points)
# Insert player
gc_cur_team_player = GcCurTeamPlayer(self.cursor, _id)
for player_id, player_dct in dct['players'].iteritems():
gc_cur_team_player(player_id, player_dct)
# Insert stats team
gc_cur_team_stats_team = GcCurTeamStatsTeam(self.cursor, _id)
gc_cur_team_stats_team(dct['stats']['team'])
# Insert stats kickret
if 'kickret' in dct['stats']:
gc_cur_team_stats_kickret = GcCurTeamStatsKickret(self.cursor, _id)
for player_id, kickret_dct in dct['stats']['kickret'].iteritems():
gc_cur_team_stats_kickret(player_id, kickret_dct)
# Insert stats puntret
if 'puntret' in dct['stats']:
gc_cur_team_stats_puntret = GcCurTeamStatsPuntret(self.cursor, _id)
for player_id, puntret_dct in dct['stats']['puntret'].iteritems():
gc_cur_team_stats_puntret(player_id, puntret_dct)
# Insert stats defense
if 'defense' in dct['stats']:
gc_cur_team_stats_defense = GcCurTeamStatsDefense(self.cursor, _id)
for player_id, defense_dct in dct['stats']['defense'].iteritems():
gc_cur_team_stats_defense(player_id, defense_dct)
# Insert stats fumbles
if 'fumbles' in dct['stats']:
gc_cur_team_stats_fumbles = GcCurTeamStatsFumbles(self.cursor, _id)
for player_id, fumbles_dct in dct['stats']['fumbles'].iteritems():
gc_cur_team_stats_fumbles(player_id, fumbles_dct)
# Insert stats kicking
if 'kicking' in dct['stats']:
gc_cur_team_stats_kicking = GcCurTeamStatsKicking(self.cursor, _id)
for player_id, kicking_dct in dct['stats']['kicking'].iteritems():
gc_cur_team_stats_kicking(player_id, kicking_dct)
# Insert stats punting
if 'punting' in dct['stats']:
gc_cur_team_stats_punting = GcCurTeamStatsPunting(self.cursor, _id)
for player_id, punting_dct in dct['stats']['punting'].iteritems():
gc_cur_team_stats_punting(player_id, punting_dct)
# Insert stats passing
if 'passing' in dct['stats']:
gc_cur_team_stats_passing = GcCurTeamStatsPassing(self.cursor, _id)
for player_id, passing_dct in dct['stats']['passing'].iteritems():
gc_cur_team_stats_passing(player_id, passing_dct)
# Insert stats receiving
if 'receiving' in dct['stats']:
gc_cur_team_stats_receiving = GcCurTeamStatsReceiving(self.cursor, _id)
for player_id, receiving_dct in dct['stats']['receiving'].iteritems():
gc_cur_team_stats_receiving(player_id, receiving_dct)
# Insert stats rushing
if 'rushing' in dct['stats']:
gc_cur_team_stats_rushing = GcCurTeamStatsRushing(self.cursor, _id)
for player_id, rushing_dct in dct['stats']['rushing'].iteritems():
gc_cur_team_stats_rushing(player_id, rushing_dct)
class GcCurScrsummaryPlayer(InserterABC):
table = Table('gc_cur_scrsummary_player')
def __call__(self, player_name, player_id):
self.table.insert(self.cursor, (
player_id,
player_name,
self._parent_id,))
class GcCurScrsummary(InserterABC):
table = Table('gc_cur_scrsummary')
def __call__(self, play_id, dct):
_id = self.table.insert(self.cursor, (
play_id,
dct['qtr'],
dct['team'],
dct['type'],
dct['desc'],
self._parent_id,))
# Insert players
gc_cur_scrsummary_player = GcCurScrsummaryPlayer(self.cursor, _id)
for player_name, player_id in dct['players'].iteritems():
gc_cur_scrsummary_player(player_name, player_id)
class GcCur(InserterABC):
table = Table('gc_cur')
def __call__(self, dct):
_id = self.table.insert(self.cursor, (
int(dct['redzone']),
dct['rooftype'],
dct['qtr'],
dct['yl'],
dct['clock'],
dct['down'],
dct['togo'],
dct['posteam'],
dct['stadium'],
self._parent_id,))
# Insert drives
gc_cur_drive = GcCurDrive(self.cursor, _id)
for drive_num, drive_dct in dct['drives'].iteritems():
# Most keys in `dct['drives']` will be a drive number and their
# values will be a drive dictionary. One exception: the key
# 'crntdrv', which has an integer value. Skip this key.
if drive_num != 'crntdrv':
gc_cur_drive(drive_num, drive_dct)
# Insert teams
gc_cur_team = GcCurTeam(self.cursor, _id)
for ah in ('away', 'home',):
gc_cur_team(ah, dct[ah])
# Insert score summary
gc_cur_scrsummary = GcCurScrsummary(self.cursor, _id)
for play_id, scrsummary_dct in dct['scrsummary'].iteritems():
gc_cur_scrsummary(play_id, scrsummary_dct)
class GcGameTeam(InserterABC):
table = Table('gc_game_team')
def __call__(self, abbr, dct):
self.table.insert(self.cursor, (
abbr,
dct['fullname'],
dct['link'],
dct['standing'],
self._parent_id,))
class GcGame(InserterABC):
table = Table('gc_game')
def __call__(self, dct):
_id = self.table.insert(self.cursor, (
dct['id'],
dct['key'],
dct['uri'],
dct['gamebook'],
dct['seasontype'],
dct['week'],
dct['cp'],
dct['year'],
dct['date'],
dct['time'],
dct['day'],
dct['state'],
self._parent_id,))
# Insert teams
gc_game_team = GcGameTeam(self.cursor, _id)
for abbr, team_dct in dct['teams'].iteritems():
gc_game_team(abbr, team_dct)
class GcTeam(InserterABC):
table = Table('gc_team')
def __call__(self, dct):
for ah in ('away', 'home',):
self.table.insert(self.cursor, (
ah,
dct[ah]['standing'],
dct[ah]['abbr'],
self._parent_id,))
class Gc(InserterABC):
table = Table('gc')
def __call__(self, dct):
_id = self.table.insert(self.cursor, (self._parent_id,))
GcGame(self.cursor, _id)(dct['game'])
GcTeam(self.cursor, _id)(dct['teams'])
GcCur(self.cursor, _id)(dct['current'][self._parent_id])
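# Illustrative call sketch (the cursor and parsed Game Center dict are
# hypothetical): Gc walks the JSON top-down, inserting one row per nested
# object and threading each new row id down as the child's _parent_id
# foreign key.
#
#   Gc(cursor, '2014092800')(dct)
#   # gc -> gc_game / gc_team / gc_cur -> drives, teams, score summary, ...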
"""Command line arguments"""
import argparse
argument_parser = argparse.ArgumentParser(
description='Insert data for a given game')
argument_parser.add_argument(
'-d', '--database', help='path to the SQLite database file', required=True)
argument_parser.add_argument(
'-f', '--filename', help='path to the Game Center .json file', required=True)
argument_parser.add_argument(
'-g', '--gameid', help="Game Center ID of the game (e.g. '2014092800')")
def parse_args():
"""Parse command line arguments but infer `gameid` based on file name if
not explicitly provided.
"""
args = argument_parser.parse_args()
if args.gameid is None:
file_base_name = path.basename(args.filename)
file_name_without_ext, ext = path.splitext(file_base_name)
args.gameid = file_name_without_ext
return args
"""Reading the data from disk and deserializing"""
try:
import simplejson as json
except ImportError:
import json
def read_data_from_file_and_deserialize(file_path):
json_string = ''
with open(file_path, 'rb') as f:
json_string = f.read()
return json.loads(json_string)
"""Building the database"""
import sqlite3
def database_is_empty(cursor):
results = cursor.execute('SELECT COUNT (*) FROM sqlite_master').fetchall()
return results[0][0] == 0
def create_tables(cursor):
for table_name in (
'gc',
'gc_game',
'gc_game_team',
'gc_team',
'gc_cur',
'gc_cur_scrsummary',
'gc_cur_scrsummary_player',
'gc_cur_team',
'gc_cur_team_score',
'gc_cur_team_stats_team',
'gc_cur_team_stats_kickret',
'gc_cur_team_stats_puntret',
'gc_cur_team_stats_defense',
'gc_cur_team_stats_fumbles',
'gc_cur_team_stats_kicking',
'gc_cur_team_stats_punting',
'gc_cur_team_stats_passing',
'gc_cur_team_stats_receiving',
'gc_cur_team_stats_rushing',
'gc_cur_team_player',
'gc_cur_drive',
'gc_cur_drive_play',
'gc_cur_drive_play_event',):
table = Table(table_name)
table.create(cursor)
def insert_into_tables(cursor, game_id, dct):
Gc(cursor, game_id)(dct)
"""Logging"""
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('gcdb')
"""Main function"""
def main():
args = parse_args()
dct = read_data_from_file_and_deserialize(args.filename)
with sqlite3.connect(args.database) as connection:
cursor = connection.cursor()
# Make sure foreign key constraints are enforced
cursor.execute('PRAGMA foreign_keys = ON')
# If tables aren't already there, create them
if database_is_empty(cursor):
logger.info(
'No objects found in database - creating tables first...')
create_tables(cursor)
# Insert the data
logger.info('Inserting {}'.format(args.gameid))
try:
insert_into_tables(cursor, args.gameid, dct)
except:
logger.error('There was an issue - aborting...')
raise
else:
# If no errors, commit
connection.commit()
if __name__ == '__main__': main()
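# Illustrative invocation when this module is run as a script (paths are
# hypothetical); the game id is inferred from the .json file name when
# -g/--gameid is omitted:
#
#   python __init__.py -d games.db -f data/2014092800.json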
|
mit
|
maiome-development/malibu
|
src/malibu/design/borgish.py
|
1
|
2517
|
# -*- coding: utf-8 -*-
__doc__ = """
malibu.design.borgish
---------------------
Borgish was designed as a more extended implementation of Alex Martelli's
Borg design pattern, which aims to provide state consistency similar to
a singleton design, but without the terribleness of singletons.
"""
class SharedState(object):
""" This class is for meta-class use as a state machine for
persistence so we don't use any singleton design.
The module is "Borg-ish", as this implementation is loosely
based on the Borg design pattern by Alex Martelli.
"""
@classmethod
def __initialize_states(cls):
""" Initializes a class-scoped states dictionary to store named
states.
"""
if not hasattr(cls, "_SharedState__states"):
cls.__states = {}
def __init__(self, *args, **kw):
""" Calls the classes state dict initializer and loads initial
state, if provided.
"""
self.__initialize_states()
if "state" in kw:
self.load_state(kw.get("state"))
def load_state(self, state):
""" Loads state into the class, overwriting all data that was
previously stored.
:param str state: Name of state to load.
:rtype: None
:returns: None
:raises NameError: If the named state does not exist.
"""
if state in self.__states:
self.__dict__ = self.__states[state]
else:
raise NameError("Can't load non-existent state '%s'." % (state))
def save_state(self, state):
""" Saves class state into a namespace on the class' shared state
dict.
:param str state: Name of state to save.
:rtype: None
:returns: None
:raises NameError: If the named state already exists.
"""
if state in self.__states:
raise NameError("Can't overwrite stored state '%s'." % (state))
self.__states.update({state: self.__dict__})
def drop_state(self, state):
""" Drops the state specified from the class' shared state dictionary.
:param str state: Name of state to drop.
:rtype: bool
:returns: True if state was dropped, False otherwise.
"""
if state not in self.__states:
return False
state_dict = self.__states.pop(state, None)
if not state_dict:
return False
return True
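# Illustrative usage sketch (not part of the original module); the subclass,
# attribute and state names below are hypothetical.
#
#   class Config(SharedState):
#       pass
#
#   a = Config()
#   a.value = 42
#   a.save_state("default")
#
#   b = Config(state="default")   # __init__ loads the named state
#   assert b.value == 42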
|
unlicense
|
Dino0631/RedRain-Bot
|
lib/youtube_dl/extractor/cracked.py
|
170
|
3213
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
str_to_int,
)
class CrackedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cracked\.com/video_(?P<id>\d+)_[\da-z-]+\.html'
_TESTS = [{
'url': 'http://www.cracked.com/video_19070_if-animal-actors-got-e21-true-hollywood-stories.html',
'md5': '89b90b9824e3806ca95072c4d78f13f7',
'info_dict': {
'id': '19070',
'ext': 'mp4',
'title': 'If Animal Actors Got E! True Hollywood Stories',
'timestamp': 1404954000,
'upload_date': '20140710',
}
}, {
# youtube embed
'url': 'http://www.cracked.com/video_19006_4-plot-holes-you-didnt-notice-in-your-favorite-movies.html',
'md5': 'ccd52866b50bde63a6ef3b35016ba8c7',
'info_dict': {
'id': 'EjI00A3rZD0',
'ext': 'mp4',
'title': "4 Plot Holes You Didn't Notice in Your Favorite Movies - The Spit Take",
'description': 'md5:c603708c718b796fe6079e2b3351ffc7',
'upload_date': '20140725',
'uploader_id': 'Cracked',
'uploader': 'Cracked',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
youtube_url = self._search_regex(
r'<iframe[^>]+src="((?:https?:)?//www\.youtube\.com/embed/[^"]+)"',
webpage, 'youtube url', default=None)
if youtube_url:
return self.url_result(youtube_url, 'Youtube')
video_url = self._html_search_regex(
[r'var\s+CK_vidSrc\s*=\s*"([^"]+)"', r'<video\s+src="([^"]+)"'],
webpage, 'video URL')
title = self._search_regex(
[r'property="?og:title"?\s+content="([^"]+)"', r'class="?title"?>([^<]+)'],
webpage, 'title')
description = self._search_regex(
r'name="?(?:og:)?description"?\s+content="([^"]+)"',
webpage, 'description', default=None)
timestamp = self._html_search_regex(
r'"date"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False)
if timestamp:
timestamp = parse_iso8601(timestamp[:-6])
view_count = str_to_int(self._html_search_regex(
r'<span\s+class="?views"? id="?viewCounts"?>([\d,\.]+) Views</span>',
webpage, 'view count', fatal=False))
comment_count = str_to_int(self._html_search_regex(
r'<span\s+id="?commentCounts"?>([\d,\.]+)</span>',
webpage, 'comment count', fatal=False))
m = re.search(r'_(?P<width>\d+)X(?P<height>\d+)\.mp4$', video_url)
if m:
width = int(m.group('width'))
height = int(m.group('height'))
else:
width = height = None
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'height': height,
'width': width,
}
|
gpl-3.0
|
Abdoctor/behave
|
features/steps/behave_active_tags_steps.py
|
4
|
3394
|
# -*- coding: UTF-8 -*-
"""
.. code-block:: gherkin
Given I setup the current values for active tags with:
| category | value |
| foo | xxx |
Then the following active tag combinations are enabled:
| tags | enabled? |
| @active.with_foo=xxx | yes |
| @active.with_foo=other | no |
"""
from behave import given, when, then, step
from behave.tag_matcher import ActiveTagMatcher
from behave.tag_expression import TagExpression
from behave.userdata import parse_bool
from hamcrest import assert_that, equal_to
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# -----------------------------------------------------------------------------
def normalize_tags(tags):
# -- STRIP: Leading '@' from tags.
return [TagExpression.normalize_tag(tag) for tag in tags]
# -----------------------------------------------------------------------------
# STEP DEFINITIONS:
# -----------------------------------------------------------------------------
@given(u'I setup the current values for active tags with')
def step_given_setup_the_current_values_for_active_tags_with(context):
assert context.table, "REQUIRE: table"
context.table.require_columns(["category", "value"])
active_values = getattr(context, "active_value_provider", None)
if active_values is None:
# -- SETUP DATA:
context.active_value_provider = active_values = {}
for row in context.table.rows:
category = row["category"]
value = row["value"]
active_values[category] = value
@then(u'the following active tag combinations are enabled')
def step_then_following_active_tags_combinations_are_enabled(context):
assert context.table, "REQUIRE: table"
assert context.active_value_provider, "REQUIRE: active_value_provider"
context.table.require_columns(["tags", "enabled?"])
ignore_unknown_categories = getattr(context,
"active_tags_ignore_unknown_categories",
ActiveTagMatcher.ignore_unknown_categories)
table = context.table
annotate_column_id = 0
active_tag_matcher = ActiveTagMatcher(context.active_value_provider)
active_tag_matcher.ignore_unknown_categories = ignore_unknown_categories
mismatched_rows = []
for row_index, row in enumerate(table.rows):
tags = normalize_tags(row["tags"].split())
expected_enabled = parse_bool(row["enabled?"])
actual_enabled = active_tag_matcher.should_run_with(tags)
if actual_enabled != expected_enabled:
# -- ANNOTATE MISMATCH IN EXTRA COLUMN:
if annotate_column_id == 0:
annotate_column_id = table.ensure_column_exists("MISMATCH!")
row.cells[annotate_column_id] = "= %s" % actual_enabled
mismatched_rows.append(row_index)
# -- FINALLY: Ensure that there are no mismatched rows.
assert_that(mismatched_rows, equal_to([]), "No mismatched rows:")
@step(u'unknown categories are ignored in active tags')
def step_unknown_categories_are_ignored_in_active_tags(context):
context.active_tags_ignore_unknown_categories = True
@step(u'unknown categories are not ignored in active tags')
def step_unknown_categories_are_not_ignored_in_active_tags(context):
context.active_tags_ignore_unknown_categories = False
|
bsd-2-clause
|
dwhswenson/mdtraj
|
mdtraj/formats/prmtop.py
|
10
|
9196
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: TJ Lane
# Contributors: Robert McGibbon, Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from AMBER PRMTOP files
"""
# Written by: TJ Lane <[email protected]> 2/25/14
# This code was mostly stolen/stripped down from OpenMM code, specifically
# the files amber_file_parser.py and amberprmtopfile.py
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import re
from mdtraj.core import topology
from mdtraj.formats import pdb
from mdtraj.core import element as elem
FORMAT_RE_PATTERN = re.compile(r"([0-9]+)([a-zA-Z]+)([0-9]+)\.?([0-9]*)")
__all__ = ['load_prmtop']
##############################################################################
# Functions
##############################################################################
def _get_pointer_value(pointer_label, raw_data):
POINTER_LABELS = """
NATOM, NTYPES, NBONH, MBONA, NTHETH, MTHETA,
NPHIH, MPHIA, NHPARM, NPARM, NEXT, NRES,
NBONA, NTHETA, NPHIA, NUMBND, NUMANG, NPTRA,
NATYP, NPHB, IFPERT, NBPER, NGPER, NDPER,
MBPER, MGPER, MDPER, IFBOX, NMXRS, IFCAP
"""
POINTER_LABEL_LIST = POINTER_LABELS.replace(',', '').split()
index = POINTER_LABEL_LIST.index(pointer_label)
return float(raw_data['POINTERS'][index])
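# Illustrative example (values are hypothetical): with
# raw_data['POINTERS'] == ['2269', '14', ...], NATOM is the first label in
# POINTER_LABELS, so _get_pointer_value('NATOM', raw_data) returns 2269.0.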
def load_prmtop(filename, **kwargs):
"""Load an AMBER prmtop topology file from disk.
Parameters
----------
filename : str
Path to the prmtop file on disk.
Returns
-------
top : md.Topology
The resulting topology, as an md.Topology object.
Notes
-----
Deprecated fields in the prmtop file are not loaded. This includes the
BOX dimensions, which should be stored in trajectory files instead of the
prmtop for systems with periodic boundary conditions. Because '.binpos'
files do not store box dimensions, this means that unitcell information
will be lost if you use .binpos + .prmtop files with MDTraj.
Examples
--------
>>> topology = md.load_prmtop('mysystem.prmtop')
>>> # or
>>> trajectory = md.load('trajectory.mdcrd', top='system.prmtop')
"""
top = topology.Topology()
prmtop_version = None
flags = []
raw_format = {}
raw_data = {}
ignoring = False
with open(filename, 'r') as f:
for line in f:
if line[0] == '%':
if line.startswith('%VERSION'):
tag, prmtop_version = line.rstrip().split(None, 1)
elif line.startswith('%FLAG'):
tag, flag = line.rstrip().split(None, 1)
flags.append(flag)
raw_data[flag] = []
ignoring = flag in ('TITLE', 'CTITLE')
elif line.startswith('%FORMAT'):
format = line.rstrip()
index0=format.index('(')
index1=format.index(')')
format = format[index0+1:index1]
m = FORMAT_RE_PATTERN.search(format)
if m is None:
ignoring = True
else:
raw_format[flags[-1]] = (format, m.group(1), m.group(2), m.group(3), m.group(4))
elif line.startswith('%COMMENT'):
continue
elif not ignoring:
flag=flags[-1]
format, numItems, itemType, itemLength, itemPrecision = raw_format[flag]
iLength=int(itemLength)
line = line.rstrip()
for index in range(0, len(line), iLength):
item = line[index:index+iLength]
if item:
raw_data[flag].append(item.strip())
# Add atoms to the topology
pdb.PDBTrajectoryFile._loadNameReplacementTables()
previous_residue = None
c = top.add_chain()
n_atoms = int(_get_pointer_value('NATOM', raw_data))
    # build a dictionary telling us which atom belongs to which residue
residue_pointer_dict = {}
res_pointers = raw_data['RESIDUE_POINTER']
first_atom = [int(p)-1 for p in res_pointers] # minus 1 necessary
first_atom.append(n_atoms)
res = 0
for i in range(n_atoms):
while first_atom[res+1] <= i:
res += 1
residue_pointer_dict[i] = res
# add each residue/atom to the topology object
for index in range(n_atoms):
res_number = residue_pointer_dict[index]
if res_number != previous_residue:
previous_residue = res_number
# check
res_name = raw_data['RESIDUE_LABEL'][residue_pointer_dict[index]].strip()
if res_name in pdb.PDBTrajectoryFile._residueNameReplacements:
res_name = pdb.PDBTrajectoryFile._residueNameReplacements[res_name]
r = top.add_residue(res_name, c)
if res_name in pdb.PDBTrajectoryFile._atomNameReplacements:
atom_replacements = pdb.PDBTrajectoryFile._atomNameReplacements[res_name]
else:
atom_replacements = {}
atom_name = raw_data['ATOM_NAME'][index].strip()
if atom_name in atom_replacements:
atom_name = atom_replacements[atom_name]
# Get the element from the prmtop file if available
if 'ATOMIC_NUMBER' in raw_data:
try:
element = elem.Element.getByAtomicNumber(int(raw_data['ATOMIC_NUMBER'][index]))
except KeyError:
element = elem.virtual
else:
# Try to guess the element from the atom name.
upper = atom_name.upper()
if upper.startswith('CL'):
element = elem.chlorine
elif upper.startswith('NA'):
element = elem.sodium
elif upper.startswith('MG'):
element = elem.magnesium
elif upper.startswith('ZN'):
element = elem.zinc
else:
try:
element = elem.get_by_symbol(atom_name[0])
except KeyError:
element = elem.virtual
top.add_atom(atom_name, element, r)
# Add bonds to the topology
bond_pointers = raw_data["BONDS_INC_HYDROGEN"] + raw_data["BONDS_WITHOUT_HYDROGEN"]
atoms = list(top.atoms)
bond_list = []
for ii in range(0,len(bond_pointers),3):
if int(bond_pointers[ii])<0 or int(bond_pointers[ii+1])<0:
raise Exception("Found negative bonded atom pointers %s"
% ((bond_pointers[ii],
bond_pointers[ii+1]),))
else:
bond_list.append((int(bond_pointers[ii])//3, int(bond_pointers[ii+1])//3))
for bond in bond_list:
top.add_bond(atoms[bond[0]], atoms[bond[1]])
return top
|
lgpl-2.1
|
minghuascode/pyj
|
examples/hangman/Wordlist_15.py
|
8
|
5084
|
words=[
'counterintelligence',
'interdenominational',
'nonrepresentational',
'characteristically',
'chlorofluorocarbon',
'disproportionately',
'electrocardiograph',
'oversimplification',
'telecommunications',
'transubstantiation',
'commercialization',
'comprehensiveness',
'conscientiousness',
'constitutionality',
'contradistinction',
'conversationalist',
'counterproductive',
'counterrevolution',
'decriminalization',
'electrocardiogram',
'inconsequentially',
'indistinguishable',
'industrialization',
'interdisciplinary',
'interrelationship',
'misinterpretation',
'misrepresentation',
'nondenominational',
'telecommunication',
'transcendentalism',
'ultraconservative',
'anesthesiologist',
'antagonistically',
'anthropomorphism',
'apprehensiveness',
'aristocratically',
'arteriosclerosis',
'authoritarianism',
'autobiographical',
'bloodthirstiness',
'bureaucratically',
'cantankerousness',
'characterization',
'circumnavigation',
'circumstantially',
'compartmentalize',
'constitutionally',
'conversationally',
'counterclockwise',
'counterespionage',
'decentralization',
'disproportionate',
'disqualification',
'electromagnetism',
'enthusiastically',
'environmentalist',
'extemporaneously',
'extraterrestrial',
'hydroelectricity',
'hypersensitivity',
'hyperventilation',
'incomprehensible',
'incontrovertible',
'indiscriminately',
'inextinguishable',
'institutionalize',
'intercontinental',
'internationalism',
'internationalize',
'irresponsibility',
'lightheartedness',
'melodramatically',
'misappropriation',
'mispronunciation',
'misunderstanding',
'multiculturalism',
'multimillionaire',
'nonproliferation',
'overcompensation',
'overenthusiastic',
'paraprofessional',
'photographically',
'representational',
'semiprofessional',
'shortsightedness',
'tintinnabulation',
'transcontinental',
'uncharacteristic',
'unconstitutional',
'unconventionally',
'unrepresentative',
'acclimatization',
'acknowledgement',
'acquisitiveness',
'aerodynamically',
'Americanization',
'anthropological',
'anthropomorphic',
'appropriateness',
'architecturally',
'authoritatively',
'bacteriological',
'bibliographical',
'chronologically',
'cinematographer',
'compassionately',
'competitiveness',
'comprehensively',
'computerization',
'condescendingly',
'confidentiality',
'congratulations',
'conscientiously',
'conservationist',
'contemporaneous',
'controversially',
'conventionality',
'correspondingly',
'crystallization',
'decontamination',
'demagnetization',
'demographically',
'desensitization',
'destructiveness',
'differentiation',
'disadvantageous',
'disappointingly',
'discontinuation',
'disentanglement',
'disillusionment',
'disorganization',
'dispassionately',
'disrespectfully',
'dissatisfaction',
'distinctiveness',
'distinguishable',
'diversification',
'electrification',
'electromagnetic',
'enfranchisement',
'entrepreneurial',
'environmentally',
'euphemistically',
'excommunication',
'exemplification',
'experimentation',
'extracurricular',
'extraordinarily',
'halfheartedness',
'hardheartedness',
'heterosexuality',
'hospitalization',
'humanitarianism',
'impenetrability',
'impressionistic',
'inaccessibility',
'inappropriately',
'incompatibility',
'inconsequential',
'inconsiderately',
'inconspicuously',
'indemnification',
'individualistic',
'industriousness',
'ineffectiveness',
'infinitesimally',
'inquisitiveness',
'instantaneously',
'instrumentalist',
'instrumentality',
'instrumentation',
'insubordination',
'insurrectionist',
'intelligibility',
'intensification',
'interchangeable',
'intercollegiate',
'interconnection',
'interdependence',
'internationally',
'interscholastic',
'invulnerability',
'kindheartedness',
'lackadaisically',
'maneuverability',
'materialization',
'microscopically',
'miniaturization',
'misapprehension',
'mischievousness',
'misconstruction',
'nationalization',
'nearsightedness',
'noninterference',
'nonintervention',
'nonprescription',
'nonprofessional',
'nontransferable',
'notwithstanding',
'ophthalmologist',
'parenthetically',
'parliamentarian',
'personification',
'pessimistically',
'philosophically',
'phosphorescence',
'photojournalism',
'physiotherapist',
'plainclothesman',
'plenipotentiary',
'polyunsaturated',
'pretentiousness',
'problematically',
'procrastination',
'professionalism',
'prognostication',
'proportionately',
'psychologically',
'psychotherapist',
'rationalization',
'reapportionment',
'reconsideration',
'resourcefulness',
'retrospectively',
'sanctimoniously',
'standardization',
'straightforward',
'superintendence',
'surreptitiously',
'syllabification',
'sympathetically',
'synchronization',
'technologically',
'temperamentally',
'thoughtlessness',
'totalitarianism',
'transfiguration',
'transplantation',
'trustworthiness',
'typographically',
'unceremoniously',
'uncommunicative',
'uncomplimentary',
'uncomprehending',
'unconditionally',
'unconsciousness',
'undemonstrative',
'underprivileged',
'understandingly',
'undistinguished',
'unexceptionable',
'unintentionally',
'unobjectionable',
'unpronounceable',
'unquestioningly',
'unrealistically',
'unsophisticated',
'unsportsmanlike',
'unsubstantiated',
'whatchamacallit'
]
|
apache-2.0
|
Doctor777/d8
|
themes/custom/my_bootstrap/node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py
|
1569
|
23354
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest, convert_to_binary):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
    except Exception:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
      # Xcode supports various suffixes on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
    This invokes 'actool' to compile all the input .xcassets files. The
    |keys| argument is a json-encoded dictionary of extra arguments to
    pass to 'actool' when the asset catalog contains an application icon
    or a launch image.
    Note that 'actool' does not create the Assets.car file if the asset
    catalog does not contain an imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if inputs path are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
    2. pick the provisioning profile that best matches the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
    Checks all the installed provisioning profiles (or, if the user specified
    the PROVISIONING_PROFILE variable, only that one) and selects the most
    specific one that corresponds to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
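# Illustrative example of _ExpandVariables (all values below are hypothetical):
#
#   >>> tool = MacTool()
#   >>> tool._ExpandVariables(
#   ...     {'application-identifier': '$(AppIdentifierPrefix)$(CFBundleIdentifier)'},
#   ...     {'AppIdentifierPrefix': 'ABCDE12345.',
#   ...      'CFBundleIdentifier': 'org.example.app'})
#   {'application-identifier': 'ABCDE12345.org.example.app'}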
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
gpl-2.0
|
spbguru/repo1
|
tests/unit/py2/nupic/encoders/vector_test.py
|
4
|
3552
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for VectorEncoder."""
CL_VERBOSITY = 0
import unittest2 as unittest
from nupic.encoders.vector import VectorEncoder, VectorEncoderOPF, SimpleVectorEncoder
from nupic.encoders.scalar import ScalarEncoder
class VectorEncoderTest(unittest.TestCase):
"""Unit tests for VectorEncoder class."""
def setUp(self):
self._tmp = None # to pass around values
def testInitialization(self):
e = VectorEncoder(3, ScalarEncoder(21, 0, 10, n=200), name="vec")
self.assertIsInstance(e, VectorEncoder)
def testEncoding(self):
s = ScalarEncoder(1,1,3,n=3, name='idx', forced=True)
v = VectorEncoder(3, s, typeCastFn=float)
data=[1,2,3]
print "data=", data
# encode
enc = v.encode(data)
print "encoded=", enc
correct = [1,0,0,0,1,0,0,0,1]
self.assertTrue((enc==correct).all(), "Did not encode correctly")
def testDecoding(self):
s = ScalarEncoder(1,1,3,n=3, name='idx', forced=True)
v = VectorEncoder(3, s, typeCastFn=float)
data=[1,2,3]
enc = v.encode(data)
#decode
dec = v.decode(enc)
print "decoded=", dec
res= v.getData(dec)
self.assertEqual(data, res, "Decoded data not equal to original")
def testVectorEncoderOPFInstance(self):
"""calling VectorEncoder from OPF"""
opfVect = VectorEncoderOPF(3, 1, 3, n=211, w=21, dataType="int")
data=[1,2,3]
enc=opfVect.encode(data)
dec=opfVect.decode(enc)
data2=opfVect.getData(dec)
self.assertEqual(data, data2, "VectorEncoderOPF did not encode/decode correctly.")
def testVectorEncoderOPFTypeCast(self):
"""for calling from OPF, use this to cast data type"""
opfVect = VectorEncoderOPF(3, 1, 3, n=300, w=21, dataType="str")
data=[1,2,3]
enc=opfVect.encode(data)
dec=opfVect.decode(enc)
data2=opfVect.getData(dec)
self.assertIsInstance(data2[0], str, "VectorEncoderOPF did not cast output to str(ing)")
opfVect = VectorEncoderOPF(3, 1, 3, n=300, w=21, dataType="int")
data=[1,2,3]
enc=opfVect.encode(data)
dec=opfVect.decode(enc)
data2=opfVect.getData(dec)
self.assertIsInstance(data2[0], int, "VectorEncoderOPF did not cast output to int")
def testSimpleVectorEncoderInstance(self):
""" simple demo version"""
simpleVect = SimpleVectorEncoder()
data=[1.0, 2.0, 3.0, 4.0, 5.0]
enc=simpleVect.encode(data)
dec=simpleVect.decode(enc)
data2=simpleVect.getData(dec)
self.assertEqual(data, data2, "Simple vector did not encode/decode correctly")
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|