repo_name (stringlengths 5–92) | path (stringlengths 4–221) | copies (stringclasses, 19 values) | size (stringlengths 4–6) | content (stringlengths 766–896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
kallimachos/archive | games/sonar.py | 1 | 6342 |
# Sonar
import random
import sys
def drawBoard(board):
    # Draw the board data structure.
    hline = ' ' # initial space for the numbers down the left side of the board
    for i in range(1, 6):
        hline += (' ' * 9) + str(i)
    # print the numbers across the top
    print(hline)
    print(' ' + ('0123456789' * 6))
    print()
    # print each of the 15 rows
    for i in range(15):
        # single-digit numbers need to be padded with an extra space
        if i < 10:
            extraSpace = ' '
        else:
            extraSpace = ''
        print('%s%s %s %s' % (extraSpace, i, getRow(board, i), i))
    # print the numbers across the bottom
    print()
    print(' ' + ('0123456789' * 6))
    print(hline)
def getRow(board, row):
    # Return a string from the board data structure at a certain row.
    boardRow = ''
    for i in range(60):
        boardRow += board[i][row]
    return boardRow
def getNewBoard():
    # Create a new 60x15 board data structure.
    board = []
    for x in range(60): # the main list is a list of 60 lists
        board.append([])
        for y in range(15): # each list in the main list has 15 single-character strings
            # use different characters for the ocean to make it more readable.
            if random.randint(0, 1) == 0:
                board[x].append('~')
            else:
                board[x].append('`')
    return board
def getRandomChests(numChests):
    # Create a list of chest data structures (two-item lists of x, y int coordinates)
    chests = []
    for i in range(numChests):
        chests.append([random.randint(0, 59), random.randint(0, 14)])
    return chests
def isValidMove(x, y):
    # Return True if the coordinates are on the board, otherwise False.
    return x >= 0 and x <= 59 and y >= 0 and y <= 14
def makeMove(board, chests, x, y):
    # Change the board data structure with a sonar device character. Remove treasure chests
    # from the chests list as they are found. Return False if this is an invalid move.
    # Otherwise, return the string of the result of this move.
    if not isValidMove(x, y):
        return False
    smallestDistance = 100 # any chest will be closer than 100.
    for cx, cy in chests:
        if abs(cx - x) > abs(cy - y):
            distance = abs(cx - x)
        else:
            distance = abs(cy - y)
        if distance < smallestDistance: # we want the closest treasure chest.
            smallestDistance = distance
    if smallestDistance == 0:
        # xy is directly on a treasure chest!
        chests.remove([x, y])
        return 'You have found a sunken treasure chest!'
    else:
        if smallestDistance < 10:
            board[x][y] = str(smallestDistance)
            return 'Treasure detected at a distance of %s from the sonar device.' % (smallestDistance)
        else:
            board[x][y] = 'O'
            return 'Sonar did not detect anything. All treasure chests out of range.'
def enterPlayerMove():
    # Let the player type in her move. Return a two-item list of int xy coordinates.
    print('Where do you want to drop the next sonar device? (0-59 0-14) (or type quit)')
    while True:
        move = raw_input()
        if move.lower() == 'quit':
            print('Thanks for playing!')
            sys.exit()
        move = move.split()
        if len(move) == 2 and move[0].isdigit() and move[1].isdigit() and isValidMove(int(move[0]), int(move[1])):
            return [int(move[0]), int(move[1])]
        print('Enter a number from 0 to 59, a space, then a number from 0 to 14.')
def playAgain():
    # This function returns True if the player wants to play again, otherwise it returns False.
    print('Do you want to play again? (yes or no)')
    return raw_input().lower().startswith('y')
def showInstructions():
    print('''Instructions:
You are the captain of the Simon, a treasure-hunting ship. Your current mission
is to find the three sunken treasure chests that are lurking in the part of the
ocean you are in and collect them.
To play, enter the coordinates of the point in the ocean you wish to drop a
sonar device. The sonar can find out how far away the closest chest is to it.
For example, the d below marks where the device was dropped, and the 2's
represent distances of 2 away from the device. The 4's represent
distances of 4 away from the device.
444444444
4 4
4 22222 4
4 2 2 4
4 2 d 2 4
4 2 2 4
4 22222 4
4 4
444444444
Press enter to continue...''')
    raw_input()
    print('''For example, here is a treasure chest (the c) located a distance of 2 away
from the sonar device (the d):
22222
c 2
2 d 2
2 2
22222
The point where the device was dropped will be marked with a d.
The treasure chests don't move around. Sonar devices can detect treasure
chests up to a distance of 9. If all chests are out of range, the point
will be marked with O
If a device is directly dropped on a treasure chest, you have discovered
the location of the chest, and it will be collected. The sonar device will
remain there.
When you collect a chest, all sonar devices will update to locate the next
closest sunken treasure chest.
Press enter to continue...''')
    raw_input()
print()
print('S O N A R !')
print ""
print('Would you like to view the instructions? (yes/no)')
if raw_input().lower().startswith('y'):
    showInstructions()
while True:
    # game setup
    sonarDevices = 16
    theBoard = getNewBoard()
    theChests = getRandomChests(3)
    drawBoard(theBoard)
    previousMoves = []
    while sonarDevices > 0:
        # Start of a turn:
        # show sonar device/chest status
        if sonarDevices > 1: extraSsonar = 's'
        else: extraSsonar = ''
        if len(theChests) > 1: extraSchest = 's'
        else: extraSchest = ''
        print('You have %s sonar device%s left. %s treasure chest%s remaining.' % (sonarDevices, extraSsonar, len(theChests), extraSchest))
        x, y = enterPlayerMove()
        previousMoves.append([x, y]) # we must track all moves so that sonar devices can be updated.
        moveResult = makeMove(theBoard, theChests, x, y)
        if moveResult == False:
            continue
        else:
            if moveResult == 'You have found a sunken treasure chest!':
                # update all the sonar devices currently on the map.
                for x, y in previousMoves:
                    makeMove(theBoard, theChests, x, y)
            drawBoard(theBoard)
            print(moveResult)
        if len(theChests) == 0:
            print('You have found all the sunken treasure chests! Congratulations and good game!')
            break
        sonarDevices -= 1
    if sonarDevices == 0:
        print('We\'ve run out of sonar devices! Now we have to turn the ship around and head')
        print('for home with treasure chests still out there! Game over.')
        print(' The remaining chests were here:')
        for x, y in theChests:
            print(' %s, %s' % (x, y))
    if not playAgain():
        sys.exit()
| gpl-3.0 | -1,569,769,953,806,892,800 | 35.034091 | 133 | 0.694734 | false | 3.1226 | false | false | false |
fernandog/Medusa | ext/sqlalchemy/orm/descriptor_props.py | 1 | 27751 |
# orm/descriptor_props.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from .interfaces import MapperProperty, PropComparator
from .util import _none_set
from . import attributes
from .. import util, sql, exc as sa_exc, event, schema
from ..sql import expression
from . import properties
from . import query
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
def instrument_class(self, mapper):
prop = self
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
collection = False
def __init__(self, key):
self.key = key
if hasattr(prop, 'get_history'):
def get_history(self, state, dict_,
passive=attributes.PASSIVE_OFF):
return prop.get_history(state, dict_, passive)
if self.descriptor is None:
desc = getattr(mapper.class_, self.key, None)
if mapper._is_userland_descriptor(desc):
self.descriptor = desc
if self.descriptor is None:
def fset(obj, value):
setattr(obj, self.name, value)
def fdel(obj):
delattr(obj, self.name)
def fget(obj):
return getattr(obj, self.name)
self.descriptor = property(
fget=fget,
fset=fset,
fdel=fdel,
)
proxy_attr = attributes.create_proxied_attribute(
self.descriptor)(
self.parent.class_,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc,
original_property=self
)
proxy_attr.impl = _ProxyImpl(self.key)
mapper.class_manager.instrument_attribute(self.key, proxy_attr)
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class CompositeProperty(DescriptorProperty):
"""Defines a "composite" mapped attribute, representing a collection
of columns as one attribute.
:class:`.CompositeProperty` is constructed using the :func:`.composite`
function.
.. seealso::
:ref:`mapper_composite`
"""
def __init__(self, class_, *attrs, **kwargs):
r"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a
full usage example.
The :class:`.MapperProperty` returned by :func:`.composite`
is the :class:`.CompositeProperty`.
:param class\_:
The "composite type" class.
:param \*cols:
List of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. See the same flag on :func:`.column_property`.
.. versionchanged:: 0.7
This flag specifically becomes meaningful
- previously it was a placeholder.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does
not load immediately, and is instead loaded when the attribute is
first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL
clause generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param extension:
an :class:`.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the
class. **Deprecated.** Please see :class:`.AttributeEvents`.
"""
super(CompositeProperty, self).__init__()
self.attrs = attrs
self.composite_class = class_
self.active_history = kwargs.get('active_history', False)
self.deferred = kwargs.get('deferred', False)
self.group = kwargs.get('group', None)
self.comparator_factory = kwargs.pop('comparator_factory',
self.__class__.Comparator)
if 'info' in kwargs:
self.info = kwargs.pop('info')
util.set_creation_order(self)
self._create_descriptor()
def instrument_class(self, mapper):
super(CompositeProperty, self).instrument_class(mapper)
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
def fget(instance):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
if self.key not in dict_:
# key not present. Iterate through related
# attributes, retrieve their values. This
# ensures they all load.
values = [
getattr(instance, key)
for key in self._attribute_keys
]
# current expected behavior here is that the composite is
# created on access if the object is persistent or if
# col attributes have non-None. This would be better
# if the composite were created unconditionally,
# but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or
not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
state.manager.dispatch.refresh(state, None, [self.key])
return dict_.get(self.key, None)
def fset(instance, value):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
for key in self._attribute_keys:
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys,
value.__composite_values__()):
setattr(instance, key, value)
def fdel(instance):
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self.descriptor = property(fget, fset, fdel)
@util.memoized_property
def _comparable_elements(self):
return [
getattr(self.parent.class_, prop.key)
for prop in self.props
]
@util.memoized_property
def props(self):
props = []
for attr in self.attrs:
if isinstance(attr, str):
prop = self.parent.get_property(
attr, _configure_mappers=False)
elif isinstance(attr, schema.Column):
prop = self.parent._columntoproperty[attr]
elif isinstance(attr, attributes.InstrumentedAttribute):
prop = attr.property
else:
raise sa_exc.ArgumentError(
"Composite expects Column objects or mapped "
"attributes/attribute names as arguments, got: %r"
% (attr,))
props.append(prop)
return props
@property
def columns(self):
return [a for a in self.attrs if isinstance(a, schema.Column)]
def _setup_arguments_on_columns(self):
"""Propagate configuration arguments made on this composite
to the target columns, for those that apply.
"""
for prop in self.props:
prop.active_history = self.active_history
if self.deferred:
prop.deferred = self.deferred
prop.strategy_key = (
("deferred", True),
("instrument", True))
prop.group = self.group
def _setup_event_handlers(self):
"""Establish events that populate/expire the composite attribute."""
def load_handler(state, *args):
_load_refresh_handler(state, args, is_refresh=False)
def refresh_handler(state, *args):
_load_refresh_handler(state, args, is_refresh=True)
def _load_refresh_handler(state, args, is_refresh):
dict_ = state.dict
if not is_refresh and self.key in dict_:
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
return
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in
self._attribute_keys]
)
def expire_handler(state, keys):
if keys is None or set(self._attribute_keys).intersection(keys):
state.dict.pop(self.key, None)
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
defaults. Pop out the composite value here so that it
recreates.
"""
state.dict.pop(self.key, None)
event.listen(self.parent, 'after_insert',
insert_update_handler, raw=True)
event.listen(self.parent, 'after_update',
insert_update_handler, raw=True)
event.listen(self.parent, 'load',
load_handler, raw=True, propagate=True)
event.listen(self.parent, 'refresh',
refresh_handler, raw=True, propagate=True)
event.listen(self.parent, 'expire',
expire_handler, raw=True, propagate=True)
# TODO: need a deserialize hook here
@util.memoized_property
def _attribute_keys(self):
return [
prop.key for prop in self.props
]
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
"""Provided for userland code that uses attributes.get_history()."""
added = []
deleted = []
has_history = False
for prop in self.props:
key = prop.key
hist = state.manager[key].impl.get_history(state, dict_)
if hist.has_changes():
has_history = True
non_deleted = hist.non_deleted()
if non_deleted:
added.extend(non_deleted)
else:
added.append(None)
if hist.deleted:
deleted.extend(hist.deleted)
else:
deleted.append(None)
if has_history:
return attributes.History(
[self.composite_class(*added)],
(),
[self.composite_class(*deleted)]
)
else:
return attributes.History(
(), [self.composite_class(*added)], ()
)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
class CompositeBundle(query.Bundle):
def __init__(self, property, expr):
self.property = property
super(CompositeProperty.CompositeBundle, self).__init__(
property.key, *expr)
def create_row_processor(self, query, procs, labels):
def proc(row):
return self.property.composite_class(
*[proc(row) for proc in procs])
return proc
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.CompositeProperty` attributes.
See the example in :ref:`composite_operations` for an overview
of usage , as well as the documentation for :class:`.PropComparator`.
See also:
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__hash__ = None
@property
def clauses(self):
return self.__clause_element__()
def __clause_element__(self):
return expression.ClauseList(
group=False, *self._comparable_elements)
def _query_clause_element(self):
return CompositeProperty.CompositeBundle(
self.prop, self.__clause_element__())
def _bulk_update_tuples(self, value):
if value is None:
values = [None for key in self.prop._attribute_keys]
elif isinstance(value, self.prop.composite_class):
values = value.__composite_values__()
else:
raise sa_exc.ArgumentError(
"Can't UPDATE composite attribute %s to %r" %
(self.prop, value))
return zip(
self._comparable_elements,
values
)
@util.memoized_property
def _comparable_elements(self):
if self._adapt_to_entity:
return [
getattr(
self._adapt_to_entity.entity,
prop.key
) for prop in self.prop._comparable_elements
]
else:
return self.prop._comparable_elements
def __eq__(self, other):
if other is None:
values = [None] * len(self.prop._comparable_elements)
else:
values = other.__composite_values__()
comparisons = [
a == b
for a, b in zip(self.prop._comparable_elements, values)
]
if self._adapt_to_entity:
comparisons = [self.adapter(x) for x in comparisons]
return sql.and_(*comparisons)
def __ne__(self, other):
return sql.not_(self.__eq__(other))
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(self, mapper):
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if not isinstance(p, ConcreteInheritedProperty):
comparator_callable = p.comparator_factory
break
return comparator_callable
def __init__(self):
super(ConcreteInheritedProperty, self).__init__()
def warn():
raise AttributeError("Concrete %s does not implement "
"attribute %r at the instance level. Add "
"this property explicitly to %s." %
(self.parent, self.key, self.parent))
class NoninheritedConcreteProp(object):
def __set__(s, obj, value):
warn()
def __delete__(s, obj):
warn()
def __get__(s, obj, owner):
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class SynonymProperty(DescriptorProperty):
def __init__(self, name, map_column=None,
descriptor=None, comparator_factory=None,
doc=None, info=None):
"""Denote an attribute name as a synonym to a mapped property,
in that the attribute will mirror the value and expression behavior
of another attribute.
e.g.::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
job_status = Column(String(50))
status = synonym("job_status")
:param name: the name of the existing mapped property. This
can refer to the string name ORM-mapped attribute
configured on the class, including column-bound attributes
and relationships.
:param descriptor: a Python :term:`descriptor` that will be used
as a getter (and potentially a setter) when this attribute is
accessed at the instance level.
:param map_column: **For classical mappings and mappings against
an existing Table object only**. if ``True``, the :func:`.synonym`
construct will locate the :class:`.Column` object upon the mapped
table that would normally be associated with the attribute name of
this synonym, and produce a new :class:`.ColumnProperty` that instead
maps this :class:`.Column` to the alternate name given as the "name"
argument of the synonym; in this way, the usual step of redefining
the mapping of the :class:`.Column` to be under a different name is
unnecessary. This is usually intended to be used when a
:class:`.Column` is to be replaced with an attribute that also uses a
descriptor, that is, in conjunction with the
:paramref:`.synonym.descriptor` parameter::
my_table = Table(
"my_table", metadata,
Column('id', Integer, primary_key=True),
Column('job_status', String(50))
)
class MyClass(object):
@property
def _job_status_descriptor(self):
return "Status: %s" % self._job_status
mapper(
MyClass, my_table, properties={
"job_status": synonym(
"_job_status", map_column=True,
descriptor=MyClass._job_status_descriptor)
}
)
Above, the attribute named ``_job_status`` is automatically
mapped to the ``job_status`` column::
>>> j1 = MyClass()
>>> j1._job_status = "employed"
>>> j1.job_status
Status: employed
When using Declarative, in order to provide a descriptor in
conjunction with a synonym, use the
:func:`sqlalchemy.ext.declarative.synonym_for` helper. However,
note that the :ref:`hybrid properties <mapper_hybrids>` feature
should usually be preferred, particularly when redefining attribute
behavior.
:param info: Optional data dictionary which will be populated into the
:attr:`.InspectionAttr.info` attribute of this object.
.. versionadded:: 1.0.0
:param comparator_factory: A subclass of :class:`.PropComparator`
that will provide custom comparison behavior at the SQL expression
level.
.. note::
For the use case of providing an attribute which redefines both
Python-level and SQL-expression level behavior of an attribute,
please refer to the Hybrid attribute introduced at
:ref:`mapper_hybrids` for a more effective technique.
.. seealso::
:ref:`synonyms` - Overview of synonyms
:func:`.synonym_for` - a helper oriented towards Declarative
:ref:`mapper_hybrids` - The Hybrid Attribute extension provides an
updated approach to augmenting attribute behavior more flexibly
than can be achieved with synonyms.
"""
super(SynonymProperty, self).__init__()
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
if info:
self.info = info
util.set_creation_order(self)
# TODO: when initialized, check _proxied_property,
# emit a warning if its not a column-based property
@util.memoized_property
def _proxied_property(self):
attr = getattr(self.parent.class_, self.name)
if not hasattr(attr, 'property') or not \
isinstance(attr.property, MapperProperty):
raise sa_exc.InvalidRequestError(
"""synonym() attribute "%s.%s" only supports """
"""ORM mapped attributes, got %r""" % (
self.parent.class_.__name__,
self.name,
attr
)
)
return attr.property
def _comparator_factory(self, mapper):
prop = self._proxied_property
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
def set_parent(self, parent, init):
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.mapped_table.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (self.name, parent.mapped_table.description, self.key))
elif parent.mapped_table.c[self.key] in \
parent._columntoproperty and \
parent._columntoproperty[
parent.mapped_table.c[self.key]
].key == self.name:
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" %
(self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(parent.mapped_table.c[self.key])
parent._configure_property(
self.name, p,
init=init,
setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ComparableProperty(DescriptorProperty):
"""Instruments a Python property for use in query expressions."""
def __init__(
self, comparator_factory, descriptor=None, doc=None, info=None):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
.. versionchanged:: 0.7
:func:`.comparable_property` is superseded by
the :mod:`~sqlalchemy.ext.hybrid` extension. See the example
at :ref:`hybrid_custom_comparators`.
Allows any Python descriptor to behave like a SQL-enabled
attribute when used at the class level in queries, allowing
redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate`
to wrap both sides of an expression in ``func.lower()`` to produce
case-insensitive comparison::
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
from sqlalchemy import Integer, String, Column
from sqlalchemy.ext.declarative import declarative_base
class CaseInsensitiveComparator(PropComparator):
def __clause_element__(self):
return self.prop
def operate(self, op, other):
return op(
func.lower(self.__clause_element__()),
func.lower(other)
)
Base = declarative_base()
class SearchWord(Base):
__tablename__ = 'search_word'
id = Column(Integer, primary_key=True)
word = Column(String)
word_insensitive = comparable_property(lambda prop, mapper:
CaseInsensitiveComparator(
mapper.c.word, mapper)
)
A mapping like the above allows the ``word_insensitive`` attribute
to render an expression like::
>>> print SearchWord.word_insensitive == "Trucks"
lower(search_word.word) = lower(:lower_1)
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
:param info: Optional data dictionary which will be populated into the
:attr:`.InspectionAttr.info` attribute of this object.
.. versionadded:: 1.0.0
"""
super(ComparableProperty, self).__init__()
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
if info:
self.info = info
util.set_creation_order(self)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
| gpl-3.0 | 7,448,052,880,767,229,000 | 34.993515 | 79 | 0.569097 | false | 4.754326 | false | false | false |
fy/compare_dp_mechanisms | notebooks/utility_functions.py | 1 | 3409 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Utility functions.
# <codecell>
import numpy as np
# <codecell>
def get_chisq_sensitivity(NN_case, NN_control):
    """sensitivity for the chi-square statistic based on 2x3 genotype tables"""
    NN = NN_case + NN_control # total number of subjects
    CC_max = max(NN_case, NN_control)
    CC_min = min(NN_case, NN_control)
    sensitivity = 1. * NN**2 / (CC_min * (CC_max + 1)) # sensitivity of chisq
    return sensitivity
# <codecell>
def get_allelic_test_sensitivity(NN_case, NN_control):
    """sensitivity for the chi-square statistic based on 2x2 allelic tables derived from 2x3 genotype tables"""
    def sensitivity_type_1(SS, RR):
        NN = SS + RR
        return 1.0 * 8 * NN**2 * SS / \
            (RR * (2 * SS + 3) * (2 * SS + 1))
    def sensitivity_type_2(SS, RR):
        NN = SS + RR
        return 1.0 * 4 * NN**2 * ((2 * RR**2 - 1) * (2 * SS - 1) - 1) / \
            (SS * RR * (2 * RR + 1) * (2 * RR - 1) * (2 * SS + 1))
    return np.max([sensitivity_type_1(NN_case, NN_control),
                   sensitivity_type_1(NN_control, NN_case),
                   sensitivity_type_2(NN_case, NN_control),
                   sensitivity_type_2(NN_control, NN_case)])
# <codecell>
def check_table_valid(input_table):
    """Make sure that the margins (row sums and column sums) are all positive.
    Args:
        input_table: A 2x3 numpy matrix.
    """
    ## check zero margins
    rowsum = np.array(map(np.sum, input_table))
    colsum = np.array(map(np.sum, input_table.T))
    if np.any(rowsum == 0) or np.any(colsum == 0):
        return False
    else:
        return True
# <codecell>
def chisq_stat(input_table):
    """Calculate the Pearson's chi-square statistic.
    Args:
        input_table: A 2x3 numpy matrix.
    Returns:
        A tuple (chisquare_statistics, degree_of_freedom).
    """
    input_table = input_table.astype(float)
    rowsum = np.array(map(np.sum, input_table))
    colsum = np.array(map(np.sum, input_table.T))
    expected = np.outer(rowsum, colsum) / np.sum(rowsum)
    # df = (len([1 for rr in rowsum if rr > 0]) - 1) * \
    #      (len([1 for cc in colsum if cc > 0]) - 1)
    chisq = np.sum(np.array(input_table[expected > 0] -
                            expected[expected > 0]) ** 2 /
                   expected[expected > 0])
    # return (chisq, df)
    return chisq
# <codecell>
def chisq_gradient(input_table):
    """Return the changeable part of the gradient of the chi-square statistic.
    Args:
        input_table: A 2x3 numpy matrix.
    Returns:
        A four-element tuple consisting of the partial derivatives based on the
        parametrization of the chi-square statistic by (r0, r1, n0, n1). The
        full parametrization would be
        (r0, r1, r2, s0, s1, s2, n0, n1, n2), where ri + si = ni. The returned
        value will be scaled down by N^2 / (R * S).
    """
    input_table = input_table.astype(float)
    colsum = np.array(map(np.sum, input_table.T))
    ## divide each cell by colsum
    fraction_table = input_table / colsum
    dy_dr0, dy_dr1 = [2 * fraction_table[0, ii] - 2 * fraction_table[0, 2] for
                      ii in [0, 1]]
    dy_dn0, dy_dn1 = [-fraction_table[0, ii] ** 2 + fraction_table[0, 2] ** 2 for
                      ii in [0, 1]]
    return (dy_dr0, dy_dr1, dy_dn0, dy_dn1)
| mit | 3,960,275,843,723,750,000 | 32.097087 | 111 | 0.576709 | false | 3.032918 | false | false | false |
Flavsditz/projects | eyeTracking/pupil/pupil_src/shared_modules/uvc_capture/mac_video/cf_string.py | 1 | 2685 |
'''
(*)~----------------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2013 Moritz Kassner & William Patera
Distributed under the terms of the CC BY-NC-SA License.
License details are in the file license.txt, distributed as part of this software.
----------------------------------------------------------------------------------~(*)
'''
# Python string to/from CFString conversion helper functions:
from ctypes import *
from ctypes import util
cf = cdll.LoadLibrary(util.find_library('CoreFoundation'))
# Setup return types for functions that return pointers.
# (Otherwise ctypes returns 32-bit int which breaks on 64-bit systems.)
# Note that you must also wrap the return value with c_void_p before
# you use it as an argument to another function, otherwise ctypes will
# automatically convert it back to a 32-bit int again.
cf.CFDictionaryCreateMutable.restype = c_void_p
cf.CFStringCreateWithCString.restype = c_void_p
cf.CFAttributedStringCreate.restype = c_void_p
cf.CFDataCreate.restype = c_void_p
cf.CFNumberCreate.restype = c_void_p
# Core Foundation constants
kCFStringEncodingUTF8 = 0x08000100
kCFStringEncodingMacRoman = 0
kCFStringEncodingWindowsLatin1 = 0x0500
kCFStringEncodingISOLatin1 = 0x0201
kCFStringEncodingNextStepLatin = 0x0B01
kCFStringEncodingASCII = 0x0600
kCFStringEncodingUnicode = 0x0100
kCFStringEncodingUTF8 = 0x08000100
kCFStringEncodingNonLossyASCII = 0x0BFF
kCFStringEncodingUTF16 = 0x0100
kCFStringEncodingUTF16BE = 0x10000100
kCFStringEncodingUTF16LE = 0x14000100
kCFStringEncodingUTF32 = 0x0c000100
kCFStringEncodingUTF32BE = 0x18000100
kCFStringEncodingUTF32LE = 0x1c000100
kCFNumberSInt32Type = 3
def CFSTR(text):
    return c_void_p(cf.CFStringCreateWithCString(None, text.encode('utf8'), kCFStringEncodingASCII))
def cfstring_to_string(cfstring):
    length = cf.CFStringGetLength(cfstring)
    size = cf.CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingASCII)
    buffer = c_buffer(size + 1)
    result = cf.CFStringGetCString(cfstring, buffer, len(buffer), kCFStringEncodingASCII)
    if result:
        return buffer.value
def cfstring_to_string_release(cfstring):
    length = cf.CFStringGetLength(cfstring)
    size = cf.CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingASCII)
    buffer = c_buffer(size + 1)
    result = cf.CFStringGetCString(cfstring, buffer, len(buffer), kCFStringEncodingASCII)
    cf.CFRelease(cfstring)
    if result:
        return buffer.value
def release(cfstring):
    cf.CFRelease(cfstring)
if __name__ == '__main__':
    cf_pointer = CFSTR("THIS is a Test")
    print cfstring_to_string(cf_pointer)
| gpl-2.0 | -7,743,753,104,019,920,000 | 35.283784 | 100 | 0.731844 | false | 3.451157 | false | false | false |
1Strategy/security-fairy | aws_iam_policy.py | 1 | 5068 |
import logging
import json
import re
class IAMPolicy:
def __init__(self, logging_level = logging.DEBUG):
logging.basicConfig(level=logging_level)
self.statements = []
self.service_actions = {}
self.max_policy_size = {
'user' : 2048, # User policy size cannot exceed 2,048 characters
'role' : 10240, # Role policy size cannot exceed 10,240 characters
'group': 5120 # Group policy size cannot exceed 5,120 characters
}
def __add_statement__(self, statement):
if not isinstance(statement, IAMStatement):
raise Exception('This Method only supports objects of type IAMStatement')
self.statements.append(statement)
def add_actions(self, statement_actions):
for statement_action in statement_actions:
self.add_action(statement_action)
def add_action(self, statement_action):
split_statement_action = statement_action.split(':')
if len(split_statement_action) != 2:
raise InvalidStatementAction('Invalid Statement: {action} Statement must be \'service:api-action\'.'.format(action=action))
service = self.__get_service_alias__(split_statement_action[0])
if service == 'lambda':
# Checks for extraneous lambda api version information:
# e.g. lambda:ListTags20170331
# lambda:GetFunctionConfiguration20150331v2"
# lambda:"UpdateFunctionCode20150331v2"
api_version_info = re.findall(r"(\d+v\d+)|(\d+)", split_statement_action[1])
if api_version_info:
for api_version in api_version_info[0]:
logging.debug(api_version)
if api_version is not '':
action = split_statement_action[1].replace(api_version,'')
else:
action = split_statement_action[1]
else:
action = split_statement_action[1]
logging.debug(statement_action)
logging.debug(self.service_actions.get(service))
if self.service_actions.get(service) is None:
self.service_actions[service] = []
if not action in self.service_actions[service]:
self.service_actions[service].append(action)
logging.debug("Action added: {service}:{action}".format(service=service, action=action))
def __get_service_alias__(self, service):
service_aliases = {
"monitoring": "cloudwatch"
}
return service_aliases.get(service, service)
def __build_statements__(self):
for service in self.service_actions:
actions_per_service = []
for action in self.service_actions[service]:
actions_per_service.append(service+":"+action)
statement = IAMStatement( effect="Allow",
actions=actions_per_service,
resource="*",
sid='SecurityFairyBuilt{service}Policy'.format(service=service.capitalize())
)
self.__add_statement__(statement)
def get_policy(self):
self.__build_statements__()
built_policy_statements = []
for statement in self.statements:
built_policy_statements.append(statement.get_statement())
policy = {
"Version": "2012-10-17",
"Statement": built_policy_statements
}
logging.debug(policy)
return policy
def print_policy(self):
return json.dumps(self.get_policy())
class IAMStatement:
def __init__(self, effect, actions, resource, sid='', logging_level = logging.DEBUG):
logging.basicConfig(level=logging_level)
self.validate_statement(effect, actions, resource)
self.actions = actions
self.resource = resource
self.effect = effect
if sid != '':
self.sid = sid
def validate_statement(self, effect, actions, resource):
if not effect.lower() in ['allow', 'deny']:
logging.debug(effect)
raise InvalidStatementAction("Valid Effects are 'Allow' and 'Deny'.")
if not resource == '*':
logging.debug(resource)
raise Exception('Invalid Resource.')
logging.debug(actions)
for action in actions:
if len(action.split(':')) != 2:
raise InvalidStatementAction('Invalid Statement: {action} Statement must be \'service:api-action\'.'.format(action=action))
self.actions = actions
def get_statement(self):
if self.actions == []:
raise Exception('This statement has no Actions')
statement = {
"Effect": self.effect,
"Resource": self.resource,
"Action": self.actions
}
if self.sid != '':
statement['Sid'] = self.sid
return statement
| apache-2.0 | -6,934,768,332,968,634,000 | 37.105263 | 139 | 0.572612 | false | 4.553459 | false | false | false |
khalido/nd101 | tf_save_check.py | 1 | 1112 |
import tensorflow as tf
# The file path to save the data
save_file = './model.ckpt'
# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))
# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()
with tf.Session() as sess:
    # Initialize all the Variables
    sess.run(tf.global_variables_initializer())
    # Show the values of weights and bias
    print('Weights:')
    print(sess.run(weights))
    print('Bias:')
    print(sess.run(bias))
    # Save the model
    saver.save(sess, save_file)
# Remove the previous weights and bias
tf.reset_default_graph()
# Two Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))
# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()
with tf.Session() as sess:
    # Load the weights and bias
    saver.restore(sess, save_file)
    # Show the values of weights and bias
    print('Weight:')
    print(sess.run(weights))
    print('Bias:')
    print(sess.run(bias))
| gpl-3.0 | 3,808,513,290,078,029,300 | 24.883721 | 52 | 0.690647 | false | 3.507886 | false | false | false |
hakuya/higu | lib/hdbfs/query.py | 1 | 12573 |
import calendar
import datetime
import hdbfs
import model
class TagConstraint:
def __init__( self, tag ):
self.__tag = tag
def to_db_constraint( self, db ):
if( isinstance( self.__tag, hdbfs.Obj ) ):
tag = self.__tag
elif( isinstance( self.__tag, int ) ):
tag = db.get_object_by_id( self.__tag )
else:
tag = db.get_tag( self.__tag )
return db.session.query( model.Relation.child_id ) \
.filter( model.Relation.parent_id == tag.obj.object_id )
class StringConstraint:
def __init__( self, s ):
self.__s = s
def to_db_constraint( self, db ):
if( len( self.__s ) == 0 ):
sql_s = '%'
else:
sql_s = self.__s.replace( '%', '[%]' ) \
.replace( '*', '%' )
if( sql_s[0] != '%' ):
sql_s = '%' + sql_s
if( sql_s[-1] != '%' ):
sql_s = sql_s + '%'
return db.session.query( model.Object.object_id ) \
.filter( model.Object.name.like( sql_s ) )
class UnboundConstraint:
def __init__( self, s ):
self.__s = s
def to_db_constraint( self, db ):
try:
c = TagConstraint( self.__s )
db_c = c.to_db_constraint( db )
if( db_c is not None ):
return db_c
except:
pass
c = StringConstraint( self.__s )
return c.to_db_constraint( db )
def QueryInt( v, ceil = False ):
try:
# Try as int
return int( v )
except ValueError:
# Try as date
if( '_' in v ):
date_str, time_str = v.split( '_' )
else:
date_str = v
time_str = None
date_str = v.split( '/' )
year = int( date_str[0] )
dmon = int( date_str[1] ) if( len( date_str ) >= 2 ) else 1
dday = int( date_str[2] ) if( len( date_str ) >= 3 ) else 1
if( len( date_str ) >= 4 ):
raise ValueError
if( time_str is not None and len( date_str ) >= 3 ):
time_str = time_str.split( ':' )
hour = int( time_str[0] ) if( len( time_str ) >= 1 ) else 0
tmin = int( time_str[1] ) if( len( time_str ) >= 2 ) else 0
tsec = int( time_str[2] ) if( len( time_str ) >= 3 ) else 0
if( len( time_str ) >= 4 ):
raise ValueError
else:
hour = 0
tmin = 0
tsec = 0
if( ceil ):
if( len( date_str ) == 1 ):
year += 1
elif( len( date_str ) == 2 ):
dmon += 1
elif( len( date_str ) == 3 ):
if( time_str is None or len( time_str ) == 0 ):
dday += 1
elif( len( time_str ) == 1 ):
hour += 1
elif( len( time_str ) == 2 ):
tmin += 1
elif( len( time_str ) == 3 ):
tsec += 1
dt = datetime.datetime( year, dmon, dday, hour, tmin, tsec )
dt = calendar.timegm( dt.timetuple() )
if( ceil ):
dt -= 1
return dt
class ObjIdConstraint:
def __init__( self, op, value ):
from sqlalchemy import and_
if( op == '=' ):
self.__constraint = (model.Object.object_id == int( value ))
elif( op == '!=' ):
self.__constraint = (model.Object.object_id != int( value ))
elif( op == '>' ):
self.__constraint = (model.Object.object_id > int( value ))
elif( op == '>=' ):
self.__constraint = (model.Object.object_id >= int( value ))
elif( op == '<' ):
self.__constraint = (model.Object.object_id < int( value ))
elif( op == '<=' ):
self.__constraint = (model.Object.object_id <= int( value ))
elif( op == '~' ):
if( '-' in value ):
lower, upper = map( int, value.split( '-' ) )
elif( '|' in value ):
value, vrange = map( int, value.split( '|' ) )
lower = value - vrange
upper = value + vrange
else:
lower = int( value )
upper = lower
if( lower != upper ):
self.__constraint = and_( model.Object.object_id >= lower,
model.Object.object_id <= upper )
else:
self.__constraint = (model.Object.object_id == lower)
else:
assert False
def to_db_constraint( self, db ):
return db.session.query( model.Object.object_id ) \
.filter( self.__constraint )
class ParameterConstraint:
def __init__( self, key, op, value ):
from sqlalchemy import and_
self.__key = key
if( op == '=' ):
self.__constraint = (model.ObjectMetadata.value == str( value ))
elif( op == '!=' ):
self.__constraint = (model.ObjectMetadata.value != str( value ))
elif( op == '>' ):
self.__constraint = (model.ObjectMetadata.numeric > QueryInt( value ))
elif( op == '>=' ):
self.__constraint = (model.ObjectMetadata.numeric >= QueryInt( value ))
elif( op == '<' ):
self.__constraint = (model.ObjectMetadata.numeric < QueryInt( value ))
elif( op == '<=' ):
self.__constraint = (model.ObjectMetadata.numeric <= QueryInt( value ))
elif( op == '~' ):
if( '-' in value ):
lower, upper = map( QueryInt, value.split( '-' ) )
elif( '|' in value ):
value, vrange = value.split( '|' )
lower = QueryInt( value, False ) - int( vrange )
upper = QueryInt( value, True ) + int( vrange )
else:
lower = QueryInt( value, False )
upper = QueryInt( value, True )
if( lower != upper ):
self.__constraint = and_( model.ObjectMetadata.numeric >= lower,
model.ObjectMetadata.numeric <= upper )
else:
self.__constraint = (model.ObjectMetadata.numeric == lower)
else:
assert False
def to_db_constraint( self, db ):
from sqlalchemy import and_
return db.session.query( model.ObjectMetadata.object_id ) \
.filter( and_( model.ObjectMetadata.key == self.__key, \
self.__constraint ) )
class Query:
def __init__( self ):
self.__obj_type = None
self.__order_by = 'rand'
self.__order_desc = False
self.__strict = False
self.__req_constraints = []
self.__or_constraints = []
self.__not_constraints = []
def set_strict( self ):
self.__strict = True
def set_type( self, obj_type ):
self.__obj_type = obj_type
def set_order( self, prop, desc = False ):
self.__order_by = prop
self.__order_desc = desc
def add_require_constraint( self, constraint ):
self.__req_constraints.append( constraint )
def add_or_constraint( self, constraint ):
self.__or_constraints.append( constraint )
def add_not_constraint( self, constraint ):
self.__not_constraints.append( constraint )
def set_constraints( self, req_c = [], or_c = [], not_c = [] ):
self.__req_constraints = list( req_c )
self.__or_constraints = list( or_c )
self.__not_constraints = list( not_c )
def execute( self, db ):
to_db_c = lambda c: c.to_db_constraint( db )
if( len( self.__or_constraints ) > 0 ):
add_q = map( to_db_c, self.__or_constraints )
add_q = add_q[0].union( *add_q[1:] )
else:
add_q = None
if( len( self.__not_constraints ) > 0 ):
sub_q = map( to_db_c, self.__not_constraints )
sub_q = sub_q[0].union( *sub_q[1:] )
else:
sub_q = None
if( len( self.__req_constraints ) > 0 ):
req_q = map( to_db_c, self.__req_constraints )
req_q = req_q[0].intersect( *req_q[1:] )
else:
req_q = None
query = db.session.query( model.Object )
if( req_q is not None ):
q = req_q
if( add_q is not None ):
q = q.union( add_q )
query = query.filter( model.Object.object_id.in_( q ) )
elif( add_q is not None ):
query = query.filter( model.Object.object_id.in_( add_q ) )
if( sub_q is not None ):
query = query.filter( ~model.Object.object_id.in_( sub_q ) )
if( self.__obj_type is not None ):
query = query.filter( model.Object.object_type == self.__obj_type )
else:
query = query.filter( model.Object.object_type.in_( [
hdbfs.TYPE_FILE, hdbfs.TYPE_ALBUM ] ) )
if( self.__order_by == 'rand' ):
query = query.order_by( 'RANDOM()' )
elif( self.__order_by == 'add' ):
if( not self.__order_desc ):
query = query.order_by( model.Object.object_id )
else:
query = query.order_by( model.Object.object_id.desc() )
elif( self.__order_by == 'name' ):
if( not self.__order_desc ):
query = query.order_by( model.Object.name,
model.Object.object_id )
else:
query = query.order_by( model.Object.name.desc(),
model.Object.object_id.desc() )
elif( self.__order_by == 'origin' ):
query = query.join( model.ObjectMetadata )\
.filter( model.ObjectMetadata.key == 'origin_time' )
if( not self.__order_desc ):
query = query.order_by( model.ObjectMetadata.numeric,
model.Object.object_id )
else:
query = query.order_by( model.ObjectMetadata.numeric.desc(),
model.Object.object_id.desc() )
return hdbfs.ModelObjToHiguObjIterator( db, query )
def create_constraint( s ):
if( s.startswith( '@' ) ):
return StringConstraint( s[1:] )
elif( s.startswith( '#' ) ):
return TagConstraint( s[1:] )
elif( s.startswith( '&' ) ):
ops = [ '>=', '<=', '>', '<', '!=', '=', '~' ]
s = s[1:]
for i in ops:
try:
idx = s.index( i[0] )
key = s[0:idx]
op = i
value = s[idx+len(i[0]):]
if( key == 'id' ):
return ObjIdConstraint( op, value )
else:
return ParameterConstraint( key, op, value )
except ValueError:
pass
else:
raise ValueError, 'Bad Parameter Constraint'
else:
return UnboundConstraint( s )
def build_query( s ):
query = Query()
clauses = s.split( ' ' )
clauses = [i for i in clauses if( len( i ) > 0 )]
commands = [i[1:] for i in clauses if( i[0] == '$' )]
add = [i[1:] for i in clauses if( i[0] == '?' )]
sub = [i[1:] for i in clauses if( i[0] == '!' )]
req = [i for i in clauses if( i[0] != '$' and i[0] != '?' and i[0] != '!' )]
for cmd in commands:
cmd = cmd.split( ':' )
if( cmd[0] == 'strict' ):
query.set_strict()
elif( cmd[0] == 'sort' ):
if( len( cmd ) < 2 ):
raise ValueError, 'Sort command needs an argument'
desc = False
if( len( cmd ) > 2 and cmd[2] == 'desc' ):
desc = True
query.set_order( cmd[1], desc )
elif( cmd[0] == 'type' ):
if( len( cmd ) < 2 ):
raise ValueError, 'Type command needs an argument'
if( cmd[1] == 'file' ):
query.set_type( hdbfs.TYPE_FILE );
elif( cmd[1] == 'album' ):
query.set_type( hdbfs.TYPE_ALBUM );
else:
raise ValueError, 'Bad type'
else:
raise ValueError, 'Bad Command'
req = map( create_constraint, req )
add = map( create_constraint, add )
sub = map( create_constraint, sub )
query.set_constraints( req, add, sub )
return query
| bsd-2-clause | -7,394,012,294,911,807,000 | 30.198511 | 83 | 0.459318 | false | 3.879358 | false | false | false |
eloquence/unisubs | docs/conf.py | 1 | 6953 |
# -*- coding: utf-8 -*-
#
# Amara documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 25 14:58:38 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
sys.path.insert(0, os.path.abspath('../'))
import startup
startup.startup()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.httpdomain',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Amara'
copyright = u'2012, Participatory Culture Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2.0'
# The full version, including alpha/beta/rc tags.
release = '1.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
autodoc_member_order = 'bysource'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'UniversalSubtitlesdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'UniversalSubtitles.tex', u'Amara Documentation',
u'Participatory Culture Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'universalsubtitles', u'Amara Documentation',
[u'Participatory Culture Foundation'], 1)
]
| agpl-3.0 | -4,141,644,483,052,377,000 | 30.894495 | 80 | 0.709766 | false | 3.712226 | true | false | false |
brguez/TEIBA | src/python/mergeVCF.py | 1 | 2384 |
#!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
    """
    Display header
    """
    timeInfo = time.strftime("%Y-%m-%d %H:%M")
    print '\n', timeInfo, "****", string, "****"
def subHeader(string):
    """
    Display subheader
    """
    timeInfo = time.strftime("%Y-%m-%d %H:%M")
    print timeInfo, "**", string, "**"
def info(string):
    """
    Display basic information
    """
    timeInfo = time.strftime("%Y-%m-%d %H:%M")
    print timeInfo, string
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
## Get user's input ##
parser = argparse.ArgumentParser(description="Merge multiple one-sample VCF files in a single one")
parser.add_argument('VCFPaths', help='text file with the path to the VCF files will be merged')
parser.add_argument('sampleId', help='Identifier to name output file.')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
VCFPaths = args.VCFPaths
sampleId = args.sampleId
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "VCF: ", VCFPaths
print "sampleId: ", sampleId
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1. Create VCF object and read input VCF
header("1. Process input VCFs")
paths = open(VCFPaths, 'r')
# Make merged VCF object
completeVCFObj = formats.VCF()
## Read one VCF per iteration and add the variants to the merged VCF
for VCFfile in paths:
    VCFfile = VCFfile.rstrip('\n\r')
    VCFObj = formats.VCF()
    VCFObj.read_VCF(VCFfile)
    # Add variant objects
    for lineObj in VCFObj.lineList:
        completeVCFObj.addLine(lineObj)
    # Create header
    if completeVCFObj.header == "":
        completeVCFObj.header = VCFObj.header
# Sort variants in the merged VCF object
completeVCFObj.lineList = completeVCFObj.sort()
#### Write output VCF file from merged VCF object
header("2. Write output VCF file")
outFilePath = outDir + '/' + sampleId + ".vcf"
# 1. Write header
completeVCFObj.write_header(outFilePath)
# 2. Write variants
completeVCFObj.write_variants(outFilePath)
header("Finished")
|
gpl-3.0
| -8,237,825,818,623,232,000 | 22.544554 | 136 | 0.666526 | false | 3.289073 | false | false | false |
jdavidrcamacho/Tests_GP
|
08 - Thesis results/speed_test6.py
|
1
|
5414
|
import Gedi as gedi
import george
import numpy as np;
import matplotlib.pylab as pl; pl.close('all')
from time import time,sleep
import scipy.optimize as op
import sys
##### INITIAL DATA ###########################################################
nrep = 1
pontos=[]
temposQP=[]
temposmulti=[]
georgeQP=[]
sleeptime=10
lista=[10,20,50,100,200,500]
#for i in np.arange(100,650,200):
#for i in np.arange(100,1400,350):
### Functions george
# Define the objective function (negative log-likelihood in this case).
def nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
ll = gp.lnlikelihood(y, quiet=True)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
return -gp.grad_lnlikelihood(y, quiet=True)
### Functions gedi
def nll_gedi(p):
global kernel
# Update the kernel parameters and compute the likelihood.
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
ll = gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll_gedi(p):
global kernel
# Update the kernel parameters and compute the likelihood.
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
return -np.array(gedi.kernel_likelihood.gradient_likelihood(kernel,x,y,yerr))
###############################################################################
### Things to run
for i0, i in enumerate(lista):
f=open("{0}.txt".format(i),"w")
sys.stdout = f
print i
pontos.append(i)
print 'pontos', pontos
x = 10 * np.sort(np.random.rand(2*i))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.QuasiPeriodic(15.0,2.0,1.0,10.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposQP.append(sum(av) / float(nrep))
print 'temposQP', temposQP
sleep(sleeptime*i0)
###############################################################################
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.ExpSineSquared(15.0, 2.0, 10.0)* \
gedi.kernel.ExpSquared(1.0,1.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposmulti.append(sum(av) / float(nrep))
print 'temposmult', temposmulti
sleep(sleeptime*i0)
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 15.0**2*george.kernels.ExpSine2Kernel(2/2.0**2,10.0)* \
george.kernels.ExpSquaredKernel(1.0**2)
# You need to compute the GP once before starting the optimization.
gp = george.GP(kernelg1, mean=np.mean(y))
gp.compute(x,yerr)
# Print the initial ln-likelihood.
print 'Initial george kernel', kernelg1
print 'Initial george likelihood', gp.lnlikelihood(y)
# Run the optimization routine.
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
print 'Final george kernel =',gp.kernel
print 'Final george likelihood= ', gp.lnlikelihood(y)
print
tempog1= time() - start
av.append(tempog1)
georgeQP.append(sum(av) / float(nrep))
print 'georgeQP', georgeQP
###########################################################################
sys.stdout = sys.__stdout__
f.close()
sleep(sleeptime*i0)
N = pontos
pl.figure()
pl.loglog(N, temposQP, 'r-')
pl.loglog(N, temposmulti, 'b-o')
pl.loglog(N, georgeQP, 'b--')
pl.xlim(0.9*N[0], 1.1*N[-1])
pl.xlabel('Number of points')
pl.ylabel('Time')
#pl.title('Covariance matrix calculations')
pl.legend(['gedi QP', 'gedi ESS*ES','george ESS*ES'],loc='upper left')
pl.xticks(fontsize = 18);pl.yticks(fontsize=18)
pl.savefig('speedtest_6.png')
#pl.close('all')
|
mit
| -2,779,067,002,796,960,300 | 32.84375 | 92 | 0.598079 | false | 3.373209 | false | false | false |
ArtBIT/Droptopus
|
droptopus/forms.py
|
1
|
3409
|
import os
import logging
from droptopus import config, settings
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QDialog,
QDialogButtonBox,
QFileDialog,
QFormLayout,
QHBoxLayout,
QLabel,
QLineEdit,
QMessageBox,
QPushButton,
)
from PyQt5.QtGui import QPixmap
class EditItemForm(QDialog):
def __init__(self, item, parent=None):
super(EditItemForm, self).__init__(parent)
l1 = QLabel("Name:")
name = QLineEdit()
l2 = QLabel("Description:")
desc = QLineEdit()
l3 = QLabel("Icon:")
icon = QLabel()
btn_icon = QPushButton("...")
btn_icon.setFixedWidth(50)
btn_icon.clicked.connect(self.onChangeIcon)
l4 = QLabel("Target Path:")
path = QLineEdit()
path.setReadOnly(True)
btn_path = QPushButton("...")
btn_path.setFixedWidth(50)
btn_path.clicked.connect(self.onChangePath)
layout = QFormLayout(self)
layout.addRow(l1, name)
layout.addRow(l2, desc)
row = QHBoxLayout()
row.addWidget(icon)
row.addWidget(btn_icon)
layout.addRow(l3, row)
row = QHBoxLayout()
row.addWidget(path)
row.addWidget(btn_path)
layout.addRow(l4, row)
buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self
)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
layout.addRow(buttons)
self.icon = icon
self.name = name
self.path = path
self.desc = desc
self.loadItem(item)
def loadItem(self, item):
self.icon.setPixmap(
QPixmap(item["icon"]).scaled(
50, 50, Qt.KeepAspectRatio, Qt.SmoothTransformation
)
)
self.name.setText(item["name"])
self.desc.setText(item["desc"])
self.path.setText(item["path"])
self.item = item
def onChangeIcon(self):
icon_filepath, _filter = QFileDialog.getOpenFileName(
self, "Choose Icon", os.path.dirname(self.item["icon"])
)
if icon_filepath:
icon_size = 15
self.icon.setPixmap(
QPixmap(icon_filepath).scaled(
icon_size, icon_size, Qt.KeepAspectRatio, Qt.SmoothTransformation
)
)
self.item["icon"] = icon_filepath
def onChangePath(self):
path = self.item["path"] if len(self.item["path"]) else os.path.expanduser("~")
if self.item["type"] == "dir":
path = QFileDialog.getExistingDirectory(self, "Choose a directory", path)
if path:
self.path.setText(path)
self.item["path"] = path
else:
path, _filter = QFileDialog.getOpenFileName(self, "Open file", path)
if path:
self.path.setText(path)
self.item["path"] = path
def validate(self):
return True
def accept(self):
if not self.validate():
QMessageBox.critical(
self, "Error", "\n".join(self.validation_errors), QMessageBox.Ok
)
return
self.item["name"] = self.name.text()
self.item["desc"] = self.desc.text()
settings.writeItem(self.item)
self.close()
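# A minimal sketch of the item dict EditItemForm expects, inferred from the
# attribute accesses above; the concrete values are hypothetical:
#   item = {
#       "type": "dir",   # "dir" opens a directory picker; anything else a file picker
#       "name": "Downloads",
#       "desc": "Drop files here",
#       "icon": "/path/to/icon.png",
#       "path": "/home/user/Downloads",
#   }
#   dialog = EditItemForm(item)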
|
mit
| -2,698,943,702,042,871,000 | 27.889831 | 87 | 0.563802 | false | 3.896 | false | false | false |
nwokeo/supysonic
|
supysonic/frontend/folder.py
|
1
|
3211
|
# coding: utf-8
# This file is part of Supysonic.
#
# Supysonic is a Python implementation of the Subsonic server API.
# Copyright (C) 2013 Alban 'spl0k' Féron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask import request, flash, render_template, redirect, url_for, session
import os.path
import uuid
from supysonic.web import app, store
from supysonic.db import Folder
from supysonic.scanner import Scanner
from supysonic.managers.user import UserManager
from supysonic.managers.folder import FolderManager
@app.before_request
def check_admin():
if not request.path.startswith('/folder'):
return
if not UserManager.get(store, session.get('userid'))[1].admin:
return redirect(url_for('index'))
@app.route('/folder')
def folder_index():
return render_template('folders.html', folders = store.find(Folder, Folder.root == True))
@app.route('/folder/add', methods = [ 'GET', 'POST' ])
def add_folder():
if request.method == 'GET':
return render_template('addfolder.html')
error = False
(name, path) = map(request.form.get, [ 'name', 'path' ])
if name in (None, ''):
flash('The name is required.')
error = True
if path in (None, ''):
flash('The path is required.')
error = True
if error:
return render_template('addfolder.html')
ret = FolderManager.add(store, name, path)
if ret != FolderManager.SUCCESS:
flash(FolderManager.error_str(ret))
return render_template('addfolder.html')
flash("Folder '%s' created. You should now run a scan" % name)
return redirect(url_for('folder_index'))
@app.route('/folder/del/<id>')
def del_folder(id):
try:
idid = uuid.UUID(id)
except ValueError:
flash('Invalid folder id')
return redirect(url_for('folder_index'))
ret = FolderManager.delete(store, idid)
if ret != FolderManager.SUCCESS:
flash(FolderManager.error_str(ret))
else:
flash('Deleted folder')
return redirect(url_for('folder_index'))
@app.route('/folder/scan')
@app.route('/folder/scan/<id>')
def scan_folder(id = None):
scanner = Scanner(store)
if id is None:
for folder in store.find(Folder, Folder.root == True):
scanner.scan(folder)
else:
status, folder = FolderManager.get(store, id)
if status != FolderManager.SUCCESS:
flash(FolderManager.error_str(status))
return redirect(url_for('folder_index'))
scanner.scan(folder)
scanner.finish()
added, deleted = scanner.stats()
store.commit()
flash('Added: %i artists, %i albums, %i tracks' % (added[0], added[1], added[2]))
flash('Deleted: %i artists, %i albums, %i tracks' % (deleted[0], deleted[1], deleted[2]))
return redirect(url_for('folder_index'))
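# Illustrative requests handled by the admin views above (the UUID is hypothetical):
#   GET  /folder                 -> list the root folders
#   GET  /folder/add             -> show the add-folder form
#   POST /folder/add             -> create a folder from the submitted name/path
#   GET  /folder/del/<uuid>      -> delete the folder with that id
#   GET  /folder/scan            -> scan every root folder
#   GET  /folder/scan/<uuid>     -> scan a single folder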
|
agpl-3.0
| 8,356,012,142,312,733,000 | 29.571429 | 90 | 0.718069 | false | 3.27551 | false | false | false |
tischlda/mopidy-oe1
|
mopidy_oe1/library.py
|
1
|
5362
|
from __future__ import unicode_literals
import logging
import re
from client import OE1Client
from mopidy import backend
from mopidy.models import Ref, Track
logger = logging.getLogger(__name__)
class OE1Uris(object):
ROOT = 'oe1:directory'
LIVE = 'oe1:live'
CAMPUS = 'oe1:campus'
ARCHIVE = 'oe1:archive'
class OE1LibraryProvider(backend.LibraryProvider):
root_directory = Ref.directory(uri=OE1Uris.ROOT, name='OE1')
root = [
Ref.track(uri=OE1Uris.LIVE, name='Live'),
Ref.track(uri=OE1Uris.CAMPUS, name='Campus'),
Ref.directory(uri=OE1Uris.ARCHIVE, name='7 Tage')]
def __init__(self, backend, client=OE1Client()):
super(OE1LibraryProvider, self).__init__(backend)
self.client = client
def browse(self, uri):
try:
library_uri = OE1LibraryUri.parse(uri)
except InvalidOE1Uri, e:
logger.error(e)
return []
if library_uri.uri_type == OE1UriType.ROOT:
return self.root
if library_uri.uri_type == OE1UriType.ARCHIVE:
return self._browse_archive()
if library_uri.uri_type == OE1UriType.ARCHIVE_DAY:
return self._browse_day(library_uri.day_id)
logger.warn('OE1LibraryProvider.browse called with uri '
'that does not support browsing: \'%s\'.' % uri)
return []
def _browse_archive(self):
return [Ref.directory(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_DAY,
day['id'])),
name=day['label'])
for day in self.client.get_days()]
def _get_track_title(self, item):
return '%s: %s' % (item['time'], item['title'])
def _browse_day(self, day_id):
return [Ref.track(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
day_id, item['id'])),
name=self._get_track_title(item))
for item in self.client.get_day(day_id)['items']]
def lookup(self, uri):
try:
library_uri = OE1LibraryUri.parse(uri)
except InvalidOE1Uri, e:
logger.error(e)
return []
if library_uri.uri_type == OE1UriType.LIVE:
return [Track(uri=OE1Uris.LIVE, name='Live')]
if library_uri.uri_type == OE1UriType.CAMPUS:
return [Track(uri=OE1Uris.CAMPUS, name='Campus')]
if library_uri.uri_type == OE1UriType.ARCHIVE_DAY:
return self._browse_day(library_uri.day_id)
if library_uri.uri_type == OE1UriType.ARCHIVE_ITEM:
return self._lookup_item(library_uri.day_id, library_uri.item_id)
logger.warn('OE1LibraryProvider.lookup called with uri '
'that does not support lookup: \'%s\'.' % uri)
return []
def _lookup_item(self, day_id, item_id):
item = self.client.get_item(day_id, item_id)
return [Track(uri=str(OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
day_id, item['id'])),
name=self._get_track_title(item))]
def refresh(self, uri=None):
self.client.refresh()
class OE1LibraryUri(object):
def __init__(self, uri_type, day_id=None, item_id=None):
self.uri_type = uri_type
self.day_id = day_id
self.item_id = item_id
archive_parse_expression = '^' + re.escape(OE1Uris.ARCHIVE) +\
':(?P<day_id>\d{8})(:(?P<item_id>\d+))?$'
archive_parser = re.compile(archive_parse_expression)
@staticmethod
def parse(uri):
if uri == OE1Uris.ROOT:
return OE1LibraryUri(OE1UriType.ROOT)
if uri == OE1Uris.LIVE:
return OE1LibraryUri(OE1UriType.LIVE)
if uri == OE1Uris.CAMPUS:
return OE1LibraryUri(OE1UriType.CAMPUS)
if uri == OE1Uris.ARCHIVE:
return OE1LibraryUri(OE1UriType.ARCHIVE)
matches = OE1LibraryUri.archive_parser.match(uri)
if matches is not None:
day_id = matches.group('day_id')
item_id = matches.group('item_id')
if day_id is not None:
if matches.group('item_id') is not None:
return OE1LibraryUri(OE1UriType.ARCHIVE_ITEM,
day_id, item_id)
return OE1LibraryUri(OE1UriType.ARCHIVE_DAY, day_id)
raise InvalidOE1Uri(uri)
def __str__(self):
if self.uri_type == OE1UriType.ROOT:
return OE1Uris.ROOT
if self.uri_type == OE1UriType.LIVE:
return OE1Uris.LIVE
if self.uri_type == OE1UriType.CAMPUS:
return OE1Uris.CAMPUS
if self.uri_type == OE1UriType.ARCHIVE:
return OE1Uris.ARCHIVE
if self.uri_type == OE1UriType.ARCHIVE_DAY:
return OE1Uris.ARCHIVE + ':' + self.day_id
if self.uri_type == OE1UriType.ARCHIVE_ITEM:
return OE1Uris.ARCHIVE + ':' + self.day_id + ':' + self.item_id
class InvalidOE1Uri(TypeError):
def __init__(self, uri):
super(TypeError, self).__init__(
'The URI is not a valid OE1LibraryUri: \'%s\'.' % uri)
class OE1UriType(object):
ROOT = 0
LIVE = 1
CAMPUS = 2
ARCHIVE = 3
ARCHIVE_DAY = 4
ARCHIVE_ITEM = 5
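# A short sketch of the URI scheme handled by OE1LibraryUri.parse; the day and
# item identifiers are illustrative (the regex only requires an 8-digit day id
# and a numeric item id):
#   OE1LibraryUri.parse('oe1:directory')           -> OE1UriType.ROOT
#   OE1LibraryUri.parse('oe1:archive')             -> OE1UriType.ARCHIVE
#   OE1LibraryUri.parse('oe1:archive:20140101')    -> ARCHIVE_DAY, day_id='20140101'
#   OE1LibraryUri.parse('oe1:archive:20140101:7')  -> ARCHIVE_ITEM, day_id='20140101', item_id='7'
# str() on the resulting object reproduces the original URI string.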
|
apache-2.0
| 2,123,299,529,363,965,000 | 32.304348 | 77 | 0.567326 | false | 3.406607 | false | false | false |
antoniorohit/xbob.spkrec
|
xbob/spkrec/script/external_vad_conversion.py
|
1
|
2026
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Elie Khoury <[email protected]>
# Fri Aug 30 11:44:33 CEST 2013
#
# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bob
import time, imp
import xbob.spkrec.preprocessing
import sys
from .. import utils
import os
def main():
"""Executes the main function"""
input_file_list = sys.argv[1] # The input file list
audio_dir = sys.argv[2] # The Audio directory
vad_dir = sys.argv[3] # The VAD directory
out_dir = sys.argv[4] # The Output directory
# ensure output directory
utils.ensure_dir(out_dir)
# Define the processor and the parameters
m_preprocessor_config = imp.load_source('preprocessor', "config/preprocessing/external.py")
preprocessor = xbob.spkrec.preprocessing.External(m_preprocessor_config)
infile=open(input_file_list)
for filename in infile:
filename = filename.strip()
audio_file = str(os.path.join(audio_dir, filename) + '.sph')
if os.path.exists(audio_file):
out_file = str(os.path.join(out_dir, filename) + '.hdf5')
vad_file = str(os.path.join(vad_dir, filename) + '.vad')
# The VAD file is 5 columns text file
# Column 1: segment number
# Column 3: start time
# Column 5: end time
preprocessor(audio_file, out_file, vad_file)
else:
print("Warning: file does not exist: %s" %audio_file)
if __name__ == "__main__":
main()
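# Illustrative invocation, matching the positional sys.argv handling in main()
# (the file and directory names are hypothetical):
#   python external_vad_conversion.py file_list.txt /data/audio /data/vad /data/out
# file_list.txt holds one file stem per line; for each stem the script expects
# <audio_dir>/<stem>.sph and <vad_dir>/<stem>.vad and writes <out_dir>/<stem>.hdf5.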
|
gpl-3.0
| -514,649,792,676,167,900 | 32.213115 | 94 | 0.698421 | false | 3.463248 | false | false | false |
psiwczak/openstack
|
nova/virt/baremetal/tilera.py
|
1
|
12639
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tilera back-end for bare-metal compute node provisioning
The details of this implementation are specific to ISI's testbed. This code
is provided here as an example of how to implement a backend.
"""
import base64
import os
import subprocess
import time
from nova.compute import power_state
from nova.openstack.common import cfg
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
tilera_opts = [
cfg.StrOpt('tile_monitor',
default='/usr/local/TileraMDE/bin/tile-monitor',
help='Tilera command line program for Bare-metal driver')
]
FLAGS.register_opts(tilera_opts)
LOG = logging.getLogger(__name__)
def get_baremetal_nodes():
return BareMetalNodes()
class BareMetalNodes(object):
"""
This manages node information and implements singleton.
    The BareMetalNodes class handles machine architectures of interest to
    technical computing users that have either poor or non-existent support
    for virtualization.
"""
_instance = None
_is_init = False
def __new__(cls, *args, **kwargs):
"""
Returns the BareMetalNodes singleton.
"""
if not cls._instance or ('new' in kwargs and kwargs['new']):
cls._instance = super(BareMetalNodes, cls).__new__(cls)
return cls._instance
def __init__(self, file_name="/tftpboot/tilera_boards"):
"""
Only call __init__ the first time object is instantiated.
From the bare-metal node list file: /tftpboot/tilera_boards,
reads each item of each node such as node ID, IP address,
MAC address, vcpus, memory, hdd, hypervisor type/version, and cpu
and appends each node information into nodes list.
"""
if self._is_init:
return
self._is_init = True
self.nodes = []
self.BOARD_ID = 0
self.IP_ADDR = 1
self.MAC_ADDR = 2
self.VCPUS = 3
self.MEMORY_MB = 4
self.LOCAL_GB = 5
self.MEMORY_MB_USED = 6
self.LOCAL_GB_USED = 7
self.HYPERVISOR_TYPE = 8
self.HYPERVISOR_VER = 9
self.CPU_INFO = 10
fp = open(file_name, "r")
for item in fp:
l = item.split()
if l[0] == '#':
continue
l_d = {'node_id': int(l[self.BOARD_ID]),
'ip_addr': l[self.IP_ADDR],
'mac_addr': l[self.MAC_ADDR],
'status': power_state.NOSTATE,
'vcpus': int(l[self.VCPUS]),
'memory_mb': int(l[self.MEMORY_MB]),
'local_gb': int(l[self.LOCAL_GB]),
'memory_mb_used': int(l[self.MEMORY_MB_USED]),
'local_gb_used': int(l[self.LOCAL_GB_USED]),
'hypervisor_type': l[self.HYPERVISOR_TYPE],
'hypervisor_version': int(l[self.HYPERVISOR_VER]),
'cpu_info': l[self.CPU_INFO]}
self.nodes.append(l_d)
fp.close()
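        # A sketch of one /tftpboot/tilera_boards line in the column order read
        # above (board id, IP, MAC, vcpus, memory_mb, local_gb, memory_mb_used,
        # local_gb_used, hypervisor type, hypervisor version, cpu info); the
        # values are hypothetical:
        #   0 10.0.2.1 00:1A:CA:00:57:90 10 16218 917 476 1 tilera_hv 1 TILEPro64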
def get_hw_info(self, field):
"""
Returns hardware information of bare-metal node by the given field.
Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
"""
for node in self.nodes:
if node['node_id'] == 9:
if field == 'vcpus':
return node['vcpus']
elif field == 'memory_mb':
return node['memory_mb']
elif field == 'local_gb':
return node['local_gb']
elif field == 'memory_mb_used':
return node['memory_mb_used']
elif field == 'local_gb_used':
return node['local_gb_used']
elif field == 'hypervisor_type':
return node['hypervisor_type']
elif field == 'hypervisor_version':
return node['hypervisor_version']
elif field == 'cpu_info':
return node['cpu_info']
def set_status(self, node_id, status):
"""
Sets status of the given node by the given status.
        Returns True if the node is in the nodes list, otherwise False.
"""
for node in self.nodes:
if node['node_id'] == node_id:
node['status'] = status
return True
return False
def get_status(self):
"""
Gets status of the given node.
"""
pass
def get_idle_node(self):
"""
        Gets an idle node, sets its status to 1 (RUNNING), and returns the node ID.
"""
for item in self.nodes:
if item['status'] == 0:
item['status'] = 1 # make status RUNNING
return item['node_id']
raise exception.NotFound("No free nodes available")
def get_ip_by_id(self, id):
"""
Returns default IP address of the given node.
"""
for item in self.nodes:
if item['node_id'] == id:
return item['ip_addr']
def free_node(self, node_id):
"""
Sets/frees status of the given node as 0 (IDLE).
"""
LOG.debug(_("free_node...."))
for item in self.nodes:
if item['node_id'] == str(node_id):
item['status'] = 0 # make status IDLE
def power_mgr(self, node_id, mode):
"""
Changes power state of the given node.
According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
changed. /tftpboot/pdu_mgr script handles power management of
PDU (Power Distribution Unit).
"""
if node_id < 5:
pdu_num = 1
pdu_outlet_num = node_id + 5
else:
pdu_num = 2
pdu_outlet_num = node_id
path1 = "10.0.100." + str(pdu_num)
utils.execute('/tftpboot/pdu_mgr', path1, str(pdu_outlet_num),
str(mode), '>>', 'pdu_output')
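        # Worked examples of the mapping above (node ids are hypothetical):
        #   node_id 3 -> PDU 1 (10.0.100.1), outlet 8 (node_id + 5)
        #   node_id 7 -> PDU 2 (10.0.100.2), outlet 7
        # mode is 1-ON, 2-OFF or 3-REBOOT, as described in the docstring.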
def deactivate_node(self, node_id):
"""
        Deactivates the given node by turning it off.
/tftpboot/fs_x directory is a NFS of node#x
and /tftpboot/root_x file is an file system image of node#x.
"""
node_ip = self.get_ip_by_id(node_id)
LOG.debug(_("deactivate_node is called for "
"node_id = %(id)s node_ip = %(ip)s"),
{'id': str(node_id), 'ip': node_ip})
for item in self.nodes:
if item['node_id'] == node_id:
LOG.debug(_("status of node is set to 0"))
item['status'] = 0
self.power_mgr(node_id, 2)
self.sleep_mgr(5)
path = "/tftpboot/fs_" + str(node_id)
pathx = "/tftpboot/root_" + str(node_id)
utils.execute('sudo', '/usr/sbin/rpc.mountd')
try:
utils.execute('sudo', 'umount', '-f', pathx)
utils.execute('sudo', 'rm', '-f', pathx)
except Exception:
LOG.debug(_("rootfs is already removed"))
def network_set(self, node_ip, mac_address, ip_address):
"""
Sets network configuration based on the given ip and mac address.
User can access the bare-metal node using ssh.
"""
cmd = (FLAGS.tile_monitor +
" --resume --net " + node_ip + " --run - " +
"ifconfig xgbe0 hw ether " + mac_address +
" - --wait --run - ifconfig xgbe0 " + ip_address +
" - --wait --quit")
subprocess.Popen(cmd, shell=True)
#utils.execute(cmd, shell=True)
self.sleep_mgr(5)
def iptables_set(self, node_ip, user_data):
"""
Sets security setting (iptables:port) if needed.
iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
/tftpboot/iptables_rule script sets iptables rule on the given node.
"""
if user_data != '':
open_ip = base64.b64decode(user_data)
utils.execute('/tftpboot/iptables_rule', node_ip, open_ip)
def check_activated(self, node_id, node_ip):
"""
Checks whether the given node is activated or not.
"""
LOG.debug(_("Before ping to the bare-metal node"))
tile_output = "/tftpboot/tile_output_" + str(node_id)
grep_cmd = ("ping -c1 " + node_ip + " | grep Unreachable > " +
tile_output)
subprocess.Popen(grep_cmd, shell=True)
self.sleep_mgr(5)
file = open(tile_output, "r")
out_msg = file.readline().find("Unreachable")
utils.execute('sudo', 'rm', tile_output)
if out_msg == -1:
LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"),
locals())
return True
else:
LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready,"
" out_msg=%(out_msg)s"), locals())
self.power_mgr(node_id, 2)
return False
def vmlinux_set(self, node_id, mode):
"""
        Sets the kernel into the default path (/tftpboot) if needed.
        The kernel is copied from the base path into /tftpboot according to the
        given mode: 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
"""
LOG.debug(_("Noting to do for tilera nodes: vmlinux is in CF"))
def sleep_mgr(self, time_in_seconds):
"""
        Sleeps for the given number of seconds while waiting for the node.
"""
time.sleep(time_in_seconds)
def ssh_set(self, node_ip):
"""
        Sets up and runs sshd on the node.
"""
cmd = (FLAGS.tile_monitor +
" --resume --net " + node_ip + " --run - " +
"/usr/sbin/sshd - --wait --quit")
subprocess.Popen(cmd, shell=True)
self.sleep_mgr(5)
def activate_node(self, node_id, node_ip, name, mac_address,
ip_address, user_data):
"""
Activates the given node using ID, IP, and MAC address.
"""
LOG.debug(_("activate_node"))
self.power_mgr(node_id, 2)
self.power_mgr(node_id, 3)
self.sleep_mgr(100)
try:
self.check_activated(node_id, node_ip)
self.network_set(node_ip, mac_address, ip_address)
self.ssh_set(node_ip)
self.iptables_set(node_ip, user_data)
return power_state.RUNNING
except Exception as ex:
self.deactivate_node(node_id)
            raise exception.NovaException(_("Node is in an unknown error state."))
def get_console_output(self, console_log, node_id):
"""
Gets console output of the given node.
"""
node_ip = self.get_ip_by_id(node_id)
log_path = "/tftpboot/log_" + str(node_id)
kmsg_cmd = (FLAGS.tile_monitor +
" --resume --net " + node_ip +
" -- dmesg > " + log_path)
subprocess.Popen(kmsg_cmd, shell=True)
self.sleep_mgr(5)
utils.execute('cp', log_path, console_log)
def get_image(self, bp):
"""
Gets the bare-metal file system image into the instance path.
        Nothing to do for tilera nodes: the actual image is used.
"""
path_fs = "/tftpboot/tilera_fs"
path_root = bp + "/root"
utils.execute('cp', path_fs, path_root)
def set_image(self, bpath, node_id):
"""
Sets the PXE bare-metal file system from the instance path.
This should be done after ssh key is injected.
/tftpboot/fs_x directory is a NFS of node#x.
/tftpboot/root_x file is an file system image of node#x.
"""
path1 = bpath + "/root"
pathx = "/tftpboot/root_" + str(node_id)
path2 = "/tftpboot/fs_" + str(node_id)
utils.execute('sudo', 'mv', path1, pathx)
utils.execute('sudo', 'mount', '-o', 'loop', pathx, path2)
|
apache-2.0
| 6,158,067,869,919,608,000 | 33.627397 | 78 | 0.544505 | false | 3.778475 | false | false | false |
pgdr/ert
|
python/python/ert_gui/simulation/models/single_test_run.py
|
1
|
1111
|
from ecl.util import BoolVector
from res.enkf.enums import HookRuntime
from res.enkf import ErtRunContext
from ert_gui.simulation.models import BaseRunModel, ErtRunError, EnsembleExperiment
class SingleTestRun(EnsembleExperiment):
def __init__(self, queue_config):
super(EnsembleExperiment, self).__init__("Single realization test-run" , queue_config)
def runSimulations(self, arguments):
self.runSimulations__( arguments , "Running single realisation test ...")
def create_context(self, arguments):
fs_manager = self.ert().getEnkfFsManager()
init_fs = fs_manager.getCurrentFileSystem( )
result_fs = fs_manager.getCurrentFileSystem( )
model_config = self.ert().getModelConfig( )
runpath_fmt = model_config.getRunpathFormat( )
subst_list = self.ert().getDataKW( )
itr = 0
mask = BoolVector( default_value = False )
mask[0] = True
run_context = ErtRunContext.ensemble_experiment( init_fs, result_fs, mask, runpath_fmt, subst_list, itr)
return run_context
|
gpl-3.0
| 2,944,036,167,774,455,000 | 32.666667 | 112 | 0.666067 | false | 3.804795 | false | false | false |
schmidtj/PyGNA
|
PyGNA/graphMLRead.py
|
1
|
1143
|
'''
This is a wrapper for the networkx graphml read/writer so that the GNA can
read a graphml file with multiple graphs. The current networkx read_graphml
only returns the first element in the graph list that is returned by the
graphMLReader class.
'''
import networkx.readwrite.graphml as ml
def read_graphml(path, node_type=int):
"""Read graph in GraphML format from path.
Parameters
----------
path : file or string
       File or filename to read.
       Filenames ending in .gz or .bz2 will be uncompressed.
    node_type: Python type (default: int)
Convert node ids to this type
Returns
-------
list(graphs): List of NetworkX graphs
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
"""
# **Deprecated ** fh=ml._get_fh(path,mode='rb')
    reader = ml.GraphMLReader(node_type=node_type)
# need to check for multiple graphs
glist=list(reader(path))
#return glist[0] <---- The current networkx read_graphml return value
return glist # <---- returns the full list of graphs read from a file
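# A minimal usage sketch, assuming the module is importable as graphMLRead and
# that 'multi.graphml' is a hypothetical file containing several <graph> elements:
#   import graphMLRead
#   graphs = graphMLRead.read_graphml('multi.graphml')
#   for g in graphs:
#       print(len(g))   # number of nodes in each graph
# Unlike networkx.read_graphml, which keeps only the first graph, every graph
# found in the file is returned.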
|
bsd-3-clause
| 4,107,397,642,745,463,300 | 33.666667 | 76 | 0.67804 | false | 4.053191 | false | false | false |
FescueFungiShare/hydroshare
|
hs_tools_resource/page_processors.py
|
1
|
7367
|
from mezzanine.pages.page_processors import processor_for
from crispy_forms.layout import Layout, HTML
from hs_core import page_processors
from hs_core.views import add_generic_context
from forms import UrlBaseForm, VersionForm, SupportedResTypesForm, ToolIconForm, \
SupportedSharingStatusForm, AppHomePageUrlForm
from models import ToolResource
from utils import get_SupportedResTypes_choices
@processor_for(ToolResource)
def landing_page(request, page):
content_model = page.get_content_model()
edit_resource = page_processors.check_resource_mode(request)
if content_model.metadata.supported_sharing_status.first() is None:
content_model.metadata.create_element('SupportedSharingStatus',
sharing_status=['Published', 'Public',
'Discoverable', 'Private'],)
if not edit_resource:
# get the context from hs_core
context = page_processors.get_page_context(page, request.user,
resource_edit=edit_resource,
extended_metadata_layout=None,
request=request)
extended_metadata_exists = False
if content_model.metadata.url_bases.first() or content_model.metadata.versions.first():
extended_metadata_exists = True
new_supported_res_types_array = []
if content_model.metadata.supported_res_types.first():
extended_metadata_exists = True
supported_res_types_str = content_model.metadata.\
supported_res_types.first().get_supported_res_types_str()
supported_res_types_array = supported_res_types_str.split(',')
for type_name in supported_res_types_array:
for class_verbose_list in get_SupportedResTypes_choices():
if type_name.lower() == class_verbose_list[0].lower():
new_supported_res_types_array += [class_verbose_list[1]]
break
context['supported_res_types'] = ", ".join(new_supported_res_types_array)
if content_model.metadata.supported_sharing_status.first() is not None:
extended_metadata_exists = True
sharing_status_str = content_model.metadata.supported_sharing_status.first()\
.get_sharing_status_str()
context['supported_sharing_status'] = sharing_status_str
if content_model.metadata.tool_icon.first():
context['tool_icon_url'] = content_model.metadata.tool_icon.first().value
context['extended_metadata_exists'] = extended_metadata_exists
context['url_base'] = content_model.metadata.url_bases.first()
context['version'] = content_model.metadata.versions.first()
context['homepage_url'] = content_model.metadata.homepage_url.first()
else:
url_base = content_model.metadata.url_bases.first()
url_base_form = UrlBaseForm(instance=url_base,
res_short_id=content_model.short_id,
element_id=url_base.id
if url_base else None)
homepage_url = content_model.metadata.homepage_url.first()
homepage_url_form = \
AppHomePageUrlForm(instance=homepage_url,
res_short_id=content_model.short_id,
element_id=homepage_url.id
if homepage_url else None)
version = content_model.metadata.versions.first()
version_form = VersionForm(instance=version,
res_short_id=content_model.short_id,
element_id=version.id
if version else None)
supported_res_types_obj = content_model.metadata.supported_res_types.first()
supported_res_types_form = SupportedResTypesForm(instance=supported_res_types_obj,
res_short_id=content_model.short_id,
element_id=supported_res_types_obj.id
if supported_res_types_obj else None)
sharing_status_obj = content_model.metadata.supported_sharing_status.first()
sharing_status_obj_form = \
SupportedSharingStatusForm(instance=sharing_status_obj,
res_short_id=content_model.short_id,
element_id=sharing_status_obj.id
if sharing_status_obj else None)
tool_icon_obj = content_model.metadata.tool_icon.first()
tool_icon_form = ToolIconForm(instance=tool_icon_obj,
res_short_id=content_model.short_id,
element_id=tool_icon_obj.id
if tool_icon_obj else None)
ext_md_layout = Layout(
HTML('<div class="form-group col-lg-6 col-xs-12" id="SupportedResTypes"> '
'{% load crispy_forms_tags %} '
'{% crispy supported_res_types_form %} '
'</div> '),
HTML('<div class="form-group col-lg-6 col-xs-12" id="SupportedSharingStatus"> '
'{% load crispy_forms_tags %} '
'{% crispy sharing_status_obj_form %} '
'</div> '),
HTML("<div class='form-group col-lg-6 col-xs-12' id='homepage_url'> "
'{% load crispy_forms_tags %} '
'{% crispy homepage_url_form %} '
'</div>'),
HTML("<div class='form-group col-lg-6 col-xs-12' id='url_bases'> "
'{% load crispy_forms_tags %} '
'{% crispy url_base_form %} '
'</div>'),
HTML('<div class="form-group col-lg-6 col-xs-12" id="version"> '
'{% load crispy_forms_tags %} '
'{% crispy version_form %} '
'</div> '),
HTML('<div class="form-group col-lg-6 col-xs-12" id="tool_icon"> '
'{% load crispy_forms_tags %} '
'{% crispy tool_icon_form %} '
'</div> '),
)
# get the context from hs_core
context = page_processors.get_page_context(page, request.user,
resource_edit=edit_resource,
extended_metadata_layout=ext_md_layout,
request=request)
context['url_base_form'] = url_base_form
context['homepage_url_form'] = homepage_url_form
context['version_form'] = version_form
context['supported_res_types_form'] = supported_res_types_form
context['tool_icon_form'] = tool_icon_form
context['sharing_status_obj_form'] = sharing_status_obj_form
hs_core_dublin_context = add_generic_context(request, page)
context.update(hs_core_dublin_context)
return context
|
bsd-3-clause
| 1,497,654,030,710,353,200 | 51.248227 | 95 | 0.532917 | false | 4.467556 | false | false | false |
VirgilSecurity/virgil-sdk-python
|
virgil_sdk/signers/model_signer.py
|
1
|
4288
|
# Copyright (C) 2016-2019 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <[email protected]>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from virgil_sdk.client.raw_signature import RawSignature
from virgil_sdk.utils import Utils
class ModelSigner(object):
"""
    The ModelSigner class provides signing operations for RawSignedModel.
"""
SELF_SIGNER = "self"
VIRGIL_SIGNER = "virgil"
def __init__(
self,
card_crypto
):
self.__card_crypto = card_crypto
def sign(self, model, signer, signer_private_key, signature_snapshot=None, extra_fields=None):
# type: (RawSignedModel, str, VirgilPrivateKey, Union[bytearray, bytes], dict) -> None
"""
Adds signature to the specified RawSignedModel using specified signer.
Args:
model: The instance of RawSignedModel to be signed.
            signer: The signer type identifier, e.g. the SELF_SIGNER or VIRGIL_SIGNER constant.
signer_private_key: The instance of PrivateKey to sign with.
signature_snapshot: Some additional raw bytes to be signed with model.
extra_fields: Dictionary with additional data to be signed with model.
"""
if model.signatures:
if any(list(filter(lambda x: x.signer == signer, model.signatures))):
raise ValueError("The model already has this signature")
if extra_fields and not signature_snapshot:
signature_snapshot = bytearray(Utils.json_dumps(extra_fields).encode())
if signature_snapshot:
extended_snapshot = Utils.b64encode(bytearray(Utils.b64_decode(model.content_snapshot)) + bytearray(signature_snapshot))
else:
extended_snapshot = model.content_snapshot
signature_bytes = self.__card_crypto.generate_signature(
bytearray(Utils.b64_decode(extended_snapshot)),
signer_private_key
)
signature = RawSignature(signer, bytearray(signature_bytes), signature_snapshot)
model.add_signature(signature)
def self_sign(self, model, signer_private_key, signature_snapshot=None, extra_fields=None):
# type: (RawSignedModel, VirgilPrivateKey, Union[bytearray, bytes], dict) -> None
"""
Adds owner's signature to the specified RawSignedModel using specified signer.
Args:
model: The instance of RawSignedModel to be signed.
signer_private_key: The instance of PrivateKey to sign with.
signature_snapshot: Some additional raw bytes to be signed with model.
extra_fields: Dictionary with additional data to be signed with model.
"""
if extra_fields and not signature_snapshot:
signature_snapshot = Utils.json_dumps(extra_fields).encode()
self.sign(model, self.SELF_SIGNER, signer_private_key, signature_snapshot)
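    # A small sketch of what gets signed when extra_fields is supplied, using
    # only the Utils helpers imported above (the dict contents are hypothetical):
    #   snapshot_bytes = bytearray(Utils.json_dumps({"info": "some value"}).encode())
    #   signed_bytes = bytearray(Utils.b64_decode(model.content_snapshot)) + snapshot_bytes
    # The extra fields are serialized, appended to the decoded content snapshot,
    # and the signature is computed over the combined bytes, while the raw
    # extra-field bytes travel with the signature as its snapshot.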
|
bsd-3-clause
| 8,491,931,940,891,143,000 | 42.755102 | 132 | 0.698228 | false | 4.411523 | false | false | false |
zobe123/Plex-CS
|
plexcs/graphs.py
|
1
|
50414
|
# This file is part of Plex:CS.
#
# Plex:CS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plex:CS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plex:CS. If not, see <http://www.gnu.org/licenses/>.
from plexcs import logger, database, helpers, common
import datetime
class Graphs(object):
def __init__(self):
pass
def get_total_plays_per_day(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
try:
if y_axis == 'plays':
query = 'SELECT date(started, "unixepoch", "localtime") as date_played, ' \
'SUM(case when media_type = "episode" then 1 else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" then 1 else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" then 1 else 0 end) as music_count ' \
'FROM session_history ' \
'WHERE datetime(stopped, "unixepoch", "localtime") >= datetime("now", "-%s days", "localtime") ' \
'GROUP BY date_played ' \
'ORDER BY started ASC' % time_range
result = monitor_db.select(query)
else:
query = 'SELECT date(started, "unixepoch", "localtime") as date_played, ' \
'SUM(case when media_type = "episode" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as music_count ' \
'FROM session_history ' \
'WHERE datetime(stopped, "unixepoch", "localtime") >= datetime("now", "-%s days", "localtime") ' \
'GROUP BY date_played ' \
'ORDER BY started ASC' % time_range
result = monitor_db.select(query)
except:
logger.warn("Unable to execute database query.")
return None
# create our date range as some days may not have any data
# but we still want to display them
base = datetime.date.today()
date_list = [base - datetime.timedelta(days=x) for x in range(0, int(time_range))]
categories = []
series_1 = []
series_2 = []
series_3 = []
for date_item in sorted(date_list):
date_string = date_item.strftime('%Y-%m-%d')
categories.append(date_string)
series_1_value = 0
series_2_value = 0
series_3_value = 0
for item in result:
if date_string == item['date_played']:
series_1_value = item['tv_count']
series_2_value = item['movie_count']
series_3_value = item['music_count']
break
else:
series_1_value = 0
series_2_value = 0
series_3_value = 0
series_1.append(series_1_value)
series_2.append(series_2_value)
series_3.append(series_3_value)
series_1_output = {'name': 'TV',
'data': series_1}
series_2_output = {'name': 'Movies',
'data': series_2}
series_3_output = {'name': 'Music',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
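    # Shape of the dict returned by the graph helpers in this class, with
    # illustrative values only (three days, y_axis='plays'):
    #   {'categories': ['2015-10-01', '2015-10-02', '2015-10-03'],
    #    'series': [{'name': 'TV', 'data': [4, 0, 2]},
    #               {'name': 'Movies', 'data': [1, 0, 0]},
    #               {'name': 'Music', 'data': [0, 5, 3]}]}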
def get_total_plays_per_dayofweek(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
if y_axis == 'plays':
query = 'SELECT strftime("%w", datetime(started, "unixepoch", "localtime")) as daynumber, ' \
'case cast (strftime("%w", datetime(started, "unixepoch", "localtime")) as integer) ' \
'when 0 then "Sunday" ' \
'when 1 then "Monday" ' \
'when 2 then "Tuesday" ' \
'when 3 then "Wednesday" ' \
'when 4 then "Thursday" ' \
'when 5 then "Friday" ' \
'else "Saturday" end as dayofweek, ' \
'SUM(case when media_type = "episode" then 1 else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" then 1 else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" then 1 else 0 end) as music_count ' \
'FROM session_history ' \
'WHERE datetime(stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime") ' \
'GROUP BY dayofweek ' \
'ORDER BY daynumber'
result = monitor_db.select(query)
else:
query = 'SELECT strftime("%w", datetime(started, "unixepoch", "localtime")) as daynumber, ' \
'case cast (strftime("%w", datetime(started, "unixepoch", "localtime")) as integer) ' \
'when 0 then "Sunday" ' \
'when 1 then "Monday" ' \
'when 2 then "Tuesday" ' \
'when 3 then "Wednesday" ' \
'when 4 then "Thursday" ' \
'when 5 then "Friday" ' \
'else "Saturday" end as dayofweek, ' \
'SUM(case when media_type = "episode" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as music_count ' \
'FROM session_history ' \
'WHERE datetime(stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime") ' \
'GROUP BY dayofweek ' \
'ORDER BY daynumber'
result = monitor_db.select(query)
days_list = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday']
categories = []
series_1 = []
series_2 = []
series_3 = []
for day_item in days_list:
categories.append(day_item)
series_1_value = 0
series_2_value = 0
series_3_value = 0
for item in result:
if day_item == item['dayofweek']:
series_1_value = item['tv_count']
series_2_value = item['movie_count']
series_3_value = item['music_count']
break
else:
series_1_value = 0
series_2_value = 0
series_3_value = 0
series_1.append(series_1_value)
series_2.append(series_2_value)
series_3.append(series_3_value)
series_1_output = {'name': 'TV',
'data': series_1}
series_2_output = {'name': 'Movies',
'data': series_2}
series_3_output = {'name': 'Music',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_total_plays_per_hourofday(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
if y_axis == 'plays':
query = 'select strftime("%H", datetime(started, "unixepoch", "localtime")) as hourofday, ' \
'SUM(case when media_type = "episode" then 1 else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" then 1 else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" then 1 else 0 end) as music_count ' \
'FROM session_history ' \
'WHERE datetime(stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime") ' \
'GROUP BY hourofday ' \
'ORDER BY hourofday'
result = monitor_db.select(query)
else:
query = 'select strftime("%H", datetime(started, "unixepoch", "localtime")) as hourofday, ' \
'SUM(case when media_type = "episode" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as music_count ' \
'FROM session_history ' \
'WHERE datetime(stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime") ' \
'GROUP BY hourofday ' \
'ORDER BY hourofday'
result = monitor_db.select(query)
hours_list = ['00','01','02','03','04','05',
'06','07','08','09','10','11',
'12','13','14','15','16','17',
'18','19','20','21','22','23']
categories = []
series_1 = []
series_2 = []
series_3 = []
for hour_item in hours_list:
categories.append(hour_item)
series_1_value = 0
series_2_value = 0
series_3_value = 0
for item in result:
if hour_item == item['hourofday']:
series_1_value = item['tv_count']
series_2_value = item['movie_count']
series_3_value = item['music_count']
break
else:
series_1_value = 0
series_2_value = 0
series_3_value = 0
series_1.append(series_1_value)
series_2.append(series_2_value)
series_3.append(series_3_value)
series_1_output = {'name': 'TV',
'data': series_1}
series_2_output = {'name': 'Movies',
'data': series_2}
series_3_output = {'name': 'Music',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_total_plays_per_month(self, y_axis='plays'):
import time as time
monitor_db = database.MonitorDatabase()
if y_axis == 'plays':
query = 'SELECT strftime("%Y-%m", datetime(started, "unixepoch", "localtime")) as datestring, ' \
'SUM(case when media_type = "episode" then 1 else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" then 1 else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" then 1 else 0 end) as music_count ' \
'FROM session_history ' \
'WHERE datetime(started, "unixepoch", "localtime") >= datetime("now", "-12 months", "localtime") ' \
'GROUP BY strftime("%Y-%m", datetime(started, "unixepoch", "localtime")) ' \
'ORDER BY datestring DESC LIMIT 12'
result = monitor_db.select(query)
else:
query = 'SELECT strftime("%Y-%m", datetime(started, "unixepoch", "localtime")) as datestring, ' \
'SUM(case when media_type = "episode" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as music_count ' \
'FROM session_history ' \
'WHERE datetime(started, "unixepoch", "localtime") >= datetime("now", "-12 months", "localtime") ' \
'GROUP BY strftime("%Y-%m", datetime(started, "unixepoch", "localtime")) ' \
'ORDER BY datestring DESC LIMIT 12'
result = monitor_db.select(query)
# create our date range as some months may not have any data
# but we still want to display them
x = 12
base = time.localtime()
month_range = [time.localtime(
time.mktime((base.tm_year, base.tm_mon - n, 1, 0, 0, 0, 0, 0, 0))) for n in range(x)]
categories = []
series_1 = []
series_2 = []
series_3 = []
for month_item in sorted(month_range):
dt = datetime.datetime(*month_item[:6])
date_string = dt.strftime('%Y-%m')
categories.append(dt.strftime('%b %Y'))
series_1_value = 0
series_2_value = 0
series_3_value = 0
for item in result:
if date_string == item['datestring']:
series_1_value = item['tv_count']
series_2_value = item['movie_count']
series_3_value = item['music_count']
break
else:
series_1_value = 0
series_2_value = 0
series_3_value = 0
series_1.append(series_1_value)
series_2.append(series_2_value)
series_3.append(series_3_value)
series_1_output = {'name': 'TV',
'data': series_1}
series_2_output = {'name': 'Movies',
'data': series_2}
series_3_output = {'name': 'Music',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_total_plays_by_top_10_platforms(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
if y_axis == 'plays':
query = 'SELECT platform, ' \
'SUM(case when media_type = "episode" then 1 else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" then 1 else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" then 1 else 0 end) as music_count, ' \
'COUNT(id) as total_count ' \
'FROM session_history ' \
'WHERE (datetime(stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime")) ' \
'GROUP BY platform ' \
'ORDER BY total_count DESC ' \
'LIMIT 10'
result = monitor_db.select(query)
else:
query = 'SELECT platform, ' \
'SUM(case when media_type = "episode" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as music_count, ' \
'SUM(case when stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as total_duration ' \
'FROM session_history ' \
'WHERE (datetime(stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime")) ' \
'GROUP BY platform ' \
'ORDER BY total_duration DESC ' \
'LIMIT 10'
result = monitor_db.select(query)
categories = []
series_1 = []
series_2 = []
series_3 = []
for item in result:
categories.append(common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform']))
series_1.append(item['tv_count'])
series_2.append(item['movie_count'])
series_3.append(item['music_count'])
series_1_output = {'name': 'TV',
'data': series_1}
series_2_output = {'name': 'Movies',
'data': series_2}
series_3_output = {'name': 'Music',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_total_plays_by_top_10_users(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
if y_axis == 'plays':
query = 'SELECT ' \
'(case when users.friendly_name is null then users.username else ' \
'users.friendly_name end) as friendly_name,' \
'SUM(case when media_type = "episode" then 1 else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" then 1 else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" then 1 else 0 end) as music_count, ' \
'COUNT(session_history.id) as total_count ' \
'FROM session_history ' \
'JOIN users on session_history.user_id = users.user_id ' \
'WHERE (datetime(session_history.stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime")) ' \
'GROUP BY session_history.user_id ' \
'ORDER BY total_count DESC ' \
'LIMIT 10'
result = monitor_db.select(query)
else:
query = 'SELECT ' \
'(case when users.friendly_name is null then users.username else ' \
'users.friendly_name end) as friendly_name,' \
'SUM(case when media_type = "episode" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tv_count, ' \
'SUM(case when media_type = "movie" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as movie_count, ' \
'SUM(case when media_type = "track" and stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as music_count, ' \
'SUM(case when stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as total_duration ' \
'FROM session_history ' \
'JOIN users on session_history.user_id = users.user_id ' \
'WHERE (datetime(session_history.stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime")) ' \
'GROUP BY session_history.user_id ' \
'ORDER BY total_duration DESC ' \
'LIMIT 10'
result = monitor_db.select(query)
categories = []
series_1 = []
series_2 = []
series_3 = []
for item in result:
categories.append(item['friendly_name'])
series_1.append(item['tv_count'])
series_2.append(item['movie_count'])
series_3.append(item['music_count'])
series_1_output = {'name': 'TV',
'data': series_1}
series_2_output = {'name': 'Movies',
'data': series_2}
series_3_output = {'name': 'Music',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_total_plays_per_stream_type(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
try:
if y_axis == 'plays':
query = 'SELECT date(session_history.started, "unixepoch", "localtime") as date_played, ' \
'SUM(case when session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play") ' \
'then 1 else 0 end) as dp_count, ' \
'SUM(case when session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy") ' \
'then 1 else 0 end) as ds_count, ' \
'SUM(case when session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode") ' \
'then 1 else 0 end) as tc_count ' \
'FROM session_history ' \
'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \
'WHERE (datetime(session_history.stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-%s days", "localtime")) AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie" OR session_history.media_type = "track") ' \
'GROUP BY date_played ' \
'ORDER BY started ASC' % time_range
result = monitor_db.select(query)
else:
query = 'SELECT date(session_history.started, "unixepoch", "localtime") as date_played, ' \
'SUM(case when (session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as dp_count, ' \
'SUM(case when (session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as ds_count, ' \
'SUM(case when (session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tc_count ' \
'FROM session_history ' \
'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \
'WHERE datetime(session_history.stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-%s days", "localtime") AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie" OR session_history.media_type = "track") ' \
'GROUP BY date_played ' \
'ORDER BY started ASC' % time_range
result = monitor_db.select(query)
except Exception as e:
logger.warn("Unable to execute database query: %s." % e)
return None
# create our date range as some days may not have any data
# but we still want to display them
base = datetime.date.today()
date_list = [base - datetime.timedelta(days=x) for x in range(0, int(time_range))]
categories = []
series_1 = []
series_2 = []
series_3 = []
for date_item in sorted(date_list):
date_string = date_item.strftime('%Y-%m-%d')
categories.append(date_string)
series_1_value = 0
series_2_value = 0
series_3_value = 0
for item in result:
if date_string == item['date_played']:
series_1_value = item['dp_count']
series_2_value = item['ds_count']
series_3_value = item['tc_count']
break
else:
series_1_value = 0
series_2_value = 0
series_3_value = 0
series_1.append(series_1_value)
series_2.append(series_2_value)
series_3.append(series_3_value)
series_1_output = {'name': 'Direct Play',
'data': series_1}
series_2_output = {'name': 'Direct Stream',
'data': series_2}
series_3_output = {'name': 'Transcode',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_total_plays_by_source_resolution(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
if y_axis == 'plays':
query = 'SELECT session_history_media_info.video_resolution AS resolution, ' \
'SUM(case when session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play") ' \
'then 1 else 0 end) as dp_count, ' \
'SUM(case when session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy") ' \
'then 1 else 0 end) as ds_count, ' \
'SUM(case when session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode") ' \
'then 1 else 0 end) as tc_count, ' \
'COUNT(session_history.id) as total_count ' \
'FROM session_history ' \
'JOIN session_history_media_info on session_history.id = session_history_media_info.id ' \
'WHERE (datetime(session_history.stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime")) AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie") ' \
'GROUP BY resolution ' \
'ORDER BY total_count DESC ' \
'LIMIT 10'
result = monitor_db.select(query)
else:
query = 'SELECT session_history_media_info.video_resolution AS resolution,' \
'SUM(case when (session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as dp_count, ' \
'SUM(case when (session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as ds_count, ' \
'SUM(case when (session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tc_count, ' \
'SUM(case when stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as total_duration ' \
'FROM session_history ' \
'JOIN session_history_media_info on session_history.id = session_history_media_info.id ' \
'WHERE (datetime(session_history.stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime")) AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie") ' \
'GROUP BY resolution ' \
'ORDER BY total_duration DESC ' \
'LIMIT 10'
result = monitor_db.select(query)
categories = []
series_1 = []
series_2 = []
series_3 = []
for item in result:
categories.append(item['resolution'])
series_1.append(item['dp_count'])
series_2.append(item['ds_count'])
series_3.append(item['tc_count'])
series_1_output = {'name': 'Direct Play',
'data': series_1}
series_2_output = {'name': 'Direct Stream',
'data': series_2}
series_3_output = {'name': 'Transcode',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_total_plays_by_stream_resolution(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
if y_axis == 'plays':
query = 'SELECT ' \
'(case when session_history_media_info.video_decision = "transcode" then ' \
'(case ' \
'when session_history_media_info.transcode_height <= 360 then "sd" ' \
'when session_history_media_info.transcode_height <= 480 then "480" ' \
'when session_history_media_info.transcode_height <= 576 then "576" ' \
'when session_history_media_info.transcode_height <= 720 then "720" ' \
'when session_history_media_info.transcode_height <= 1080 then "1080" ' \
'when session_history_media_info.transcode_height <= 1440 then "QHD" ' \
'when session_history_media_info.transcode_height <= 2160 then "4K" ' \
'else "unknown" end) else session_history_media_info.video_resolution end) as resolution, ' \
'SUM(case when session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play") ' \
'then 1 else 0 end) as dp_count, ' \
'SUM(case when session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy") ' \
'then 1 else 0 end) as ds_count, ' \
'SUM(case when session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode") ' \
'then 1 else 0 end) as tc_count, ' \
'COUNT(session_history.id) as total_count ' \
'FROM session_history ' \
'JOIN session_history_media_info on session_history.id = session_history_media_info.id ' \
'WHERE (datetime(session_history.stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime")) AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie") ' \
'GROUP BY resolution ' \
'ORDER BY total_count DESC ' \
'LIMIT 10'
result = monitor_db.select(query)
else:
query = 'SELECT ' \
'(case when session_history_media_info.video_decision = "transcode" then ' \
'(case ' \
'when session_history_media_info.transcode_height <= 360 then "sd" ' \
'when session_history_media_info.transcode_height <= 480 then "480" ' \
'when session_history_media_info.transcode_height <= 576 then "576" ' \
'when session_history_media_info.transcode_height <= 720 then "720" ' \
'when session_history_media_info.transcode_height <= 1080 then "1080" ' \
'when session_history_media_info.transcode_height <= 1440 then "QHD" ' \
'when session_history_media_info.transcode_height <= 2160 then "4K" ' \
'else "unknown" end) else session_history_media_info.video_resolution end) as resolution, ' \
'SUM(case when (session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as dp_count, ' \
'SUM(case when (session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as ds_count, ' \
'SUM(case when (session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tc_count, ' \
'SUM(case when stopped > 0 then (stopped - started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as total_duration ' \
'FROM session_history ' \
'JOIN session_history_media_info on session_history.id = session_history_media_info.id ' \
'WHERE (datetime(session_history.stopped, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime")) AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie") ' \
'GROUP BY resolution ' \
'ORDER BY total_duration DESC ' \
'LIMIT 10'
result = monitor_db.select(query)
categories = []
series_1 = []
series_2 = []
series_3 = []
for item in result:
categories.append(item['resolution'])
series_1.append(item['dp_count'])
series_2.append(item['ds_count'])
series_3.append(item['tc_count'])
series_1_output = {'name': 'Direct Play',
'data': series_1}
series_2_output = {'name': 'Direct Stream',
'data': series_2}
series_3_output = {'name': 'Transcode',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_stream_type_by_top_10_platforms(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
if y_axis == 'plays':
query = 'SELECT ' \
'session_history.platform as platform, ' \
'SUM(case when session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play") ' \
'then 1 else 0 end) as dp_count, ' \
'SUM(case when session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy") ' \
'then 1 else 0 end) as ds_count, ' \
'SUM(case when session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode") ' \
'then 1 else 0 end) as tc_count, ' \
'COUNT(session_history.id) as total_count ' \
'FROM session_history ' \
'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \
'WHERE datetime(session_history.started, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime") AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie" OR session_history.media_type = "track") ' \
'GROUP BY platform ' \
'ORDER BY total_count DESC LIMIT 10'
result = monitor_db.select(query)
else:
query = 'SELECT ' \
'session_history.platform as platform, ' \
'SUM(case when (session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as dp_count, ' \
'SUM(case when (session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as ds_count, ' \
'SUM(case when (session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tc_count, ' \
'SUM(case when session_history.stopped > 0 ' \
'then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as total_duration ' \
'FROM session_history ' \
'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \
'WHERE datetime(session_history.started, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime") AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie" OR session_history.media_type = "track") ' \
'GROUP BY platform ' \
'ORDER BY total_duration DESC LIMIT 10'
result = monitor_db.select(query)
categories = []
series_1 = []
series_2 = []
series_3 = []
for item in result:
categories.append(common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform']))
series_1.append(item['dp_count'])
series_2.append(item['ds_count'])
series_3.append(item['tc_count'])
series_1_output = {'name': 'Direct Play',
'data': series_1}
series_2_output = {'name': 'Direct Stream',
'data': series_2}
series_3_output = {'name': 'Transcode',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
def get_stream_type_by_top_10_users(self, time_range='30', y_axis='plays'):
monitor_db = database.MonitorDatabase()
if not time_range.isdigit():
time_range = '30'
if y_axis == 'plays':
query = 'SELECT ' \
'CASE WHEN users.friendly_name is null then users.username else users.friendly_name end as username, ' \
'SUM(case when session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play") ' \
'then 1 else 0 end) as dp_count, ' \
'SUM(case when session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy") ' \
'then 1 else 0 end) as ds_count, ' \
'SUM(case when session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode") ' \
'then 1 else 0 end) as tc_count, ' \
'COUNT(session_history.id) as total_count ' \
'FROM session_history ' \
'JOIN users ON session_history.user_id = users.user_id ' \
'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \
'WHERE datetime(session_history.started, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime") AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie" OR session_history.media_type = "track") ' \
'GROUP BY username ' \
'ORDER BY total_count DESC LIMIT 10'
result = monitor_db.select(query)
else:
query = 'SELECT ' \
'CASE WHEN users.friendly_name is null then users.username else users.friendly_name end as username, ' \
'SUM(case when (session_history_media_info.video_decision = "direct play" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "direct play")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as dp_count, ' \
'SUM(case when (session_history_media_info.video_decision = "copy" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "copy")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as ds_count, ' \
'SUM(case when (session_history_media_info.video_decision = "transcode" ' \
'or (session_history_media_info.video_decision = "" and session_history_media_info.audio_decision = "transcode")) ' \
'and session_history.stopped > 0 then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as tc_count, ' \
'SUM(case when session_history.stopped > 0 ' \
'then (session_history.stopped - session_history.started) ' \
' - (case when paused_counter is NULL then 0 else paused_counter end) else 0 end) as total_duration ' \
'FROM session_history ' \
'JOIN users ON session_history.user_id = users.user_id ' \
'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \
'WHERE datetime(session_history.started, "unixepoch", "localtime") >= ' \
'datetime("now", "-' + time_range + ' days", "localtime") AND ' \
'(session_history.media_type = "episode" OR session_history.media_type = "movie" OR session_history.media_type = "track") ' \
'GROUP BY username ' \
'ORDER BY total_duration DESC LIMIT 10'
result = monitor_db.select(query)
categories = []
series_1 = []
series_2 = []
series_3 = []
for item in result:
categories.append(item['username'])
series_1.append(item['dp_count'])
series_2.append(item['ds_count'])
series_3.append(item['tc_count'])
series_1_output = {'name': 'Direct Play',
'data': series_1}
series_2_output = {'name': 'Direct Stream',
'data': series_2}
series_3_output = {'name': 'Transcode',
'data': series_3}
output = {'categories': categories,
'series': [series_1_output, series_2_output, series_3_output]}
return output
|
gpl-3.0
| 1,632,475,451,479,142,700 | 55.013333 | 149 | 0.522911 | false | 4.089891 | false | false | false |
cbertinato/pandas
|
pandas/tests/indexes/timedeltas/test_scalar_compat.py
|
1
|
2391
|
"""
Tests for TimedeltaIndex methods behaving like their Timedelta counterparts
"""
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
import pandas.util.testing as tm
class TestVectorizedTimedelta:
def test_tdi_total_seconds(self):
# GH#10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
ser = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with nat
ser[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with both nat
ser = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(ser.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_tdi_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00')])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
td.round(freq='foo')
with pytest.raises(ValueError, match=msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
td.round(freq='M')
with pytest.raises(ValueError, match=msg):
elt.round(freq='M')
|
bsd-3-clause
| -2,343,195,333,733,465,600 | 36.952381 | 76 | 0.546633 | false | 3.644817 | true | false | false |
CrowdEmotion/crowdemotion-api-clients-examples
|
python/crowdemotion_api_client_python/models/face_video.py
|
1
|
13837
|
# coding: utf-8
"""
CloudEmotion API v1
CrowdEmotion API
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class FaceVideo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, time_created=None, filename=None, fq_avg=None, fq_stddev=None, frame_rate=None, num_frames=None, remote_location=None, status=None, status_text=None, status_message=None, response_id=None, md5=None, length_ms=None, length_s=None, frames_processed=None, processing_time=None, engine_version=None):
"""
FaceVideo - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'time_created': 'datetime',
'filename': 'str',
'fq_avg': 'int',
'fq_stddev': 'int',
'frame_rate': 'int',
'num_frames': 'int',
'remote_location': 'str',
'status': 'int',
'status_text': 'str',
'status_message': 'str',
'response_id': 'str',
'md5': 'str',
'length_ms': 'int',
'length_s': 'float',
'frames_processed': 'int',
'processing_time': 'int',
'engine_version': 'int'
}
self.attribute_map = {
'id': 'id',
'time_created': 'timeCreated',
'filename': 'filename',
'fq_avg': 'fqAvg',
'fq_stddev': 'fqStddev',
'frame_rate': 'frameRate',
'num_frames': 'numFrames',
'remote_location': 'remoteLocation',
'status': 'status',
'status_text': 'statusText',
'status_message': 'statusMessage',
'response_id': 'responseId',
'md5': 'md5',
'length_ms': 'lengthMS',
'length_s': 'lengthS',
'frames_processed': 'framesProcessed',
'processing_time': 'processingTime',
'engine_version': 'engineVersion'
}
self._id = id
self._time_created = time_created
self._filename = filename
self._fq_avg = fq_avg
self._fq_stddev = fq_stddev
self._frame_rate = frame_rate
self._num_frames = num_frames
self._remote_location = remote_location
self._status = status
self._status_text = status_text
self._status_message = status_message
self._response_id = response_id
self._md5 = md5
self._length_ms = length_ms
self._length_s = length_s
self._frames_processed = frames_processed
self._processing_time = processing_time
self._engine_version = engine_version
@property
def id(self):
"""
Gets the id of this FaceVideo.
:return: The id of this FaceVideo.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this FaceVideo.
:param id: The id of this FaceVideo.
:type: int
"""
self._id = id
@property
def time_created(self):
"""
Gets the time_created of this FaceVideo.
:return: The time_created of this FaceVideo.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this FaceVideo.
:param time_created: The time_created of this FaceVideo.
:type: datetime
"""
self._time_created = time_created
@property
def filename(self):
"""
Gets the filename of this FaceVideo.
:return: The filename of this FaceVideo.
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""
Sets the filename of this FaceVideo.
:param filename: The filename of this FaceVideo.
:type: str
"""
self._filename = filename
@property
def fq_avg(self):
"""
Gets the fq_avg of this FaceVideo.
:return: The fq_avg of this FaceVideo.
:rtype: int
"""
return self._fq_avg
@fq_avg.setter
def fq_avg(self, fq_avg):
"""
Sets the fq_avg of this FaceVideo.
:param fq_avg: The fq_avg of this FaceVideo.
:type: int
"""
self._fq_avg = fq_avg
@property
def fq_stddev(self):
"""
Gets the fq_stddev of this FaceVideo.
:return: The fq_stddev of this FaceVideo.
:rtype: int
"""
return self._fq_stddev
@fq_stddev.setter
def fq_stddev(self, fq_stddev):
"""
Sets the fq_stddev of this FaceVideo.
:param fq_stddev: The fq_stddev of this FaceVideo.
:type: int
"""
self._fq_stddev = fq_stddev
@property
def frame_rate(self):
"""
Gets the frame_rate of this FaceVideo.
:return: The frame_rate of this FaceVideo.
:rtype: int
"""
return self._frame_rate
@frame_rate.setter
def frame_rate(self, frame_rate):
"""
Sets the frame_rate of this FaceVideo.
:param frame_rate: The frame_rate of this FaceVideo.
:type: int
"""
self._frame_rate = frame_rate
@property
def num_frames(self):
"""
Gets the num_frames of this FaceVideo.
:return: The num_frames of this FaceVideo.
:rtype: int
"""
return self._num_frames
@num_frames.setter
def num_frames(self, num_frames):
"""
Sets the num_frames of this FaceVideo.
:param num_frames: The num_frames of this FaceVideo.
:type: int
"""
self._num_frames = num_frames
@property
def remote_location(self):
"""
Gets the remote_location of this FaceVideo.
:return: The remote_location of this FaceVideo.
:rtype: str
"""
return self._remote_location
@remote_location.setter
def remote_location(self, remote_location):
"""
Sets the remote_location of this FaceVideo.
:param remote_location: The remote_location of this FaceVideo.
:type: str
"""
self._remote_location = remote_location
@property
def status(self):
"""
Gets the status of this FaceVideo.
:return: The status of this FaceVideo.
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this FaceVideo.
:param status: The status of this FaceVideo.
:type: int
"""
self._status = status
@property
def status_text(self):
"""
Gets the status_text of this FaceVideo.
:return: The status_text of this FaceVideo.
:rtype: str
"""
return self._status_text
@status_text.setter
def status_text(self, status_text):
"""
Sets the status_text of this FaceVideo.
:param status_text: The status_text of this FaceVideo.
:type: str
"""
self._status_text = status_text
@property
def status_message(self):
"""
Gets the status_message of this FaceVideo.
:return: The status_message of this FaceVideo.
:rtype: str
"""
return self._status_message
@status_message.setter
def status_message(self, status_message):
"""
Sets the status_message of this FaceVideo.
:param status_message: The status_message of this FaceVideo.
:type: str
"""
self._status_message = status_message
@property
def response_id(self):
"""
Gets the response_id of this FaceVideo.
:return: The response_id of this FaceVideo.
:rtype: str
"""
return self._response_id
@response_id.setter
def response_id(self, response_id):
"""
Sets the response_id of this FaceVideo.
:param response_id: The response_id of this FaceVideo.
:type: str
"""
self._response_id = response_id
@property
def md5(self):
"""
Gets the md5 of this FaceVideo.
:return: The md5 of this FaceVideo.
:rtype: str
"""
return self._md5
@md5.setter
def md5(self, md5):
"""
Sets the md5 of this FaceVideo.
:param md5: The md5 of this FaceVideo.
:type: str
"""
self._md5 = md5
@property
def length_ms(self):
"""
Gets the length_ms of this FaceVideo.
:return: The length_ms of this FaceVideo.
:rtype: int
"""
return self._length_ms
@length_ms.setter
def length_ms(self, length_ms):
"""
Sets the length_ms of this FaceVideo.
:param length_ms: The length_ms of this FaceVideo.
:type: int
"""
self._length_ms = length_ms
@property
def length_s(self):
"""
Gets the length_s of this FaceVideo.
:return: The length_s of this FaceVideo.
:rtype: float
"""
return self._length_s
@length_s.setter
def length_s(self, length_s):
"""
Sets the length_s of this FaceVideo.
:param length_s: The length_s of this FaceVideo.
:type: float
"""
self._length_s = length_s
@property
def frames_processed(self):
"""
Gets the frames_processed of this FaceVideo.
:return: The frames_processed of this FaceVideo.
:rtype: int
"""
return self._frames_processed
@frames_processed.setter
def frames_processed(self, frames_processed):
"""
Sets the frames_processed of this FaceVideo.
:param frames_processed: The frames_processed of this FaceVideo.
:type: int
"""
self._frames_processed = frames_processed
@property
def processing_time(self):
"""
Gets the processing_time of this FaceVideo.
:return: The processing_time of this FaceVideo.
:rtype: int
"""
return self._processing_time
@processing_time.setter
def processing_time(self, processing_time):
"""
Sets the processing_time of this FaceVideo.
:param processing_time: The processing_time of this FaceVideo.
:type: int
"""
self._processing_time = processing_time
@property
def engine_version(self):
"""
Gets the engine_version of this FaceVideo.
:return: The engine_version of this FaceVideo.
:rtype: int
"""
return self._engine_version
@engine_version.setter
def engine_version(self, engine_version):
"""
Sets the engine_version of this FaceVideo.
:param engine_version: The engine_version of this FaceVideo.
:type: int
"""
self._engine_version = engine_version
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
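# Minimal usage sketch (not part of the generated client); every value below is
# an invented example, not data returned by the CrowdEmotion API:
if __name__ == '__main__':
    example = FaceVideo(id=1, filename='clip.mp4', frame_rate=30, num_frames=900)
    print(example.to_dict()['filename'])  # -> 'clip.mp4'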
|
mit
| -7,702,536,492,178,499,000 | 23.40388 | 328 | 0.532847 | false | 4.313279 | false | false | false |
vicgc/pyAndriller
|
Andriller.py
|
1
|
33884
|
#!/usr/bin/env python3
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Andriller.py - Forensic acquisition tool for Android devices.
# Website, Usage and Disclaimer: http://android.saz.lt
# Copyright (C) 2013 Denis Sazonov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import sys
import os
import time
import re
import hashlib
import sqlite3 as sq
from json import loads
from binascii import hexlify
from datetime import datetime
from datetime import timedelta
from subprocess import check_output as co
from subprocess import call
# Setting variables
ANDRILLER_VERSION = "alpha-1.1.0"
A_BUILD_DATE = "07/11/2013"
# Intro info
print("\033[93m>>>>>>>>>> Andriller version: %s\033[0m" % ANDRILLER_VERSION)
print("\033[93m>>>>>>>>>> Build date: %s\033[0m" % A_BUILD_DATE)
print("\033[93m>>>>>>>>>> http://android.saz.lt\033[0m")
REPORT = [] # List to be populated for generating the REPORT.html file
# Check OS and define adb
download_adb = ' ERROR! \n\'./adb\' file is not present!\n Download it from http://android.saz.lt/download/adb.zip; \n Unzip it, and place the files into this directory;\n Run the program again.'
OS_CHECK = sys.platform
if OS_CHECK == 'linux' or OS_CHECK == 'linux2':
if call(['which', 'adb']) == 0:
ADB = "adb"
SEP = '/'
else:
ADB = './adb'
SEP = '/'
if os.path.isfile(ADB):
os.chmod(ADB, 0o755) # chmod expects an integer mode; the string '0755' would raise TypeError
else:
sys.exit(download_adb)
elif OS_CHECK == 'win32':
ADB = "adb.exe"
SEP = '\\'
if not os.path.isfile(ADB):
sys.exit(download_adb)
elif OS_CHECK == 'darwin':
ADB = "./adb_mac"
SEP = '/'
if not os.path.isfile(ADB):
sys.exit(download_adb)
try:
ADB; co([ADB, 'start-server'])
except NameError:
sys.exit(" Cannot determine OS!")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Unrooted (shell) devices, to print device information, limited extractions
#
print("\033[94m>>>>>>>>>> General Device Information.\033[0m")
# Check for connected Android device
if 'unknown' in co([ADB, 'get-state']).decode('UTF-8'):
sys.exit("\033[91m No Android device found!\033[0m")
else:
ADB_SER = co([ADB, 'get-serialno']).decode('UTF-8').replace('\n', '').replace('\r', '')
print(" ADB serial: " + ADB_SER); REPORT.append(["ADB serial", ADB_SER])
# Check permissions
QPERM = co([ADB, 'shell', 'id']).decode('UTF-8')
if 'root' in QPERM:
PERM = 'root'
else:
QPERMSU = co([ADB, 'shell', 'su', '-c', 'id']).decode('UTF-8')
if 'root' in QPERMSU:
PERM = 'root(su)'
else:
PERM = 'shell'
try:
print(" Shell permissions: " + PERM); REPORT.append(["Shell permissions", PERM])
except NameError:
sys.exit("\033[91m Android permission cannot be established!\033[0m")
BUILDPROP = co([ADB, 'shell', 'cat', '/system/build.prop']).decode('UTF-8')
# Make & Model
for manuf in BUILDPROP.split('\n'):
if 'ro.product.manufacturer' in manuf:
DEVICE_MANUF = manuf.strip().split('=')[1]
for model in BUILDPROP.split('\n'):
if 'ro.product.model' in model:
DEVICE_MODEL = model.strip().split('=')[1]
try:
print(" Device model: %s %s" % (DEVICE_MANUF, DEVICE_MODEL)); REPORT.append(["Manufacturer", DEVICE_MANUF]); REPORT.append(["Model", DEVICE_MODEL])
except:
pass
# IMEI
IMEI = co([ADB, 'shell', 'dumpsys', 'iphonesubinfo']).decode('UTF-8').split()[-1]
try:
print(" IMEI: " + IMEI); REPORT.append(["IMEI", IMEI])
except:
pass
# A version
for aver in BUILDPROP.split('\n'):
if 'ro.build.version.release' in aver:
ANDROID_VER = aver.strip().split('=')[1]
try:
print(" Android version: " + ANDROID_VER); REPORT.append(["Android version", ANDROID_VER])
except:
pass
# Build ID
for buildid in BUILDPROP.split('\n'):
if 'ro.build.display.id' in buildid:
BUILD_ID = buildid.strip().split('=')[1]
try:
print(" Build number: " + BUILD_ID); REPORT.append(["Build name", BUILD_ID])
except:
pass
# Wifi
DUMPSYS_W = co([ADB, 'shell', 'dumpsys', 'wifi']).decode('UTF-8')
try:
wifi_beg = DUMPSYS_W.index('MAC:')+5
wifi_end = DUMPSYS_W[wifi_beg:].index(',')
if wifi_end == 17:
WIFI_MAC = DUMPSYS_W[wifi_beg:wifi_beg+wifi_end].lower()
try:
print(" Wi-fi MAC: " + WIFI_MAC); REPORT.append(["Wifi MAC", WIFI_MAC])
except:
pass
except:
pass
# Time and date
LOCAL_TIME = time.strftime('%Y-%m-%d %H:%M:%S %Z')
try:
print(" Local time: " + LOCAL_TIME); REPORT.append(["Local time", LOCAL_TIME])
except:
pass
ANDROID_TIME = co([ADB, 'shell', 'date', '+%F %T %Z']).decode('UTF-8').replace('\r\n', '')
try:
print(" Android time: " + ANDROID_TIME); REPORT.append(["Android time", ANDROID_TIME])
except:
pass
# SIM card extraction
SIM_LOC = '/data/system/SimCard.dat'
if co([ADB, 'shell', 'ls', SIM_LOC]).decode('UTF-8').replace('\r', '').replace('\n', '') == SIM_LOC:
SIM_DATA = co([ADB, 'shell', 'cat', SIM_LOC]).decode('UTF-8').replace('\r', '')
for sim_d in SIM_DATA.split('\n'):
if 'CurrentSimSerialNumber' in sim_d:
SIM_ICCID = sim_d.split('=')[1]
if SIM_ICCID != '' and SIM_ICCID != 'null':
REPORT.append(['SIM ICCID', SIM_ICCID])
if 'CurrentSimPhoneNumber' in sim_d:
SIM_MSISDN = sim_d.split('=')[1]
if SIM_MSISDN != '' and SIM_MSISDN != 'null':
REPORT.append(['SIM MSISDN', SIM_MSISDN])
if 'CurrentSimOperatorName' in sim_d:
SIM_OP = sim_d.split('=')[1]
if SIM_OP != '' and SIM_OP != 'null':
REPORT.append(['SIM Operator', SIM_OP])
if 'PreviousSimSerialNumber' in sim_d:
PRV_SIM_ICCID = sim_d.split('=')[1]
if PRV_SIM_ICCID != '' and PRV_SIM_ICCID != 'null':
REPORT.append(['SIM ICCID (Previous)', PRV_SIM_ICCID])
if 'PreviousSimPhoneNumber' in sim_d:
PRV_SIM_MSISDN = sim_d.split('=')[1]
if PRV_SIM_MSISDN != '' and PRV_SIM_MSISDN != 'null':
REPORT.append(['SIM MSISDN (Previous)', PRV_SIM_MSISDN])
#
# Accounts
ALLACC = co([ADB, 'shell', 'dumpsys', 'account']).decode('UTF-8')
all_acc = re.compile('Account {name=', re.DOTALL).finditer(ALLACC)
ACCOUNTS = []
for acc in all_acc:
hit_pos = acc.start()
tacc = ALLACC[hit_pos+14:]
end_pos = tacc.index('}')
acc0 = tacc[:end_pos].replace(' type=', '').split(',')
acc = acc0[1]+": "+acc0[0]
ACCOUNTS.append(acc)
if ACCOUNTS != '':
print("\033[94m>>>>>>>>>> Sync'ed Accounts.\033[0m")
for account in ACCOUNTS:
print(account)
REPORT.append(["Accounts", ACCOUNTS])
# Create output directory
OR_DATE = time.strftime('%Y-%m-%d')
OR_TIME = time.strftime('%H.%M.%S')
OUTPUT = DEVICE_MANUF+"_"+DEVICE_MODEL+"_"+OR_DATE+"_"+OR_TIME+SEP
try:
os.mkdir(OUTPUT)
os.mkdir(OUTPUT+SEP+'db')
except:
sys.exit(" Insufficient permissions to create a folder in this directory!")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ROOT EXTRACTION
#
if 'root' in QPERM:
SUC = ''
print("\033[94m>>>>>>>>>> Downloading databases...\033[0m")
elif 'root' in QPERMSU:
SUC = 'su -c'
print("\033[94m>>>>>>>>>> Downloading databases...\033[0m")
#
# DATABASE EXTRACTION
#
# Database links
DBLS = [
'/data/data/com.android.providers.settings/databases/settings.db',
'/data/data/com.android.providers.contacts/databases/contacts2.db',
'/data/data/com.sec.android.provider.logsprovider/databases/logs.db',
'/data/data/com.android.providers.telephony/databases/mmssms.db',
'/data/data/com.facebook.katana/databases/fb.db',
'/data/data/com.facebook.katana/databases/contacts_db2',
'/data/data/com.facebook.katana/databases/threads_db2',
'/data/data/com.facebook.katana/databases/photos_db',
'/data/data/com.whatsapp/databases/wa.db',
'/data/data/com.whatsapp/databases/msgstore.db',
'/data/data/kik.android/databases/kikDatabase.db',
'/data/system/gesture.key',
'/data/system/cm_gesture.key',
'/data/system/locksettings.db',
'/data/system/password.key'
]
#
# DOWNLOADING DATABASES
DLLS = [] # downloaded databases empty list
def download_database(DB_PATH):
DB_NAME = DB_PATH.split('/')[-1]
if co([ADB, 'shell', SUC, 'ls', DB_PATH]).decode('UTF-8').replace('\r', '').replace('\n', '') == DB_PATH:
if 'su' in PERM:
co([ADB, 'shell', SUC, 'dd', 'if='+DB_PATH, 'of=/data/local/tmp/'+DB_NAME])
co([ADB, 'shell', SUC, 'chmod', '777', '/data/local/tmp/'+DB_NAME])
co([ADB, 'pull', '/data/local/tmp/'+DB_NAME, OUTPUT+SEP+'db'+SEP+DB_NAME])
co([ADB, 'shell', SUC, 'rm', '/data/local/tmp/'+DB_NAME])
else:
co([ADB, 'pull', DB_PATH, OUTPUT+SEP+'db'+SEP+DB_NAME])
if os.path.isfile(OUTPUT+SEP+'db'+SEP+DB_NAME) == True:
fileh = open(OUTPUT+SEP+'db'+SEP+'md5sums', 'a')
DB_MD5 = hashlib.md5(open(OUTPUT+SEP+'db'+SEP+DB_NAME, 'rb').read()).hexdigest()
DLLS.append(DB_NAME) #; DLLS.append(DB_MD5)
fileh.write(DB_MD5+'\t'+DB_NAME+'\n')
fileh.close()
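# For reference, the same rooted copy can be reproduced by hand with adb; the
# package and database names below are placeholders, not fixed values:
#   adb shell su -c "dd if=/data/data/<package>/databases/<db> of=/data/local/tmp/<db>"
#   adb shell su -c "chmod 777 /data/local/tmp/<db>"
#   adb pull /data/local/tmp/<db> <output_dir>/db/<db>
#   adb shell su -c "rm /data/local/tmp/<db>"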
if 'root' in PERM:
for db in DBLS:
download_database(db)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DECODING DEFINITIONS FOR DATABASES
#
# Decode gesture.key # # # # # # # # # # # # # # # # # # # # #
def decode_gesturekey():
fileh = open(OUTPUT+SEP+'db'+SEP+'gesture.key', 'rb')
ges_data = fileh.read()
if len(ges_data) == 20:
GKEY = hexlify(ges_data).decode('UTF-8')
REPORT.append(['Gesture pattern', '<a href="http://android.saz.lt/cgi-bin/online_pattern.py?encoded=%s" target="_blank">%s</a>' % (GKEY, GKEY)])
# # # # #
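# gesture.key holds an unsalted SHA-1 digest of the swipe sequence: each node of the
# 3x3 grid is one byte (0-8) and a valid pattern visits 4 to 9 distinct nodes. As an
# offline alternative to the online lookup linked above, a sketch of an exhaustive
# search (it over-generates patterns that skip nodes, but still covers every valid one):
def bruteforce_gesturekey(path):
    from itertools import permutations
    target = open(path, 'rb').read()  # the 20-byte digest pulled from the device
    for length in range(4, 10):
        for pattern in permutations(range(9), length):
            if hashlib.sha1(bytes(pattern)).digest() == target:
                return pattern  # e.g. (0, 1, 2, 5) = top row, then middle-right
    return None
# Example (hypothetical) call: bruteforce_gesturekey(OUTPUT+SEP+'db'+SEP+'gesture.key')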
REP_FOOTER = '</table>\n<p align="center"><i># <a href="http://android.saz.lt" target="_blank">http://android.saz.lt</a> #</i></p>\n</body></html>'
# Brute force 4-digit password # # # # # # # # # # # # # # # #
def decode_pwkey(pwkey, pwsalt):
for pin in range(0,10000):
pin = str(pin).zfill(4)
salt = '%x' % pwsalt
h = hashlib.sha1((str(pin)+str(salt)).encode('ascii')).hexdigest()
if h.upper() == pwkey[:40]:
return pin
# # # # #
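# On these Android versions password.key is 72 hex characters: SHA-1(pass+salt) in the
# first 40 characters followed by MD5(pass+salt) in the remaining 32, with the salt kept
# in settings.db under lockscreen.password_salt. Example use of the helper above, with
# made-up values:
#   pin = decode_pwkey(PW_KEY, PW_SALT)   # returns e.g. '0000'..'9999', or None if no match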
# Decode settings.db # # # # # # # # # # # # # # # # # # # # #
def decode_settingsdb():
con = sq.connect(OUTPUT+SEP+'db'+SEP+'settings.db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='secure'")
if c.fetchone() != None:
c.execute("SELECT value FROM secure WHERE name = 'bluetooth_address'")
BT_MAC = c.fetchone()
c.execute("SELECT value FROM secure WHERE name = 'bluetooth_name'")
BT_NAME = c.fetchone()
c.execute("SELECT value FROM secure WHERE name = 'android_id'")
AN_ID = c.fetchone(); REPORT.insert(1, ["Android ID", AN_ID])
c.execute("SELECT value FROM secure WHERE name = 'lockscreen.password_salt'")
try:
PW_SALT = int(c.fetchone()[0])
except:
PW_SALT = None
con.close()
if BT_MAC != None:
for findlt in REPORT:
if 'Local time' in findlt:
LotLoc = REPORT.index(findlt)
REPORT.insert(LotLoc, ["Bluetooth MAC", BT_MAC])
REPORT.insert(LotLoc+1, ["Bluetooth name", BT_NAME])
break
if PW_SALT != None:
if 'password.key' in DLLS:
fileh = open(OUTPUT+SEP+'db'+SEP+'password.key', 'r')
PW_KEY = fileh.read(); fileh.close()
if len(PW_KEY) == 72:
PW_PIN = decode_pwkey(PW_KEY, PW_SALT)
if PW_PIN != None or PW_PIN != '':
REPORT.append(["Lockscreen PIN", PW_PIN])
# # # # #
# Decode contacts2.db (Pbook) # # # # # # # # # # # # # # # # #
def decode_contacts2db():
rep_title = 'Contacts'
con = sq.connect(OUTPUT+SEP+'db'+SEP+'contacts2.db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='data'")
if c.fetchone() != None:
c.execute("SELECT raw_contact_id, mimetypes.mimetype, data1 FROM data JOIN mimetypes ON (data.mimetype_id=mimetypes._id) ORDER BY raw_contact_id")
#c.execute("SELECT raw_contact_id, mimetypes.mimetype, data1 FROM data JOIN mimetypes ON (data.mimetype_id=mimetypes._id) JOIN visible_contacts ON (data.raw_contact_id=visible_contacts._id) ORDER BY raw_contact_id")
c2_data = c.fetchall()
con.close()
if c2_data != '':
fileh = open(OUTPUT+'contacts.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border="1" cellpadding="2" cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th nowrap>#</th><th nowrap>Name</th><th nowrap>Number</th><th nowrap>Email</th><th>Other</th></tr>' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
pbook = []; tD = {}
for c2_item in c2_data:
c2key = str(c2_item[0])
c2typ = c2_item[1].split('/')[1]
c2dat = c2_item[2]
if c2dat != None and c2dat != '':
if tD.get('index_key') == c2key:
if c2typ in tD:
tD[c2typ] = tD[c2typ]+'<br/>'+c2dat
else:
tD[c2typ] = c2dat
else:
if len(tD) > 0:
pbook.append(tD); tD = {}
tD['index_key'] = c2key
tD[c2typ] = c2dat
else:
tD['index_key'] = c2key
tD[c2typ] = c2dat
pbook.append(tD); del tD
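# Each entry in pbook collapses one raw_contact_id into a dict keyed by the mimetype
# suffix; an invented example of what a single entry can look like:
#   {'index_key': '12', 'name': 'John Smith', 'phone_v2': '+441234567890',
#    'email_v2': 'john@example.com'}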
for pb in pbook:
pb_index = pb.pop('index_key')
try:
pb_name = pb.pop('name')
except KeyError:
pb_name = ''
try:
pb_number = pb.pop('phone_v2')
except KeyError:
pb_number = ''
try:
pb_email = pb.pop('email_v2')
except KeyError:
pb_email = ''
try:
pb_other = ''.join([(x+': '+pb[x]+'<br/>\n') for x in pb])
except:
pb_other = ''
fileh.write('<tr><td nowrap>%s</td><td nowrap>%s</td><td nowrap>%s</td><td nowrap>%s</td><td>%s</td></tr>\n' % (pb_index, pb_name, pb_number, pb_email, pb_other))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Communications data', '<a href="contacts.html">%s (%d)</a>' % (rep_title, len(pbook))])
# # # # #
# Decode contacts2.db (Calls) # # # # # # # # # # # # # # # # #
def decode_calls_contacts2db():
rep_title = 'Call logs'
con = sq.connect(OUTPUT+'db'+SEP+'contacts2.db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='calls'")
if c.fetchone() != None: # check if table exists
c.execute("SELECT _id,type,number,name,date,duration FROM calls ORDER by date DESC")
c2_data = c.fetchall()
con.close()
if c2_data != []:
fileh = open(OUTPUT+'call_logs.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border="1" cellpadding="2" cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th>#</th><th>Type</th><th>Number</th><th>Name</th><th>Time</th><th>Duration</th></tr>' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
for c2_item in c2_data:
c2_id = str(c2_item[0]) # id
c2_type_raw = c2_item[1] # type
if c2_type_raw == 1:
c2_type = 'Received'
elif c2_type_raw == 2:
c2_type = 'Dialled'
elif c2_type_raw == 3:
c2_type = 'Missed'
elif c2_type_raw == 5:
c2_type = 'Rejected'
else:
c2_type = 'Type('+str(c2_type_raw)+')'
c2_number = str(c2_item[2]) # number
if c2_number.lstrip('-').isdigit() and int(c2_number) <= 0:
c2_number = 'UNKNOWN'
c2_name = c2_item[3] # name
if c2_name == None:
c2_name = ''
c2_date = datetime.fromtimestamp(int(str(c2_item[4])[:10])).strftime('%Y-%m-%d %H:%M:%S')
c2_dur = str(timedelta(seconds=c2_item[5])) # duration
fileh.write('<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>\n' % (str(c2_id), str(c2_type), str(c2_number), str(c2_name), str(c2_date), str(c2_dur), ))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Communications data', '<a href="call_logs.html">%s (%d)</a>' % (rep_title, len(c2_data))])
# # # # #
# Decode logs.db (Samsung Calls(SEC)) # # # # # # # # # # # # # # # # #
def decode_logsdb():
rep_title = 'Samsung Call logs'
con = sq.connect(OUTPUT+'db'+SEP+'logs.db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='logs'")
if c.fetchone() != None:
c.execute("SELECT _id,type,number,name,date,duration FROM logs WHERE logtype='100' ORDER by date DESC")
sec_data = c.fetchall()
con.close()
fileh = open(OUTPUT+'sec_call_logs.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border="1" cellpadding="2" cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th>#</th><th>Type</th><th>Number</th><th>Name</th><th>Time</th><th>Duration</th></tr>' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
for sec_item in sec_data:
sec_id = str(sec_item[0]) # id
sec_type_raw = sec_item[1] # type
if sec_type_raw == 1:
sec_type = 'Received'
elif sec_type_raw == 2:
sec_type = 'Dialled'
elif sec_type_raw == 3:
sec_type = 'Missed'
elif sec_type_raw == 5:
sec_type = 'Rejected'
else:
sec_type = 'Type('+str(sec_type_raw)+')'
sec_number = str(sec_item[2]) # number
if sec_number.lstrip('-').isdigit() and int(sec_number) <= 0:
sec_number = 'UNKNOWN'
sec_name = sec_item[3] # name
if sec_name == None:
sec_name = ''
sec_date = datetime.fromtimestamp(int(str(sec_item[4])[:10])).strftime('%Y-%m-%d %H:%M:%S')
sec_dur = str(timedelta(seconds=sec_item[5])) # duration
fileh.write('<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>\n' % (str(sec_id), str(sec_type), str(sec_number), str(sec_name), str(sec_date), str(sec_dur), ))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Communications data', '<a href="sec_call_logs.html">%s (%d)</a>' % (rep_title, len(sec_data))])
# # # # #
# Decode mmssms.db # # # # # # # # # # # # # # # # # # # # # #
def decode_mmssmsdb():
rep_title = 'SMS Messages'
con = sq.connect(OUTPUT+'db'+SEP+'mmssms.db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='sms'")
if c.fetchone() != None:
c.execute("SELECT address,body,date,type,_id FROM sms ORDER by sms.date DESC")
sms_data = c.fetchall()
con.close()
fileh = open(OUTPUT+'mmssms.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border=1 cellpadding=2 cellspacing=0 align=center>\n<tr bgcolor=#72A0C1><th>#</th><th>Number</th><th width="500">Message</th><th>Type</th><th nowrap>Time</th></tr>\n' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
for sms_item in sms_data:
sms_number = str(sms_item[0])
sms_text = str(sms_item[1])
sms_time = datetime.fromtimestamp(int(str(sms_item[2])[:10])).strftime('%Y-%m-%d %H:%M:%S')
if sms_item[3] == 1:
sms_typ = "Inbox"
elif sms_item[3] == 2:
sms_typ = "Sent"
elif sms_item[3] == 3:
sms_typ = "Draft"
elif sms_item[3] == 5:
sms_typ = "Sending failed"
elif sms_item[3] == 6:
sms_typ = "Sent"
else:
sms_typ = "Type"+"("+str(sms_item[3])+")"
sms_index = sms_item[4]
fileh.write('<tr><td>%s</td><td>%s</td><td width="500">%s</td><td>%s</td><td nowrap>%s</td></tr>\n' % (str(sms_index),sms_number,sms_text,sms_typ,sms_time))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Communications data', '<a href="mmssms.html">%s (%d)</a>' % (rep_title, len(sms_data))])
# # # # #
# Decode threads_db2 # # # # # # # # # # # # # # # # # # #
def decode_threads_db2():
rep_title = 'Facebook: Messages'
con = sq.connect(OUTPUT+SEP+'db'+SEP+'threads_db2')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='messages'")
if c.fetchone() != None:
c.execute("SELECT sender,threads.participants,text,messages.timestamp_ms FROM messages JOIN threads ON (messages.thread_id=threads.thread_id) WHERE NOT messages.timestamp_ms='0' ORDER BY messages.timestamp_ms DESC")
fbt_data = c.fetchall()
c.execute("SELECT user_key,name,profile_pic_square FROM thread_users")
fbt_users = c.fetchall()
con.close()
if fbt_data != '':
fileh = open(OUTPUT+SEP+'fb_messages.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border="1" cellpadding="2" cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th nowrap>Sender</th><th nowrap>Image</th><th width="500">Message</th><th nowrap>Recipient(s)</th><th>Time</th></tr>' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
for fbt_item in fbt_data:
if fbt_item[0] != None:
fbt_sender_nm = loads(fbt_item[0]).get('name')
fbt_sender_id = loads(fbt_item[0]).get('user_key')
else:
fbt_sender_nm = ''
fbt_sender_id = ''
for fbimgs in fbt_users:
if fbimgs[0] == fbt_sender_id:
fbt_img = loads(fbimgs[2])[0].get('url')
fbt_text = fbt_item[2]
fbt_time = datetime.fromtimestamp(int(str(fbt_item[3])[:10])).strftime('%Y-%m-%d %H:%M:%S')
fbt_part = []
for fbtdic in loads(fbt_item[1]):
fbt_part.append(fbtdic.get('name')+' (ID:'+fbtdic.get('user_key').split(':')[1]+')')
try:
fbt_part.remove(fbt_sender_nm+' (ID:'+fbt_sender_id.split(':')[1]+')')
except:
pass
fbt_parti = '<br/>'.join(fbt_part)
fileh.write('<tr><td nowrap><a href="http://www.facebook.com/profile.php?id=%s">%s</a></td><td><img src="%s"></td><td width="500">%s</td><td nowrap>%s</td><td nowrap>%s</td></tr>\n' % (fbt_sender_id.split(':')[1], fbt_sender_nm, fbt_img, fbt_text, fbt_parti, str(fbt_time)))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Applications data', '<a href="fb_messages.html">%s (%d)</a>' % (rep_title, len(fbt_data))])
# # # # #
# Decode photos_db # # # # # # # # # # # # # # # # # # # # # # #
def decode_photos_db():
rep_title = 'Facebook: Viewed Photos'
con = sq.connect(OUTPUT+'db'+SEP+'photos_db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='photos'")
if c.fetchone() != None:
c.execute("SELECT _id,owner,src_small,src_big,caption,created,thumbnail FROM photos ORDER BY _id DESC")
fbp_data = c.fetchall()
if len(fbp_data) > 0:
if not os.path.isdir(OUTPUT+'fb_media'): os.mkdir(OUTPUT+'fb_media'); os.mkdir(OUTPUT+'fb_media'+SEP+'Thumbs')
fileh = open(OUTPUT+'fb_photos2.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border="1" cellpadding="2" cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th>#</th><th>Picture</th><th>Owner</th><th width="500">Caption</th><th nowrap>Date (uploaded)</th></tr>' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
for fbp_item in fbp_data:
fbp_id = fbp_item[0]
fbp_owner = str(fbp_item[1])
fbp_thm = fbp_item[2]
fbp_img = fbp_item[3]
if fbp_item[4] == None:
fbp_cap = ''
else:
fbp_cap = str(fbp_item[4])
fbp_date = datetime.fromtimestamp(int(str(fbp_item[5])[:10])).strftime('%Y-%m-%d %H:%M:%S')
if fbp_item[6] != None:
filewa = open(OUTPUT+'fb_media'+SEP+'Thumbs'+SEP+str(fbp_id)+'.jpg', 'wb')
filewa.write(fbp_item[6]); filewa.close()
fbp_thumb = 'fb_media'+SEP+'Thumbs'+SEP+str(fbp_id)+'.jpg'
else:
fbp_thumb = fbp_item[2]
fileh.write('<tr><td>%s</td><td><a href="%s" target="_blank"><img src="%s"></a></td><td><a href="http://www.facebook.com/profile.php?id=%s" target="_blank">%s</a></td><td width="500">%s</td><td nowrap>%s</td></tr>\n' % (str(fbp_id), str(fbp_img), str(fbp_thm), str(fbp_owner), str(fbp_owner), fbp_cap, fbp_date))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Applications data', '<a href="fb_photos2.html">%s (%d)</a>' % (rep_title, len(fbp_data))])
# # # # #
# Decode fb.db # # # # # # # # # # # # # # # # # # # # # # # #
def decode_fbdb():
rep_title = 'Facebook: Viewed Photos'
con = sq.connect(OUTPUT+'db'+SEP+'fb.db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='photos'")
if c.fetchone() != None:
c.execute("SELECT _id,owner,src_small,src_big,caption,created,thumbnail FROM photos ORDER BY _id DESC")
fbp_data = c.fetchall()
if len(fbp_data) > 0:
os.mkdir(OUTPUT+'fb_media'); os.mkdir(OUTPUT+'fb_media'+SEP+'Thumbs')
fileh = open(OUTPUT+'fb_photos.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border="1" cellpadding="2" cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th>#</th><th>Picture</th><th>Owner</th><th width="500">Caption</th><th nowrap>Date (uploaded)</th></tr>' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
for fbp_item in fbp_data:
fbp_id = fbp_item[0]
fbp_owner = str(fbp_item[1])
fbp_thm = fbp_item[2]
fbp_img = fbp_item[3]
if fbp_item[4] == None:
fbp_cap = ''
else:
fbp_cap = str(fbp_item[4])
fbp_date = datetime.fromtimestamp(int(str(fbp_item[5])[:10])).strftime('%Y-%m-%d %H:%M:%S')
if fbp_item[6] != None:
filewa = open(OUTPUT+'fb_media'+SEP+'Thumbs'+SEP+str(fbp_id)+'.jpg', 'wb')
filewa.write(fbp_item[6]); filewa.close()
fbp_thumb = 'fb_media'+SEP+'Thumbs'+SEP+str(fbp_id)+'.jpg'
else:
fbp_thumb = fbp_item[2]
fileh.write('<tr><td>%s</td><td><a href="%s" target="_blank"><img src="%s"></a></td><td><a href="http://www.facebook.com/profile.php?id=%s" target="_blank">%s</a></td><td width="500">%s</td><td nowrap>%s</td></tr>\n' % (str(fbp_id), str(fbp_img), str(fbp_thm), str(fbp_owner), str(fbp_owner), fbp_cap, fbp_date))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Applications data', '<a href="fb_photos.html">%s (%d)</a>' % (rep_title, len(fbp_data))])
# # # # #
# Decode wa.db # # # # # # # # # # # # # # # # # # # # # # # #
def decode_wadb():
rep_title = 'WhatsApp Contacts'
con = sq.connect(OUTPUT+'db'+SEP+'wa.db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='wa_contacts'")
if c.fetchone() != None:
c.execute("select display_name,number,status from wa_contacts where is_whatsapp_user='1'")
wa_data = c.fetchall()
con.close()
fileh = open(OUTPUT+'wa_contacts.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border="1" cellpadding="2" cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th>Name</th><th>Number</th><th>Status</th></tr>' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
for wa_item in wa_data:
wa_name = wa_item[0]
wa_number = wa_item[1]
wa_status = wa_item[2]
if wa_status == None:
wa_status = ''
fileh.write('<tr><td>%s</td><td>%s</td><td>%s</td></tr>\n' % (wa_name,wa_number,wa_status))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Applications data', '<a href="wa_contacts.html">%s (%d)</a>' % (rep_title, len(wa_data))])
# # # # #
# Decode msgstore.db # # # # # # # # # # # # # # # # # # # # #
def decode_msgstoredb():
rep_title = 'WhatsApp Messages'
con = sq.connect(OUTPUT+'db'+SEP+'msgstore.db')
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='messages'")
if c.fetchone() != None:
#os.mkdir(OUTPUT+SEP+'wa_media'+SEP+'Sent'); os.mkdir(OUTPUT+SEP+'wa_media'+SEP+'Received')
os.mkdir(OUTPUT+'wa_media'); os.mkdir(OUTPUT+'wa_media'+SEP+'Thumbs')
c.execute("SELECT _id, key_remote_jid, data, timestamp, key_from_me, media_size, media_mime_type, media_name, raw_data, latitude, longitude FROM messages WHERE NOT status='-1' ORDER BY timestamp DESC")
wam_data = c.fetchall()
con.close()
fileh = open(OUTPUT+'wa_messages.html', 'w', encoding='UTF-8')
fileh.write('<!DOCTYPE html><html><head>\n<title>%s Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head>\n<body>\n<a href="REPORT.html">[Back]</a>\n<p align="center"><i># This report was generated using Andriller on %s #</i></p>\n<h3 align="center">[%s] %s</h3>\n<table border="1" cellpadding="2" cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th>#</th><th>Number</th><th width="500">Message</th><th nowrap>Time</th><th>Type</th></tr>' % (str(rep_title), str(IMEI), str(LOCAL_TIME), str(rep_title), str(IMEI)))
for wam_item in wam_data:
wam_id = wam_item[0]
wam_number = wam_item[1].split('@')[0]
			if wam_number[0] != '0':
wam_number = '+'+wam_number
wam_text = wam_item[2] # data
wam_date = datetime.fromtimestamp(int(str(wam_item[3])[:10])).strftime('%Y-%m-%d %H:%M:%S') # timestamp
if wam_item[4] == 1: # key_from_me
wam_dir = 'Sent'
else:
wam_dir = 'Inbox'
if wam_item[8] != None: # raw_data
if wam_item[7] != None: # media_name
wam_fname = wam_item[7]
elif wam_item[6] != None:
wam_fname = str(wam_item[0])+'.'+wam_item[6].split('/')[1] # media_mime_type
else:
wam_fname = str(wam_item[0])+'.jpg'
filewa = open(OUTPUT+SEP+'wa_media'+SEP+'Thumbs'+SEP+wam_fname, 'wb')
filewa.write(wam_item[8]); filewa.close() # raw_data, writes file
wam_text = '<img src="'+'wa_media'+SEP+'Thumbs'+SEP+wam_fname+'">'
if wam_item[6] != None:
wam_text = 'Type: '+str(wam_item[6])+'<br/>'+wam_text
if wam_item[7] != None:
wam_text = 'Filename: '+str(wam_item[7])+'<br/>'+wam_text
if wam_item[9] != 0 and wam_item[10] != 0: # latitude, longtitude
wam_text = '<a href="http://maps.google.com/maps?q='+str(wam_item[9])+','+str(wam_item[10])+'" target="_blank">Map Location: '+str(wam_item[9])+','+str(wam_item[10])+'<br/>'+wam_text+'</a>'
fileh.write('<tr><td>%s</td><td>%s</td><td width="500">%s</td><td nowrap>%s</td><td>%s</td></tr>\n' % (wam_id, wam_number, wam_text, wam_date, wam_dir))
fileh.write(REP_FOOTER)
fileh.close()
REPORT.append(['Applications data', '<a href="wa_messages.html">%s (%d)</a>' % (rep_title, len(wam_data))])
# # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DECODING DOWNLOADED DATABASES
#
decoders = [
(decode_gesturekey, 'gesture.key'),
(decode_settingsdb, 'settings.db'),
(decode_contacts2db, 'contacts2.db'),
(decode_calls_contacts2db, 'contacts2.db'),
(decode_logsdb, 'logs.db'),
(decode_mmssmsdb, 'mmssms.db'),
(decode_threads_db2, 'threads_db2'),
(decode_photos_db, 'photos_db'),
(decode_fbdb, 'fb.db'),
(decode_wadb, 'wa.db'),
(decode_msgstoredb, 'msgstore.db')
]
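# Illustration only (not part of the original script): each entry in the
# dispatch table above pairs a decoder function with the database file name
# it understands, so DECODE_ALL below only has to match file names. A quick
# reverse lookup could be built the same way, e.g.:
#
#     decoder_for = dict((fname, func) for func, fname in decoders)
#     decoder_for['wa.db']    # -> decode_wadb
#
# and support for another database is a matter of appending one more
# (decoder_function, 'filename') pair to the list.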
# Loop for decoding all DB's
def DECODE_ALL(DLLS):
for dec in decoders:
if dec[1] in DLLS:
try:
print('\033[95m Decoding: ' + dec[1] + '\033[0m', end='\r')
dec[0]()
except:
pass
print(' '.join([' ' for x in range(20)]), end='\r')
if DLLS != []:
print("\033[94m>>>>>>>>>> Decoding data...\033[0m")
DECODE_ALL(DLLS)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# REPORTING
#
print("\033[94m>>>>>>>>>> Generating report:\033[0m")
file_handle = open(OUTPUT+SEP+'REPORT.html', 'w', encoding='UTF-8')
report_t = '<!DOCTYPE html><html><head>\n<title>Andriller Report for %s</title>\n<style>body,td,tr {font-family: Vernada, Arial, sans-serif; font-size: 12px;}</style></head><body>\n<p align="center"><i># This report was generated using Andriller version %s on %s #</i></p><h3 align="center">[Andriller Report] %s %s | %s</h3>\n<table border="1" cellpadding=2 cellspacing="0" align="center">\n<tr bgcolor="#72A0C1"><th>Type</th><th>Data</th></tr>\n' % (str(IMEI), ANDRILLER_VERSION, str(LOCAL_TIME), DEVICE_MANUF, str(DEVICE_MODEL), str(IMEI))
file_handle.write(report_t)
for torep in REPORT:
file_handle.write('<tr><td>%s:</td><td>' % torep[0])
if type(torep[1]) is list:
for tore in torep[1]:
file_handle.write('%s<br/>' % tore)
file_handle.write('</td></tr>\n')
else:
file_handle.write('%s</td></tr>\n' % torep[1])
file_handle.write(REP_FOOTER)
file_handle.close()
# Print generated report path:
print('\033[92m'+os.getcwd()+SEP+OUTPUT+'REPORT.html\033[0m')
|
gpl-3.0
| 2,126,477,740,589,460,500 | 43.998672 | 621 | 0.616427 | false | 2.547286 | false | false | false |
adaptive-learning/robomission
|
backend/learn/tests/test_recommendation.py
|
1
|
3737
|
import pytest
from learn.models import Task, ProblemSet, Domain
from learn.models import Student, TaskSession, Skill
from learn.recommendation import get_recommendation, select_task
def create_domain():
# TODO: Allow to set domain briefly, sth. like:
# create_domain('m1(p1(t1, t2, t3), p2(t4, t5))').
m1 = ProblemSet.objects.create(name='m1', section='1')
m2 = ProblemSet.objects.create(name='m2', section='2')
p1 = m1.add_part(name='p1')
p2 = m1.add_part(name='p2')
t1 = p1.add_task(name='t1')
t2 = p2.add_task(name='t2')
t3 = p2.add_task(name='t3')
domain = Domain.objects.create()
domain.problemsets.set([m1, m2, p1, p2])
domain.tasks.set([t1, t2, t3])
return domain
@pytest.mark.django_db
def test_recommendation_available():
domain = create_domain()
student = Student.objects.create()
recommendation = get_recommendation(domain, student)
assert recommendation.available
assert recommendation.mission is not None
assert recommendation.phase is not None
assert recommendation.task is not None
@pytest.mark.django_db
def test_recommendation_unavailable_in_empty_domain():
domain = Domain.objects.create()
student = Student.objects.create()
recommendation = get_recommendation(domain, student)
assert not recommendation.available
@pytest.mark.django_db
def test_recommendation_unavailable_phase_without_tasks():
domain = Domain.objects.create()
m1 = ProblemSet.objects.create(name='m1')
p1 = m1.add_part(name='p1')
domain.problemsets.set([m1, p1])
student = Student.objects.create()
recommendation = get_recommendation(domain, student)
assert not recommendation.available
@pytest.mark.django_db
def test_recommendation_unavailable_all_ps_solved():
domain = create_domain()
student = Student.objects.create()
p1 = domain.problemsets.get(name='p1')
p2 = domain.problemsets.get(name='p2')
Skill.objects.create(student=student, chunk=p1, value=1)
Skill.objects.create(student=student, chunk=p2, value=1)
recommendation = get_recommendation(domain, student)
assert not recommendation.available
@pytest.mark.django_db
def test_recommendation_unavailable_tasks_solved():
domain = create_domain()
m1 = ProblemSet.objects.create(name='m1')
p1 = m1.add_part(name='p1')
t1 = p1.add_task(name='t1')
domain.problemsets.set([m1, p1])
domain.tasks.set([t1])
student = Student.objects.create()
TaskSession.objects.create(student=student, task=t1, solved=True)
recommendation = get_recommendation(domain, student)
assert not recommendation.available
@pytest.mark.django_db
def test_recommend_first_mission_and_phase_for_new_student():
domain = create_domain()
student = Student.objects.create()
recommendation = get_recommendation(domain, student)
assert recommendation.mission == 'm1'
assert recommendation.phase == 'p1'
assert recommendation.task == 't1'
@pytest.mark.django_db
def test_dont_recommend_solved_phase():
domain = create_domain()
student = Student.objects.create()
p1 = domain.problemsets.get(name='p1')
Skill.objects.create(student=student, chunk=p1, value=1)
recommendation = get_recommendation(domain, student)
assert recommendation.mission == 'm1'
assert recommendation.phase == 'p2'
assert recommendation.task in {'t2', 't3'}
@pytest.mark.django_db
def test_dont_recommend_solved_task():
ps = ProblemSet.objects.create()
t1 = ps.add_task(name='t1')
t2 = ps.add_task(name='t2')
student = Student.objects.create()
TaskSession.objects.create(student=student, task=t1, solved=True)
task = select_task(ps, student)
assert task == t2
|
gpl-3.0
| -1,803,799,994,616,128,500 | 33.284404 | 69 | 0.704308 | false | 3.275197 | true | false | false |
depet/scikit-learn
|
sklearn/decomposition/pca.py
|
1
|
20538
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import warnings
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, as_float_array
from ..utils import atleast2d_or_csr
from ..utils.extmath import fast_logdet, safe_sparse_dot, randomized_svd, \
fast_dot
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
dim: int,
embedding/empirical dimension
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
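# Illustration only (not part of the original module): given an eigenvalue
# spectrum, ``_infer_dimension_`` scores every candidate rank with Minka's
# likelihood and returns the best one. A hedged sketch of a direct call:
#
#     spectrum = np.array([10., 8., 0.5, 0.1])
#     _infer_dimension_(spectrum, n_samples=100, n_features=4)
#
# With two dominant eigenvalues as above the returned rank should be small
# (typically 2); this is the machinery behind ``PCA(n_components='mle')``.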
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0.
`n_components_` : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
ProbabilisticPCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
""" Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = array2d(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
self.explained_variance_ = (S ** 2) / n_samples
self.explained_variance_ratio_ = (self.explained_variance_ /
self.explained_variance_.sum())
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(self.explained_variance_,
n_samples, n_features)
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = self.explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
self.components_ = self.components_[:n_components, :]
self.explained_variance_ = \
self.explained_variance_[:n_components]
self.explained_variance_ratio_ = \
self.explained_variance_ratio_[:n_components]
self.n_components_ = n_components
return (U, S, V)
def transform(self, X):
"""Apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = array2d(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation as transform.
"""
return fast_dot(X, self.components_) + self.mean_
class ProbabilisticPCA(PCA):
"""Additional layer on top of PCA that adds a probabilistic evaluation"""
__doc__ += PCA.__doc__
def fit(self, X, y=None, homoscedastic=True):
"""Additionally to PCA.fit, learns a covariance model
Parameters
----------
X : array of shape(n_samples, n_features)
The data to fit
homoscedastic : bool, optional,
If True, average variance across remaining dimensions
"""
PCA.fit(self, X)
n_samples, n_features = X.shape
self._dim = n_features
Xr = X - self.mean_
Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_)
n_components = self.n_components
if n_components is None:
n_components = n_features
# Make the low rank part of the estimated covariance
self.covariance_ = np.dot(self.components_[:n_components].T *
self.explained_variance_[:n_components],
self.components_[:n_components])
if n_features == n_components:
delta = 0.
elif homoscedastic:
delta = (Xr ** 2).sum() / (n_samples * n_features)
else:
delta = (Xr ** 2).mean(axis=0) / (n_features - n_components)
# Add delta to the diagonal without extra allocation
self.covariance_.flat[::n_features + 1] += delta
return self
def score(self, X, y=None):
"""Return a score associated to new data
Parameters
----------
X: array of shape(n_samples, n_features)
The data to test
Returns
-------
ll: array of shape (n_samples),
log-likelihood of each row of X under the current model
"""
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
self.precision_ = linalg.inv(self.covariance_)
log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1)
log_like -= .5 * (fast_logdet(self.covariance_)
+ n_features * log(2. * np.pi))
return log_like
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
ProbabilisticPCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
Notes
-----
This class supports sparse matrix input for backward compatibility, but
actually computes a truncated SVD instead of a PCA in that case (i.e. no
centering is performed). This support is deprecated; use the class
TruncatedSVD for sparse matrix support.
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.mean_ = None
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
if hasattr(X, 'todense'):
warnings.warn("Sparse matrix support is deprecated"
" and will be dropped in 0.16."
" Use TruncatedSVD instead.",
DeprecationWarning)
else:
# not a sparse matrix, ensure this is a 2D array
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
if not hasattr(X, 'todense'):
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
self.explained_variance_ratio_ = exp_var / exp_var.sum()
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
# XXX remove scipy.sparse support here in 0.16
X = atleast2d_or_csr(X)
if self.mean_ is not None:
X = X - self.mean_
X = safe_sparse_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Apply dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = self._fit(atleast2d_or_csr(X))
X = safe_sparse_dot(X, self.components_.T)
return X
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
# XXX remove scipy.sparse support here in 0.16
X_original = safe_sparse_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
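if __name__ == '__main__':
    # Minimal usage sketch appended for illustration; it is not part of the
    # original module. Random data stands in for a real dataset (any array
    # with n_samples >= n_features works with n_components='mle').
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 5)
    pca_demo = PCA(n_components='mle')
    pca_demo.fit(X_demo)
    print('estimated number of components: %d' % pca_demo.n_components_)
    print('explained variance ratios: %s' % pca_demo.explained_variance_ratio_)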
|
bsd-3-clause
| -5,295,474,510,407,824,000 | 32.23301 | 80 | 0.583991 | false | 4.072576 | false | false | false |
graik/biskit
|
biskit/core/trajparserFactory.py
|
1
|
3036
|
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
.. seealso:: `biskit.core.TrajParser`,`biskit.core.TrajParserNetCDF`,
"""
from biskit.core.trajparser import TrajParserError, TrajParser
from biskit.core.trajparseNetCDF import TrajParseNetCDF
from biskit.core.trajparsePDBs import TrajParsePDBs
from biskit.core.trajparseAmberCrd import TrajParseAmberCrd
class TrajParserFactory:
"""
Provide the right PDBParser for different structure sources.
"""
@staticmethod
def getParser(source, hasbox=True, rmwat=False, analyzeEach=False,
verbose=False):
"""
getParser( source ) -> TrajParser; Fetch a Parser for the source.
The method is static and should be called directly with the class::
p = TrajParserFactory.getParser( 'myfile.crd' )
Args:
source (str or LocalPath): trajectory source (file)
hasbox (bool): assume file with box info
(applies to Amber ASCII CRD only)
rmwat (bool): remove water and other solvent molecules on the fly
(applies to Amber ASCII CRD, and PDB input only)
analyzeEach (bool): compare each frame's atom content to reference
(applies to PDB input only)
verbose (bool): print loading progress to STDERR
Returns:
TrajParser: a parser that handles the given source
Raises:
TrajParserError: if no compatible parser is found
"""
if TrajParseNetCDF.supports( source ):
return TrajParseNetCDF(verbose=verbose)
if TrajParseAmberCrd.supports( source ):
return TrajParseAmberCrd(verbose=verbose,
rmwat=rmwat,
hasbox=hasbox)
if TrajParsePDBs.supports( source ):
return TrajParsePDBs(verbose=verbose,
rmwat=rmwat, analyzeEach=analyzeEach)
raise TrajParserError('Format of %r is not recognized.' % source)
#############
## TESTING
#############
import biskit.test as BT
class Test(BT.BiskitTest):
"""nothing to test"""
pass
|
gpl-3.0
| -8,348,231,987,865,714,000 | 36.02439 | 78 | 0.638011 | false | 4.147541 | false | false | false |
takluyver/xray
|
xray/backends/netCDF4_.py
|
1
|
4885
|
from collections import OrderedDict
import warnings
import numpy as np
from common import AbstractWritableDataStore
import xray
from xray.conventions import encode_cf_variable
from xray.utils import FrozenOrderedDict, NDArrayMixin, as_array_or_item
from xray import indexing
class NetCDF4ArrayWrapper(NDArrayMixin):
def __init__(self, array):
self.array = array
@property
def dtype(self):
dtype = self.array.dtype
if dtype is str:
# return object dtype because that's the only way in numpy to
# represent variable length strings; it also prevents automatic
# string concatenation via conventions.decode_cf_variable
dtype = np.dtype('O')
return dtype
def __getitem__(self, key):
if self.ndim == 0:
# work around for netCDF4-python's broken handling of 0-d
# arrays (slicing them always returns a 1-dimensional array):
# https://github.com/Unidata/netcdf4-python/pull/220
data = as_array_or_item(np.asscalar(self.array[key]))
else:
data = self.array[key]
return data
class NetCDF4DataStore(AbstractWritableDataStore):
"""Store for reading and writing data via the Python-NetCDF4 library.
This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""
def __init__(self, filename, mode='r', clobber=True, diskless=False,
persist=False, format='NETCDF4'):
import netCDF4 as nc4
        from distutils.version import LooseVersion
        if LooseVersion(nc4.__version__) < LooseVersion('1.0.6'):
warnings.warn('python-netCDF4 %s detected; '
'the minimal recommended version is 1.0.6.'
% nc4.__version__, ImportWarning)
self.ds = nc4.Dataset(filename, mode=mode, clobber=clobber,
diskless=diskless, persist=persist,
format=format)
def open_store_variable(self, var):
var.set_auto_maskandscale(False)
dimensions = var.dimensions
data = indexing.LazilyIndexedArray(NetCDF4ArrayWrapper(var))
attributes = OrderedDict((k, var.getncattr(k))
for k in var.ncattrs())
# netCDF4 specific encoding; save _FillValue for later
encoding = {}
filters = var.filters()
if filters is not None:
encoding.update(filters)
chunking = var.chunking()
if chunking is not None:
if chunking == 'contiguous':
encoding['contiguous'] = True
encoding['chunksizes'] = None
else:
encoding['contiguous'] = False
encoding['chunksizes'] = tuple(chunking)
# TODO: figure out how to round-trip "endian-ness" without raising
# warnings from netCDF4
# encoding['endian'] = var.endian()
encoding['least_significant_digit'] = \
attributes.pop('least_significant_digit', None)
return xray.Variable(dimensions, data, attributes, encoding)
@property
def attrs(self):
return FrozenOrderedDict((k, self.ds.getncattr(k))
for k in self.ds.ncattrs())
@property
def dimensions(self):
return FrozenOrderedDict((k, len(v))
for k, v in self.ds.dimensions.iteritems())
def set_dimension(self, name, length):
self.ds.createDimension(name, size=length)
def set_attribute(self, key, value):
self.ds.setncatts({key: value})
def set_variable(self, name, variable):
variable = encode_cf_variable(variable)
self.set_necessary_dimensions(variable)
fill_value = variable.attrs.pop('_FillValue', None)
encoding = variable.encoding
self.ds.createVariable(
varname=name,
datatype=variable.dtype,
dimensions=variable.dimensions,
zlib=encoding.get('zlib', False),
complevel=encoding.get('complevel', 4),
shuffle=encoding.get('shuffle', True),
fletcher32=encoding.get('fletcher32', False),
contiguous=encoding.get('contiguous', False),
chunksizes=encoding.get('chunksizes'),
endian=encoding.get('endian', 'native'),
least_significant_digit=encoding.get('least_significant_digit'),
fill_value=fill_value)
nc4_var = self.ds.variables[name]
nc4_var.set_auto_maskandscale(False)
if variable.values.ndim == 0:
nc4_var[:] = variable.values
else:
nc4_var[:] = variable.values[:]
nc4_var.setncatts(variable.attrs)
def del_attribute(self, key):
self.ds.delncattr(key)
def sync(self):
self.ds.sync()
def close(self):
self.ds.close()
def __exit__(self, type, value, tb):
self.close()
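if __name__ == '__main__':
    # Minimal usage sketch appended for illustration; it is not part of the
    # original module. 'example.nc' is a hypothetical path to an existing
    # NetCDF file on disk.
    store = NetCDF4DataStore('example.nc', mode='r')
    try:
        print(store.dimensions)
        print(store.attrs)
    finally:
        store.close()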
|
apache-2.0
| 2,144,527,192,232,187,000 | 35.729323 | 76 | 0.596725 | false | 4.136325 | false | false | false |
MagnusS/mirage-bench
|
test-jitsu/plot.py
|
1
|
1208
|
#!/usr/bin/env python
import sys
print "# Creating graphs from stdin (requires matplotlib)"
results = {}
for filename in sys.argv[1:]:
results[filename] = []
with open(filename) as f:
for l in f:
line = l.strip()
if len(line) == 0 or line[0] == '#':
continue
if l[0] == "!":
print "Warning: Some results are invalid:"
print l
continue
results[filename].append(float(l) * 1000)
print results
import matplotlib.pyplot as plt
import numpy as np
#fig,ax = plt.subplots()
name = {}
name["processed_results_warm.dat"] = "Jitsu warm start"
name["processed_results_cold.dat"] = "Jitsu cold start wo/synjitsu"
name["processed_results_http_warm.dat"] = "Jitsu warm start (http)"
name["processed_results_http_cold.dat"] = "Jitsu cold start wo/synjitsu (http)"
plt.title('Time from DNS query to first packet of HTTP response')
for t in results:
title = t
if t in name:
title = name[t]
r = results[t]
print "Plotting",r,"==",len(r)
maxval = 1500
bins = 20
binwidth = maxval / bins
plt.hist(r, bins=range(1, maxval+binwidth, binwidth), label=title)
plt.legend(loc="best")
plt.ylabel("Results")
plt.xlabel("Time in milliseconds")
plt.savefig("jitsu.pdf")
plt.show()
|
isc
| 409,130,284,544,493,900 | 20.571429 | 79 | 0.673841 | false | 2.862559 | false | false | false |
SunDwarf/curious
|
curious/dataclasses/role.py
|
1
|
6768
|
# This file is part of curious.
#
# curious is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# curious is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with curious. If not, see <http://www.gnu.org/licenses/>.
"""
Wrappers for Role objects.
.. currentmodule:: curious.dataclasses.role
"""
import functools
from curious.dataclasses import guild as dt_guild, member as dt_member, \
permissions as dt_permissions
from curious.dataclasses.bases import Dataclass
from curious.exc import PermissionsError
class _MentionableRole(object):
"""
A wrapper class that makes a role mentionable for a short time period.
.. code-block:: python3
async with role.allow_mentions():
await ctx.channel.messages.send(role.mention)
"""
def __init__(self, r: 'Role'):
self.role = r
def allow_mentions(self):
return self.role.edit(mentionable=True)
def disallow_mentions(self):
return self.role.edit(mentionable=False)
def __aenter__(self):
return self.allow_mentions()
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.disallow_mentions()
return False
@functools.total_ordering
class Role(Dataclass):
"""
Represents a role on a server.
"""
__slots__ = "name", "colour", "hoisted", "mentionable", "permissions", "managed", "position", \
"guild_id"
def __init__(self, client, **kwargs) -> None:
super().__init__(kwargs.get("id"), client)
#: The name of this role.
self.name = kwargs.get("name", None)
#: The colour of this role.
self.colour = kwargs.get("color", 0)
#: Is this role hoisted?
self.hoisted = kwargs.get("hoist", False)
#: Is this role mentionable?
self.mentionable = kwargs.get("mentionable", False)
#: The permissions of this role.
self.permissions = dt_permissions.Permissions(kwargs.get("permissions", 0))
#: Is this role managed?
self.managed = kwargs.get("managed", False)
#: The position of this role.
self.position = kwargs.get("position", 0)
#: The ID of the guild associated with this Role.
        self.guild_id = int(kwargs.get("guild_id", 0))  # type: int
def __lt__(self, other: 'Role') -> bool:
if not isinstance(other, Role):
return NotImplemented
if other.guild != self.guild:
raise ValueError("Cannot compare roles between guilds")
return self.position < other.position \
if self.position != other.position \
else self.id < other.id
def _copy(self) -> 'Role':
obb = object.__new__(self.__class__)
obb.name = self.name
obb.colour = self.colour
obb.hoisted = self.hoisted
obb.permissions = self.permissions
obb.managed = self.managed
obb.position = self.position
obb.guild_id = self.guild_id
return obb
@property
def guild(self) -> 'dt_guild.Guild':
"""
:return: The :class:`.Guild` associated with this role.
"""
return self._bot.guilds[self.guild_id]
@property
def is_default_role(self) -> bool:
"""
:return: If this role is the default role of the guild.
"""
return self.guild.id == self.id
def allow_mentions(self) -> _MentionableRole:
"""
        Temporarily allows this role to be mentioned for the duration of the context manager.
.. code-block:: python3
async with role.allow_mentions():
await ctx.channel.messages.send(role.mention)
"""
return _MentionableRole(self)
@property
def mention(self) -> str:
"""
Gets the string that can be used to mention this role.
.. warning::
If :attr:`.Role.mentionable` is ``False``, this will not actually mention the role.
"""
return f"<@&{self.id}>"
async def assign_to(self, member: 'dt_member.Member') -> 'Role':
"""
Assigns this role to a member.
.. seealso::
:meth:`.MemberRoleContainer.add`
:param member: The :class:`.Member` to assign to.
"""
await member.roles.add(self)
return self
async def remove_from(self, member: 'dt_member.Member'):
"""
Removes this role from a member.
.. seealso::
:meth:`.MemberRoleContainer.remove`
:param member: The :class:`.Member` to assign to.
"""
await member.roles.remove(self)
return self
async def delete(self) -> 'Role':
"""
Deletes this role.
"""
if not self.guild.me.guild_permissions.manage_roles:
raise PermissionsError("manage_roles")
await self._bot.http.delete_role(self.guild.id, self.id)
return self
async def edit(self, *,
name: str = None, permissions: 'dt_permissions.Permissions' = None,
colour: int = None, position: int = None,
hoist: bool = None, mentionable: bool = None) -> 'Role':
"""
Edits this role.
:param name: The name of the role.
:param permissions: The permissions that the role has.
:param colour: The colour of the role.
:param position: The position in the sorting list that the role has.
:param hoist: Is this role hoisted (shows separately in the role list)?
:param mentionable: Is this mentionable by everyone?
"""
if not self.guild.me.guild_permissions.manage_roles:
raise PermissionsError("manage_roles")
if permissions is not None:
if isinstance(permissions, dt_permissions.Permissions):
permissions = permissions.bitfield
async with self._bot.events.wait_for_manager("role_update", lambda b, a: a.id == self.id):
await self._bot.http.edit_role(self.guild_id, self.id,
name=name, permissions=permissions, colour=colour,
hoist=hoist, position=position, mentionable=mentionable)
return self
|
mit
| -870,904,555,056,619,100 | 30.626168 | 99 | 0.593085 | false | 4.131868 | false | false | false |
horejsek/python-webdriverwrapper
|
docs/conf.py
|
1
|
10146
|
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, '.')
sys.path.insert(0, '..')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinxtogithub',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WebDriver Wrapper'
copyright = u'2015, Michal Horejsek'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'WebDriver Wrapper Documentation'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'WebdriverWrapper.tex', u'WebDriver Wrapper Documentation',
u'Michal Horejsek', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'webdriverwrapper', u'WebDriver Wrapper Documentation',
[u'Michal Horejsek'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'WebDriver Wrapper', u'WebDriver Wrapper Documentation',
u'Michal Horejsek', 'WebDriver Wrapper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'WebDriver Wrapper'
epub_author = u'Michal Horejsek'
epub_publisher = u'Michal Horejsek'
epub_copyright = u'2015, Michal Horejsek'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'WebdriverWrapper'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# -- Options for intersphinx ----------------------------------------------
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
'selenium': ('http://selenium-python.readthedocs.io/', None),
'request': ('http://docs.python-requests.org/en/latest/', None),
'pytest': ('http://pytest.org/latest/', None),
}
|
mit
| -5,967,862,005,639,540,000 | 29.838906 | 80 | 0.699685 | false | 3.668113 | false | false | false |
mithrandi/eliottree
|
eliottree/tree.py
|
1
|
5439
|
from collections import OrderedDict, defaultdict
def task_name(task):
"""
Compute the task name for an Eliot task.
If we can't find a ``message_type`` or an ``action_type`` field to use to
derive the name, then return ``None``.
"""
if task is None:
raise ValueError('Cannot compute the task name for {!r}'.format(task))
level = u','.join(map(unicode, task[u'task_level']))
message_type = task.get('message_type', None)
if message_type is not None:
status = u''
elif message_type is None:
message_type = task.get('action_type', None)
if message_type is None:
return None
status = u'/' + task['action_status']
return u'{message_type}@{level}{status}'.format(
message_type=message_type,
level=level,
status=status)
class _TaskNode(object):
"""
A node representing an Eliot task and its child tasks.
:type task: ``dict``
:ivar task: Eliot task.
:type name: ``unicode``
:ivar name: Node name; this will be derived from the task if it is not
specified.
:type _children: ``OrderedDict`` of ``_TaskNode``
:ivar _children: Child nodes, see ``_TaskNode.children``
"""
_DEFAULT_TASK_NAME = u'<UNNAMED TASK>'
def __init__(self, task, name=None):
if task is None:
raise ValueError('Missing eliot task')
self.task = task
self._children = OrderedDict()
if name is None:
name = task_name(task) or self._DEFAULT_TASK_NAME
self.name = name
def __repr__(self):
"""
Human-readable representation of the node.
"""
# XXX: This is probably wrong in a bunch of places.
task_uuid = self.task[u'task_uuid'].encode('utf-8')
return '<{type} {task_uuid} {name} children={children}>'.format(
type=type(self).__name__,
task_uuid=task_uuid,
# XXX: This is probably wrong in a bunch of places.
name=self.name.encode('utf-8'),
children=len(self._children))
def add_child(self, node):
"""
Add a child node.
:type node: ``_TaskNode``
:param node: Child node to add to the tree, if the child has multiple
levels it may be added as a grandchild.
"""
def _add_child(parent, levels):
levels = list(levels)
level = levels.pop(0)
children = parent._children
if level in children:
_add_child(children[level], levels)
else:
children[level] = node
_add_child(self, node.task['task_level'])
def children(self):
"""
Get a ``list`` of child ``_TaskNode``s ordered by task level.
"""
return sorted(
self._children.values(), key=lambda n: n.task[u'task_level'])
class Tree(object):
"""
Eliot task tree.
:ivar _nodes: Internal tree storage, use ``Tree.nodes`` or
``Tree.matching_nodes`` to obtain the tree nodes.
"""
def __init__(self):
self._nodes = {}
def nodes(self, uuids=None):
"""
All top-level nodes in the tree.
:type uuids: ``set`` of ``unicode``
:param uuids: Set of task UUIDs to include, or ``None`` for no
filtering.
:rtype: ``iterable`` of 2-``tuple``s
:return: Iterable of key and node pairs for top-level nodes, sorted by
timestamp.
"""
if uuids:
nodes = ((k, self._nodes[k]) for k in uuids)
else:
nodes = self._nodes.iteritems()
return sorted(nodes, key=lambda (_, n): n.task[u'timestamp'])
def merge_tasks(self, tasks, filter_funcs=None):
"""
Merge tasks into the tree.
:type tasks: ``iterable`` of ``dict``
:param tasks: Iterable of task dicts.
:type filter_funcs: ``iterable`` of 1-argument ``callable``s returning
``bool``
:param filter_funcs: Iterable of predicate functions that given a task
determine whether to keep it.
:return: ``set`` of task UUIDs that match all of the filter functions,
can be passed to ``Tree.matching_nodes``, or ``None`` if no filter
functions were specified.
"""
tasktree = self._nodes
matches = defaultdict(set)
if filter_funcs is None:
filter_funcs = []
filter_funcs = list(filter_funcs)
def _merge(tasks):
pending = []
for task in tasks:
key = task[u'task_uuid']
node = tasktree.get(key)
if node is None:
if task[u'task_level'] != [1]:
pending.append(task)
continue
node = tasktree[key] = _TaskNode(task=task)
else:
node.add_child(_TaskNode(task))
for i, fn in enumerate(filter_funcs):
if fn(task):
matches[i].add(key)
return pending
pending = _merge(tasks)
if pending:
pending = _merge(pending)
if pending:
raise RuntimeError('Some tasks have no start parent', pending)
if not matches:
return None
return set.intersection(*matches.values())
__all__ = ['Tree']
|
mit
| -8,383,232,947,113,377,000 | 30.994118 | 78 | 0.541644 | false | 4.164625 | false | false | false |
claashk/python-config
|
schema/default_reader.py
|
1
|
11785
|
# -*- coding: utf-8 -*-
import re
from .content_buffer import ContentBuffer
from .error_handler import ErrorHandler
class Locator(object):
"""Simple locator used by :class:`~schema.DefaultReader`
"""
    def __init__(self):
        self.line= 0
        self.column= 0
    def __str__(self):
        """Convert current locator to string
        This method is used by various error reporting routines
        """
        return "{:d}:{:d}".format(self.line, self.column)
class DefaultReader(ErrorHandler):
"""Default reader for ASCII files
Arguments:
contentHandler (:class:`~.ContentHandler`): Content handler object.
assignChar (:class:`str`): Assignment character. Defaults to '='.
commentChar (:class:`str`): Comment character. Defaults to '#'.
"""
def __init__(self, contentHandler, assignChar= "=", commentChar= "#"):
super().__init__(name="schema.DefaultReader")
self._impl = contentHandler
self._actions = list()
self._locator = Locator()
self._onLhs = True #: Whether or not we are on left-hand side of expr
self._inAttributes = False #: Whether we are parsing attributes
self._inBlock = False #: Whether we are inside a () block
self._buffer = ContentBuffer()
self._attributes = dict()
self._currentAttribute= None
self._stack = list()
# Default actions
self.actions([
(r"{0}(.*)".format(commentChar), "comment"),
(r"[\t ]*(\r?\n)", "newline"),
(r"([\t ]*)'([^']*)[\t ]*'", "quoted_identifier"),
(r"([\t ]*)\"([^\"]*)\"[\t ]*", "quoted_identifier"),
(r"([\t ]*)\(", "beginBlock"),
(r"\)[\t ]*", "endBlock"),
(r"[ \t]*{0}[ \t]*".format(assignChar), "beginAssignment"),
(r"[\t ]*\{", "enterContext"),
(r"\}[\t ]*", "leaveContext"),
(r"([\t ]*)(\[)([\t ]*)", "beginAttributes"),
(r"([\t ]*)(\])([\t ]*)", "endAttributes"),
(r"(,)[\t ]*", "comma"),
(r"(;)[\t ]*", "semicolon"),
(r"([\ ]*)([^\s{0}{{}}\[\],;{1}\(\)]+)[\t *]*"
.format(assignChar, commentChar), "identifier"),
(r"([\t ]+)", "ignore")
])
    def actions(self, actions):
        """Register regular expressions and the methods they trigger
        Assigns a regular expression to a class method to execute, when the
        regular expression matches an input line.
        Arguments:
            actions (iterable): Iterable of (pattern, name) tuples, where
               *pattern* is a regular expression pattern string and *name* is
               the name of the class method to invoke. The method is invoked
               with a match object as single parameter.
        """
for pattern, name in actions:
self._actions.append((re.compile(pattern), getattr(self, name)))
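    # Usage sketch (illustrative; the extra pattern below is hypothetical):
    #   reader = DefaultReader(handler)   # handler implements ContentHandler
    #   reader.actions([(r"[\t ]*!(.*)", "comment")])  # treat '!' lines as comments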
def parse(self, inputStream):
self.startDocument()
self.tokenize(inputStream)
self.endDocument()
def startDocument(self):
"""Start parsing a new document/stream
"""
self._stack.clear()
self._impl.open()
self._impl.locator= self._locator
self._impl.enter("root") #Enter root context
def endDocument(self):
"""End parsing the current document
"""
#leave root context
if self._stack:
msg= "The following contexts were not closed:\n"
for name in self._stack:
msg= "\n - ".join([msg, name])
self.warn(msg)
self._impl.leave() #leave root context
self._impl.close()
def tokenize(self, inputStream):
"""Tokenize input stream and process tokens
Arguments:
inputStream: Input stream
"""
for self._locator.line, line in enumerate(inputStream, start=1):
self._locator.column= 0
end= len(line)
while self._locator.column != end:
match= None
for regex, action in self._actions:
match= regex.match(line[self._locator.column:])
if match:
try:
action(match)
except Exception as ex:
self._impl.fatalError( str(ex) )
self._locator.column+= match.end()
break
if not match:
self.error("Undefined pattern")
def comment(self, match):
"""Parse a comment string
Arguments:
match (:class:`re.MatchObject`): Regular expression match object
"""
self._endAssignment()
self._impl.comment(match.group(1))
def beginBlock(self, match):
if self._inBlock:
raise ValueError("Nested blocks are not allowed")
if self._inAttributes:
raise ValueError("Blocks not allowed inside attributes.")
if self._onLhs:
raise ValueError("Blocks are not allowed on RHS expressions")
self._impl.content(match.group(0))
self._inBlock= True
def endBlock(self, match):
if not self._inBlock:
raise ValueError("Spourious ')'")
self._impl.content(")")
self._inBlock= False
def quoted_identifier(self, match):
if self._inBlock:
self._impl.content("\"")
self.identifier(match)
if self._inBlock:
self._impl.content("\"")
def identifier(self, match):
if self._inAttributes:
if self._onLhs:
if self._currentAttribute is not None:
raise ValueError("Expected assignment")
self._currentAttribute= match.group(2)
else:
self._attributes[self._currentAttribute]= match.group(2)
self._endAssignment()
else:
# Not in attribute mode
self._impl.ignore( match.group(1) )
if self._onLhs:
self._buffer.add( match.group(2) )
else:
self._impl.content( match.group(2) )
def beginAssignment(self, match):
"""Called if an assignment character is found
Arguments:
match: Ignored match object.
"""
if self._inBlock:
# Inside a block assignment chars are ignored.
self._impl.content(match.group(0))
return
if not self._onLhs:
# An assignment character on RHS shall be quoted
raise ValueError("Assignment character on RHS must be quoted")
if not self._inAttributes:
self.enterContext()
self._onLhs= False
def comma(self, match):
"""Called if a comma is found
Arguments:
match (:class:'MatchObject'): match object
"""
if self._inBlock:
self._impl.content(match.group(1))
elif self._inAttributes:
self._endAssignment()
else:
self._impl.content(match.group(1))
def semicolon(self, match):
"""Called if a semicolon is found
Arguments:
match (:class:'MatchObject'): match object
"""
self._endAssignment()
def _endAssignment(self):
"""Invoked on the end of an assignment
"""
if self._onLhs:
#Nothing to do
return
if self._inAttributes:
if not self._currentAttribute:
raise ValueError("Incomplete Attribute")
if self._attributes.get(self._currentAttribute, None) is None:
raise ValueError("Missing value for attribute '{0}'!"
.format(self._currentAttribute))
self._currentAttribute= None
else:
self._stack.pop()
self._impl.leave()
self._onLhs= True
def enterContext(self, match=None):
"""Enter a new context
Called if either an opening curly bracket or an assignment character
is found.
Arguments:
match: Ignored match object.
"""
if self._inBlock:
raise ValueError("Cannot start context in block")
if not self._onLhs:
raise ValueError("Invalid RHS expression")
if self._inAttributes:
raise ValueError("Cannot start scope in attribute")
if len(self._buffer) != 1:
raise ValueError("Expected exactly one identifier, got {0}"
.format(len(self._buffer)) )
ctxName= self._buffer.flush()
self._stack.append(ctxName)
try:
self._impl.enter(ctxName, **self._attributes)
finally:
self._attributes.clear()
def leaveContext(self, match=None):
"""Called if a closing curly bracket is encountered
"""
if self._inBlock:
raise ValueError("Cannot end scope in block")
self._endAssignment() #end assignment if we are on RHS, else do nothing
if self._attributes:
raise ValueError("Cannot end scope in attribute expression.")
self._impl.content(self._buffer.flush())
self._stack.pop()
self._impl.leave()
def newline(self, match):
"""Invoked each time a line is complete
Arguments:
match (): Match object
"""
if self._inBlock:
self._impl.ignore(match.group(0))
return
self._endAssignment()
if self._inAttributes:
if not self._currentAttribute:
return
raise ValueError("Illegal line break before incomplete attribute")
else:
self._endAssignment() #If on RHS, end assignment, else do nothing
if self._attributes:
raise ValueError("Superflous attributes")
# If buffer is not empty, we are facing content without assignment
self._impl.content( self._buffer.flush() )
self._impl.content(match.group(0))
def beginAttributes(self, match):
if not self._onLhs:
# An RHS '[' is treated as content
self._impl.content( match.group(0) )
return
if self._inBlock:
raise ValueError("'[' not allowed in block")
if self._inAttributes:
raise ValueError("Nested attributes are not allowed")
self._inAttributes= True
def endAttributes(self, match=None):
if self._inBlock:
raise ValueError("']' not allowed in block")
if not self._inAttributes:
raise ValueError("Cannot end attributes.")
if not self._onLhs:
raise ValueError("Incomplete attributes")
self._inAttributes= False
def ignore(self, match):
"""Ignore matched content
        Forwards the entire content to :meth:`~.ContentHandler.ignore`
Arguments:
match (:class:re.MatchObject): Match object.
"""
if self._inBlock:
return
if not self._inAttributes:
self._impl.ignore( match.group(0) )
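# Usage sketch (illustrative; assumes a concrete ContentHandler implementation
# called ``PrintHandler`` is available in the schema package):
#   import io
#   reader = DefaultReader(PrintHandler())
#   reader.parse(io.StringIO("settings [unit= mm] { width= 42 }\n"))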
|
gpl-3.0
| 6,490,968,191,307,207,000 | 30.179894 | 90 | 0.51031 | false | 4.771255 | false | false | false |
ram8647/gcb-mobilecsp
|
tests/functional/whitelist.py
|
1
|
6236
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that walk through Course Builder pages."""
__author__ = 'Mike Gainer ([email protected])'
import urllib
from common import crypto
from controllers import sites
from models import config
from models import roles
from models import transforms
from modules.course_explorer import course_explorer
from tests.functional import actions
COURSE_NAME = 'whitelist_test'
ADMIN_EMAIL = '[email protected]'
STUDENT_EMAIL = '[email protected]'
NONSTUDENT_EMAIL = '[email protected]'
STUDENT_WHITELIST = '[%s]' % STUDENT_EMAIL
class WhitelistTest(actions.TestBase):
_course_added = False
_whitelist = ''
_get_environ_old = None
@classmethod
def setUpClass(cls):
sites.ApplicationContext.get_environ_old = (
sites.ApplicationContext.get_environ)
def get_environ_new(slf):
environ = slf.get_environ_old()
environ['course']['now_available'] = True
environ['reg_form']['whitelist'] = WhitelistTest._whitelist
return environ
sites.ApplicationContext.get_environ = get_environ_new
@classmethod
def tearDownClass(cls):
sites.ApplicationContext.get_environ = (
sites.ApplicationContext.get_environ_old)
def setUp(self):
super(WhitelistTest, self).setUp()
config.Registry.test_overrides[
course_explorer.GCB_ENABLE_COURSE_EXPLORER_PAGE.name] = True
actions.login(ADMIN_EMAIL, is_admin=True)
payload_dict = {
'name': COURSE_NAME,
'title': 'Whitelist Test',
'admin_email': ADMIN_EMAIL}
request = {
'payload': transforms.dumps(payload_dict),
'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
'add-course-put')}
response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
self.assertEquals(response.status_int, 200)
sites.setup_courses('course:/%s::ns_%s, course:/:/' % (
COURSE_NAME, COURSE_NAME))
actions.logout()
def tearDown(self):
super(WhitelistTest, self).tearDown()
sites.reset_courses()
WhitelistTest._whitelist = ''
config.Registry.test_overrides.clear()
def _expect_visible(self):
response = self.get('/explorer')
self.assertIn('Whitelist Test', response.body)
response = self.get('/whitelist_test/course')
self.assertEquals(200, response.status_int)
def _expect_invisible(self):
response = self.get('/explorer')
self.assertNotIn('Whitelist Test', response.body)
response = self.get('/whitelist_test/course', expect_errors=True)
self.assertEquals(404, response.status_int)
def test_no_whitelist_not_logged_in(self):
self._expect_visible()
def test_course_whitelist_not_logged_in(self):
WhitelistTest._whitelist = STUDENT_WHITELIST
self._expect_invisible()
def test_course_whitelist_as_admin(self):
WhitelistTest._whitelist = STUDENT_WHITELIST
actions.login(ADMIN_EMAIL, is_admin=True)
self._expect_visible()
def test_course_whitelist_as_nonstudent(self):
WhitelistTest._whitelist = STUDENT_WHITELIST
actions.login(NONSTUDENT_EMAIL)
self._expect_invisible()
def test_course_whitelist_as_student(self):
WhitelistTest._whitelist = STUDENT_WHITELIST
actions.login(STUDENT_EMAIL)
self._expect_visible()
def test_global_whitelist_not_logged_in(self):
config.Registry.test_overrides[
roles.GCB_WHITELISTED_USERS.name] = STUDENT_WHITELIST
self._expect_invisible()
def test_global_whitelist_as_admin(self):
config.Registry.test_overrides[
roles.GCB_WHITELISTED_USERS.name] = STUDENT_WHITELIST
actions.login(ADMIN_EMAIL, is_admin=True)
self._expect_visible()
def test_global_whitelist_as_nonstudent(self):
config.Registry.test_overrides[
roles.GCB_WHITELISTED_USERS.name] = STUDENT_WHITELIST
actions.login(NONSTUDENT_EMAIL)
self._expect_invisible()
def test_global_whitelist_as_student(self):
config.Registry.test_overrides[
roles.GCB_WHITELISTED_USERS.name] = STUDENT_WHITELIST
actions.login(STUDENT_EMAIL)
self._expect_visible()
def test_course_whitelist_trumps_global_whitelist(self):
# Global whitelist is nonblank, but only lists NONSTUDENT_EMAIL
config.Registry.test_overrides[
roles.GCB_WHITELISTED_USERS.name] = '[%s]' % NONSTUDENT_EMAIL
# Course whitelist has STUDENT_EMAIL.
WhitelistTest._whitelist = STUDENT_WHITELIST
actions.login(STUDENT_EMAIL)
self._expect_visible()
def test_course_whitelist_with_multiple_entries(self):
WhitelistTest._whitelist = (
'[%s] ' % NONSTUDENT_EMAIL * 100 +
'[%s] ' % STUDENT_EMAIL +
'[%s] ' % NONSTUDENT_EMAIL * 100)
actions.login(STUDENT_EMAIL)
self._expect_visible()
def test_global_whitelist_with_multiple_entries(self):
config.Registry.test_overrides[
roles.GCB_WHITELISTED_USERS.name] = (
'[%s] ' % NONSTUDENT_EMAIL * 100 +
'[%s] ' % STUDENT_EMAIL +
'[%s] ' % NONSTUDENT_EMAIL * 100)
actions.login(STUDENT_EMAIL)
self._expect_visible()
def test_whitelist_is_case_insensitive(self):
WhitelistTest._whitelist = STUDENT_WHITELIST
actions.login(STUDENT_EMAIL.upper())
self._expect_visible()
|
apache-2.0
| -4,620,004,720,217,033,000 | 34.634286 | 79 | 0.652181 | false | 3.837538 | true | false | false |
mwytock/cvxpy
|
examples/communications/Channel_capacity_BV4.57.py
|
1
|
2632
|
#!/usr/bin/env python3
# @author: R. Gowers, S. Al-Izzi, T. Pollington, R. Hill & K. Briggs
# Boyd and Vandenberghe, Convex Optimization, exercise 4.57 page 207
import cvxpy as cvx
import numpy as np
'''
Input parameters
P: channel transition matrix P_ij(t) = P(output|input) at time t
n: size of input
m: size of output
'''
def channel_capacity(n,m,sum_x=1):
'''
Boyd and Vandenberghe, Convex Optimization, exercise 4.57 page 207
Capacity of a communication channel.
We consider a communication channel, with input x(t)∈{1,..,n} and
output Y(t)∈{1,...,m}, for t=1,2,... .The relation between the
input and output is given statistically:
    p_(i,j) = ℙ(Y(t)=i|X(t)=j), i=1,...,m  j=1,...,n
The matrix P ∈ ℝ^(m*n) is called the channel transition matrix, and
the channel is called a discrete memoryless channel. Assuming X has a
probability distribution denoted x ∈ ℝ^n, i.e.,
x_j = ℙ(X=j), j=1,...,n
The mutual information between X and Y is given by
∑(∑(x_j p_(i,j)log_2(p_(i,j)/∑(x_k p_(i,k)))))
Then channel capacity C is given by
C = sup I(X;Y).
With a variable change of y = Px this becomes
I(X;Y)= c^T x - ∑(y_i log_2 y_i)
where c_j = ∑(p_(i,j)log_2(p_(i,j)))
'''
# n is the number of different input values
# m is the number of different output values
if n*m == 0:
print('The range of both input and output values must be greater than zero')
return 'failed',np.nan,np.nan
# P is the channel transition matrix
P = np.ones((m,n))
# x is probability distribution of the input signal X(t)
x = cvx.Variable(rows=n,cols=1)
# y is the probability distribution of the output signal Y(t)
y = P*x
# I is the mutual information between x and y
c = np.sum(P*np.log2(P),axis=0)
I = c*x + cvx.sum_entries(cvx.entr(y))
# Channel capacity maximised by maximising the mutual information
obj = cvx.Minimize(-I)
constraints = [cvx.sum_entries(x) == sum_x,x >= 0]
# Form and solve problem
prob = cvx.Problem(obj,constraints)
prob.solve()
if prob.status=='optimal':
return prob.status,prob.value,x.value
else:
return prob.status,np.nan,np.nan
# as an example, let's optimise the channel capacity for two different possible input and output values
if __name__ == '__main__':
print(channel_capacity.__doc__)
# print all arrays to have 3 significant figures after the decimal place
np.set_printoptions(precision=3)
n = 2
m = 2
print('Number of input values=%s'%n)
print('Number of outputs=%s'%m)
stat,C,x=channel_capacity(n,m)
print('Problem status ',stat)
print('Optimal value of C = %.4g'%(C))
print('Optimal variable x = \n', x)
|
gpl-3.0
| 396,468,914,431,187,900 | 34.712329 | 103 | 0.675365 | false | 2.823402 | false | false | false |
PavanGupta01/aerospike-admin
|
asadm.py
|
1
|
11499
|
#!/usr/bin/env python
# Copyright 2013-2014 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import readline
import cmd
import sys
import os
import re
import argparse
import getpass
import shlex
from lib import citrusleaf
from lib.controller import *
from lib import terminal
__version__ = '$$__version__$$'
class AerospikeShell(cmd.Cmd):
def __init__(self, seed, telnet, user=None, password=None):
cmd.Cmd.__init__(self)
self.ctrl = RootController(seed_nodes=[seed]
, use_telnet=telnet
, user=user
, password=password)
try:
readline.read_history_file(ADMINHIST)
except Exception, i:
readline.write_history_file(ADMINHIST)
self.prompt = "Admin> "
if self.use_rawinput:
self.prompt = "\001" + terminal.bold() + terminal.fg_red() + "\002" +\
self.prompt + "\001" +\
terminal.unbold() + terminal.fg_clear() + "\002"
self.name = 'Aerospike Interactive Shell'
self.intro = terminal.bold() + self.name + ', version ' +\
__version__ + terminal.reset() + "\n" +\
str(self.ctrl.cluster) + "\n"
self.commands = set()
regex = re.compile("^do_(.*)$")
commands = map(lambda v: regex.match(v).groups()[0], filter(regex.search, dir(self)))
for command in commands:
if command != 'help':
self.commands.add(command)
def cleanLine(self, line):
# get rid of extra whitespace
lexer = shlex.shlex(line)
# TODO: shlex is not working with 'with' ip addresses. Need to write a
# new parser or correct shlex behavior.
commands = []
command = []
build_token = ''
for token in lexer:
build_token += token
if token == '-':
continue
if token == ';':
if command:
commands.append(command)
command = []
else:
command.append(build_token)
build_token = ''
else:
if build_token:
command.append(build_token)
if command:
commands.append(command)
return commands
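    # For example, cleanLine("info; show statistics") is intended to yield
    # [['info'], ['show', 'statistics']] -- one token list per ';'-separated
    # command (illustrative; see the shlex TODO above for known quirks).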
def precmd(self, line):
lines = self.cleanLine(line)
if not lines: # allow empty lines
return ""
for line in lines:
if line[0] in self.commands:
return " ".join(line)
if len(lines) > 1:
print "~~~ %s%s%s ~~~"%(terminal.bold()
, ' '.join(line[1:])
, terminal.reset())
sys.stdout.write(terminal.reset())
try:
response = self.ctrl.execute(line)
if response == "EXIT":
return "exit"
except ShellException as e:
print "%sERR: %s%s"%(terminal.fg_red(), e, terminal.fg_clear())
return "" # line was handled by execute
def completenames(self, text, line, begidx, endidx):
try:
origline = line
if isinstance(origline, str):
line = origline.split(" ")
line = filter(lambda v: v, map(str.strip, line))
if origline and origline[-1] == ' ':
line.append('')
if len(line) > 0:
self.ctrl._initCommands() # dirty
cmds = self.ctrl.commands.getKey(line[0])
else:
cmds = []
watch = False
if len(cmds) == 1:
cmd = cmds[0]
if cmd == 'help':
line.pop(0)
if cmd == 'watch':
watch = True
line.pop(0)
try:
for _ in (1,2):
int(line[0])
line.pop(0)
except:
pass
names = self.ctrl.complete(line)
if watch:
try:
names.remove('watch')
except:
pass
except Exception as e:
return []
return map(lambda n: "%s "%n, names)
def complete(self, text, state):
"""Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
try:
if state >= 0:
import readline
origline = readline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = readline.get_begidx() - stripped
endidx = readline.get_endidx() - stripped
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
except Exception as e:
pass
try:
return self.completion_matches[state]
except IndexError:
return None
def emptyline(self):
        # do nothing
return
# Other
def do_exit(self, line):
readline.write_history_file(ADMINHIST)
print "\nConfig files location: " + str(ADMINHOME)
return True
def do_EOF(self, line):
return self.do_exit(line)
def do_cake(self, line):
msg = """
* *
*
* *
* ( )
(*) (*)
) | | (
* (*) |~| |~| (*)
| |S| |A| | *
|~| |P| |D| |~|
|A| |I| |M| |U|
,|E|a@@@@|K|@@@@@@@@@@@|I|@@@@a|T|.
.,a@@@|R|@@@@@|E|@@@@@@@@@@@|N|@@@@@|I|@@@@a,.
,a@@@@@@|O|@@@@@@@@@@@@.@@@@@@@@@@@@@@|L|@@@@@@@a,
a@@@@@@@@@@@@@@@@@@@@@\' . `@@@@@@@@@@@@@@@@@@@@@@@@a
;`@@@@@@@@@@@@@@@@@@\' . `@@@@@@@@@@@@@@@@@@@@@\';
;@@@`@@@@@@@@@@@@@\' . `@@@@@@@@@@@@@@@@\'@@@;
;@@@;,.aaaaaaaaaa . aaaaa,,aaaaaaa,;@@@;
;;@;;;;@@@@@@@@;@ @.@ ;@@@;;;@@@@@@;;;;@@;
;;;;;;;@@@@;@@;;@ @@ . @@ ;;@;;;;@@;@@@;;;;;;;
;;;;;;;;@@;;;;;;; @@ . @@ ;;;;;;;;;;;@@;;;;@;;
;;;;;;;;;;;;;;;;;@@ . @@;;;;;;;;;;;;;;;;@@@;
,%%%;;;;;;;;@;;;;;;;; . ;;;;;;;;;;;;;;;;@@;;%%%,
.%%%%%%;;;;;;;@@;;;;;;;; ,%%%, ;;;;;;;;;;;;;;;;;;;;%%%%%%,
.%%%%%%%;;;;;;;@@;;;;;;;; ,%%%%%%%, ;;;;;;;;;;;;;;;;;;;;%%%%%%%,
%%%%%%%%`;;;;;;;;;;;;;;;; %%%%%%%%%%% ;;;;;;;;;;;;;;;;;;;\'%%%%%%%%
%%%%%%%%%%%%`;;;;;;;;;;;;,%%%%%%%%%%%%%,;;;;;;;;;;;;;;;\'%%%%%%%%%%%%
`%%%%%%%%%%%%%%%%%,,,,,,,%%%%%%%%%%%%%%%,,,,,,,%%%%%%%%%%%%%%%%%%%%\'
`%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\'
`%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\'
"""
from time import sleep
s = 0.5
for line in msg.split('\n'):
print line
sleep(s)
s = s / 1.2
print terminal.bold() + \
"Let there be CAKE!".center(80) + \
terminal.reset()
def do_ctrl_c(*args, **kwargs):
print "Please press ctrl+d or type exit"
def main():
parser = argparse.ArgumentParser(add_help=False, conflict_handler='resolve')
parser.add_argument("-h"
, "--host"
, default="127.0.0.1"
, help="Address (ip/fqdn) of a host in an " + \
"Aerospike cluster")
parser.add_argument("-p", "--port"
, type=int
, default=3000
, help="Aerospike service port used by the host.")
parser.add_argument("-U"
, "--user"
, help="user name")
parser.add_argument("-P"
, "--password"
, nargs="?"
, const="prompt"
, help="password")
parser.add_argument("-e"
, "--execute"
, help="Execute a single asadmin command and exit")
parser.add_argument("--no-color"
, action="store_true"
, help="Disable colored output")
parser.add_argument("--profile"
, action="store_true"
#, help="Profile Aerospike Admin for performance issues"
, help=argparse.SUPPRESS)
parser.add_argument("-u"
, "--help"
, action="store_true"
, help="show program usage")
cli_args = parser.parse_args()
if cli_args.help:
parser.print_help()
exit(0)
if cli_args.no_color:
from lib import terminal
terminal.enable_color(False)
user = None
password = None
if cli_args.user != None:
user = cli_args.user
if cli_args.password == "prompt":
cli_args.password = getpass.getpass("Enter Password:")
password = citrusleaf.hashpassword(cli_args.password)
global ADMINHOME, ADMINHIST
ADMINHOME = os.environ['HOME'] + '/.aerospike/'
ADMINHIST = ADMINHOME + 'admin_hist'
if not os.path.isdir(ADMINHOME):
os.makedirs(ADMINHOME)
seed = (cli_args.host, cli_args.port)
telnet = False # telnet currently not working, hardcoding to off
shell = AerospikeShell(seed, telnet, user=user, password=password)
use_yappi = False
if cli_args.profile:
try:
import yappi
use_yappi = True
except Exception as a:
print "Unable to load profiler"
print "Yappi Exception:"
print str(a)
exit(1)
func = None
args = ()
if not cli_args.execute:
func = shell.cmdloop
else:
line = shell.precmd(cli_args.execute)
shell.onecmd(line)
func = shell.onecmd
args = (line,)
try:
if use_yappi:
yappi.start()
func(*args)
yappi.get_func_stats().print_all()
else:
func(*args)
except (KeyboardInterrupt, SystemExit):
shell.do_exit('')
exit(0)
if __name__ == '__main__':
main()
|
apache-2.0
| 7,454,791,825,567,895,000 | 33.325373 | 93 | 0.418906 | false | 4.079106 | false | false | false |
scemama/ninja_ocaml
|
ninja_ocaml.py
|
1
|
8281
|
#!/usr/bin/env python
#
# Copyright 2015 Anthony Scemama
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file can be downloaded here:
# https://raw.githubusercontent.com/scemama/ninja_ocaml/master/ninja_ocaml.py
#
"""Build OCaml projects using ninja."""
__author__ = """Anthony Scemama <[email protected]>"""
import os
import sys
import subprocess
def _help_ ():
print """
1) Download and install ninja :
https://github.com/martine/ninja/releases/latest
2) Copy the script into your OCaml project.
3) Run the script. It will build a default build.ninja file
4) Edit the build.ninja file
5) Compile the main target using `ninja`
6) Compile all the targets using `ninja all`
"""
def create_generated_ninja():
"""Creates the generated.ninja file"""
# Header
PACKAGES=""
THREAD=""
SYNTAX=""
OCAMLC_FLAGS=""
GENERATED_NINJA="generated.ninja"
with open('build.ninja','r') as f:
for line in f:
if line.startswith("PACKAGES"):
PACKAGES=line.split('=',1)[1].strip()
elif line.startswith("THREAD"):
THREAD=line.split('=',1)[1].strip()
elif line.startswith("SYNTAX"):
SYNTAX=line.split('=',1)[1].strip()
elif line.startswith("OCAMLC_FLAGS"):
OCAMLC_FLAGS=line.split('=',1)[1].strip()
elif line.startswith("LINK_FLAGS"):
LINK_FLAGS=line.split('=',1)[1].strip()
elif line.startswith("GENERATED_NINJA"):
GENERATED_NINJA=line.split('=',1)[1].strip()
if PACKAGES != "":
LINK_FLAGS = "-linkpkg "+PACKAGES
header = [
"""
########################################################
# This file was auto-generated. #
# This file will be overwritten. Don't edit this file! #
# Changes should be done in the build.ninja file. #
########################################################
""",
"PACKAGES=%s"%(PACKAGES),
"THREAD=%s"%(THREAD),
"SYNTAX=%s"%(SYNTAX),
"OCAMLC_FLAGS=%s"%(OCAMLC_FLAGS),
"LINK_FLAGS=%s"%(LINK_FLAGS),
"GENERATED_NINJA=%s"%(GENERATED_NINJA),
]
header += """
rule ocamlc
command = ocamlfind ocamlc -c $OCAMLC_FLAGS $THREAD $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (bytecode)
rule ocamlopt
command = ocamlfind ocamlopt -c $OCAMLC_FLAGS $THREAD $PACKAGES $SYNTAX -o $o $in
description = Compiling $out (native)
rule ocamlc_link
command = ocamlfind ocamlc $OCAMLC_FLAGS $THREAD $LINK_FLAGS $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (bytecode)
rule ocamlopt_link
command = ocamlfind ocamlopt $OCAMLC_FLAGS $THREAD $LINK_FLAGS $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (native)
""".splitlines()
# Get the list of .ml files
all_files = os.listdir(os.getcwd())
files = [ os.path.splitext(i)[0] for i in all_files if i.endswith('.ml') ]
while "myocamlbuild" in files:
files.remove("myocamlbuild")
ml_files = ' '.join( [ '%s.ml'%i for i in files ] )
# Dependencies
result = subprocess.Popen(
("ocamlfind ocamldep {0} {1} {2}".format(PACKAGES,SYNTAX,ml_files)).split()
,stdout=subprocess.PIPE).communicate()[0]
result = result.replace('\\\n',' ')
dependencies = {}
for line in result.splitlines():
key, value = line.split(':')
dependencies[key.strip()] = value.strip()
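  # Illustrative example: an ocamldep line such as
  #   "main.cmo : utils.cmo utils.cmi"
  # ends up as dependencies["main.cmo"] == "utils.cmo utils.cmi".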
result = header
template = """
build {0}.cmi: ocamlc {0}.mli | $GENERATED_NINJA
build {0}.cmo: ocamlc {0}.ml | $GENERATED_NINJA {1}
build {0}.cmx {0}.o: ocamlopt {0}.ml | $GENERATED_NINJA {2}
o = {0}.o
"""
template_root_byte = """
build {2}.byte: ocamlc_link {1} {0}
"""
template_root_native = """
build {2}: ocamlopt_link {1} {0}
"""
# Find roots
dep = {}
for f in dependencies:
dep[f] = [ i.strip() for i in dependencies[f].split() ]
roots = {}
for f in dependencies:
Found = False
for g,l in dep.iteritems():
if f in l:
Found = True
if not Found:
roots[f] = []
def get_deps(l):
result = []
for i in l:
if i in dep:
result += get_deps(dep[i])
result += l
newresult = []
for r in result:
if r not in newresult:
newresult.append(r)
return newresult
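  # Illustrative: with dep == {'b.cmo': ['a.cmo']}, get_deps(['b.cmo'])
  # returns ['a.cmo', 'b.cmo'] -- transitive dependencies first, duplicates
  # removed.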
# for r in roots:
# roots[r] = [ i for i in get_deps(dep[r]) if not i.endswith(".cmi") ]
roots = {r:[ i for i in get_deps(dep[r]) if not i.endswith(".cmi") ] for r in roots}
# Write the $GENERATED_NINJA file
result += [ template.format(basename,
dependencies["%s.cmo"%basename],
dependencies["%s.cmx"%basename]
) for basename in files ]
result += [ template_root_byte.format(basename,
' '.join(roots[basename]),
os.path.splitext(basename)[0]
) for basename in roots if basename.endswith('.cmo')]
result += [ template_root_native.format(basename,
' '.join(roots[basename]),
os.path.splitext(basename)[0]
) for basename in roots if basename.endswith('.cmx')]
output = '\n'.join(result)
try:
with open(GENERATED_NINJA,'r') as f:
inp = f.read()
except IOError:
inp = ""
if inp != output:
with open(GENERATED_NINJA,'w') as f:
f.write(output)
def create_build_ninja ():
with open('build.ninja','w') as f:
f.write("""
MAIN=
# Main program to build
PACKAGES=
# Required opam packages, for example:
# PACKAGES=-package core,sexplib.syntax
THREAD=
# If you need threading support, use:
# THREAD=-thread
SYNTAX=
# If you need pre-processing, use:
# SYNTAX=-syntax camlp4o
OCAMLC_FLAGS=
# Flags to give to ocamlc, for example:
# OCAMLC_FLAGS=-g -warn-error A
LINK_FLAGS=
# Flags to give to the linker, for example:
# LINK_FLAGS=-cclib '-Wl,-rpath=../lib,--enable-new-dtags'
GENERATED_NINJA=generated.ninja
# Name of the auto-generated ninja file
rule create_generated
command = python ./ninja_ocaml.py
description = Finding dependencies between modules
rule run_ninja
command = ninja -f $in $target
description = Compiling OCaml executables
pool = console
rule run_clean
command = ninja -f $GENERATED_NINJA -t clean ; rm $GENERATED_NINJA
pool = console
description = Cleaning directory
rule ocamlc
command = ocamlfind ocamlc -c $OCAMLC_FLAGS $THREAD $PACKAGES $SYNTAX -o $out $in
description = Compiling $in (bytecode)
rule ocamlopt
command = ocamlfind ocamlopt -c $OCAMLC_FLAGS $THREAD $PACKAGES $SYNTAX -o $out $in
description = Compiling $in (native)
rule ocamlc_link
command = ocamlfind ocamlc $OCAMLC_FLAGS $THREAD $LINK_FLAGS $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (bytecode)
rule ocamlopt_link
command = ocamlfind ocamlopt $OCAMLC_FLAGS $THREAD $LINK_FLAGS $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (native)
build clean: run_clean
build always $GENERATED_NINJA: create_generated
build $MAIN: run_ninja $GENERATED_NINJA
target = $MAIN
build all: run_ninja $GENERATED_NINJA
target =
default $MAIN
""")
def main():
for h in "help -h -help --help ?".split():
if h in sys.argv:
_help_ ()
return
if "build.ninja" in os.listdir(os.getcwd()):
create_generated_ninja ()
else:
create_build_ninja ()
print """
==========================================================
A default build.ninja file was created.
Now, edit build.ninja and compile your project using:
ninja
==========================================================
"""
if __name__ == '__main__':
main()
|
apache-2.0
| -1,102,097,753,415,023,000 | 27.555172 | 94 | 0.602705 | false | 3.444676 | false | false | false |
Comunitea/l10n-spain
|
l10n_es_aeat_sii/models/account_invoice.py
|
1
|
68394
|
# -*- coding: utf-8 -*-
# Copyright 2017 Ignacio Ibeas <[email protected]>
# Copyright 2017 Studio73 - Pablo Fuentes <pablo@studio73>
# Copyright 2017 Studio73 - Jordi Tolsà <[email protected]>
# Copyright 2017 Otherway - Pedro Rodríguez Gil
# Copyright 2017 Tecnativa - Pedro M. Baeza
# Copyright 2017 Comunitea - Omar Castiñeira <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
import json
from requests import Session
from openerp import _, api, exceptions, fields, models, SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.tools.float_utils import float_round, float_compare
from openerp.tools import ustr
_logger = logging.getLogger(__name__)
try:
from zeep import Client
from zeep.transports import Transport
from zeep.plugins import HistoryPlugin
except (ImportError, IOError) as err:
_logger.debug(err)
try:
from openerp.addons.connector.queue.job import job
from openerp.addons.connector.session import ConnectorSession
except ImportError:
_logger.debug('Can not `import connector`.')
import functools
def empty_decorator_factory(*argv, **kwargs):
return functools.partial
job = empty_decorator_factory
SII_STATES = [
('not_sent', 'Not sent'),
('sent', 'Sent'),
('sent_w_errors', 'Accepted with errors'),
('sent_modified', 'Registered in SII but last modifications not sent'),
('cancelled', 'Cancelled'),
('cancelled_modified', 'Cancelled in SII but last modifications not sent'),
]
SII_VERSION_10 = '1.0'
SII_VERSION_11 = '1.1'
SII_VERSION_11_START_DATE = '2018-07-01'
SII_START_DATE = '2017-07-01'
SII_COUNTRY_CODE_MAPPING = {
'RE': 'FR',
'GP': 'FR',
'MQ': 'FR',
'GF': 'FR',
}
SII_MACRODATA_LIMIT = 100000000.0
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
def _default_sii_refund_type(self):
inv_type = self.env.context.get('type')
return 'S' if inv_type in ['out_refund', 'in_refund'] else False
def _default_sii_registration_key(self):
sii_key_obj = self.env['aeat.sii.mapping.registration.keys']
type = self.env.context.get('type')
if type in ['in_invoice', 'in_refund']:
key = sii_key_obj.search(
[('code', '=', '01'), ('type', '=', 'purchase')], limit=1)
else:
key = sii_key_obj.search(
[('code', '=', '01'), ('type', '=', 'sale')], limit=1)
return key
sii_manual_description = fields.Text(
string='SII manual description', size=500, copy=False,
)
sii_description_method = fields.Selection(
related='company_id.sii_description_method', readonly=True,
)
sii_description = fields.Text(
string='SII computed description', compute="_compute_sii_description",
store=True, inverse='_inverse_sii_description',
)
sii_state = fields.Selection(
selection=SII_STATES, string="SII send state", default='not_sent',
readonly=True, copy=False,
help="Indicates the state of this invoice in relation with the "
"presentation at the SII",
)
sii_csv = fields.Char(string='SII CSV', copy=False, readonly=True)
sii_return = fields.Text(string='SII Return', copy=False, readonly=True)
sii_header_sent = fields.Text(
string="SII last header sent", copy=False, readonly=True,
)
sii_content_sent = fields.Text(
string="SII last content sent", copy=False, readonly=True,
)
sii_send_error = fields.Text(
string='SII Send Error', readonly=True, copy=False,
)
sii_send_failed = fields.Boolean(
string="SII send failed", copy=False,
help="Indicates that the last attempt to communicate this invoice to "
"the SII has failed. See SII return for details",
)
sii_refund_type = fields.Selection(
selection=[('S', 'By substitution'), ('I', 'By differences')],
string="SII Refund Type",
default=lambda self: self._default_sii_refund_type(),
oldname='refund_type',
)
sii_account_registration_date = fields.Date(
string='SII account registration date', readonly=True, copy=False,
help="Indicates the account registration date set at the SII, which "
"must be the date when the invoice is recorded in the system and "
"is independent of the date of the accounting entry of the "
"invoice")
sii_registration_key = fields.Many2one(
comodel_name='aeat.sii.mapping.registration.keys',
string="SII registration key", default=_default_sii_registration_key,
oldname='registration_key',
# required=True, This is not set as required here to avoid the
# set not null constraint warning
)
sii_registration_key_additional1 = fields.Many2one(
comodel_name='aeat.sii.mapping.registration.keys',
string="Additional SII registration key"
)
sii_registration_key_additional2 = fields.Many2one(
comodel_name='aeat.sii.mapping.registration.keys',
string="Additional 2 SII registration key"
)
sii_registration_key_code = fields.Char(
related="sii_registration_key.code", readonly=True,
)
sii_enabled = fields.Boolean(
string='Enable SII', compute='_compute_sii_enabled',
)
sii_property_location = fields.Selection(
string="Real property location", copy=False,
selection=[
('1', '[1]-Real property with cadastral code located within '
'the Spanish territory except Basque Country or Navarra'),
('2', '[2]-Real property located in the '
'Basque Country or Navarra'),
('3', '[3]-Real property in any of the above situations '
'but without cadastral code'),
('4', '[4]-Real property located in a foreign country'),
],
)
sii_property_cadastrial_code = fields.Char(
string="Real property cadastrial code", size=25, copy=False,
)
sii_macrodata = fields.Boolean(
string="MacroData",
help="Check to confirm that the invoice has an absolute amount "
"greater o equal to 100 000 000,00 euros.",
compute='_compute_macrodata',
)
invoice_jobs_ids = fields.Many2many(
comodel_name='queue.job', column1='invoice_id', column2='job_id',
string="Connector Jobs", copy=False,
)
@api.depends('amount_total')
def _compute_macrodata(self):
for inv in self:
inv.sii_macrodata = True if float_compare(
inv.amount_total,
SII_MACRODATA_LIMIT,
precision_digits=2
) >= 0 else False
@api.onchange('sii_refund_type')
def onchange_sii_refund_type(self):
if (self.sii_enabled and self.sii_refund_type == 'S' and
not self.origin_invoices_ids):
self.sii_refund_type = False
return {
'warning': {
'message': _(
'You must have at least one refunded invoice'
),
}
}
@api.onchange('fiscal_position')
def onchange_fiscal_position_l10n_es_aeat_sii(self):
for invoice in self.filtered('fiscal_position'):
if 'out' in invoice.type:
key = invoice.fiscal_position.sii_registration_key_sale
else:
key = invoice.fiscal_position.sii_registration_key_purchase
invoice.sii_registration_key = key
@api.model
def create(self, vals):
"""Complete registration key for auto-generated invoices."""
invoice = super(AccountInvoice, self).create(vals)
if vals.get('fiscal_position') and \
not vals.get('sii_registration_key'):
invoice.onchange_fiscal_position_l10n_es_aeat_sii()
return invoice
@api.multi
def write(self, vals):
"""For supplier invoices the SII primary key is the supplier
        VAT/ID Otro and the supplier invoice number. These values cannot be
        changed once the supplier invoice has been registered at the SII."""
for invoice in self:
if invoice.sii_state == 'not_sent':
continue
if 'date_invoice' in vals:
raise exceptions.Warning(
_("You cannot change the invoice date of an invoice "
"already registered at the SII. You must cancel the "
"invoice and create a new one with the correct date")
)
if (invoice.type in ['in_invoice', 'in_refund']):
if 'partner_id' in vals:
correct_partners = invoice.partner_id.commercial_partner_id
correct_partners |= correct_partners.child_ids
if vals['partner_id'] not in correct_partners.ids:
raise exceptions.Warning(
_("You cannot change the supplier of an invoice "
"already registered at the SII. You must cancel "
"the invoice and create a new one with the "
"correct supplier")
)
elif 'supplier_invoice_number' in vals:
raise exceptions.Warning(
_("You cannot change the supplier invoice number of "
"an invoice already registered at the SII. You must "
"cancel the invoice and create a new one with the "
"correct number")
)
res = super(AccountInvoice, self).write(vals)
if vals.get('fiscal_position') and \
not vals.get('sii_registration_key'):
self.onchange_fiscal_position_l10n_es_aeat_sii()
return res
@api.multi
def unlink(self):
"""A registered invoice at the SII cannot be deleted"""
for invoice in self:
if invoice.sii_state != 'not_sent':
raise exceptions.Warning(
_("You cannot delete an invoice already registered at the "
"SII.")
)
return super(AccountInvoice, self).unlink()
@api.multi
def map_sii_tax_template(self, tax_template, mapping_taxes):
"""Adds a tax template -> tax id to the mapping.
Adapted from account_chart_update module.
:param self: Single invoice record.
:param tax_template: Tax template record.
:param mapping_taxes: Dictionary with all the tax templates mapping.
:return: Tax template current mapping
"""
self.ensure_one()
if not tax_template:
return self.env['account.tax']
if mapping_taxes.get(tax_template):
return mapping_taxes[tax_template]
# search inactive taxes too, to avoid re-creating
# taxes that have been deactivated before
tax_obj = self.env['account.tax'].with_context(active_test=False)
criteria = ['|',
('name', '=', tax_template.name),
('description', '=', tax_template.name)]
if tax_template.description:
criteria = ['|'] + criteria
criteria += [
'|',
('description', '=', tax_template.description),
('name', '=', tax_template.description),
]
criteria += [('company_id', '=', self.company_id.id)]
mapping_taxes[tax_template] = tax_obj.search(criteria)
return mapping_taxes[tax_template]
@api.multi
def _get_sii_taxes_map(self, codes):
"""Return the codes that correspond to that sii map line codes.
:param self: Single invoice record.
:param codes: List of code strings to get the mapping.
:return: Recordset with the corresponding codes
"""
self.ensure_one()
taxes = self.env['account.tax']
sii_map = self.env['aeat.sii.map'].search(
['|',
('date_from', '<=', self.date_invoice),
('date_from', '=', False),
'|',
('date_to', '>=', self.date_invoice),
('date_to', '=', False)], limit=1)
mapping_taxes = {}
tax_templates = sii_map.sudo().map_lines.filtered(
lambda x: x.code in codes
).taxes
for tax_template in tax_templates:
taxes += self.map_sii_tax_template(tax_template, mapping_taxes)
return taxes
@api.multi
def _change_date_format(self, date):
datetimeobject = fields.Date.from_string(date)
new_date = datetimeobject.strftime('%d-%m-%Y')
return new_date
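        # Illustrative: '2017-07-01' (ISO/Odoo format) is returned as
        # '01-07-2017', the day-month-year format expected by the SII.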
@api.multi
def _get_sii_header(self, tipo_comunicacion=False, cancellation=False):
"""Builds SII send header
:param tipo_comunicacion String 'A0': new reg, 'A1': modification
:param cancellation Bool True when the communitacion es for invoice
cancellation
:return Dict with header data depending on cancellation
"""
self.ensure_one()
company = self.company_id
if not company.vat:
raise exceptions.Warning(_(
"No VAT configured for the company '{}'").format(company.name))
header = {
"IDVersionSii": (SII_VERSION_10
if fields.Date.today() < SII_VERSION_11_START_DATE
else SII_VERSION_11),
"Titular": {
"NombreRazon": self.company_id.name[0:120],
"NIF": self.company_id.vat[2:]}
}
if not cancellation:
header.update({"TipoComunicacion": tipo_comunicacion})
return header
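        # Shape of the returned value (all values below are hypothetical):
        #   {"IDVersionSii": "1.1",
        #    "Titular": {"NombreRazon": "My Company SL", "NIF": "A12345678"},
        #    "TipoComunicacion": "A0"}  # omitted when cancellation is True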
@api.multi
def _is_sii_type_breakdown_required(self, taxes_dict):
"""Calculates if the block 'DesgloseTipoOperacion' is required for
the invoice communication."""
self.ensure_one()
if 'DesgloseFactura' not in taxes_dict:
return False
country_code = self._get_sii_country_code()
sii_gen_type = self._get_sii_gen_type()
if 'DesgloseTipoOperacion' in taxes_dict:
# DesgloseTipoOperacion and DesgloseFactura are Exclusive
return True
elif sii_gen_type in (2, 3):
# DesgloseTipoOperacion required for Intracommunity and
# Export operations
return True
elif sii_gen_type == 1 and country_code != 'ES':
# DesgloseTipoOperacion required for national operations
# with 'IDOtro' in the SII identifier block
return True
elif (sii_gen_type == 1 and
(self.partner_id.vat or '').startswith('ESN')):
# DesgloseTipoOperacion required if customer's country is Spain and
# has a NIF which starts with 'N'
return True
return False
@api.model
def _sii_adjust_first_semester(self, taxes_dict):
if 'DesgloseFactura' in taxes_dict:
tax_breakdown = taxes_dict['DesgloseFactura']
if 'NoSujeta' in tax_breakdown:
del tax_breakdown['NoSujeta']
if 'Sujeta' not in tax_breakdown:
sub_dict = tax_breakdown.setdefault('Sujeta', {})
sub_dict.setdefault(
'NoExenta', {
'TipoNoExenta': 'S1',
'DesgloseIVA': {
'DetalleIVA': [{
"BaseImponible": 0,
"CuotaRepercutida": 0,
"TipoImpositivo": "0",
"CuotaSoportada": 0}]}
})
elif 'Exenta' in tax_breakdown['Sujeta']:
BI = tax_breakdown['Sujeta']['Exenta']['BaseImponible']
del tax_breakdown['Sujeta']['Exenta']
tax_breakdown['Sujeta'].setdefault(
'NoExenta', {
'TipoNoExenta': 'S1',
'DesgloseIVA': {
'DetalleIVA': [{
"BaseImponible": BI,
"CuotaRepercutida": 0,
"TipoImpositivo": "0",
"CuotaSoportada": 0}]}})
if 'DesgloseTipoOperacion' in taxes_dict:
type_breakdown = taxes_dict['DesgloseTipoOperacion']
            # key can be PrestacionServicios, Entrega or both
keys = type_breakdown.keys()
for key in keys:
if 'NoSujeta' in type_breakdown[key]:
del type_breakdown[key]['NoSujeta']
if 'Sujeta' not in type_breakdown[key]:
sub_dict = type_breakdown[key].setdefault('Sujeta', {})
sub_dict.setdefault(
'NoExenta', {
'TipoNoExenta': 'S1',
'DesgloseIVA': {
'DetalleIVA': [{
"BaseImponible": 0,
"CuotaRepercutida": 0,
"TipoImpositivo": "0",
"CuotaSoportada": 0}],
},
},
)
elif 'Exenta' in type_breakdown[key]['Sujeta']:
BI = type_breakdown[key]['Sujeta']['Exenta'][
'BaseImponible']
del type_breakdown[key]['Sujeta']['Exenta']
type_breakdown[key]['Sujeta'].setdefault(
'NoExenta', {
'TipoNoExenta': 'S1',
'DesgloseIVA': {
'DetalleIVA': [{
"BaseImponible": BI,
"CuotaRepercutida": 0,
"TipoImpositivo": "0",
"CuotaSoportada": 0}],
},
},
)
return taxes_dict
@api.multi
def _get_sii_out_taxes(self):
"""Get the taxes for sales invoices.
:param self: Single invoice record.
"""
self.ensure_one()
taxes_dict = {}
taxes_f = {}
taxes_to = {}
tax_breakdown = {}
type_breakdown = {}
taxes_sfesb = self._get_sii_taxes_map(['SFESB'])
taxes_sfesbe = self._get_sii_taxes_map(['SFESBE'])
taxes_sfesisp = self._get_sii_taxes_map(['SFESISP'])
# taxes_sfesisps = self._get_taxes_map(['SFESISPS'])
taxes_sfens = self._get_sii_taxes_map(['SFENS'])
taxes_sfess = self._get_sii_taxes_map(['SFESS'])
taxes_sfesse = self._get_sii_taxes_map(['SFESSE'])
taxes_sfesns = self._get_sii_taxes_map(['SFESNS'])
default_no_taxable_cause = self._get_no_taxable_cause()
# Check if refund type is 'By differences'. Negative amounts!
sign = self._get_sii_sign()
distinct_exempt_causes = {}
distinct_exempt_causes_serv = {}
for inv_line in self.invoice_line:
exempt_cause = self._get_sii_exempt_cause(inv_line.product_id)
for tax_line in inv_line.invoice_line_tax_id:
breakdown_taxes = (
taxes_sfesb + taxes_sfesisp + taxes_sfens + taxes_sfesbe
)
if tax_line in breakdown_taxes:
tax_breakdown = taxes_dict.setdefault(
'DesgloseFactura', {},
)
if tax_line in (taxes_sfesb + taxes_sfesbe + taxes_sfesisp):
sub_dict = tax_breakdown.setdefault('Sujeta', {})
                    # TODO l10n_es does not provide an exempt tax for
                    # national current goods
ex_taxes = taxes_sfesbe
if tax_line in ex_taxes:
sub_dict = sub_dict.setdefault('Exenta',
{'DetalleExenta': []})
det_dict = {'BaseImponible':
inv_line._get_sii_line_price_subtotal()}
if exempt_cause:
if exempt_cause not in distinct_exempt_causes:
det_dict['CausaExencion'] = exempt_cause
distinct_exempt_causes[exempt_cause] = det_dict
sub_dict['DetalleExenta'].append(det_dict)
else:
ex_dict = distinct_exempt_causes[exempt_cause]
ex_dict['BaseImponible'] += (
det_dict['BaseImponible'])
else:
sub_dict['DetalleExenta'].append(det_dict)
else:
sub_dict.setdefault('NoExenta', {
'TipoNoExenta': (
'S2' if tax_line in taxes_sfesisp else 'S1'
),
'DesgloseIVA': {
'DetalleIVA': [],
},
})
not_ex_type = sub_dict['NoExenta']['TipoNoExenta']
if tax_line in taxes_sfesisp:
is_s3 = not_ex_type == 'S1'
else:
is_s3 = not_ex_type == 'S2'
if is_s3:
sub_dict['NoExenta']['TipoNoExenta'] = 'S3'
inv_line._update_sii_tax_line(taxes_f, tax_line)
                # Not subject ("no sujeta") amounts
if tax_line in taxes_sfens:
nsub_dict = tax_breakdown.setdefault(
'NoSujeta', {default_no_taxable_cause: 0},
)
nsub_dict[default_no_taxable_cause] += (
inv_line._get_sii_line_price_subtotal()
)
if tax_line in (taxes_sfess + taxes_sfesse + taxes_sfesns):
type_breakdown = taxes_dict.setdefault(
'DesgloseTipoOperacion', {
'PrestacionServicios': {},
},
)
if tax_line in (taxes_sfesse + taxes_sfess):
type_breakdown['PrestacionServicios'].setdefault(
'Sujeta', {}
)
service_dict = type_breakdown['PrestacionServicios']
if tax_line in taxes_sfesse:
service_dict = service_dict['Sujeta'].setdefault(
'Exenta',
{'DetalleExenta': []})
det_dict = {'BaseImponible':
inv_line._get_sii_line_price_subtotal()
}
if exempt_cause:
if exempt_cause not in distinct_exempt_causes_serv:
det_dict['CausaExencion'] = exempt_cause
distinct_exempt_causes_serv[exempt_cause] = (
det_dict)
service_dict['DetalleExenta'].append(det_dict)
else:
ex_dict = (
distinct_exempt_causes_serv[exempt_cause])
ex_dict['BaseImponible'] += (
det_dict['BaseImponible'])
else:
service_dict['DetalleExenta'].append(det_dict)
if tax_line in taxes_sfess:
                        # TODO l10n_es does not provide an ISP tax for services
# if tax_line in taxes_sfesisps:
# TipoNoExenta = 'S2'
# else:
service_dict['Sujeta'].setdefault(
'NoExenta', {
'TipoNoExenta': 'S1',
'DesgloseIVA': {
'DetalleIVA': [],
},
},
)
inv_line._update_sii_tax_line(taxes_to, tax_line)
if tax_line in taxes_sfesns:
nsub_dict = service_dict.setdefault(
'NoSujeta', {'ImporteTAIReglasLocalizacion': 0},
)
nsub_dict['ImporteTAIReglasLocalizacion'] += (
inv_line._get_sii_line_price_subtotal() * sign
)
for val in taxes_f.values() + taxes_to.values():
val['CuotaRepercutida'] = round(
float_round(val['CuotaRepercutida'] * sign, 2), 2)
val['BaseImponible'] = round(
float_round(val['BaseImponible'] * sign, 2), 2)
if 'CuotaRecargoEquivalencia' in val:
val['CuotaRecargoEquivalencia'] = round(
float_round(val['CuotaRecargoEquivalencia'] * sign, 2), 2)
if taxes_f:
breakdown = tax_breakdown['Sujeta']['NoExenta']['DesgloseIVA']
breakdown['DetalleIVA'] = taxes_f.values()
if taxes_to:
sub = type_breakdown['PrestacionServicios']['Sujeta']
sub['NoExenta']['DesgloseIVA']['DetalleIVA'] = taxes_to.values()
if 'Sujeta' in tax_breakdown and 'Exenta' in tax_breakdown['Sujeta']:
exempt_dict = tax_breakdown['Sujeta']['Exenta']['DetalleExenta']
for line in exempt_dict:
line['BaseImponible'] = \
round(
float_round(line['BaseImponible'] * sign, 2), 2)
if 'NoSujeta' in tax_breakdown:
nsub_dict = tax_breakdown['NoSujeta']
nsub_dict[default_no_taxable_cause] = \
round(
float_round(nsub_dict[default_no_taxable_cause] * sign, 2),
2)
if type_breakdown:
services_dict = type_breakdown['PrestacionServicios']
if 'Sujeta' in services_dict \
and 'Exenta' in services_dict['Sujeta']:
exempt_dict = (
services_dict['Sujeta']['Exenta']['DetalleExenta'])
for line in exempt_dict:
line['BaseImponible'] = \
round(
float_round(
line['BaseImponible'] * sign, 2), 2)
if 'NoSujeta' in services_dict:
nsub_dict = services_dict['NoSujeta']
nsub_dict["ImporteTAIReglasLocalizacion"] = \
round(
float_round(nsub_dict["ImporteTAIReglasLocalizacion"],
2), 2)
        # Final breakdown adjustments
        # - DesgloseFactura and DesgloseTipoOperacion are mutually exclusive
        # - Certain conditions make DesgloseTipoOperacion mandatory
if self._is_sii_type_breakdown_required(taxes_dict):
taxes_dict.setdefault('DesgloseTipoOperacion', {})
taxes_dict['DesgloseTipoOperacion']['Entrega'] = \
taxes_dict['DesgloseFactura']
del taxes_dict['DesgloseFactura']
        # Regardless of the operation type reported (not subject, subject and
        # exempt, or not exempt), a first-semester invoice must always be
        # reported as subject and not exempt.
if self.date_invoice < SII_START_DATE:
return self._sii_adjust_first_semester(taxes_dict)
return taxes_dict
@api.multi
def _get_sii_in_taxes(self):
"""Get the taxes for purchase invoices.
:param self: Single invoice record.
"""
self.ensure_one()
taxes_dict = {}
taxes_f = {}
taxes_fa = {}
taxes_isp = {}
taxes_ns = {}
taxes_nd = {}
taxes_sfrs = self._get_sii_taxes_map(['SFRS'])
taxes_sfrsa = self._get_sii_taxes_map(['SFRSA'])
taxes_sfrisp = self._get_sii_taxes_map(['SFRISP'])
taxes_sfrns = self._get_sii_taxes_map(['SFRNS'])
taxes_sfrnd = self._get_sii_taxes_map(['SFRND'])
tax_amount = 0.0
# Check if refund type is 'By differences'. Negative amounts!
sign = self._get_sii_sign()
for inv_line in self.invoice_line:
for tax_line in inv_line.invoice_line_tax_id:
if tax_line in taxes_sfrisp:
inv_line._update_sii_tax_line(taxes_isp, tax_line)
elif tax_line in taxes_sfrs:
inv_line._update_sii_tax_line(taxes_f, tax_line)
elif tax_line in taxes_sfrns:
taxes_ns.setdefault('no_sujeto', {'BaseImponible': 0}, )
taxes_ns['no_sujeto']['BaseImponible'] += inv_line. \
_get_sii_line_price_subtotal()
elif tax_line in taxes_sfrsa:
inv_line._update_sii_tax_line(taxes_fa, tax_line)
elif tax_line in taxes_sfrnd:
inv_line._update_sii_tax_line(taxes_nd, tax_line)
if taxes_isp:
taxes_dict.setdefault(
'InversionSujetoPasivo', {'DetalleIVA': taxes_isp.values()},
)
if taxes_f or taxes_ns or taxes_nd:
taxes_dict.setdefault(
'DesgloseIVA', {'DetalleIVA': (taxes_f.values() +
taxes_ns.values() +
taxes_nd.values())},
)
for val in taxes_isp.values() + taxes_f.values() + taxes_fa.values():
val['CuotaSoportada'] = round(
float_round(val['CuotaSoportada'] * sign, 2), 2)
val['BaseImponible'] = round(
float_round(val['BaseImponible'] * sign, 2), 2)
if 'CuotaRecargoEquivalencia' in val:
val['CuotaRecargoEquivalencia'] = round(
float_round(val['CuotaRecargoEquivalencia'] * sign, 2), 2)
tax_amount += val['CuotaSoportada']
for val in taxes_nd.values():
val['CuotaSoportada'] = round(
float_round(val['CuotaSoportada'] * sign, 2), 2)
val['BaseImponible'] = round(
float_round(val['BaseImponible'] * sign, 2), 2)
if 'CuotaRecargoEquivalencia' in val:
val['CuotaRecargoEquivalencia'] = round(
float_round(val['CuotaRecargoEquivalencia'] * sign, 2), 2)
for reg in taxes_ns.values():
reg['BaseImponible'] = round(
float_round(reg['BaseImponible'] * sign, 2), 2)
if taxes_fa:
            # Special agriculture regime - change the keys
for tax_fa in taxes_fa.values():
tax_fa['PorcentCompensacionREAGYP'] = tax_fa.pop(
'TipoImpositivo'
)
tax_fa['ImporteCompensacionREAGYP'] = tax_fa.pop(
'CuotaSoportada'
)
taxes_dict.setdefault(
'DesgloseIVA', {'DetalleIVA': taxes_fa.values()},
)
return taxes_dict, tax_amount
@api.multi
def _sii_check_exceptions(self):
"""Inheritable method for exceptions control when sending SII invoices.
"""
self.ensure_one()
gen_type = self._get_sii_gen_type()
partner = self.partner_id.commercial_partner_id
country_code = self._get_sii_country_code()
if partner.sii_simplified_invoice and self.type[:2] == 'in':
raise exceptions.Warning(
_("You can't make a supplier simplified invoice.")
)
if ((gen_type != 3 or country_code == 'ES') and
not partner.vat and not partner.sii_simplified_invoice):
raise exceptions.Warning(
_("The partner has not a VAT configured.")
)
if not self.company_id.chart_template_id:
raise exceptions.Warning(_(
                'You have to select which account chart template this'
                ' company uses.'))
if not self.company_id.sii_enabled:
raise exceptions.Warning(
_("This company doesn't have SII enabled.")
)
if not self.sii_enabled:
raise exceptions.Warning(
_("This invoice is not SII enabled.")
)
if not self.supplier_invoice_number \
and self.type in ['in_invoice', 'in_refund']:
raise exceptions.Warning(
_("The supplier number invoice is required")
)
@api.multi
def _get_account_registration_date(self):
"""Hook method to allow the setting of the account registration date
of each supplier invoice. The SII recommends to set the send date as
the default value (point 9.3 of the document
SII_Descripcion_ServicioWeb_v0.7.pdf), so by default we return
the current date or, if exists, the stored
sii_account_registration_date
:return String date in the format %Y-%m-%d"""
self.ensure_one()
return self.sii_account_registration_date or fields.Date.today()
@api.multi
def _get_importe_total(self):
"""Get ImporteTotal value.
        Avoid sending IRPF (withholding) data to the SII systems;
        only supplier invoice lines are checked.
"""
taxes_notincludedintotal = self._get_sii_taxes_map(
['NotIncludedInTotal'])
amount_total = 0.0
# Check if refund type is 'By differences'. Negative amounts!
sign = self._get_sii_sign()
# supplier invoice, check lines & irpf
        # add/subtract taxes unless they are included
        # in the aeat.sii.map.lines NotIncludedInTotal entry
for inv_line in self.invoice_line:
amount_total += inv_line._get_sii_line_price_subtotal()
for tax_line in inv_line.invoice_line_tax_id:
if tax_line not in taxes_notincludedintotal:
taxes = tax_line.compute_all(
inv_line._get_sii_line_price_unit(), inv_line.quantity,
inv_line.product_id, self.partner_id,
)
if taxes['total'] >= 0:
amount_total += sum([t['amount'] for t in
taxes['taxes'] if
t['amount'] >= 0])
else:
amount_total += sum([t['amount'] for t in
taxes['taxes'] if t['amount'] < 0])
return round(float_round(amount_total * sign, 2), 2)
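    # Illustrative worked example (added sketch, figures are hypothetical): a
    # supplier invoice line of 100.0 with 21% VAT and a -15% IRPF withholding
    # mapped under 'NotIncludedInTotal' yields ImporteTotal = 100.0 + 21.0 =
    # 121.0; the IRPF amount is deliberately left out of the total.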
@api.multi
def _get_sii_invoice_dict_out(self, cancel=False):
"""Build dict with data to send to AEAT WS for invoice types:
out_invoice and out_refund.
:param cancel: It indicates if the dictionary is for sending a
cancellation of the invoice.
:return: invoices (dict) : Dict XML with data for this invoice.
"""
self.ensure_one()
invoice_date = self._change_date_format(self.date_invoice)
partner = self.partner_id.commercial_partner_id
company = self.company_id
ejercicio = fields.Date.from_string(
self.period_id.date_start).year
periodo = '%02d' % fields.Date.from_string(
self.period_id.date_start).month
inv_dict = {
"IDFactura": {
"IDEmisorFactura": {
"NIF": company.vat[2:],
},
# On cancelled invoices, number is not filled
"NumSerieFacturaEmisor": (
self.number or self.internal_number or '')[0:60],
"FechaExpedicionFacturaEmisor": invoice_date,
},
"PeriodoLiquidacion": {
"Ejercicio": ejercicio,
"Periodo": periodo,
},
}
if not cancel:
if partner.sii_simplified_invoice:
tipo_factura = 'R5' if self.type == 'out_refund' else 'F2'
else:
tipo_factura = 'R4' if self.type == 'out_refund' else 'F1'
inv_dict["FacturaExpedida"] = {
"TipoFactura": tipo_factura,
"ClaveRegimenEspecialOTrascendencia": (
self.sii_registration_key.code
),
"DescripcionOperacion": self.sii_description,
"TipoDesglose": self._get_sii_out_taxes(),
"ImporteTotal": self._get_importe_total(),
}
if self.sii_macrodata:
inv_dict["FacturaExpedida"].update(Macrodato="S")
if self.sii_registration_key_additional1:
inv_dict["FacturaExpedida"]. \
update({'ClaveRegimenEspecialOTrascendenciaAdicional1': (
self.sii_registration_key_additional1.code)})
if self.sii_registration_key_additional2:
inv_dict["FacturaExpedida"]. \
update({'ClaveRegimenEspecialOTrascendenciaAdicional2': (
self.sii_registration_key_additional2.code)})
if self.sii_registration_key.code in ['12', '13']:
inv_dict["FacturaExpedida"]['DatosInmueble'] = {
'DetalleInmueble': {
'SituacionInmueble': self.sii_property_location,
'ReferenciaCatastral': (
self.sii_property_cadastrial_code or '')
}
}
exp_dict = inv_dict['FacturaExpedida']
if not partner.sii_simplified_invoice:
# Simplified invoices don't have counterpart
exp_dict["Contraparte"] = {
"NombreRazon": partner.name[0:120],
}
                # Conditional use of IDOtro/NIF
exp_dict['Contraparte'].update(self._get_sii_identifier())
if self.type == 'out_refund':
exp_dict['TipoRectificativa'] = self.sii_refund_type
if self.sii_refund_type == 'S':
exp_dict['ImporteRectificacion'] = {
'BaseRectificada': sum(
self.
mapped('origin_invoices_ids.cc_amount_untaxed')
),
'CuotaRectificada': sum(
self.mapped('origin_invoices_ids.cc_amount_tax')
),
}
return inv_dict
@api.multi
def _get_sii_invoice_dict_in(self, cancel=False):
"""Build dict with data to send to AEAT WS for invoice types:
in_invoice and in_refund.
        :param cancel: It indicates if the dictionary is for sending a
cancellation of the invoice.
:return: invoices (dict) : Dict XML with data for this invoice.
"""
self.ensure_one()
invoice_date = self._change_date_format(self.date_invoice)
reg_date = self._change_date_format(
self._get_account_registration_date(),
)
ejercicio = fields.Date.from_string(
self.period_id.date_start).year
periodo = '%02d' % fields.Date.from_string(
self.period_id.date_start).month
desglose_factura, tax_amount = self._get_sii_in_taxes()
inv_dict = {
"IDFactura": {
"IDEmisorFactura": {},
"NumSerieFacturaEmisor": (
(self.supplier_invoice_number or '')[:60]
),
"FechaExpedicionFacturaEmisor": invoice_date},
"PeriodoLiquidacion": {
"Ejercicio": ejercicio,
"Periodo": periodo
},
}
        # Conditional use of IDOtro/NIF
ident = self._get_sii_identifier()
inv_dict['IDFactura']['IDEmisorFactura'].update(ident)
if cancel:
inv_dict['IDFactura']['IDEmisorFactura'].update(
{'NombreRazon': (
self.partner_id.commercial_partner_id.name[0:120]
)}
)
else:
# Check if refund type is 'By differences'. Negative amounts!
inv_dict["FacturaRecibida"] = {
                # TODO: Include the 5 types of corrective (rectificativa) invoices
"TipoFactura": (
'R4' if self.type == 'in_refund' else 'F1'
),
"ClaveRegimenEspecialOTrascendencia": (
self.sii_registration_key.code
),
"DescripcionOperacion": self.sii_description,
"DesgloseFactura": desglose_factura,
"Contraparte": {
"NombreRazon": (
self.partner_id.commercial_partner_id.name[0:120]
)
},
"FechaRegContable": reg_date,
"CuotaDeducible": (self.period_id.date_start >=
SII_START_DATE
and round(float_round(tax_amount,
2), 2) or 0.0),
"ImporteTotal": self._get_importe_total(),
}
if self.sii_macrodata:
inv_dict["FacturaRecibida"].update(Macrodato="S")
if self.sii_registration_key_additional1:
inv_dict["FacturaRecibida"]. \
update({'ClaveRegimenEspecialOTrascendenciaAdicional1': (
self.sii_registration_key_additional1.code)})
if self.sii_registration_key_additional2:
inv_dict["FacturaRecibida"]. \
update({'ClaveRegimenEspecialOTrascendenciaAdicional2': (
self.sii_registration_key_additional2.code)})
            # Conditional use of IDOtro/NIF
inv_dict['FacturaRecibida']['Contraparte'].update(ident)
if self.type == 'in_refund':
rec_dict = inv_dict['FacturaRecibida']
rec_dict['TipoRectificativa'] = self.sii_refund_type
refund_tax_amount = sum([
x._get_sii_in_taxes()[1]
for x in self.origin_invoices_ids
])
if self.sii_refund_type == 'S':
rec_dict['ImporteRectificacion'] = {
'BaseRectificada': sum(
self.
mapped('origin_invoices_ids.cc_amount_untaxed')
),
'CuotaRectificada': refund_tax_amount,
}
return inv_dict
@api.multi
def _get_sii_invoice_dict(self):
self.ensure_one()
self._sii_check_exceptions()
if self.type in ['out_invoice', 'out_refund']:
return self._get_sii_invoice_dict_out()
elif self.type in ['in_invoice', 'in_refund']:
return self._get_sii_invoice_dict_in()
return {}
@api.multi
def _get_cancel_sii_invoice_dict(self):
self.ensure_one()
self._sii_check_exceptions()
if self.type in ['out_invoice', 'out_refund']:
return self._get_sii_invoice_dict_out(cancel=True)
elif self.type in ['in_invoice', 'in_refund']:
return self._get_sii_invoice_dict_in(cancel=True)
return {}
@api.multi
def _connect_sii(self, wsdl):
today = fields.Date.today()
sii_config = self.env['l10n.es.aeat.sii'].search([
('company_id', '=', self.company_id.id),
('public_key', '!=', False),
('private_key', '!=', False),
'|',
('date_start', '=', False),
('date_start', '<=', today),
'|',
('date_end', '=', False),
('date_end', '>=', today),
('state', '=', 'active'),
], limit=1)
if sii_config:
public_crt = sii_config.public_key
private_key = sii_config.private_key
else:
public_crt = self.env['ir.config_parameter'].get_param(
'l10n_es_aeat_sii.publicCrt', False)
private_key = self.env['ir.config_parameter'].get_param(
'l10n_es_aeat_sii.privateKey', False)
session = Session()
session.cert = (public_crt, private_key)
transport = Transport(session=session)
history = HistoryPlugin()
client = Client(wsdl=wsdl, transport=transport, plugins=[history])
return client
@api.multi
def _process_invoice_for_sii_send(self):
"""Process invoices for sending to the SII. Adds general checks from
configuration parameters and invoice availability for SII. If the
        invoice is to be sent, this method decides the send method: direct send
        or via the connector, depending on the 'Use connector' configuration."""
        # For now we avoid sending first-semester invoices unless we are
        # in the test environment
invoices = self.filtered(
lambda i: (
i.company_id.sii_test or
i.period_id.date_start >= SII_START_DATE or
(i.sii_registration_key.type == 'sale' and
i.sii_registration_key.code == '16') or
(i.sii_registration_key.type == 'purchase' and
i.sii_registration_key.code == '14')
)
)
queue_obj = self.env['queue.job'].sudo()
for invoice in invoices:
company = invoice.company_id
if not company.use_connector:
invoice._send_invoice_to_sii()
else:
eta = self.env.context.get('override_eta',
company._get_sii_eta())
ctx = self.env.context.copy()
ctx.update(company_id=company.id)
session = ConnectorSession(
self.env.cr, SUPERUSER_ID, context=ctx,
)
new_delay = confirm_one_invoice.delay(
session, 'account.invoice', invoice.id,
eta=eta if not invoice.sii_send_failed else False,
)
invoice.sudo().invoice_jobs_ids |= queue_obj.search(
[('uuid', '=', new_delay)], limit=1,
)
@api.multi
def _send_invoice_to_sii(self):
for invoice in self.filtered(lambda i: i.state in ['open', 'paid']):
company = invoice.company_id
port_name = ''
wsdl = ''
if invoice.type in ['out_invoice', 'out_refund']:
wsdl = self.env['ir.config_parameter'].get_param(
'l10n_es_aeat_sii.wsdl_out', False)
port_name = 'SuministroFactEmitidas'
if company.sii_test:
port_name += 'Pruebas'
elif invoice.type in ['in_invoice', 'in_refund']:
wsdl = self.env['ir.config_parameter'].get_param(
'l10n_es_aeat_sii.wsdl_in', False)
port_name = 'SuministroFactRecibidas'
if company.sii_test:
port_name += 'Pruebas'
client = self._connect_sii(wsdl)
serv = client.bind('siiService', port_name)
if invoice.sii_state == 'not_sent':
tipo_comunicacion = 'A0'
else:
tipo_comunicacion = 'A1'
header = invoice._get_sii_header(tipo_comunicacion)
inv_vals = {
'sii_header_sent': json.dumps(header, indent=4),
}
try:
inv_dict = invoice._get_sii_invoice_dict()
inv_vals['sii_content_sent'] = json.dumps(inv_dict, indent=4)
if invoice.type in ['out_invoice', 'out_refund']:
res = serv.SuministroLRFacturasEmitidas(
header, inv_dict)
elif invoice.type in ['in_invoice', 'in_refund']:
res = serv.SuministroLRFacturasRecibidas(
header, inv_dict)
                # TODO Intra-community invoices 66 RIVA
# elif invoice.fiscal_position.id == self.env.ref(
# 'account.fp_intra').id:
# res = serv.SuministroLRDetOperacionIntracomunitaria(
# header, invoices)
res_line = res['RespuestaLinea'][0]
if res['EstadoEnvio'] == 'Correcto':
inv_vals.update({
'sii_state': 'sent',
'sii_csv': res['CSV'],
'sii_send_failed': False,
})
elif res['EstadoEnvio'] == 'ParcialmenteCorrecto' and \
res_line['EstadoRegistro'] == 'AceptadoConErrores':
inv_vals.update({
'sii_state': 'sent_w_errors',
'sii_csv': res['CSV'],
'sii_send_failed': True,
})
else:
inv_vals['sii_send_failed'] = True
if ('sii_state' in inv_vals and
not invoice.sii_account_registration_date and
invoice.type[:2] == 'in'):
inv_vals['sii_account_registration_date'] = (
self._get_account_registration_date()
)
inv_vals['sii_return'] = res
send_error = False
if res_line['CodigoErrorRegistro']:
send_error = u"{} | {}".format(
unicode(res_line['CodigoErrorRegistro']),
unicode(res_line['DescripcionErrorRegistro'])[:60])
inv_vals['sii_send_error'] = send_error
invoice.write(inv_vals)
except Exception as fault:
new_cr = RegistryManager.get(self.env.cr.dbname).cursor()
env = api.Environment(new_cr, self.env.uid, self.env.context)
invoice = env['account.invoice'].browse(self.id)
inv_vals.update({
'sii_send_failed': True,
'sii_send_error': ustr(fault),
'sii_return': ustr(fault),
})
invoice.write(inv_vals)
new_cr.commit()
new_cr.close()
raise
@api.multi
def invoice_validate(self):
res = super(AccountInvoice, self).invoice_validate()
for invoice in self.filtered('sii_enabled'):
if invoice.sii_state == 'sent':
invoice.sii_state = 'sent_modified'
elif invoice.sii_state == 'cancelled':
invoice.sii_state = 'cancelled_modified'
company = invoice.company_id
if company.sii_method != 'auto':
continue
invoice._process_invoice_for_sii_send()
return res
@api.multi
def send_sii(self):
invoices = self.filtered(
lambda i: (
i.sii_enabled and i.state in ['open', 'paid'] and
i.sii_state not in ['sent', 'cancelled']
)
)
if not invoices._cancel_invoice_jobs():
raise exceptions.Warning(_(
'You can not communicate this invoice at this moment '
'because there is a job running!'))
invoices._process_invoice_for_sii_send()
@api.multi
def _cancel_invoice_to_sii(self):
for invoice in self.filtered(lambda i: i.state in ['cancel']):
company = invoice.company_id
port_name = ''
wsdl = ''
if invoice.type in ['out_invoice', 'out_refund']:
wsdl = self.env['ir.config_parameter'].get_param(
'l10n_es_aeat_sii.wsdl_out', False)
port_name = 'SuministroFactEmitidas'
if company.sii_test:
port_name += 'Pruebas'
elif invoice.type in ['in_invoice', 'in_refund']:
wsdl = self.env['ir.config_parameter'].get_param(
'l10n_es_aeat_sii.wsdl_in', False)
port_name = 'SuministroFactRecibidas'
if company.sii_test:
port_name += 'Pruebas'
client = self._connect_sii(wsdl)
serv = client.bind('siiService', port_name)
header = invoice._get_sii_header(cancellation=True)
try:
inv_dict = invoice._get_cancel_sii_invoice_dict()
if invoice.type in ['out_invoice', 'out_refund']:
res = serv.AnulacionLRFacturasEmitidas(
header, inv_dict)
elif invoice.type in ['in_invoice', 'in_refund']:
res = serv.AnulacionLRFacturasRecibidas(
header, inv_dict)
                # TODO Intra-community invoices 66 RIVA
# elif invoice.fiscal_position.id == self.env.ref(
# 'account.fp_intra').id:
# res = serv.AnulacionLRDetOperacionIntracomunitaria(
# header, invoices)
if res['EstadoEnvio'] == 'Correcto':
invoice.sii_state = 'cancelled'
invoice.sii_csv = res['CSV']
invoice.sii_send_failed = False
else:
invoice.sii_send_failed = True
invoice.sii_return = res
send_error = False
res_line = res['RespuestaLinea'][0]
if res_line['CodigoErrorRegistro']:
send_error = u"{} | {}".format(
unicode(res_line['CodigoErrorRegistro']),
unicode(res_line['DescripcionErrorRegistro'])[:60])
invoice.sii_send_error = send_error
except Exception as fault:
new_cr = RegistryManager.get(self.env.cr.dbname).cursor()
env = api.Environment(new_cr, self.env.uid, self.env.context)
invoice = env['account.invoice'].browse(self.id)
invoice.sii_send_error = fault
invoice.sii_send_failed = True
invoice.sii_return = fault
new_cr.commit()
new_cr.close()
raise
@api.multi
def cancel_sii(self):
invoices = self.filtered(
lambda i: (i.sii_enabled and i.state in ['cancel'] and
i.sii_state in ['sent', 'sent_w_errors',
'sent_modified'])
)
if not invoices._cancel_invoice_jobs():
raise exceptions.Warning(_(
'You can not communicate the cancellation of this invoice '
'at this moment because there is a job running!'))
queue_obj = self.env['queue.job']
for invoice in invoices:
company = invoice.company_id
if not company.use_connector:
invoice._cancel_invoice_to_sii()
else:
eta = company._get_sii_eta()
ctx = self.env.context.copy()
ctx.update(company_id=company.id)
session = ConnectorSession(
self.env.cr, SUPERUSER_ID, context=ctx,
)
new_delay = cancel_one_invoice.delay(
session, 'account.invoice', invoice.id, eta=eta)
queue_ids = queue_obj.search([
('uuid', '=', new_delay)
], limit=1)
invoice.sudo().invoice_jobs_ids |= queue_ids
@api.multi
def _cancel_invoice_jobs(self):
for queue in self.mapped('invoice_jobs_ids'):
if queue.state == 'started':
return False
elif queue.state in ('pending', 'enqueued', 'failed'):
queue.sudo().unlink()
return True
@api.multi
def action_cancel(self):
if not self._cancel_invoice_jobs():
raise exceptions.Warning(_(
'You can not cancel this invoice because'
' there is a job running!'))
res = super(AccountInvoice, self).action_cancel()
if self.sii_state == 'sent':
self.sii_state = 'sent_modified'
elif self.sii_state == 'cancelled_modified':
            # Case when reopening a cancelled invoice, validating and cancelling it again
# without any SII communication.
self.sii_state = 'cancelled'
return res
@api.multi
def action_cancel_draft(self):
if not self._cancel_invoice_jobs():
raise exceptions.Warning(_(
'You can not set to draft this invoice because'
' there is a job running!'))
return super(AccountInvoice, self).action_cancel_draft()
@api.multi
def _get_sii_gen_type(self):
"""Make a choice for general invoice type
Returns:
int: 1 (National), 2 (Intracom), 3 (Export)
"""
self.ensure_one()
partner_ident = self.fiscal_position.sii_partner_identification_type
if partner_ident:
res = int(partner_ident)
elif self.fiscal_position.name == u'Régimen Intracomunitario':
res = 2
elif (self.fiscal_position.name ==
u'Régimen Extracomunitario / Canarias, Ceuta y Melilla'):
res = 3
else:
res = 1
return res
@api.multi
def _get_sii_identifier(self):
"""Get the SII structure for a partner identifier depending on the
conditions of the invoice.
"""
self.ensure_one()
gen_type = self._get_sii_gen_type()
        # Keep only alphanumeric characters
if self.partner_id.vat:
vat = ''.join(
e for e in self.partner_id.vat if e.isalnum()
).upper()
else:
vat = 'NO_DISPONIBLE'
country_code = self._get_sii_country_code()
if gen_type == 1:
if '1117' in (self.sii_send_error or ''):
return {
"IDOtro": {
"CodigoPais": country_code,
"IDType": '07',
"ID": vat[2:],
}
}
else:
if country_code != 'ES':
id_type = '06' if vat == 'NO_DISPONIBLE' else '04'
return {
"IDOtro": {
"CodigoPais": country_code,
"IDType": id_type,
"ID": vat,
},
}
else:
return {"NIF": vat[2:]}
elif gen_type == 2:
return {
"IDOtro": {
"IDType": '02',
"ID": vat,
}
}
elif gen_type == 3 and country_code != 'ES':
id_type = '06' if vat == 'NO_DISPONIBLE' else '04'
return {
"IDOtro": {
"CodigoPais": country_code,
"IDType": id_type,
"ID": vat,
},
}
elif gen_type == 3:
return {"NIF": vat[2:]}
@api.multi
def _get_sii_exempt_cause(self, product):
"""Código de la causa de exención según 3.6 y 3.7 de la FAQ del SII."""
self.ensure_one()
gen_type = self._get_sii_gen_type()
if gen_type == 2:
return 'E5'
elif gen_type == 3:
return 'E2'
elif product.sii_exempt_cause != 'none':
return product.sii_exempt_cause
elif self.fiscal_position and \
self.fiscal_position.sii_exempt_cause != 'none':
return self.fiscal_position.sii_exempt_cause
@api.multi
def _get_no_taxable_cause(self):
self.ensure_one()
return (self.fiscal_position.sii_no_taxable_cause or
'ImportePorArticulos7_14_Otros')
@api.multi
def _get_sii_country_code(self):
self.ensure_one()
country_code = (
self.partner_id.commercial_partner_id.country_id.code or
(self.partner_id.vat or '')[:2]
).upper()
return SII_COUNTRY_CODE_MAPPING.get(country_code, country_code)
@api.multi
@api.depends('invoice_line', 'invoice_line.name', 'company_id',
'sii_manual_description')
def _compute_sii_description(self):
for invoice in self:
if invoice.type in ['out_invoice', 'out_refund']:
description = invoice.company_id.sii_header_customer or ''
else: # supplier invoices
description = invoice.company_id.sii_header_supplier or ''
method = invoice.company_id.sii_description_method
if method == 'fixed':
description += (invoice.company_id.sii_description or '/')
elif method == 'manual':
description = (
invoice.sii_manual_description or description or '/'
)
else: # auto method
if invoice.invoice_line:
if description:
description += ' | '
description += ' - '.join(
invoice.mapped('invoice_line.name')
)
invoice.sii_description = description[:500] or '/'
@api.multi
def _inverse_sii_description(self):
for invoice in self:
invoice.sii_manual_description = invoice.sii_description
@api.multi
@api.depends('company_id', 'company_id.sii_enabled',
'fiscal_position', 'fiscal_position.sii_active')
def _compute_sii_enabled(self):
"""Compute if the invoice is enabled for the SII"""
for invoice in self:
if invoice.company_id.sii_enabled:
invoice.sii_enabled = (
(invoice.fiscal_position and
invoice.fiscal_position.sii_active) or
not invoice.fiscal_position
)
else:
invoice.sii_enabled = False
@api.model
def _prepare_refund(self, invoice, date=None, period_id=None,
description=None, journal_id=None):
res = super(AccountInvoice, self)._prepare_refund(
invoice, date=date, period_id=period_id,
description=description, journal_id=journal_id,
)
sii_refund_type = self.env.context.get('sii_refund_type')
supplier_invoice_number_refund = self.env.context.get(
'supplier_invoice_number'
)
if sii_refund_type:
res['sii_refund_type'] = sii_refund_type
if supplier_invoice_number_refund:
res['supplier_invoice_number'] = supplier_invoice_number_refund
return res
@api.multi
def _get_sii_sign(self):
self.ensure_one()
return -1.0 if self.sii_refund_type == 'I' and 'refund' in self.type \
else 1.0
class AccountInvoiceLine(models.Model):
_inherit = 'account.invoice.line'
@api.multi
def _get_sii_line_price_unit(self):
"""Obtain the effective invoice line price after discount. This is
        obtained through this method, as it can be inherited in other modules
        for altering the expected amount according to other criteria."""
self.ensure_one()
price_unit = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
if self.invoice_id.currency_id != \
self.invoice_id.company_id.currency_id:
from_currency = self.invoice_id.currency_id. \
with_context(date=self.invoice_id.date_invoice)
price_unit = from_currency. \
compute(price_unit, self.invoice_id.company_id.currency_id,
round=False)
return price_unit
@api.multi
def _get_sii_line_price_subtotal(self):
"""Obtain the effective invoice line price after discount. Needed as
we can modify the unit price via inheritance."""
self.ensure_one()
price = self._get_sii_line_price_unit()
taxes = self.invoice_line_tax_id.compute_all(
price, self.quantity, product=self.product_id,
partner=self.invoice_id.partner_id)
return taxes['total']
@api.multi
def _get_sii_tax_line_req(self):
"""Get any possible tax amounts for 'Recargo equivalencia'."""
self.ensure_one()
taxes_re = self.invoice_id._get_sii_taxes_map(['RE'])
for tax in self.invoice_line_tax_id:
if tax in taxes_re:
price = self._get_sii_line_price_unit()
taxes = tax.compute_all(
price, self.quantity, self.product_id,
self.invoice_id.partner_id,
)
taxes['percentage'] = tax.amount
return taxes
return {}
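    # Illustrative result (added sketch; the exact keys besides 'percentage'
    # depend on Odoo's compute_all() output): for a hypothetical 5.2% 'recargo
    # de equivalencia' tax on a 100.0 line, the dict looks roughly like
    # {'taxes': [{'amount': 5.2, ...}], 'percentage': 0.052, ...}.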
@api.model
def _update_sii_tax_line(self, tax_dict, tax_line):
"""Update the SII taxes dictionary for the passed tax line.
:param self: Single invoice line record.
:param tax_dict: Previous SII taxes dictionary.
:param tax_line: Tax line that is being analyzed.
"""
self.ensure_one()
if tax_line.child_depend:
tax_type = abs(tax_line.child_ids.filtered('amount')[:1].amount)
else:
tax_type = abs(tax_line.amount)
if tax_type not in tax_dict:
tax_dict[tax_type] = {
'TipoImpositivo': str(tax_type * 100),
'BaseImponible': 0,
'CuotaRepercutida': 0,
'CuotaSoportada': 0,
}
        # Equivalence surcharge ('recargo de equivalencia')
tax_line_req = self._get_sii_tax_line_req()
if tax_line_req:
tipo_recargo = tax_line_req['percentage'] * 100
cuota_recargo = tax_line_req['taxes'][0]['amount']
tax_dict[tax_type]['TipoRecargoEquivalencia'] = tipo_recargo
tax_dict[tax_type].setdefault('CuotaRecargoEquivalencia', 0)
tax_dict[tax_type]['CuotaRecargoEquivalencia'] += cuota_recargo
# Rest of the taxes
taxes = tax_line.compute_all(
self._get_sii_line_price_unit(), self.quantity,
self.product_id, self.invoice_id.partner_id,
)
tax_dict[tax_type]['BaseImponible'] += taxes['total']
if self.invoice_id.type in ['out_invoice', 'out_refund']:
key = 'CuotaRepercutida'
else:
key = 'CuotaSoportada'
if taxes['total'] >= 0:
sii_included_taxes = [t for t in taxes['taxes']
if t['amount'] >= 0]
else:
sii_included_taxes = [t for t in taxes['taxes'] if t['amount'] < 0]
for tax in sii_included_taxes:
tax_dict[tax_type][key] += tax['amount']
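    # Illustrative result (added sketch, hypothetical figures): for a customer
    # invoice line of 100.0 carrying a single 21% VAT tax, tax_dict ends up
    # keyed by the tax rate (0.21), with 'BaseImponible' accumulating 100.0 and
    # 'CuotaRepercutida' accumulating 21.0 ('CuotaSoportada' is used instead
    # for supplier invoices).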
@job(default_channel='root.invoice_validate_sii')
def confirm_one_invoice(session, model_name, invoice_id):
model = session.env[model_name]
invoice = model.browse(invoice_id)
if invoice.exists():
invoice._send_invoice_to_sii()
@job(default_channel='root.invoice_validate_sii')
def cancel_one_invoice(session, model_name, invoice_id):
model = session.env[model_name]
invoice = model.browse(invoice_id)
if invoice.exists():
invoice._cancel_invoice_to_sii()
|
agpl-3.0
| -6,420,739,032,322,082,000 | 42.225032 | 79 | 0.513249 | false | 3.984268 | false | false | false |
jcarreiro/jmc-python
|
essential_algorithms/chapter_2.py
|
1
|
1820
|
from __future__ import absolute_import
import random
from jmc.algorithms.searching import binary_search
def coin_flip(seed=None):
"""Generates coin flips using a fair six-sided die."""
if seed:
random.seed(seed)
return random.randint(1, 6) > 3
def test_coin_flip(count):
heads = 0
tails = 0
for x in xrange(0, count):
if coin_flip():
heads += 1
else:
tails += 1
return [heads, tails]
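# Example usage (hypothetical output): test_coin_flip(1000) might return
# something like [503, 497]; both counts should be close to 500, since
# randint(1, 6) > 3 is true for exactly three of the six faces.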
class BiasedDie(object):
def __init__(self, faces, r=random.Random()):
"""Create a biased die. Faces must be a list of floats, which are the
cumulative probability of a roll resulting in a value less than or equal to
the value of that face. Faces are implictly numbered from 1 to N.
"""
self.faces = faces
self.r = r
def roll(self):
        return binary_search(self.faces, self.r.random()) + 1
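# Example (a sketch that assumes binary_search returns the index of the first
# cumulative value >= the drawn number): BiasedDie([0.5, 0.75, 1.0]).roll()
# yields 1 about half of the time and 2 or 3 about a quarter of the time each.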
def fair_d6(seed=None):
"""Uses a biased d6 to generate fair values between 1 and 6."""
# pick random weights for the faces, then normalize
    # use a dedicated Random instance so the BiasedDie below can share it
    r = random.Random(seed)
    faces = [r.random() for x in range(6)]
total = sum(faces)
faces = map(lambda x: x / total, faces)
faces = [sum(faces[:x]) for x in range(1,7)]
print faces
# Roll a biased d6. If we see a 1, 2, or 3, followed by a 4, 5, or 6, call
# that a 0, and call a 4, 5, or 6 followed by a 1, 2, or 3 a 1. Ignore all
# other results. This gives us a 0 or a 1 with equal probability.
d6 = BiasedDie(faces, r) # ok to re-use r, we're done with the stream now
while True:
s = '0b' # yeah this is clowny
while len(s) < 5:
a = d6.roll()
b = d6.roll()
if a <= 3 and b >= 4:
s += '0'
elif a >= 4 and b <= 3:
s += '1'
result = int(s, 0)
if result > 0 and result < 7:
yield result
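# Example usage of the generator above (sketch): draw five fair rolls from a
# randomly biased die.
#   gen = fair_d6(seed=42)
#   rolls = [next(gen) for _ in range(5)]  # e.g. [3, 6, 1, 4, 2], values 1-6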
|
mit
| -2,899,852,987,777,092,600 | 28.836066 | 79 | 0.601099 | false | 3.238434 | false | false | false |
cuhk-cse/CoMonitor
|
benchmarks/CS_PCA_twc12/run_orangelab_temperature.py
|
1
|
1732
|
#########################################################
# run_orangelab_temperature.py
# Author: Jamie Zhu <jimzhu@GitHub>
# Created: 2015/8/24
# Last updated: 2015/8/24
#########################################################
import numpy as np
import os, sys, time
sys.path.append('../')
from commons.utils import logger
from commons import utils
from commons import dataloader
import evaluator
# parameter config area
para = {'dataPath': '../data/', # data path
'dataName': 'Orangelab_sense_temperature', # set the dataset name
'outPath': 'result/', # output path for results
'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE', 'NNPRE', 'SNR'], # evaluation metrics
'samplingRate': np.arange(0.05, 0.96, 0.05), # sampling rate
'rounds': 1, # how many runs to perform at each sampling rate
'lmbda': 1e-5, # sparisty regularization parameter
'trainingPeriod': 33, # training time periods
'saveTimeInfo': False, # whether to keep track of the running time
'saveLog': False, # whether to save log into file
'debugMode': False, #whether to record the debug info
'parallelMode': False # whether to leverage multiprocessing for speedup
}
startTime = time.time() # start timing
utils.setConfig(para) # set configuration
logger.info('==============================================')
logger.info('CS-PCA: [Quer et al., TWC\'2012]')
# load the dataset
dataMatrix = dataloader.load(para)
# evaluate compressive monitoring algorithm
evaluator.execute(dataMatrix, para)
logger.info('All done. Elapsed time: ' + utils.formatElapsedTime(time.time() - startTime)) # end timing
logger.info('==============================================')
|
mit
| 5,718,934,930,505,162,000 | 36.652174 | 104 | 0.598152 | false | 3.900901 | false | false | false |
cpcloud/ibis
|
ibis/pandas/execution/tests/test_join.py
|
1
|
13150
|
import pandas as pd
import pandas.util.testing as tm
import pytest
from pytest import param
import ibis
import ibis.common.exceptions as com
pytestmark = pytest.mark.pandas
join_type = pytest.mark.parametrize(
'how',
[
'inner',
'left',
'right',
'outer',
param(
'semi',
marks=pytest.mark.xfail(
raises=NotImplementedError, reason='Semi join not implemented'
),
),
param(
'anti',
marks=pytest.mark.xfail(
raises=NotImplementedError, reason='Anti join not implemented'
),
),
],
)
@join_type
def test_join(how, left, right, df1, df2):
expr = left.join(right, left.key == right.key, how=how)[
left, right.other_value, right.key3
]
result = expr.execute()
expected = pd.merge(df1, df2, how=how, on='key')
tm.assert_frame_equal(result[expected.columns], expected)
def test_cross_join(left, right, df1, df2):
expr = left.cross_join(right)[left, right.other_value, right.key3]
result = expr.execute()
expected = pd.merge(
df1.assign(dummy=1), df2.assign(dummy=1), how='inner', on='dummy'
).rename(columns=dict(key_x='key'))
del expected['dummy'], expected['key_y']
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_project_left_table(how, left, right, df1, df2):
expr = left.join(right, left.key == right.key, how=how)[left, right.key3]
result = expr.execute()
expected = pd.merge(df1, df2, how=how, on='key')[
list(left.columns) + ['key3']
]
tm.assert_frame_equal(result[expected.columns], expected)
def test_cross_join_project_left_table(left, right, df1, df2):
expr = left.cross_join(right)[left, right.key3]
result = expr.execute()
expected = pd.merge(
df1.assign(dummy=1), df2.assign(dummy=1), how='inner', on='dummy'
).rename(columns=dict(key_x='key'))[list(left.columns) + ['key3']]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_multiple_predicates(how, left, right, df1, df2):
expr = left.join(
right, [left.key == right.key, left.key2 == right.key3], how=how
)[left, right.key3, right.other_value]
result = expr.execute()
expected = pd.merge(
df1, df2, how=how, left_on=['key', 'key2'], right_on=['key', 'key3']
).reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_multiple_predicates_written_as_one(
how, left, right, df1, df2
):
predicate = (left.key == right.key) & (left.key2 == right.key3)
expr = left.join(right, predicate, how=how)[
left, right.key3, right.other_value
]
result = expr.execute()
expected = pd.merge(
df1, df2, how=how, left_on=['key', 'key2'], right_on=['key', 'key3']
).reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_invalid_predicates(how, left, right):
predicate = (left.key == right.key) & (left.key2 <= right.key3)
expr = left.join(right, predicate, how=how)
with pytest.raises(TypeError):
expr.execute()
predicate = left.key >= right.key
expr = left.join(right, predicate, how=how)
with pytest.raises(TypeError):
expr.execute()
@join_type
@pytest.mark.xfail(reason='Hard to detect this case')
def test_join_with_duplicate_non_key_columns(how, left, right, df1, df2):
left = left.mutate(x=left.value * 2)
right = right.mutate(x=right.other_value * 3)
expr = left.join(right, left.key == right.key, how=how)
# This is undefined behavior because `x` is duplicated. This is difficult
# to detect
with pytest.raises(ValueError):
expr.execute()
@join_type
def test_join_with_duplicate_non_key_columns_not_selected(
how, left, right, df1, df2
):
left = left.mutate(x=left.value * 2)
right = right.mutate(x=right.other_value * 3)
right = right[['key', 'other_value']]
expr = left.join(right, left.key == right.key, how=how)[
left, right.other_value
]
result = expr.execute()
expected = pd.merge(
df1.assign(x=df1.value * 2),
df2[['key', 'other_value']],
how=how,
on='key',
)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_post_expression_selection(how, left, right, df1, df2):
join = left.join(right, left.key == right.key, how=how)
expr = join[left.key, left.value, right.other_value]
result = expr.execute()
expected = pd.merge(df1, df2, on='key', how=how)[
['key', 'value', 'other_value']
]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_post_expression_filter(how, left):
lhs = left[['key', 'key2']]
rhs = left[['key2', 'value']]
joined = lhs.join(rhs, 'key2', how=how)
projected = joined[lhs, rhs.value]
expr = projected[projected.value == 4]
result = expr.execute()
df1 = lhs.execute()
df2 = rhs.execute()
expected = pd.merge(df1, df2, on='key2', how=how)
expected = expected.loc[expected.value == 4].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@join_type
def test_multi_join_with_post_expression_filter(how, left, df1):
lhs = left[['key', 'key2']]
rhs = left[['key2', 'value']]
rhs2 = left[['key2', 'value']].relabel(dict(value='value2'))
joined = lhs.join(rhs, 'key2', how=how)
projected = joined[lhs, rhs.value]
filtered = projected[projected.value == 4]
joined2 = filtered.join(rhs2, 'key2')
projected2 = joined2[filtered.key, rhs2.value2]
expr = projected2[projected2.value2 == 3]
result = expr.execute()
df1 = lhs.execute()
df2 = rhs.execute()
df3 = rhs2.execute()
expected = pd.merge(df1, df2, on='key2', how=how)
expected = expected.loc[expected.value == 4].reset_index(drop=True)
expected = pd.merge(expected, df3, on='key2')[['key', 'value2']]
expected = expected.loc[expected.value2 == 3].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@join_type
def test_join_with_non_trivial_key(how, left, right, df1, df2):
# also test that the order of operands in the predicate doesn't matter
join = left.join(right, right.key.length() == left.key.length(), how=how)
expr = join[left.key, left.value, right.other_value]
result = expr.execute()
expected = (
pd.merge(
df1.assign(key_len=df1.key.str.len()),
df2.assign(key_len=df2.key.str.len()),
on='key_len',
how=how,
)
.drop(['key_len', 'key_y', 'key2', 'key3'], axis=1)
.rename(columns={'key_x': 'key'})
)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_non_trivial_key_project_table(how, left, right, df1, df2):
# also test that the order of operands in the predicate doesn't matter
join = left.join(right, right.key.length() == left.key.length(), how=how)
expr = join[left, right.other_value]
expr = expr[expr.key.length() == 1]
result = expr.execute()
expected = (
pd.merge(
df1.assign(key_len=df1.key.str.len()),
df2.assign(key_len=df2.key.str.len()),
on='key_len',
how=how,
)
.drop(['key_len', 'key_y', 'key2', 'key3'], axis=1)
.rename(columns={'key_x': 'key'})
)
expected = expected.loc[expected.key.str.len() == 1]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_project_right_duplicate_column(client, how, left, df1, df3):
# also test that the order of operands in the predicate doesn't matter
right = client.table('df3')
join = left.join(right, ['key'], how=how)
expr = join[left.key, right.key2, right.other_value]
result = expr.execute()
expected = (
pd.merge(df1, df3, on='key', how=how)
.drop(['key2_x', 'key3', 'value'], axis=1)
.rename(columns={'key2_y': 'key2'})
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_join_with_window_function(
players_base, players_df, batting, batting_df
):
players = players_base
# this should be semi_join
tbl = batting.left_join(players, ['playerID'])
t = tbl[batting.G, batting.playerID, batting.teamID]
expr = t.groupby(t.teamID).mutate(
team_avg=lambda d: d.G.mean(),
demeaned_by_player=lambda d: d.G - d.G.mean(),
)
result = expr.execute()
expected = pd.merge(
batting_df, players_df[['playerID']], on='playerID', how='left'
)[['G', 'playerID', 'teamID']]
team_avg = expected.groupby('teamID').G.transform('mean')
expected = expected.assign(
team_avg=team_avg, demeaned_by_player=lambda df: df.G - team_avg
)
tm.assert_frame_equal(result[expected.columns], expected)
merge_asof_minversion = pytest.mark.skipif(
pd.__version__ < '0.19.2',
reason="at least pandas-0.19.2 required for merge_asof",
)
@merge_asof_minversion
def test_asof_join(time_left, time_right, time_df1, time_df2):
expr = time_left.asof_join(time_right, 'time')[
time_left, time_right.other_value
]
result = expr.execute()
expected = pd.merge_asof(time_df1, time_df2, on='time')
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_asof_join_predicate(time_left, time_right, time_df1, time_df2):
expr = time_left.asof_join(time_right, time_left.time == time_right.time)[
time_left, time_right.other_value
]
result = expr.execute()
expected = pd.merge_asof(time_df1, time_df2, on='time')
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_keyed_asof_join(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2
):
expr = time_keyed_left.asof_join(time_keyed_right, 'time', by='key')[
time_keyed_left, time_keyed_right.other_value
]
result = expr.execute()
expected = pd.merge_asof(
time_keyed_df1, time_keyed_df2, on='time', by='key'
)
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_keyed_asof_join_with_tolerance(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2
):
expr = time_keyed_left.asof_join(
time_keyed_right, 'time', by='key', tolerance=2 * ibis.interval(days=1)
)[time_keyed_left, time_keyed_right.other_value]
result = expr.execute()
expected = pd.merge_asof(
time_keyed_df1,
time_keyed_df2,
on='time',
by='key',
tolerance=pd.Timedelta('2D'),
)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.parametrize(
"how",
[
"left",
pytest.param(
"right",
marks=pytest.mark.xfail(
raises=AttributeError, reason="right_join is not an ibis API"
),
),
"inner",
"outer",
],
)
@pytest.mark.parametrize(
"func",
[
pytest.param(lambda join: join["a0", "a1"], id="tuple"),
pytest.param(lambda join: join[["a0", "a1"]], id="list"),
pytest.param(lambda join: join.select(["a0", "a1"]), id="select"),
],
)
@pytest.mark.xfail(
raises=(com.IbisError, AttributeError),
reason="Select from unambiguous joins not implemented",
)
def test_select_on_unambiguous_join(how, func):
df_t = pd.DataFrame(dict(a0=[1, 2, 3], b1=list("aab")))
df_s = pd.DataFrame(dict(a1=[2, 3, 4], b2=list("abc")))
con = ibis.pandas.connect({"t": df_t, "s": df_s})
t = con.table("t")
s = con.table("s")
method = getattr(t, "{}_join".format(how))
join = method(s, t.b1 == s.b2)
expected = pd.merge(df_t, df_s, left_on=["b1"], right_on=["b2"], how=how)[
["a0", "a1"]
]
assert not expected.empty
expr = func(join)
result = expr.execute()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func",
[
pytest.param(lambda join: join["a0", "a1"], id="tuple"),
pytest.param(lambda join: join[["a0", "a1"]], id="list"),
pytest.param(lambda join: join.select(["a0", "a1"]), id="select"),
],
)
@pytest.mark.xfail(
raises=(com.IbisError, AttributeError),
reason="Select from unambiguous joins not implemented",
)
@merge_asof_minversion
def test_select_on_unambiguous_asof_join(func):
df_t = pd.DataFrame(
dict(a0=[1, 2, 3], b1=pd.date_range("20180101", periods=3))
)
df_s = pd.DataFrame(
dict(a1=[2, 3, 4], b2=pd.date_range("20171230", periods=3))
)
con = ibis.pandas.connect({"t": df_t, "s": df_s})
t = con.table("t")
s = con.table("s")
join = t.asof_join(s, t.b1 == s.b2)
expected = pd.merge_asof(df_t, df_s, left_on=["b1"], right_on=["b2"])[
["a0", "a1"]
]
assert not expected.empty
expr = func(join)
result = expr.execute()
tm.assert_frame_equal(result, expected)
|
apache-2.0
| -4,809,177,219,277,779,000 | 30.534772 | 79 | 0.615817 | false | 3.051752 | true | false | false |
ihartung/460-Lab1
|
lab4/Lab4.py
|
1
|
16224
|
from __future__ import print_function
import sys
sys.path.append('..')
from src.sim import Sim
from src.packet import Packet
from dvrouting import DvroutingApp
from networks.network import Network
class BroadcastApp(object):
def __init__(self, node):
self.node = node
def receive_packet(self, packet):
print(Sim.scheduler.current_time(), self.node.hostname, packet.ident)
def p_setup(nodey):
dv = DvroutingApp(nodey)
nodey.add_protocol(protocol="dvrouting", handler=dv)
def exp1():
# parameters
Sim.scheduler.reset()
Sim.set_debug(True)
# setup network
net = Network('../networks/l4e1.txt')
# get nodes
n1 = net.get_node('n1')
n2 = net.get_node('n2')
n3 = net.get_node('n3')
n4 = net.get_node('n4')
n5 = net.get_node('n5')
# setup broadcast application
p_setup(n1)
p_setup(n2)
p_setup(n3)
p_setup(n4)
p_setup(n5)
#send to every node from n1
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n1.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n1.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n1.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n1.send_packet)
#send to every node from n2
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n2.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n2.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n2.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n2.send_packet)
#send to every node from n3
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n3.send_packet)
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n3.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n3.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n3.send_packet)
#send to every node from n4
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n4.send_packet)
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n4.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n4.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n4.send_packet)
#send to every node from n5
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n5.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n5.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n5.send_packet)
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n5.send_packet)
# run the simulation
Sim.scheduler.run()
def exp2():
# parameters
Sim.scheduler.reset()
Sim.set_debug(True)
# setup network
net = Network('../networks/l4e2.txt')
# get nodes
n1 = net.get_node('n1')
n2 = net.get_node('n2')
n3 = net.get_node('n3')
n4 = net.get_node('n4')
n5 = net.get_node('n5')
# setup broadcast application
p_setup(n1)
p_setup(n2)
p_setup(n3)
p_setup(n4)
p_setup(n5)
#send to every node from n1
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n1.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n1.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n1.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n1.send_packet)
#send to every node from n2
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n2.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n2.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n2.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n2.send_packet)
#send to every node from n3
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n3.send_packet)
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n3.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n3.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n3.send_packet)
#send to every node from n4
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n4.send_packet)
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n4.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n4.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n4.send_packet)
#send to every node from n5
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n5.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n5.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n5.send_packet)
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=5, event=p, handler=n5.send_packet)
Sim.scheduler.add(delay=6, event=None, handler=n1.get_link('n2').down)
#wait for things to update
#send to every node from n1
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n1.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n1.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n1.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n1.send_packet)
#send to every node from n2
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n2.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n2.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n2.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n2.send_packet)
#send to every node from n3
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n3.send_packet)
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n3.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n3.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n3.send_packet)
#send to every node from n4
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n4.send_packet)
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n4.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n4.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n4.send_packet)
#send to every node from n5
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n5.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n5.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n5.send_packet)
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=10, event=p, handler=n5.send_packet)
Sim.scheduler.add(delay=11, event=None, handler=n1.get_link('n2').up)
#send to every node from n1
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n1.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n1.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n1.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n1.send_packet)
#send to every node from n2
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n2.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n2.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n2.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n2.send_packet)
#send to every node from n3
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n3.send_packet)
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n3.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n3.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n3.send_packet)
#send to every node from n4
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n4.send_packet)
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n4.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n4.send_packet)
p = Packet(destination_address=n5.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n4.send_packet)
#send to every node from n5
p = Packet(destination_address=n2.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n5.send_packet)
p = Packet(destination_address=n3.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n5.send_packet)
p = Packet(destination_address=n4.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n5.send_packet)
p = Packet(destination_address=n1.get_address('n1'), ident=1, protocol='delay', length=1000)
Sim.scheduler.add(delay=15, event=p, handler=n5.send_packet)
# run the simulation
Sim.scheduler.run()
def exp3():
# parameters
Sim.scheduler.reset()
Sim.set_debug(True)
# setup network
net = Network('../networks/l4e3.txt')
# get nodes
n1 = net.get_node('n1')
n2 = net.get_node('n2')
n3 = net.get_node('n3')
n4 = net.get_node('n4')
n5 = net.get_node('n5')
n6 = net.get_node('n6')
n7 = net.get_node('n7')
n8 = net.get_node('n8')
n9 = net.get_node('n9')
n10 = net.get_node('n10')
n11 = net.get_node('n11')
n12 = net.get_node('n12')
n13 = net.get_node('n13')
n14 = net.get_node('n14')
n15 = net.get_node('n15')
# setup broadcast application
p_setup(n1)
p_setup(n2)
p_setup(n3)
p_setup(n4)
p_setup(n5)
p_setup(n6)
p_setup(n7)
p_setup(n8)
p_setup(n9)
p_setup(n10)
p_setup(n11)
p_setup(n12)
p_setup(n13)
p_setup(n14)
p_setup(n15)
# run the simulation
Sim.scheduler.run()
def main():
exp1()
exp2()
#exp3()
if __name__ == '__main__':
main()
|
gpl-2.0
| 3,474,339,910,995,493,000 | 46.717647 | 96 | 0.685343 | false | 2.92852 | false | false | false |
peterhogan/python
|
operations_test.py
|
1
|
1700
|
from random import randint
from random import shuffle
import operator
ops = { "+": operator.add, "-": operator.sub, "/": operator.truediv, "*": operator.mul}
all_ops = ["+","-","/","*"]*6
shuffle(all_ops)
bigs = [25,50,75,100,250,500,750,1000]*6
shuffle(bigs)
smalls = [1,2,3,4,5,6,7,8,9]*6
shuffle(smalls)
numbers = [bigs[0],smalls[0],smalls[1],smalls[2],bigs[1]]
#numbers = [bigs[0],smalls[0],smalls[1],smalls[2],bigs[1]]
print(numbers)
print("Calculating Number....")
shuffle(numbers)
counter = 1
correct_number = False
while correct_number == False:
try:
to_calc = ops[all_ops[0]](numbers[0],ops[all_ops[1]](numbers[1],ops[all_ops[2]](numbers[2],numbers[3])))
condition = (to_calc == round(to_calc)) and (to_calc > 30) and (to_calc < 1000)
if condition == True:
if counter > 1:
print("Got a sensible number after %d tries." % counter)
else:
print("Got a sensible number after 1 try.")
correct_number = True
else:
counter += 1
print("Got %f: trying again.." % to_calc)
shuffle(numbers)
shuffle(all_ops)
except ZeroDivisionError:
counter += 1
print("Division by Zero: trying again..")
shuffle(numbers)
shuffle(all_ops)
print("The number to find is: ")
print(to_calc)
list_to_calc = [numbers[0],all_ops[0],numbers[1],all_ops[1],numbers[2],all_ops[2],numbers[3]]
print(list_to_calc)
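# Worked example of the nested evaluation above (hypothetical draw): with
# numbers [25, 3, 7, 2] and operators ['+', '*', '-'], the value computed is
# 25 + (3 * (7 - 2)) = 40, i.e. the expression nests right-to-left rather than
# evaluating strictly left-to-right as the flat list printed above may suggest.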
|
mit
| 4,423,803,057,749,753,300 | 33 | 120 | 0.514706 | false | 3.505155 | false | false | false |
osrsbox/osrsbox-db
|
scripts/cache/generate_summary_models.py
|
1
|
5551
|
"""
Author: PH01L
Email: [email protected]
Website: https://www.osrsbox.com
Description:
Parse OSRS cache data and extract model ID numbers for items, npcs, and
objects. Known keys for models:
- items: inventoryModel
- npcs: models, models_2 (version 2 does not seem to be used)
- objects: objectModels
Copyright (c) 2020, PH01L
###############################################################################
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""
import json
from pathlib import Path
from typing import List
from typing import Dict
import config
from scripts.cache import cache_constants
SKIP_EMPTY_NAMES = ("null", "Null", "")
def extract_model_ids_int(json_data: Dict) -> Dict[str, Dict]:
    """Extracts the model ID numbers for ground, male and female item models.
    :param json_data: A dictionary from an item definition file.
    :return models: A dict of model dictionaries keyed by model type, type ID and model ID.
    """
# Set up output dict (to be populated with 1 or more model_dict)
models = {}
model_keys = {
"item_model_ground": "inventoryModel",
"item_model_male0": "maleModel0",
"item_model_male1": "maleModel1",
"item_model_male2": "maleModel2",
"item_model_female0": "femaleModel0",
"item_model_female1": "femaleModel1",
"item_model_female2": "femaleModel2"
}
for model_type, model_key in model_keys.items():
model_dict = dict()
# Set base properties
model_dict["model_type"] = model_type
model_dict["model_type_id"] = json_data["id"]
model_dict["model_name"] = json_data["name"]
        # Extract item model numbers
try:
model_dict["model_ids"] = json_data[model_key]
except KeyError:
continue
if model_dict["model_ids"] == -1:
continue
model_dict_key = f"{model_dict['model_type']}_{model_dict['model_type_id']}_{model_dict['model_ids']}"
models[model_dict_key] = model_dict
    # Return the dictionary of model_dicts
return models
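# Example shape of one extracted entry (illustrative values only):
#   {"item_model_ground_1_100": {"model_type": "item_model_ground",
#                                "model_type_id": 1,
#                                "model_name": "Example item",
#                                "model_ids": 100}}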
def extract_model_ids_list(json_data: Dict) -> Dict:
    """Extracts the model ID numbers for NPC models, NPC chatheads and object models.
    :param json_data: A dictionary from an npc or object definition file.
    :return models: A dictionary of model dicts containing type, type ID and model ID.
    """
# Set up output dict (to be populated with 1 or more model_dict)
models = {}
model_keys = {
"npc_model": "models",
"npc_chathead": "chatheadModels",
"object_model": "objectModels"
}
for model_type, model_key in model_keys.items():
model_dict = dict()
# Set base properties
model_dict["model_type"] = model_type
model_dict["model_type_id"] = json_data["id"]
model_dict["model_name"] = json_data["name"]
# Extract NPC model numbers
try:
model_dict["model_ids"] = ", ".join(str(n) for n in json_data[model_key])
except KeyError:
continue
model_dict_key = f"{model_dict['model_type']}_{model_dict['model_type_id']}_{model_dict['model_ids']}"
models[model_dict_key] = model_dict
    # Return the dictionary of model_dicts
return models
def process():
"""Extract OSRS model ID numbers that map to names."""
all_models = dict()
# Loop three cache types (items, npcs and objects)
all_definitions = {
"items": cache_constants.ITEM_DEFINITIONS,
"npcs": cache_constants.NPC_DEFINITIONS,
"objects": cache_constants.OBJECT_DEFINITIONS
}
for cache_name, definitions in all_definitions.items():
# Loop all entries in the loaded definition file
for id_number in definitions:
# Fetch the decompressed JSON data
json_data = definitions[id_number]
# Name check (it is of no use if it is empty/null, so exclude)
if json_data["name"] in SKIP_EMPTY_NAMES:
continue
# Process cache definition based on type (item, npc, object)
            # Items: Have single integer model IDs
            # NPCs: Have list of integer model IDs
# Objects: Have list of integer model IDs
if cache_name == "items":
extracted_models = extract_model_ids_int(json_data)
elif cache_name == "npcs":
extracted_models = extract_model_ids_list(json_data)
elif cache_name == "objects":
extracted_models = extract_model_ids_list(json_data)
# Add extracted models to all_models dictionary
all_models.update(extracted_models)
# Save all extracted models ID numbers to JSON file
out_fi = Path(config.DOCS_PATH / "models-summary.json")
with open(out_fi, "w") as f:
json.dump(all_models, f, indent=4)
if __name__ == "__main__":
process()
|
gpl-3.0
| 7,765,241,992,713,787,000 | 33.69375 | 110 | 0.619168 | false | 3.884535 | false | false | false |
patta42/pySICM
|
pySICMgui/actionhandler.py
|
1
|
2074
|
# Copyright (C) 2015 Patrick Happel <[email protected]>
#
# This file is part of pySICM.
#
# pySICM is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) any later
# version.
#
# pySICM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pySICM. If not, see <http://www.gnu.org/licenses/>.
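# HandledAction wires a Qt action to a named method on whichever widget sits in
# the currently active MDI subwindow; the action is only enabled while that
# widget actually provides the method (see setEnabledStatus below).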
class HandledAction:
action=None
function=None
mdiArea=None
def __init__(self, action, function, mdiArea):
self.action = action
self.function = function
self.mdiArea = mdiArea
self.action.triggered.connect(self.actionCalled)
def actionCalled(self):
widget = self.mdiArea.currentSubWindow().widget()
        try:
            getattr(widget, self.function)()
        except AttributeError:
            print "Widget "+str(widget)+" does not implement a method "+str(self.function)
def setEnabledStatus(self):
swin = self.mdiArea.currentSubWindow()
if swin is not None:
widget = swin.widget()
else:
widget = None
self.action.setEnabled(hasattr(widget, self.function))
class ActionHandler:
'''This class automates the support of calling a specific function in a
MdiArea-subwindow if the corresponding widget contains the respective
function. The main window should inherit from this class.'''
handlers = []
def __init__(self):
pass
def addActionHandler(self, action, funcname):
self.handlers.append(HandledAction(action, funcname, self.mdiArea))
def setEnabledStatus(self):
for ac in self.handlers:
ac.setEnabledStatus()
|
gpl-3.0
| 4,511,619,947,610,642,000 | 34.758621 | 90 | 0.679364 | false | 4.106931 | false | false | false |
Valka7a/python-playground
|
python-course-softuni/data-structures/ex4.py
|
1
|
1336
|
prices = []
while True:
# User input
user_input = input("Enter price or stop: ")
    # Show a warning if there aren't enough prices
if user_input == 'stop':
if len(prices) < 4:
print("You must enter 4 or more prices.")
continue
else:
break
try:
price = float(user_input)
if price <= 0:
raise Exception('Price cannot be less then or equal to 0.')
# Collect the price
prices.append(price)
except ValueError:
print('Invalid input!')
exit()
except Exception as error:
print(error)
exit()
# Print minimum and maximum prices
min_price = min(prices)
max_price = max(prices)
# Check if all the prices are the same.
if min_price == max_price:
print('All prices are the same: {0:.2f}'.format(min_price))
exit()
# Print min and max prices
print('Min price: {0:.2f}'.format(min_price))
print('Max prices: {0:.2f}'.format(max_price))
# Filter the rest of the prices
prices = list(filter(lambda item: item not in [min_price, max_price], prices))
# Check if there is average price
if len(prices) < 1:
print('Average price not found.')
exit()
# Calculate and print the average price
avg_price = sum(i for i in prices) / len(prices)
print('Average: {0:.2f}'.format(avg_price))
|
mit
| -4,293,241,250,670,452,700 | 24.207547 | 78 | 0.613024 | false | 3.562667 | false | false | false |
anubhav929/eden
|
modules/s3/s3fields.py
|
1
|
34578
|
# -*- coding: utf-8 -*-
""" S3 Extensions for gluon.dal.Field, reusable fields
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = [
"S3ReusableField",
"s3_uid",
"s3_meta_deletion_status",
"s3_meta_deletion_fk",
"s3_meta_deletion_rb",
"s3_deletion_status",
"s3_timestamp",
"s3_ownerstamp",
"s3_meta_fields",
"s3_all_meta_field_names", # Used by GIS
"s3_role_required", # Used by GIS
"s3_roles_permitted", # Used by CMS
"s3_lx_fields",
"s3_lx_onvalidation",
"s3_lx_update",
"s3_address_fields",
"s3_address_hide",
"s3_address_onvalidation",
"s3_address_update",
"s3_comments",
"s3_currency",
"s3_date",
]
from datetime import datetime
from uuid import uuid4
from gluon import *
# Here are dependencies listed for reference:
#from gluon import current
#from gluon.dal import Field
#from gluon.html import *
#from gluon.validators import *
from gluon.dal import Query, SQLCustomType
from gluon.storage import Storage
from s3utils import S3DateTime, s3_auth_user_represent, s3_auth_group_represent
from s3widgets import S3DateWidget
try:
db = current.db
except:
# Running from 000_1st_run
db = None
# =============================================================================
class FieldS3(Field):
"""
        S3 extensions of the gluon.sql.Field class
If Server Side Pagination is on, the proper CAST is needed to
match the lookup table id
"""
def __init__(self, fieldname,
type="string",
length=None,
default=None,
required=False,
requires="<default>",
ondelete="CASCADE",
notnull=False,
unique=False,
uploadfield=True,
widget=None,
label=None,
comment=None,
writable=True,
readable=True,
update=None,
authorize=None,
autodelete=False,
represent=None,
uploadfolder=None,
compute=None,
sortby=None):
self.sortby = sortby
Field.__init__(self,
fieldname,
type,
length,
default,
required,
requires,
ondelete,
notnull,
unique,
uploadfield,
widget,
label,
comment,
writable,
readable,
update,
authorize,
autodelete,
represent,
uploadfolder,
compute)
def join_via(self, value):
if self.type.find("reference") == 0:
return Query(self, "=", value)
else:
return QueryS3(self, "join_via", value)
# =============================================================================
class QueryS3(Query):
"""
S3 extensions of the gluon.sql.Query class
If Server Side Pagination is on, the proper CAST is needed to match
the string-typed id to lookup table id
"""
def __init__(self, left, op=None, right=None):
        if op != "join_via":
            Query.__init__(self, left, op, right)
        else:
            self.sql = "CAST(TRIM(%s,\"|\") AS INTEGER)=%s" % (left, right)
# =============================================================================
class S3ReusableField(object):
"""
DRY Helper for reusable fields:
This creates neither a Table nor a Field, but just
an argument store. The field is created with the __call__
method, which is faster than copying an existing field.
"""
def __init__(self, name, type="string", **attr):
self.name = name
self.__type = type
self.attr = Storage(attr)
def __call__(self, name=None, **attr):
if not name:
name = self.name
ia = Storage(self.attr)
if attr:
if not attr.get("empty", True):
requires = ia.requires
if requires:
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
r = requires[0]
if isinstance(r, IS_EMPTY_OR):
requires = r.other
ia.update(requires=requires)
if "empty" in attr:
del attr["empty"]
ia.update(**attr)
if "script" in ia:
if ia.script:
if ia.comment:
ia.comment = TAG[""](ia.comment, ia.script)
else:
ia.comment = ia.script
del ia["script"]
if ia.sortby is not None:
return FieldS3(name, self.__type, **ia)
else:
return Field(name, self.__type, **ia)
# =============================================================================
# Record identity meta-fields
# Use URNs according to http://tools.ietf.org/html/rfc4122
s3uuid = SQLCustomType(type = "string",
native = "VARCHAR(128)",
encoder = lambda x: "%s" % (uuid4().urn
if x == ""
else str(x.encode("utf-8"))),
decoder = lambda x: x)
if db and current.db._adapter.represent("X", s3uuid) != "'X'":
# Old web2py DAL, must add quotes in encoder
s3uuid = SQLCustomType(type = "string",
native = "VARCHAR(128)",
encoder = (lambda x: "'%s'" % (uuid4().urn
if x == ""
else str(x.encode("utf-8")).replace("'", "''"))),
decoder = (lambda x: x))
# Universally unique identifier for a record
s3_meta_uuid = S3ReusableField("uuid", type=s3uuid,
length=128,
notnull=True,
unique=True,
readable=False,
writable=False,
default="")
# Master-Copy-Index (for Sync)
s3_meta_mci = S3ReusableField("mci", "integer",
default=0,
readable=False,
writable=False)
def s3_uid():
return (s3_meta_uuid(),
s3_meta_mci())
# =============================================================================
# Record "soft"-deletion meta-fields
# "Deleted"-flag
s3_meta_deletion_status = S3ReusableField("deleted", "boolean",
readable=False,
writable=False,
default=False)
# Parked foreign keys of a deleted record in JSON format
# => to be restored upon "un"-delete
s3_meta_deletion_fk = S3ReusableField("deleted_fk", #"text",
readable=False,
writable=False)
# ID of the record replacing this record
# => for record merger (de-duplication)
s3_meta_deletion_rb = S3ReusableField("deleted_rb", "integer",
readable=False,
writable=False)
def s3_deletion_status():
return (s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_deletion_rb())
# =============================================================================
# Record timestamp meta-fields
s3_meta_created_on = S3ReusableField("created_on", "datetime",
readable=False,
writable=False,
default=lambda: datetime.utcnow())
s3_meta_modified_on = S3ReusableField("modified_on", "datetime",
readable=False,
writable=False,
default=lambda: datetime.utcnow(),
update=lambda: datetime.utcnow())
def s3_timestamp():
return (s3_meta_created_on(),
s3_meta_modified_on())
# =========================================================================
# Record authorship meta-fields
def s3_ownerstamp():
"""
Record ownership meta-fields
"""
db = current.db
auth = current.auth
session = current.session
# Individual user who owns the record
s3_meta_owned_by_user = S3ReusableField("owned_by_user", db.auth_user,
readable=False,
writable=False,
requires=None,
default=session.auth.user.id
if auth.is_logged_in()
else None,
represent=lambda id: \
id and s3_auth_user_represent(id) or \
current.messages.UNKNOWN_OPT,
ondelete="RESTRICT")
# Role of users who collectively own the record
s3_meta_owned_by_group = S3ReusableField("owned_by_group", "integer",
readable=False,
writable=False,
requires=None,
default=None,
represent=s3_auth_group_represent)
# Person Entity owning the record
s3_meta_owned_by_entity = S3ReusableField("owned_by_entity", "integer",
readable=False,
writable=False,
requires=None,
default=None,
# use a lambda here as we don't
# want the model to be loaded yet
represent=lambda val: \
current.s3db.pr_pentity_represent(val))
return (s3_meta_owned_by_user(),
s3_meta_owned_by_group(),
s3_meta_owned_by_entity())
# =========================================================================
def s3_meta_fields():
"""
Normal meta-fields added to every table
"""
db = current.db
auth = current.auth
session = current.session
if auth.is_logged_in():
current_user = session.auth.user.id
else:
current_user = None
# Author of a record
s3_meta_created_by = S3ReusableField("created_by", db.auth_user,
readable=False,
writable=False,
requires=None,
default=current_user,
represent=s3_auth_user_represent,
ondelete="RESTRICT")
# Last author of a record
s3_meta_modified_by = S3ReusableField("modified_by", db.auth_user,
readable=False,
writable=False,
requires=None,
default=current_user,
update=current_user,
represent=s3_auth_user_represent,
ondelete="RESTRICT")
# Approver of a record
s3_meta_approved_by = S3ReusableField("approved_by", db.auth_user,
readable=False,
writable=False,
requires=None,
represent=s3_auth_user_represent,
ondelete="RESTRICT")
fields = (s3_meta_uuid(),
s3_meta_mci(),
s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_deletion_rb(),
s3_meta_created_on(),
s3_meta_modified_on(),
s3_meta_created_by(),
s3_meta_modified_by(),
s3_meta_approved_by(),
)
fields = (fields + s3_ownerstamp())
return fields
def s3_all_meta_field_names():
return [field.name for field in s3_meta_fields()]
# =========================================================================
# Reusable roles fields
def s3_role_required():
"""
Role Required to access a resource
- used by GIS for map layer permissions management
"""
from s3validators import IS_ONE_OF
from s3widgets import S3AutocompleteWidget
T = current.T
db = current.db
f = S3ReusableField("role_required", db.auth_group,
sortby="role",
requires = IS_NULL_OR(IS_ONE_OF(db,
"auth_group.id",
"%(role)s",
zero=T("Public"))),
widget = S3AutocompleteWidget("admin",
"group",
fieldname="role"),
represent = s3_auth_group_represent,
label = T("Role Required"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Role Required"),
T("If this record should be restricted then select which role is required to access the record here."))),
ondelete = "RESTRICT")
return f()
# -------------------------------------------------------------------------
def s3_roles_permitted(name="roles_permitted", **attr):
"""
List of Roles Permitted to access a resource
- used by CMS
"""
from s3validators import IS_ONE_OF
T = current.T
if "label" not in attr:
label = T("Roles Permitted")
if "sortby" not in attr:
sortby = "role"
if "represent" not in attr:
represent = s3_auth_group_represent
if "requires" not in attr:
requires = IS_NULL_OR(IS_ONE_OF(current.db,
"auth_group.id",
"%(role)s",
multiple=True))
if "comment" not in attr:
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Roles Permitted"),
T("If this record should be restricted then select which role(s) are permitted to access the record here.")))
if "ondelete" not in attr:
ondelete = "RESTRICT"
f = S3ReusableField(name, "list:reference auth_group",
sortby = sortby,
requires = requires,
represent = represent,
# @ToDo
#widget = S3CheckboxesWidget(lookup_table_name = "auth_group",
# lookup_field_name = "role",
# multiple = True),
label = label,
comment = comment,
ondelete = ondelete)
return f()
# =============================================================================
# Lx
#
# These fields are populated onaccept from location_id
# - for many reads to fewer writes, this is faster than Virtual Fields
# - @ToDo: No need for virtual fields - replace with simple joins
#
# Labels that vary by country are set by gis.update_table_hierarchy_labels()
#
address_L4 = S3ReusableField("L4",
readable=False,
writable=False)
address_L3 = S3ReusableField("L3",
readable=False,
writable=False)
address_L2 = S3ReusableField("L2",
readable=False,
writable=False)
address_L1 = S3ReusableField("L1",
readable=False,
writable=False)
address_L0 = S3ReusableField("L0",
readable=False,
writable=False)
def s3_lx_fields():
"""
Return the fields used to report on resources by location
"""
fields = (
address_L4(),
address_L3(),
address_L2(),
address_L1(),
address_L0(label=current.T("Country")),
)
return fields
# -----------------------------------------------------------------------------
def s3_lx_onvalidation(form):
"""
Write the Lx fields from the Location
- used by pr_person, hrm_training, irs_ireport
@ToDo: Allow the reverse operation.
If these fields are populated then create/update the location
"""
vars = form.vars
if "location_id" in vars and vars.location_id:
db = current.db
table = current.s3db.gis_location
query = (table.id == vars.location_id)
location = db(query).select(table.name,
table.level,
table.parent,
table.path,
limitby=(0, 1)).first()
if location:
if location.level == "L0":
vars.L0 = location.name
elif location.level == "L1":
vars.L1 = location.name
if location.parent:
query = (table.id == location.parent)
country = db(query).select(table.name,
limitby=(0, 1)).first()
if country:
vars.L0 = country.name
else:
# Get Names of ancestors at each level
vars = current.gis.get_parent_per_level(vars,
vars.location_id,
feature=location,
ids=False,
names=True)
# -----------------------------------------------------------------------------
def s3_lx_update(table, record_id):
"""
Write the Lx fields from the Location
- used by hrm_human_resource & pr_address
@ToDo: Allow the reverse operation.
If these fields are populated then create/update the location
"""
if "location_id" in table:
db = current.db
ltable = current.s3db.gis_location
query = (table.id == record_id) & \
(ltable.id == table.location_id)
location = db(query).select(ltable.id,
ltable.name,
ltable.level,
ltable.parent,
ltable.path,
limitby=(0, 1)).first()
if location:
vars = Storage()
if location.level == "L0":
vars.L0 = location.name
elif location.level == "L1":
vars.L1 = location.name
if location.parent:
query = (ltable.id == location.parent)
country = db(query).select(ltable.name,
limitby=(0, 1)).first()
if country:
vars.L0 = country.name
else:
# Get Names of ancestors at each level
vars = current.gis.get_parent_per_level(vars,
location.id,
feature=location,
ids=False,
names=True)
# Update record
db(table.id == record_id).update(**vars)
# =============================================================================
# Addresses
#
# These fields are populated onaccept from location_id
#
# @ToDo: Add Postcode to gis.update_table_hierarchy_labels()
#
address_building_name = S3ReusableField("building_name",
readable=False,
writable=False)
address_address = S3ReusableField("address",
readable=False,
writable=False)
address_postcode = S3ReusableField("postcode",
readable=False,
writable=False)
def s3_address_fields():
"""
Return the fields used to add an address to a site
"""
T = current.T
fields = (
address_building_name(label=T("Building Name")),
address_address(label=T("Address")),
address_postcode(label=current.deployment_settings.get_ui_label_postcode()),
address_L4(),
address_L3(),
address_L2(),
address_L1(),
address_L0(),
)
return fields
# -----------------------------------------------------------------------------
# Hide Address fields in Create forms
# inc list_create (list_fields over-rides)
def s3_address_hide(table):
table.building_name.readable = False
table.address.readable = False
table.L4.readable = False
table.L3.readable = False
table.L2.readable = False
table.L1.readable = False
table.L0.readable = False
table.postcode.readable = False
return
# -----------------------------------------------------------------------------
def s3_address_onvalidation(form):
"""
Write the Address fields from the Location
- used by pr_address, org_office & cr_shelter
@ToDo: Allow the reverse operation.
If these fields are populated then create/update the location
"""
vars = form.vars
if "location_id" in vars and vars.location_id:
db = current.db
table = current.s3db.gis_location
# Read Postcode & Street Address
query = (table.id == vars.location_id)
location = db(query).select(table.addr_street,
table.addr_postcode,
table.name,
table.level,
table.parent,
table.path,
limitby=(0, 1)).first()
if location:
vars.address = location.addr_street
vars.postcode = location.addr_postcode
if location.level == "L0":
vars.L0 = location.name
elif location.level == "L1":
vars.L1 = location.name
if location.parent:
query = (table.id == location.parent)
country = db(query).select(table.name,
limitby=(0, 1)).first()
if country:
vars.L0 = country.name
else:
if location.level is None:
vars.building_name = location.name
# Get Names of ancestors at each level
vars = current.gis.get_parent_per_level(vars,
vars.location_id,
feature=location,
ids=False,
names=True)
# -----------------------------------------------------------------------------
def s3_address_update(table, record_id):
"""
Write the Address fields from the Location
- used by asset_asset & hrm_human_resource
@ToDo: Allow the reverse operation.
If these fields are populated then create/update the location
"""
if "location_id" in table:
db = current.db
ltable = current.s3db.gis_location
# Read Postcode & Street Address
query = (table.id == record_id) & \
(ltable.id == table.location_id)
location = db(query).select(ltable.id,
ltable.addr_street,
ltable.addr_postcode,
ltable.name,
ltable.level,
ltable.parent,
ltable.path,
limitby=(0, 1)).first()
if location:
vars = Storage()
vars.address = location.addr_street
vars.postcode = location.addr_postcode
if location.level == "L0":
vars.L0 = location.name
elif location.level == "L1":
vars.L1 = location.name
if location.parent:
query = (ltable.id == location.parent)
country = db(query).select(ltable.name,
limitby=(0, 1)).first()
if country:
vars.L0 = country.name
else:
if location.level is None:
vars.building_name = location.name
# Get Names of ancestors at each level
vars = current.gis.get_parent_per_level(vars,
location.id,
feature=location,
ids=False,
names=True)
# Update record
db(table.id == record_id).update(**vars)
# =============================================================================
# Comments
#
def s3_comments(name="comments", **attr):
"""
Return a standard Comments field
"""
from s3widgets import s3_comments_widget
T = current.T
if "label" not in attr:
attr["label"] = T("Comments")
if "widget" not in attr:
attr["widget"] = s3_comments_widget
if "comment" not in attr:
attr["comment"] = DIV(_class="tooltip",
_title="%s|%s" % \
(T("Comments"),
T("Please use this field to record any additional information, including a history of the record if it is updated.")))
f = S3ReusableField(name, "text",
**attr)
return f()
# =============================================================================
# Currency field
#
# @ToDo: Move to a Finance module
#
def s3_currency(name="currency", **attr):
"""
Return a standard Currency field
"""
settings = current.deployment_settings
if "label" not in attr:
attr["label"] = current.T("Currency")
if "default" not in attr:
attr["default"] = settings.get_fin_currency_default()
if "requires" not in attr:
currency_opts = settings.get_fin_currencies()
attr["requires"] = IS_IN_SET(currency_opts.keys(),
zero=None)
if "writable" not in attr:
attr["writable"] = settings.get_fin_currency_writable()
f = S3ReusableField(name, length=3,
**attr)
return f()
# =============================================================================
# Date field
#
# @ToDo: s3_datetime
#
def s3_date(name="date", **attr):
"""
Return a standard Date field
        Additional options to normal S3ReusableField:
default == "now" (in addition to usual meanings)
past = x months
future = x months
"""
if "past" in attr:
past = attr["past"]
del attr["past"]
else:
past = None
if "future" in attr:
future = attr["future"]
del attr["future"]
else:
future = None
if "default" in attr and attr["default"] == "now":
attr["default"] = current.request.utcnow
if "label" not in attr:
attr["label"] = current.T("Date")
if "represent" not in attr:
represent = S3DateTime.date_represent
if "requires" not in attr:
if past is None and future is None:
requires = IS_DATE(
format=current.deployment_settings.get_L10n_date_format()
)
else:
now = current.request.utcnow.date()
current_month = now.month
if past is None:
future_month = now.month + future
if future_month <= 12:
max = now.replace(month=future_month)
else:
current_year = now.year
years = int(future_month/12)
future_year = current_year + years
future_month = future_month - (years * 12)
max = now.replace(year=future_year,
month=future_month)
requires = IS_DATE_IN_RANGE(
format=current.deployment_settings.get_L10n_date_format(),
maximum=max,
error_message=current.T("Date must be %(max)s or earlier!")
)
elif future is None:
if past < current_month:
min = now.replace(month=current_month - past)
else:
current_year = now.year
past_years = int(past/12)
past_months = past - (past_years * 12)
min = now.replace(year=current_year - past_years,
month=current_month - past_months)
requires = IS_DATE_IN_RANGE(
format=current.deployment_settings.get_L10n_date_format(),
minimum=min,
error_message=current.T("Date must be %(min)s or later!")
)
else:
future_month = now.month + future
if future_month < 13:
max = now.replace(month=future_month)
else:
current_year = now.year
years = int(future_month/12)
future_year = now.year + years
future_month = future_month - (years * 12)
max = now.replace(year=future_year,
month=future_month)
if past < current_month:
min = now.replace(month=current_month - past)
else:
current_year = now.year
past_years = int(past/12)
past_months = past - (past_years * 12)
min = now.replace(year=current_year - past_years,
month=current_month - past_months)
requires = IS_DATE_IN_RANGE(
format=current.deployment_settings.get_L10n_date_format(),
maximum=max,
minimum=min,
error_message=current.T("Date must be between %(min)s and %(max)s!")
)
if "empty" in attr:
if attr["empty"] is False:
attr["requires"] = requires
else:
attr["requires"] = IS_EMPTY_OR(requires)
del attr["empty"]
else:
# Default
attr["requires"] = IS_EMPTY_OR(requires)
if "widget" not in attr:
if past is None and future is None:
attr["widget"] = S3DateWidget()
elif past is None:
attr["widget"] = S3DateWidget(future=future)
elif future is None:
attr["widget"] = S3DateWidget(past=past)
else:
attr["widget"] = S3DateWidget(past=past, future=future)
f = S3ReusableField(name, "date", **attr)
return f()
# END =========================================================================
|
mit
| -5,980,965,802,391,904,000 | 36.914474 | 153 | 0.438834 | false | 4.961687 | false | false | false |
pz325/hornbook-django
|
apps/hornbook_api/hanzi_base.py
|
1
|
3751
|
# -*- coding: utf-8 -*-
TONES = (
(0, u'轻声'),
(1, u'一声'),
(2, u'二声'),
(3, u'三声'),
(4, u'四声')
)
INITIALS = (
('b', 'b'),
('p', 'p'),
('m', 'm'),
('f', 'f'),
('d', 'd'),
('t', 't'),
('n', 'n'),
('l', 'l'),
('g', 'g'),
('k', 'k'),
('h', 'h'),
('j', 'j'),
('q', 'q'),
('x', 'x'),
('zh', 'zh'),
('ch', 'ch'),
('sh', 'sh'),
('r', 'r'),
('z', 'z'),
('c', 'c'),
('s', 's'),
('y', 'y'),
('w', 'w'),
)
FINALS = (
('a', 'a'),
('o', 'o'),
('e', 'e'),
('i', 'i'),
('u', 'u'),
('v', u'ü'),
('ia', 'ia'),
('ua', 'ua'),
('uo', 'uo'),
('ie', 'ie'),
('ve', u'üe'),
('ai', 'ai'),
('uai', 'uai'),
('ei', 'ei'),
('ui', 'ui'),
('ao', 'ao'),
('iao', 'iao'),
('ou', 'ou'),
('iu', 'iu'),
('an', 'an'),
('ian', 'ian'),
('uan', 'uan'),
('van', u'üan'),
('en', 'en'),
('in', 'in'),
('un', 'un'),
('vn', u'ün'),
    ('ang', 'ang'),
    ('iang', 'iang'),
    ('uang', 'uang'),
    ('eng', 'eng'),
    ('ing', 'ing'),
    ('ong', 'ong')
)
FINALSTR = {
'a': 'a',
'o': 'o',
'e': 'e',
'i': 'i',
'u': 'u',
'v': u'ü',
'ia': 'ia',
'ua': 'ua',
'uo': 'uo',
'ie': 'ie',
've': u'üe',
'ai': 'ai',
'uai': 'uai',
'ei': 'ei',
'ui': 'ui',
'ao': 'ao',
'iao': 'iao',
'ou': 'ou',
'iu': 'iu',
'an': 'an',
'ian': 'ian',
'uan': 'uan',
'van': u'üan',
'en': 'en',
'in': 'in',
'un': 'un',
'vn': u'ün',
    'ang': 'ang',
    'iang': 'iang',
    'uang': 'uang',
    'eng': 'eng',
    'ing': 'ing',
    'ong': 'ong'
}
ATONES = u'ā á ǎ à'.split(' ')
OTONES = u'ō ó ǒ ò'.split(' ')
ETONES = u'ē é ě è'.split(' ')
ITONES = u'ī í ǐ ì'.split(' ')
UTONES = u'ū ú ǔ ù'.split(' ')
YUTONES = u'ǖ ǘ ǚ ǜ'.split(' ')
TONE_ANNOTATION_REPLACEMENTS = {
    'a': ATONES,
    'o': OTONES,
    'e': ETONES,
    'i': ITONES,
    'u': UTONES,
    'v': YUTONES
}
TONE_ANNOTATIONS = {
'a': 'a',
'o': 'o',
'e': 'e',
'i': 'i',
'u': 'u',
'v': 'v', # ü
'ia': 'a',
'ua': 'a',
'uo': 'o',
'ie': 'e',
've': 'e', # üe
'ai': 'a',
'uai': 'a',
'ei': 'e',
'ui': 'i',
'ao': 'a',
'iao': 'a',
'ou': 'o',
'iu': 'u',
'an': 'a',
'ian': 'a',
'uan': 'a',
'van': 'a', # üan
'en': 'e',
'in': 'i', # in
    'un': 'u',
    'vn': 'v', # ün
'ang': 'a',
'iang': 'a',
'uang': 'a',
'eng': 'e',
'ing': 'i',
'ong': 'o',
}
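# TONE_ANNOTATIONS records which vowel of a final carries the tone mark;
# TONE_ANNOTATION_REPLACEMENTS supplies the tonated variants of that vowel,
# e.g. getPinyinStr('h', 'ao', 3) -> 'hǎo' (tone mark on the 'a').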
def getPinyinStr(initial, final, tone):
'''
Generate tonated pinyin string
e.g. initial = b, final = a, tone = 3, pinyinStr = bǎ
@param initial
@param final ü input as 'v'
@tone
@return tonated pinyin string
'''
finalStr = FINALSTR[final]
if tone == 0:
return initial+finalStr
replace = TONE_ANNOTATIONS[final]
tonatedFinal = []
for c in final:
if c == replace:
tonatedFinal.append(TONE_ANNOTATION_REPLACEMENTS[replace][tone-1])
else:
tonatedFinal.append(c)
f = ''.join(tonatedFinal)
return initial+f
# TODO: complete this radical table
RADIX = (
(u'艹', u'草字头'),
(u'木', u'木字旁'),
(u'', u'独体字'),
(u'冫', u'两点水儿'),
(u'冖', u'秃宝盖儿'),
(u'讠', u'言字旁儿'),
(u'厂', u'偏厂儿'),
)
# final = '''a o e i u ü ia ua uo ie üe ai uai ei ui ao iao ou iu an ian uan üan en in un ün ang iang uang eng ing ong'''
# finals = ',\n'.join(["('{f}', '{f}')".format(f=f) for f in final.split(' ')])
# x = 'ā ɑ a'
|
bsd-3-clause
| -8,652,714,250,542,228,000 | 17.408163 | 121 | 0.34867 | false | 2.176116 | false | false | false |
dkulikovsky/graphite-ch-web
|
webapp/graphite/render/views.py
|
1
|
20253
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
import math
import pytz
from datetime import datetime
import sys
import signal
from time import time, mktime
from random import shuffle
from httplib import CannotSendRequest
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
from cgi import parse_qs
from cStringIO import StringIO
from multiprocessing import Process, Queue
try:
import cPickle as pickle
except ImportError:
import pickle
try: # See if there is a system installation of pytz first
import pytz
except ImportError: # Otherwise we fall back to Graphite's bundled version
from graphite.thirdparty import pytz
from graphite.util import getProfileByUsername, getProfile, json, unpickle
from graphite.remote_storage import HTTPConnectionWithTimeout
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget
from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData, hashRequestWTime
from graphite.render.glyph import GraphTypes
from django.http import HttpResponse, HttpResponseServerError, HttpResponseRedirect
from django.utils.datastructures import MultiValueDict
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
def renderView(request):
start = time()
try:
global_timeout_duration = getattr(settings, 'RENDER_DURATION_TIMEOUT')
except:
global_timeout_duration = 60
if request.REQUEST.has_key('json_request'):
(graphOptions, requestOptions) = parseDataOptions(request.REQUEST['json_request'])
elif request.is_ajax() and request.method == 'POST':
(graphOptions, requestOptions) = parseDataOptions(request.raw_post_data)
else:
(graphOptions, requestOptions) = parseOptions(request)
useCache = 'noCache' not in requestOptions
cacheTimeout = requestOptions['cacheTimeout']
requestContext = {
'startTime' : requestOptions['startTime'],
'endTime' : requestOptions['endTime'],
'localOnly' : requestOptions['localOnly'],
'data' : []
}
data = requestContext['data']
# add template to graphOptions
try:
user_profile = getProfile(request, allowDefault=False)
graphOptions['defaultTemplate'] = user_profile.defaultTemplate
except:
graphOptions['defaultTemplate'] = "default"
if request.method == 'GET':
cache_request_obj = request.GET.copy()
else:
cache_request_obj = request.POST.copy()
# hack request object to add defaultTemplate param
cache_request_obj.appendlist("template", graphOptions['defaultTemplate'])
# First we check the request cache
requestKey = hashRequest(cache_request_obj)
requestHash = hashRequestWTime(cache_request_obj)
requestContext['request_key'] = requestHash
request_data = ""
if request.method == "POST":
for k,v in request.POST.items():
request_data += "%s=%s&" % (k.replace("\t",""),v.replace("\t",""))
else:
request_data = request.META['QUERY_STRING']
log.info("DEBUG:Request_meta:[%s]\t%s\t%s\t%s\t\"%s\"" %\
(requestHash,\
request.META['REMOTE_ADDR'],\
request.META['REQUEST_METHOD'],\
request_data,\
request.META['HTTP_USER_AGENT']))
if useCache:
cachedResponse = cache.get(requestKey)
if cachedResponse:
log.cache('Request-Cache hit [%s]' % requestHash)
log.rendering('[%s] Returned cached response in %.6f' % (requestHash, (time() - start)))
log.info("RENDER:[%s]:Timings:Cached %.5f" % (requestHash, time() - start))
return cachedResponse
else:
log.cache('Request-Cache miss [%s]' % requestHash)
# Now we prepare the requested data
if requestOptions['graphType'] == 'pie':
for target in requestOptions['targets']:
if target.find(':') >= 0:
try:
name,value = target.split(':',1)
value = float(value)
except:
raise ValueError("Invalid target '%s'" % target)
data.append( (name,value) )
else:
q = Queue(maxsize=1)
p = Process(target = evaluateWithQueue, args = (q, requestContext, target))
p.start()
seriesList = None
try:
seriesList = q.get(True, global_timeout_duration)
p.join()
except Exception, e:
log.info("DEBUG:[%s] got an exception on trying to get seriesList from queue, error: %s" % (requestHash,e))
p.terminate()
return errorPage("Failed to fetch data")
if seriesList == None:
log.info("DEBUG:[%s] request timed out" % requestHash)
p.terminate()
return errorPage("Request timed out")
for series in seriesList:
func = PieFunctions[requestOptions['pieMode']]
data.append( (series.name, func(requestContext, series) or 0 ))
elif requestOptions['graphType'] == 'line':
# Let's see if at least our data is cached
if useCache:
targets = requestOptions['targets']
startTime = requestOptions['startTime']
endTime = requestOptions['endTime']
dataKey = hashData(targets, startTime, endTime)
cachedData = cache.get(dataKey)
if cachedData:
log.cache("Data-Cache hit [%s]" % dataKey)
else:
log.cache("Data-Cache miss [%s]" % dataKey)
else:
cachedData = None
if cachedData is not None:
requestContext['data'] = data = cachedData
log.rendering("[%s] got data cache Retrieval" % requestHash)
else: # Have to actually retrieve the data now
# best place for multiprocessing
log.info("DEBUG:render:[%s] targets [ %s ]" % (requestHash, requestOptions['targets']))
start_t = time()
for target in requestOptions['targets']:
if not target.strip():
continue
t = time()
q = Queue(maxsize=1)
p = Process(target = evaluateWithQueue, args = (q, requestContext, target))
p.start()
seriesList = None
try:
seriesList = q.get(True, global_timeout_duration)
p.join()
except Exception, e:
log.info("DEBUG:[%s] got an exception on trying to get seriesList from queue, error: %s" % (requestHash, e))
p.terminate()
return errorPage("Failed to fetch data")
if seriesList == None:
log.info("DEBUG:[%s] request timed out" % requestHash)
p.terminate()
return errorPage("Request timed out")
data.extend(seriesList)
log.rendering("[%s] Retrieval took %.6f" % (requestHash, (time() - start_t)))
      log.info("RENDER:[%s]:Timings:Retrieve %.6f" % (requestHash, (time() - start_t)))
if useCache:
cache.add(dataKey, data, cacheTimeout)
# If data is all we needed, we're done
format = requestOptions.get('format')
if format == 'csv':
response = HttpResponse(content_type='text/csv')
writer = csv.writer(response, dialect='excel')
for series in data:
for i, value in enumerate(series):
timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))
return response
if format == 'json':
series_data = []
if 'maxDataPoints' in requestOptions and any(data):
startTime = min([series.start for series in data])
endTime = max([series.end for series in data])
timeRange = endTime - startTime
maxDataPoints = requestOptions['maxDataPoints']
for series in data:
if len(set(series)) == 1 and series[0] is None: continue
numberOfDataPoints = timeRange/series.step
if maxDataPoints < numberOfDataPoints:
valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
secondsPerPoint = int(valuesPerPoint * series.step)
# Nudge start over a little bit so that the consolidation bands align with each call
# removing 'jitter' seen when refreshing.
nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
series.start = series.start + nudge
valuesToLose = int(nudge/series.step)
for r in range(1, valuesToLose):
del series[0]
series.consolidate(valuesPerPoint)
timestamps = range(int(series.start), int(series.end)+1, int(secondsPerPoint))
else:
timestamps = range(int(series.start), int(series.end)+1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
else:
for series in data:
if len(set(series)) == 1 and series[0] is None: continue
timestamps = range(int(series.start), int(series.end)+1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
if 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
content_type='text/javascript')
else:
response = HttpResponse(content=json.dumps(series_data), content_type='application/json')
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
if format == 'raw':
response = HttpResponse(content_type='text/plain')
for series in data:
response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
response.write( ','.join(map(str,series)) )
response.write('\n')
log.rendering('Total rawData rendering time %.6f' % (time() - start))
return response
if format == 'svg':
graphOptions['outputFormat'] = 'svg'
if format == 'pickle':
response = HttpResponse(content_type='application/pickle')
seriesInfo = [series.getInfo() for series in data]
pickle.dump(seriesInfo, response, protocol=-1)
log.rendering('Total pickle rendering time %.6f' % (time() - start))
return response
start_render_time = time()
# We've got the data, now to render it
graphOptions['data'] = data
if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
image = delegateRendering(requestOptions['graphType'], graphOptions)
else:
image = doImageRender(requestOptions['graphClass'], graphOptions)
log.info("RENDER:[%s]:Timings:imageRender %.5f" % (requestHash, time() - start_render_time))
useSVG = graphOptions.get('outputFormat') == 'svg'
if useSVG and 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
content_type='text/javascript')
else:
response = buildResponse(image, useSVG and 'image/svg+xml' or 'image/png')
if useCache:
cache.set(requestKey, response, cacheTimeout)
log.rendering('[%s] Total rendering time %.6f seconds' % (requestHash, (time() - start)))
log.info("RENDER:[%s]:Timings:Total %.5f" % (requestHash, time() - start))
return response
def parseOptions(request):
queryParams = request.REQUEST
return parseOptionsDictionary(queryParams)
def parseDataOptions(data):
queryParams = MultiValueDict()
try:
options = json.loads(data)
for k,v in options.items():
if isinstance(v, list):
queryParams.setlist(k, v)
else:
queryParams[k] = unicode(v)
except:
log.exception('json_request decode error')
return parseOptionsDictionary(queryParams)
def parseOptionsDictionary(queryParams):
# Start with some defaults
graphOptions = {'width' : 330, 'height' : 250}
requestOptions = {}
graphType = queryParams.get('graphType','line')
assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
graphClass = GraphTypes[graphType]
# Fill in the requestOptions
requestOptions['graphType'] = graphType
requestOptions['graphClass'] = graphClass
requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
requestOptions['cacheTimeout'] = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
requestOptions['targets'] = []
# Extract the targets out of the queryParams
mytargets = []
# json_request format
if len(queryParams.getlist('targets')) > 0:
mytargets = queryParams.getlist('targets')
# Normal format: ?target=path.1&target=path.2
if len(queryParams.getlist('target')) > 0:
mytargets = queryParams.getlist('target')
# Rails/PHP/jQuery common practice format: ?target[]=path.1&target[]=path.2
elif len(queryParams.getlist('target[]')) > 0:
mytargets = queryParams.getlist('target[]')
# Collect the targets
for target in mytargets:
requestOptions['targets'].append(target)
if 'pickle' in queryParams:
requestOptions['format'] = 'pickle'
if 'rawData' in queryParams:
requestOptions['format'] = 'raw'
if 'format' in queryParams:
requestOptions['format'] = queryParams['format']
if 'jsonp' in queryParams:
requestOptions['jsonp'] = queryParams['jsonp']
if 'noCache' in queryParams:
requestOptions['noCache'] = True
if 'maxDataPoints' in queryParams and queryParams['maxDataPoints'].isdigit():
requestOptions['maxDataPoints'] = int(queryParams['maxDataPoints'])
requestOptions['localOnly'] = queryParams.get('local') == '1'
# Fill in the graphOptions
for opt in graphClass.customizable:
if opt in queryParams:
val = unicode(queryParams[opt])
if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and 'color' not in opt.lower():
val = int(val)
elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
val = float(val)
elif val.lower() in ('true','false'):
val = val.lower() == 'true'
elif val.lower() == 'default' or val == '':
continue
graphOptions[opt] = val
tzinfo = pytz.timezone(settings.TIME_ZONE)
if 'tz' in queryParams:
try:
tzinfo = pytz.timezone(queryParams['tz'])
except pytz.UnknownTimeZoneError:
pass
requestOptions['tzinfo'] = tzinfo
# Get the time interval for time-oriented graph types
if graphType == 'line' or graphType == 'pie':
if 'until' in queryParams:
untilTime = parseATTime(queryParams['until'], tzinfo)
else:
untilTime = parseATTime('now', tzinfo)
if 'from' in queryParams:
fromTime = parseATTime(queryParams['from'], tzinfo)
else:
fromTime = parseATTime('-1d', tzinfo)
startTime = min(fromTime, untilTime)
endTime = max(fromTime, untilTime)
assert startTime != endTime, "Invalid empty time range"
requestOptions['startTime'] = startTime
requestOptions['endTime'] = endTime
return (graphOptions, requestOptions)
connectionPools = {}
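# delegateRendering keeps a small pool of reusable HTTP connections per
# rendering host; a connection is popped from the pool for a request and put
# back only if the remote render succeeded.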
def delegateRendering(graphType, graphOptions):
start = time()
postData = graphType + '\n' + pickle.dumps(graphOptions)
servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
shuffle(servers)
for server in servers:
start2 = time()
try:
# Get a connection
try:
pool = connectionPools[server]
except KeyError: #happens the first time
pool = connectionPools[server] = set()
try:
connection = pool.pop()
except KeyError: #No available connections, have to make a new one
connection = HTTPConnectionWithTimeout(server)
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
# Send the request
try:
connection.request('POST','/render/local/', postData)
except CannotSendRequest:
connection = HTTPConnectionWithTimeout(server) #retry once
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
connection.request('POST', '/render/local/', postData)
# Read the response
response = connection.getresponse()
assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
contentType = response.getheader('Content-Type')
imageData = response.read()
assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
assert imageData, "Received empty response from %s" % server
# Wrap things up
log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
pool.add(connection)
return imageData
except:
log.exception("Exception while attempting remote rendering request on %s" % server)
log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
continue
def renderLocalView(request):
try:
start = time()
reqParams = StringIO(request.body)
graphType = reqParams.readline().strip()
optionsPickle = reqParams.read()
reqParams.close()
graphClass = GraphTypes[graphType]
options = unpickle.loads(optionsPickle)
image = doImageRender(graphClass, options)
log.rendering("Delegated rendering request took %.6f seconds" % (time() - start))
return buildResponse(image)
except:
log.exception("Exception in graphite.render.views.rawrender")
return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
profile = getProfileByUsername(username)
if not profile:
return errorPage("No such user '%s'" % username)
try:
graph = profile.mygraph_set.get(name=graphName)
except ObjectDoesNotExist:
return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))
request_params = dict(request.REQUEST.items())
if request_params:
url_parts = urlsplit(graph.url)
query_string = url_parts[3]
if query_string:
url_params = parse_qs(query_string)
# Remove lists so that we can do an update() on the dict
for param, value in url_params.items():
if isinstance(value, list) and param != 'target':
url_params[param] = value[-1]
url_params.update(request_params)
# Handle 'target' being a list - we want duplicate &target params out of it
url_param_pairs = []
for key,val in url_params.items():
if isinstance(val, list):
for v in val:
url_param_pairs.append( (key,v) )
else:
url_param_pairs.append( (key,val) )
query_string = urlencode(url_param_pairs)
url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
else:
url = graph.url
return HttpResponseRedirect(url)
def doImageRender(graphClass, graphOptions):
pngData = StringIO()
t = time()
img = graphClass(**graphOptions)
img.output(pngData)
log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
imageData = pngData.getvalue()
pngData.close()
return imageData
def buildResponse(imageData, content_type="image/png"):
response = HttpResponse(imageData, content_type=content_type)
response['Cache-Control'] = 'no-cache'
response['Pragma'] = 'no-cache'
return response
def errorPage(message):
template = loader.get_template('500.html')
context = Context(dict(message=message))
return HttpResponseServerError( template.render(context) )
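# evaluateWithQueue runs target evaluation in a child process and pushes the
# result onto a single-slot queue; renderView reads it with a timeout
# (RENDER_DURATION_TIMEOUT, 60s by default) so a slow backend cannot block the
# request indefinitely.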
def evaluateWithQueue(queue, requestContext, target):
result = evaluateTarget(requestContext, target)
queue.put_nowait(result)
return
|
apache-2.0
| 8,425,600,202,721,885,000 | 35.958029 | 125 | 0.666914 | false | 3.913623 | false | false | false |
dbarbier/privot
|
python/test/t_AnalyticalResult_std.py
|
1
|
2108
|
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
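# printNumericalPoint formats a point to a fixed number of digits, flushing
# values smaller than the precision threshold to +0.000000 so the reference
# output stays stable across platforms.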
def printNumericalPoint(point, digits) :
oss = "["
eps = pow(0.1, digits)
for i in range(point.getDimension()) :
if i == 0 :
sep = ""
else :
sep = ","
if fabs(point[i]) < eps :
oss += sep + '%.6f' % fabs(point[i])
else :
oss += sep + '%.6f' % point[i]
sep = ","
oss += "]"
return oss
try :
# We create a numerical math function
myFunction = NumericalMathFunction("poutre")
dim = myFunction.getInputDimension()
# We create a normal distribution point of dimension 1
mean = NumericalPoint(dim, 0.0)
mean[0] = 50.0 # E
mean[1] = 1.0 # F
mean[2] = 10.0 # L
mean[3] = 5.0 # I
sigma = NumericalPoint(dim, 1.0)
R = IdentityMatrix(dim)
myDistribution = Normal(mean, sigma, R)
# We create a 'usual' RandomVector from the Distribution
vect = RandomVector(myDistribution)
# We create a composite random vector
output = RandomVector(myFunction, vect)
# We create an Event from this RandomVector
myEvent = Event(output, ComparisonOperator(Less()), -3.0)
# We create an AnalyticalResult based on fictive results
result = AnalyticalResult(sigma, myEvent, False)
print "result=", result
digits = 5
print "standard space design point=", printNumericalPoint(result.getStandardSpaceDesignPoint(), digits)
print "physical space design point=", printNumericalPoint(result.getPhysicalSpaceDesignPoint(), digits)
print "is standard point origin in failure space? ", result.getIsStandardPointOriginInFailureSpace()
print "importance factors=", printNumericalPoint(result.getImportanceFactors(), digits)
print "Hasofer reliability index=%.5f" % result.getHasoferReliabilityIndex()
print "graph importance factors=", result.drawImportanceFactors()
print "graph sensitivity=", result.drawHasoferReliabilityIndexSensitivity()
except :
import sys
print "t_AnalyticalResult_std.py", sys.exc_type, sys.exc_value
|
lgpl-3.0
| 6,099,109,483,741,794,000 | 31.9375 | 107 | 0.659393 | false | 3.61578 | false | false | false |
fwilson42/irc2
|
irc2/ircd/channel.py
|
1
|
1684
|
import collections
import time
from . import utils
from .numerics import *
from ..utils import join_max_length
class Channel:
def __init__(self, name):
self.name = name
self.ts = time.time()
self.topic = "haha yes look a topic"
self.topic_set_at = time.time()
self.topic_belongs_to = ""
self.members = dict()
self.modes = collections.defaultdict(lambda: None)
def add(self, client):
# update state
client.data["channels"].add(self)
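        # the first member to join an empty channel gets ops ("o");
        # everyone who joins afterwards starts with no prefix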
self.members[client] = "" if self.members else "o"
# send JOIN
self.send(client.hostmask(), "JOIN", self.name)
# send TOPIC
if self.topic:
client.send_numeric(RPL_TOPIC, self.name, self.topic)
client.send_numeric(RPL_TOPICBY, self.name, self.topic_belongs_to, str(self.topic_set_at))
# send NAMES
names = [(utils.prefixes[value[0]] if value else "") + key.data["nickname"]
for key, value in
sorted(self.members.items(), key=lambda k: k[0].data["nickname"])]
while names:
cur, names = join_max_length(names, " ")
client.send_numeric(RPL_NAMREPLY, "=", self.name, cur)
client.send_numeric(RPL_ENDOFNAMES, self.name, "End of NAMES list.")
def send(self, *data):
for member in self.members:
member.send(*data)
def send_except(self, exc, *data):
for member in self.members:
if member != exc:
member.send(*data)
class Channels(dict):
def __missing__(self, key):
self[key] = Channel(key)
return self[key]
channels = Channels()
|
bsd-3-clause
| -2,169,495,262,614,063,600 | 29.618182 | 102 | 0.576603 | false | 3.62931 | false | false | false |
USGSDenverPychron/pychron
|
pychron/image/toupcam/viewer.py
|
1
|
4772
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, Instance, Button, Event, Range, on_trait_change
from traitsui.api import View, UItem, Item, HGroup, VGroup, spring
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.ctx_managers import no_update
from pychron.core.helpers.filetools import unique_path2
from pychron.core.ui.qt.camera_editor import CameraEditor
from pychron.image.toupcam.camera import ToupCamCamera
from pychron.paths import paths
class D(HasTraits):
camera = Instance(ToupCamCamera, ())
save_button = Button
save_event = Event
awb_button = Button
contrast_default_button = Button('Defaults')
hue_default_button = Button('Defaults')
temperature = Range(2000, 15000, mode='slider')
tint = Range(200, 2500, mode='slider')
hue = Range(-180, 180, mode='slider')
saturation = Range(0, 255, mode='slider')
brightness = Range(-64, 64, mode='slider')
contrast = Range(-100, 100, mode='slider')
gamma = Range(0, 180, mode='slider')
_no_update = False
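    # _no_update guards against feedback loops: while the UI traits are being
    # synced from the camera (via the no_update context manager) the
    # trait-change handlers skip pushing the same values back to the camera.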
def activate(self):
self.camera.open()
self._update_color()
# handlers
def _awb_button_fired(self):
self.camera.do_awb(self._update_temptint)
def _save_button_fired(self):
# p = '/Users/ross/Desktop/output_uint8.jpg'
p, _ = unique_path2(paths.sample_image_dir, 'nosample', extension='.tiff')
self.camera.save(p)
# self.save_event = p
def _hue_default_button_fired(self):
self.trait_set(hue=0, saturation=128, brightness=0)
def _contrast_default_button_fired(self):
self.trait_set(contrast=0, gamma=100)
@on_trait_change('hue,saturation,brightness,contrast,gamma')
def _handle_color_change(self, name, new):
if not self._no_update:
getattr(self.camera, 'set_{}'.format(name))(new)
def _temperature_changed(self):
self._set_temp_tint()
def _tint_changed(self):
self._set_temp_tint()
# private
def _update_temptint(self, args=None):
if args is None:
args = self.camera.get_temperature_tint()
if args:
with no_update(self):
self.trait_set(temperature=int(args[0]), tint=int(args[1]))
def _set_temp_tint(self):
if not self._no_update:
self.camera.set_temperature_tint(self.temperature, self.tint)
def _update_color(self):
self._update_temptint()
with no_update(self):
d = {k: getattr(self.camera, 'get_{}'.format(k))() for k in
('hue', 'saturation', 'brightness', 'contrast', 'gamma')}
self.trait_set(**d)
def traits_view(self):
hue_grp = VGroup(HGroup(spring, UItem('hue_default_button')),
Item('hue'),
Item('saturation'),
Item('brightness'),
show_border=True,
label='Hue/Saturation/Brightness')
c_gamma_grp = VGroup(HGroup(spring, UItem('contrast_default_button')),
Item('contrast'),
Item('gamma'),
show_border=True,
label='Contrast/Gamma')
ctrl_grp = VGroup(UItem('save_button'),
UItem('awb_button'),
Item('temperature', label='Temp.', width=300),
Item('tint'),
hue_grp, c_gamma_grp)
v = View(HGroup(ctrl_grp,
UItem('camera', editor=CameraEditor())),
width=896 + 350, height=680,
resizable=True)
return v
if __name__ == '__main__':
paths.build('_dev')
d = D()
d.activate()
d.configure_traits()
d.camera.close()
# ============= EOF =============================================
|
apache-2.0
| 1,978,467,678,381,373,400 | 33.832117 | 82 | 0.551551 | false | 3.943802 | false | false | false |
LuisMiranda132/FightSimulator
|
proy3/gabil.py
|
1
|
6524
|
from pyevolve import G1DList
from pyevolve import GSimpleGA
from pyevolve import G1DBinaryString
from pyevolve import Util
from pyevolve import Selectors
from random import randint as rand_randint, uniform as rand_uniform, choice as rand_choice, randrange as randrange
import sys
MAX_SET_SIZE = 3
RULE_SIZE = 36
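# One rule is 36 bits: 7 + 7 + 11 + 9 + 2, one bit per discretized interval of
# attributes 1-5 (see attr1..attr5 below).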
INITIAL_POP = 10
TRAINING_SET = []
i = 0
##################################Attributes######################################
def attr1(num):
num = float(num)
if num < 4:
return '1000000'
if num < 5:
return '0100000'
if num < 5.5:
return '0010000'
if num < 6:
return '0001000'
if num < 6.5:
return '0000100'
if num < 7:
return '0000010'
return '0000001'
def attr2(num):
num = float(num)
if num < 2:
return '1000000'
if num < 2.6:
return '0100000'
if num < 2.8:
return '0010000'
if num < 3:
return '0001000'
if num < 3.2:
return '0000100'
if num < 4:
return '0000010'
return '0000001'
def attr3(num):
num = float(num)
if num < 0.6:
return '10000000000'
if num < 1:
return '01000000000'
if num < 1.5:
return '00100000000'
if num < 2:
return '00010000000'
if num < 3:
return '00001000000'
if num < 3.5:
return '00000100000'
if num < 4:
return '00000010000'
if num < 4.5:
return '00000001000'
if num < 5:
return '00000000100'
if num < 6:
return '00000000010'
return '00000000001'
def attr4(num):
num = float(num)
if num < 0.2:
return '100000000'
if num < 0.4:
return '010000000'
if num < 0.6:
return '001000000'
if num < 0.8:
return '000100000'
if num < 1:
return '000010000'
if num < 1.5:
return '000001000'
if num < 2:
return '000000100'
if num < 2.5:
return '000000010'
return '000000001'
def attr5(num):
num = int(num)
if num==1:
return '00'
if num==2:
return '10'
return '01'
###############################Initialization#####################################
def init_func(genome, **args):
the_set = []
set_size = randrange(1,MAX_SET_SIZE+1)
for i in xrange(set_size):
rule = [rand_choice(('0','1')) for j in xrange(RULE_SIZE)]
the_set = the_set + rule
genome.genomeList = the_set
#################################Fitness#########################################
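# A rule covers a sample when every bit set in the sample is also set in the rule,
# i.e. (rule & sample) == sample; an individual matches if any of its rules covers it.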
def match(chromosome,sample,opt=0):
s = long(sample,2)
c = ''.join(chromosome.genomeList)
for i in range(0,len(c),RULE_SIZE):
if (long(c[i:i+RULE_SIZE],2) & s) == s:return True
return False
def standard_fitness(chromosome):
score = 0
for sample in TRAINING_SET:
if(match(chromosome,sample)):
score+=1
return pow(score,2)
def size_fitness(chromosome):
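  # Note: the int() around the return value truncates the 1.0/(num_rules**2) term,
  # so the size bonus only affects the score when the rule set contains a single rule.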
score = 0
for sample in TRAINING_SET:
if(match(chromosome,sample)):
score+=1
return int(float(pow(score,2)) + 1.0/(pow(len(chromosome.genomeList)/RULE_SIZE,2)))
##############################Crossover##########################################
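# GABIL two-point crossover: the cut offsets chosen in the shorter parent are mapped
# to the same offsets inside randomly chosen rules of the longer parent, so both
# offspring remain a whole number of RULE_SIZE-bit rules.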
def gabil_cross(genome, **args):
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if(len(gMom)>len(gDad)):
dummy = gMom
gMom = gDad
gDad = dummy
cuts = [0,0]
cuts[0] = rand_randint(1,len(gMom)-2)
cuts[1] = rand_randint(cuts[0]+1,len(gMom)-1)
newcuts = map(lambda x:divmod(x,RULE_SIZE)[1],cuts)
while True:
dpos = rand_randint(0,(len(gDad)/RULE_SIZE)-1)
dummy0 = newcuts[0]+dpos*RULE_SIZE
dpos = rand_randint(0,(len(gDad)/RULE_SIZE)-1)
dummy1 = newcuts[1]+dpos*RULE_SIZE
if dummy0 < dummy1:
newcuts[0] = dummy0
newcuts[1] = dummy1
break
sister = gMom.clone()
sister.resetStats()
sister.genomeList = gMom[:cuts[0]] + gDad[newcuts[0]:newcuts[1]] + gMom[cuts[1]:]
brother = gDad.clone()
brother.resetStats()
brother.genomeList = gDad[:newcuts[0]] + gMom[cuts[0]:cuts[1]] + gDad[newcuts[1]:]
return (sister, brother)
################################Mutation#########################################
def gabil_mutation(genome, **args):
if args["pmut"] <= 0.0: return 0
stringLength = len(genome)
mutations = args["pmut"] * (stringLength)
if mutations < 1.0:
mutations = 0
for it in xrange(stringLength):
if Util.randomFlipCoin(args["pmut"]):
if genome.genomeList[it] == '0': genome.genomeList[it] = '1'
else: genome.genomeList[it] = '0'
mutations+=1
else:
for it in xrange(int(round(mutations))):
which = rand_randint(0,stringLength-1)
if genome.genomeList[which] == '0': genome.genomeList[which] = '1'
else: genome.genomeList[which] = '0'
return int(mutations)
####################################Main#########################################
###Define the training set###
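# Each line of the training/sample files is assumed to hold five space-separated
# values: four numeric attributes followed by a class label (1, 2 or 3).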
if len(sys.argv) != 7:
response = "usage: python gabil.py <trainingFile>"
response += " <sampleFile> <selector> <fitness>"
response += " <mutationRate> <crossoverRate>\n"
response += "selector must be either 1(Rank) or 2(Roulette)\n"
response += "fitness must be either 1(Standard) or 2(Size)"
print response
sys.exit()
f = open(sys.argv[1],'r')
for line in f:
l = line.split(" ")
t = ""
t = t + attr1(l[0])
t = t + attr2(l[1])
t = t + attr3(l[2])
t = t + attr4(l[3])
t = t + attr5(l[4])
TRAINING_SET = TRAINING_SET + [t]
genome = G1DBinaryString.G1DBinaryString(MAX_SET_SIZE)
genome.initializator.set(init_func)
if(int(sys.argv[4]) == 1): genome.evaluator.set(standard_fitness)
else: genome.evaluator.set(size_fitness)
genome.crossover.set(gabil_cross)
genome.mutator.set(gabil_mutation)
ga = GSimpleGA.GSimpleGA(genome)
ga.terminationCriteria.set(GSimpleGA.FitnessStatsCriteria)
if(int(sys.argv[3]) == 1): ga.selector.set(Selectors.GRankSelector)
else: ga.selector.set(Selectors.GRouletteWheel)
ga.setMutationRate(float(sys.argv[5]))
ga.setCrossoverRate(float(sys.argv[6]))
ga.setGenerations(100)
ga.setPopulationSize(INITIAL_POP)
ga.evolve(freq_stats=0)
f.close()
f = open(sys.argv[2],'r')
SAMPLE_SET = []
for line in f:
l = line.split(" ")
t = ""
t = t + attr1(l[0])
t = t + attr2(l[1])
t = t + attr3(l[2])
t = t + attr4(l[3])
t = t + attr5(l[4])
SAMPLE_SET = SAMPLE_SET + [t]
score = 0
for sample in SAMPLE_SET:
if(match(ga.bestIndividual(),sample)):
score+=1
print ''.join(ga.bestIndividual().genomeList)
print float(score)/len(SAMPLE_SET)
f.close()
|
mit
| -6,613,592,181,887,477,000 | 24.584314 | 114 | 0.578786 | false | 2.962761 | false | false | false |
gemrb/gemrb
|
gemrb/GUIScripts/bg2/GUICG15.py
|
1
|
3499
|
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, racial enemy (GUICG15)
import GemRB
import CharGenCommon
import CommonTables
import GUICommon
from GUIDefines import *
from ie_stats import *
RaceWindow = 0
TextAreaControl = 0
DoneButton = 0
RaceTable = 0
RaceCount = 0
TopIndex = 0
MyChar = 0
#the size of the selection list
LISTSIZE = 11
def DisplayRaces():
global TopIndex
TopIndex=GemRB.GetVar("TopIndex")
for i in range(LISTSIZE):
Button = RaceWindow.GetControl(i+6)
Val = RaceTable.GetValue(i+TopIndex,0)
if Val==0:
Button.SetText("")
Button.SetDisabled(True)
else:
Button.SetText(Val)
Button.SetDisabled(False)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, RacePress)
Button.SetVarAssoc("HatedRace",RaceTable.GetValue(i+TopIndex,1) )
return
def OnLoad():
global RaceWindow, TextAreaControl, DoneButton
global RaceTable, RaceCount, TopIndex, MyChar
MyChar = GemRB.GetVar ("Slot")
ClassName = GUICommon.GetClassRowName (MyChar)
TableName = CommonTables.ClassSkills.GetValue(ClassName, "HATERACE")
if TableName == "*":
GemRB.SetNextScript("GUICG7")
return
RaceWindow = GemRB.LoadWindow(15, "GUICG")
CharGenCommon.PositionCharGenWin (RaceWindow)
RaceTable = GemRB.LoadTable(TableName)
RaceCount = RaceTable.GetRowCount()-LISTSIZE
if RaceCount<0:
RaceCount=0
TopIndex = 0
GemRB.SetVar("TopIndex", 0)
ScrollBarControl = RaceWindow.GetControl(1)
ScrollBarControl.SetVarAssoc("TopIndex", RaceCount)
ScrollBarControl.SetEvent(IE_GUI_SCROLLBAR_ON_CHANGE, DisplayRaces)
RaceWindow.SetEventProxy(ScrollBarControl)
for i in range(LISTSIZE):
Button = RaceWindow.GetControl(i+6)
Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
GemRB.SetVar("HatedRace",0)
BackButton = RaceWindow.GetControl(4)
BackButton.SetText(15416)
BackButton.MakeEscape()
DoneButton = RaceWindow.GetControl(5)
DoneButton.SetText(11973)
DoneButton.MakeDefault()
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
TextAreaControl = RaceWindow.GetControl(2)
TextAreaControl.SetText(17256)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress)
RaceWindow.Focus()
DisplayRaces()
return
def RacePress():
Race = GemRB.GetVar("HatedRace")
Row = RaceTable.FindValue(1, Race)
TextAreaControl.SetText(RaceTable.GetValue(Row, 2) )
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
return
def BackPress():
if RaceWindow:
RaceWindow.Unload()
GemRB.SetPlayerStat (MyChar, IE_HATEDRACE, 0) #scrapping the race value
GemRB.SetNextScript("CharGen6")
return
def NextPress():
if RaceWindow:
RaceWindow.Unload()
# save the hated race
GemRB.SetPlayerStat (MyChar, IE_HATEDRACE, GemRB.GetVar ("HatedRace"))
GemRB.SetNextScript("GUICG7") #mage spells
return
|
gpl-2.0
| -2,945,485,312,536,015,400 | 27.680328 | 81 | 0.766505 | false | 2.891736 | false | false | false |
biodec/biodec.recipe.riak
|
src/biodec/recipe/riak/instance.py
|
1
|
5723
|
# -*- coding: utf-8 -*-
# Copyright (C)2012 'Biodec'
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""Config Recipe riak"""
import os
import subprocess
import logging
RECIPE_BUILD_NAME = 'biodec.recipe.riak:build'
def get_options_from_build(buildout, options):
part = options.get('riakbuildpart', None)
if part:
return buildout[part]
for part in buildout.keys():
if 'recipe' in buildout[part] and \
buildout[part]['recipe'] == RECIPE_BUILD_NAME:
return buildout[part]
return {}
class InstanceRecipe(object):
"""This recipe is used by zc.buildout"""
def __init__(self, buildout, name, options):
self.buildout = buildout
self.name = name
location = options.get(
'location', buildout['buildout']['parts-directory'])
print location
options['location'] = os.path.join(location, name)
options['prefix'] = options['location']
self.options = options
self.buildoptions = get_options_from_build(buildout, options)
self.logger = logging.getLogger(__name__)
def gen_scripts(self, target_dir):
"""Generates Riak bin scripts."""
bindir = self.buildout['buildout']['bin-directory']
erlang_path = self.options.get('erlang-path')
if erlang_path:
erlang_path = 'PATH=%s:$PATH' % erlang_path
else:
erlang_path = ''
scripts = []
for scriptname in ('riak', 'riak-admin', 'search-cmd'):
script = os.path.join(bindir, "%s.%s" % (self.name, scriptname))
f = open(script, 'wb')
f.write('#!/usr/bin/env bash\n%s\ncd %s\nexec bin/%s $@\n' %
(erlang_path, target_dir, scriptname))
print erlang_path, target_dir, scriptname
f.close()
os.chmod(script, 0755)
scripts.append(script)
return scripts
def install(self):
""" install riak instance """
dst = self.options.setdefault(
'location',
os.path.join(self.buildout['buildout']['parts-directory'],
self.name))
print 'dst', dst
if not os.path.isdir(dst):
os.mkdir(dst)
var = os.path.join(
self.buildout['buildout']['directory'],
'var', self.name)
print 'var', var
if not os.path.isdir(var):
os.mkdir(var)
target_dir = os.path.join(dst, 'rel')
overlay_vars = os.path.join(dst, 'vars.config')
open(overlay_vars, 'w').write(CONFIG_TEMPLATE % dict(
root=target_dir,
var=var,
web_ip=self.options.get('web_ip', '127.0.0.1'),
web_port=self.options.get('web_port', 8098)
))
old_cwd = os.getcwd()
os.chdir(self.buildoptions['location'])
my_env = os.environ.copy()
if self.buildoptions.get('erlang-path'):
my_env["PATH"] = "%s:%s" % (
self.buildoptions.get('erlang-path'), my_env.get("PATH"))
retcode = subprocess.Popen(
['./rebar', 'generate',
'target_dir=%s' % target_dir, 'overlay_vars=%s' % overlay_vars],
env=my_env).wait()
if retcode != 0:
raise Exception("Creating Riak instance %s" % self.name)
os.chdir(old_cwd)
scripts = self.gen_scripts(target_dir)
return [dst, ] + scripts
def update(self):
""" update riak instance """
self.logger.warning('not implemented')
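# Literal '%' characters in the Erlang config below are doubled ('%%') because the
# template is rendered with Python %-formatting in install().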
CONFIG_TEMPLATE = '''
%%%% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
%%%% ex: ft=erlang ts=4 sw=4 et
%%%% Platform-specific installation paths
{platform_bin_dir, "%(root)s/bin"}.
{platform_data_dir, "%(var)s/data"}.
{platform_etc_dir, "%(root)s/etc"}.
{platform_lib_dir, "%(root)s/lib"}.
{platform_log_dir, "%(var)s/log"}.
%%%%
%%%% etc/app.config
%%%%
{web_ip, "%(web_ip)s"}.
{web_port, %(web_port)s}.
{handoff_port, 8099}.
{pb_ip, "127.0.0.1"}.
{pb_port, 8087}.
{ring_state_dir, "{{platform_data_dir}}/ring"}.
{bitcask_data_root, "{{platform_data_dir}}/bitcask"}.
{leveldb_data_root, "{{platform_data_dir}}/leveldb"}.
{sasl_error_log, "{{platform_log_dir}}/sasl-error.log"}.
{sasl_log_dir, "{{platform_log_dir}}/sasl"}.
{mapred_queue_dir, "{{platform_data_dir}}/mr_queue"}.
%%%% riak_search
{merge_index_data_root, "{{platform_data_dir}}/merge_index"}.
%%%% secondary indices
{merge_index_data_root_2i, "{{platform_data_dir}}/merge_index_2i"}.
%%%% Javascript VMs
{map_js_vms, 8}.
{reduce_js_vms, 6}.
{hook_js_vms, 2}.
%%%%
%%%% etc/vm.args
%%%%
{node, "[email protected]"}.
{crash_dump, "{{platform_log_dir}}/erl_crash.dump"}.
%%%%
%%%% bin/riak
%%%%
{runner_script_dir, "$(cd ${0%%/*} && pwd)"}.
{runner_base_dir, "${RUNNER_SCRIPT_DIR%%/*}"}.
{runner_etc_dir, "$RUNNER_BASE_DIR/etc"}.
{runner_log_dir, "{{platform_log_dir}}"}.
{pipe_dir, "%(var)s/tmp/"}.
{runner_user, ""}.
'''
|
lgpl-3.0
| 3,816,028,748,366,181,400 | 32.467836 | 77 | 0.580465 | false | 3.231508 | false | false | false |
SushiTee/teerace
|
teerace/race/south_migrations/0016_auto__add_field_bestrun_ghost_file.py
|
1
|
9389
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BestRun.ghost_file'
db.add_column('race_bestrun', 'ghost_file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'BestRun.ghost_file'
db.delete_column('race_bestrun', 'ghost_file')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'race.bestrun': {
'Meta': {'ordering': "['time', 'run__created_at']", 'unique_together': "(('user', 'map'),)", 'object_name': 'BestRun'},
'demo_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'ghost_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['race.Map']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'run': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['race.Run']"}),
'time': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '3'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'race.map': {
'Meta': {'object_name': 'Map'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'crc': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'grenade_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'has_deathtiles': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'has_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_speedups': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'has_teleporters': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'has_unhookables': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'heart_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'map_type': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['race.MapType']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'shield_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'})
},
'race.maptype': {
'Meta': {'object_name': 'MapType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'displayed_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '20', 'db_index': 'True'})
},
'race.run': {
'Meta': {'ordering': "['time', 'created_at']", 'object_name': 'Run'},
'checkpoints': ('django.db.models.fields.CharField', [], {'max_length': '349', 'blank': 'True'}),
'clan': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['race.Map']"}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'runs'", 'null': 'True', 'to': "orm['race.Server']"}),
'time': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '3'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'race.server': {
'Meta': {'object_name': 'Server'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'anonymous_players': ('picklefield.fields.PickledObjectField', [], {}),
'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_connection_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'maintained_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'maintained_servers'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'played_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['race.Map']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['race']
|
bsd-3-clause
| -35,702,658,502,015,064 | 75.333333 | 182 | 0.545958 | false | 3.594564 | false | false | false |
bitmazk/django-people
|
people/models.py
|
1
|
7856
|
"""Models for the ``people`` app."""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
from filer.fields.file import FilerFileField
from hvad.models import TranslatedFields, TranslatableModel
from localized_names.templatetags.localized_names_tags import get_name
from . import settings
# Hack to have these strings translated
mr = _('Mr')
mrs = _('Ms')
GENDER_CHOICES = [
('male', _('male')),
('female', _('female')),
]
TITLE_CHOICES = [
('Dr', _('Dr')),
('Prof', _('Prof')),
('Prof Dr', _('Prof Dr')),
]
@python_2_unicode_compatible
class LinkType(TranslatableModel):
"""
A link type could be ``Facebook`` or ``Twitter`` or ``Website``.
This is masterdata that should be created by the admins when the site is
deployed for the first time.
For translateable fields see ``LinkTypeTranslation`` model.
:ordering: Enter numbers here if you want links to be displayed in a
special order.
"""
slug = models.SlugField(
max_length=256,
verbose_name=_('Slug'),
help_text=_(
'Use this field to define a simple identifier that can be used'
' to style the different link types (i.e. assign social media'
' icons to them)'),
blank=True,
)
ordering = models.PositiveIntegerField(
verbose_name=_('Ordering'),
null=True, blank=True,
)
translations = TranslatedFields(
name=models.CharField(
max_length=256,
verbose_name=_('Name'),
)
)
class Meta:
ordering = ['ordering', ]
def __str__(self):
return self.safe_translation_getter('name', self.slug)
@python_2_unicode_compatible
class Nationality(TranslatableModel):
"""
The nationality of a Person.
For translateable fields see the ``NationalityTranslation`` model.
"""
translations = TranslatedFields(
name=models.CharField(
max_length=128,
verbose_name=_('Name'),
)
)
def __str__(self):
return self.safe_translation_getter(
'name', 'Nationality No. {0}'.format(self.id))
class Meta:
verbose_name_plural = _('Nationalities')
@python_2_unicode_compatible
class Role(TranslatableModel):
"""
People can have certain roles in an organisation.
For translateable fields see ``RoleTranslation`` model.
:name: The name of the role.
"""
translations = TranslatedFields(
name=models.CharField(
max_length=256,
verbose_name=_('Role'),
),
role_description=models.TextField(
max_length=4000,
verbose_name=_('Role description'),
blank=True,
),
)
def __str__(self):
return self.safe_translation_getter(
'name', 'Role No. {0}'.format(self.id))
@python_2_unicode_compatible
class Person(TranslatableModel):
"""
A model that holds information about a person.
For translateable fields see ``PersonTitle`` model.
:roman_first_name: The first name in roman letters.
:roman_last_name: The last name in roman letters.
:non_roman_first_name: The first name in non roman letters.
:non_roman_last_name: The last name in non roman letters.
:gender: The gender of the person.
:title: The title of the person.
:chosen_name: For asian people, this is the chosen western name.
:role: Role of the person within the organisation.
:picture: A picture of the person.
:phone: Phonenumber of the person.
:email: Email address of the person.
:ordering: Enter numbers if you want to order the list of persons on your
site in a special way.
:nationality: The nationality of a person.
"""
roman_first_name = models.CharField(
max_length=256,
verbose_name=_('Roman first name'),
blank=True
)
roman_last_name = models.CharField(
max_length=256,
verbose_name=_('Roman last name'),
blank=True,
)
non_roman_first_name = models.CharField(
max_length=256,
verbose_name=_('Non roman first name'),
blank=True
)
non_roman_last_name = models.CharField(
max_length=256,
verbose_name=_('Non roman last name'),
blank=True,
)
gender = models.CharField(
max_length=16,
choices=GENDER_CHOICES,
verbose_name=_('Gender'),
blank=True,
)
title = models.CharField(
max_length=16,
choices=TITLE_CHOICES,
verbose_name=_('Title'),
blank=True,
)
chosen_name = models.CharField(
max_length=256,
verbose_name=_('Chosen name'),
blank=True,
)
role = models.ForeignKey(
Role,
verbose_name=_('Role'),
null=True, blank=True,
)
picture = FilerFileField(
verbose_name=_('Picture'),
null=True, blank=True,
)
phone = models.CharField(
max_length=32,
verbose_name=_('Phone'),
blank=True,
)
email = models.EmailField(
verbose_name=_('Email'),
blank=True,
)
ordering = models.PositiveIntegerField(
verbose_name=_('Ordering'),
null=True, blank=True,
)
nationality = models.ForeignKey(
Nationality,
verbose_name=_('Nationality'),
blank=True, null=True,
)
translations = TranslatedFields(
short_bio=models.TextField(
max_length=512,
verbose_name=_('Short bio'),
blank=True,
),
bio=models.TextField(
max_length=4000,
verbose_name=_('Biography'),
blank=True,
),
)
class Meta:
ordering = ['ordering', ]
verbose_name_plural = _('People')
def __str__(self):
return get_name(self)
def get_gender(self):
"""Returns either 'Mr.' or 'Ms.' depending on the gender."""
if self.gender == 'male':
return 'Mr'
elif self.gender == 'female':
return 'Ms'
return ''
def get_title(self):
"""Returns the title of the person."""
return self.title
def get_romanized_first_name(self):
"""Returns the first name in roman letters."""
return self.roman_first_name
def get_romanized_last_name(self):
"""Returns the first name in roman letters."""
return self.roman_last_name
def get_non_romanized_first_name(self):
"""Returns the non roman version of the first name."""
return self.non_roman_first_name
def get_non_romanized_last_name(self):
"""Returns the non roman version of the first name."""
return self.non_roman_last_name
def get_nickname(self):
"""Returns the nickname of a person in roman letters."""
return self.chosen_name
class PersonPluginModel(CMSPlugin):
"""Model for the ``PersonPlugin`` cms plugin."""
display_type = models.CharField(
max_length=256,
choices=settings.DISPLAY_TYPE_CHOICES,
verbose_name=_('Display type'),
)
person = models.ForeignKey(
Person,
verbose_name=_('Person'),
)
def copy_relations(self, oldinstance):
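        # django CMS calls copy_relations() when a plugin is copied (e.g. on publish);
        # foreign keys are not copied automatically, so carry the person over by hand.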
self.person = oldinstance.person
@python_2_unicode_compatible
class Link(models.Model):
"""
A person can have many links.
"""
person = models.ForeignKey(
Person,
verbose_name=_('Person'),
)
link_type = models.ForeignKey(
LinkType,
verbose_name=_('Link type'),
)
url = models.URLField(
verbose_name=_('URL'),
)
def __str__(self):
return self.url
|
mit
| -6,344,347,272,851,630,000 | 23.939683 | 77 | 0.594323 | false | 4.039075 | false | false | false |
mworks/mworks
|
examples/Examples/FindTheCircle/analysis/Python/selection_counts.py
|
1
|
1241
|
import sys
from matplotlib import pyplot
import numpy
sys.path.insert(0, '/Library/Application Support/MWorks/Scripting/Python')
from mworks.data import MWKFile
def selection_counts(filename):
with MWKFile(filename) as f:
r_codec = f.reverse_codec
red_code = r_codec['red_selected']
green_code = r_codec['green_selected']
blue_code = r_codec['blue_selected']
red_count = 0
green_count = 0
blue_count = 0
for evt in f.get_events_iter(codes=[red_code, green_code, blue_code]):
if evt.data:
if evt.code == red_code:
red_count += 1
elif evt.code == green_code:
green_count += 1
else:
assert evt.code == blue_code
blue_count += 1
index = numpy.arange(3)
pyplot.bar(index,
[red_count, green_count, blue_count],
0.5,
color = ['r', 'g', 'b'],
align = 'center')
pyplot.xticks(index, ['Red', 'Green', 'Blue'])
pyplot.title('Selection Counts')
pyplot.show()
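# Assumed invocation: python selection_counts.py /path/to/session.mwk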
if __name__ == '__main__':
selection_counts(sys.argv[1])
|
mit
| -3,228,629,496,021,222,000 | 27.860465 | 78 | 0.51249 | false | 3.830247 | false | false | false |
t-/gromacs_ligand_param
|
tools/top2itp/RUNME2.py
|
1
|
21522
|
import numpy as np
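# Merges GROMACS bonded/non-bonded parameter blocks from res_ffbonded.itp,
# res_ffnonbonded.itp and res_atomtypes.atp (see main() below): duplicate, permuted
# and conflicting entries are collapsed and the results are written to the
# merged_*.itp / merged_atomtypes.atp files.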
class merge_dihedrals:
def __init__(self,filepath,filelist,dihetype='9',fmerged='merged_dihedrals.itp'):
clist=[]
for fi in filelist:
flist1 = self.read_dihedrals(filepath+fi,t=dihetype)
clist=clist+flist1
#print 'processing',fi
print 'fixing type',dihetype,'dihedrals'
clist=self.fix_permuted_entries(clist)
clist=self.fix_double_entries(clist)
clist=self.get_similars(clist,filepath,fname='dihedral_errors.dat')
#self.print_dihedrals(clist)
clist.sort()
self.print2file_dihedrals(clist,filepath,fmerged)
def print_dihedrals(self,clist):
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
out='%4s%4s%4s%4s %2s%8s%14s%4s' % (top[0],top[1],top[2],top[3],top[4],top[5],top[6],top[7])
print out
def print2file_dihedrals(self,clist,fpath,fname):
f=open(fpath+fname,'w')
print >>f, '[ dihedraltypes ]'
print >>f, '; i j k l func phase kd pn'
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
out='%4s%4s%4s%4s %2s%8s%14s%4s' % (top[0],top[1],top[2],top[3],top[4],top[5],top[6],top[7])
print >>f,out
def get_similars(self,clist,filepath,fname='dihedral_errors.dat'):
print 'fixing similar dihedrals - output written to',filepath+fname
#=======================================================================
# fixes:
# character identical entries with different force constants
# cag cag cag cag 9 180.0 15.16700 2
# cag cag cag cag 9 180.0 16.73600 2
# Will always use the larger one of the two by default
#=======================================================================
sim_clist={}
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
sim_clist[top[3] + ' ' + top[2] + ' ' + top[1] + ' ' + top[0]+' '+top[4] + ' ' + top[5] + ' ' + top[7]]=[top[6],lin]
f=open(filepath+fname,'aw')
print >> f, 'fixed dihedrals'
for i in xrange(len(clist)):
lin=clist[i]
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
cur = top[3] + ' ' + top[2] + ' ' + top[1] + ' ' + top[0]+' '+top[4] + ' ' + top[5] + ' ' + top[7]
if top[6] != sim_clist[cur][0]:
#This will allways use the larger force constant from the set
if float(top[6]) > float(sim_clist[cur][0]):
print >> f, 'new',top[6],'old',sim_clist[cur][0],sim_clist[cur][1]
sim_clist[top[3] + ' ' + top[2] + ' ' + top[1] + ' ' + top[0]+' '+top[4] + ' ' + top[5] + ' ' + top[7]] = [top[6],[top[0] + ' ' + top[1] + ' ' + top[2] + ' ' + top[3],top[4] + ' ' + top[5] + ' ' + top[6] + ' ' + top[7]]]
if float(top[6]) < float(sim_clist[cur][0]):
print >> f, 'new',sim_clist[cur][0],'old',top[6],sim_clist[cur][1]
new_clist=[]
f.close()
for i in sim_clist.keys():
new_clist.append(sim_clist[i][1])
        return new_clist
def fix_permuted_entries(self,clist):
print 'fixing permuted dihedrals'
#=======================================================================
# fixes:
# character identical permuted entries like
# nhg c2g ceg hag 9 180.0 27.82360 2
# hag ceg c2g nhg 9 180.0 27.82360 2
#=======================================================================
perm_clist=[]
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
order=[top[0]+' '+top[1],top[3]+' '+top[2]]
order_ref=[top[0]+' '+top[1],top[3]+' '+top[2]]
order_ref.sort()
if order!=order_ref:
perm_clist.append([top[3] + ' ' + top[2] + ' ' + top[1] + ' ' + top[0],top[4] + ' ' + top[5] + ' ' + top[6] + ' ' + top[7]])
else:
perm_clist.append([top[0] + ' ' + top[1] + ' ' + top[2] + ' ' + top[3],top[4] + ' ' + top[5] + ' ' + top[6] + ' ' + top[7]])
return perm_clist
def fix_double_entries(self,clist):
print 'fixing double dihedrals'
#=======================================================================
# fixes:
# character identical entries like
# nhg c2g ceg hag 9 180.0 27.82360 2
# nhg c2g ceg hag 9 180.0 27.82360 2
#=======================================================================
keys = {}
for e in clist:
ie=e[0]+' '+e[1]
keys[ie] = 1
lins=keys.keys()
lins.sort()
#splits list up again and converts it back into input format: ['cag cfg ceg hg','9 180.0 27.82360 2']
linreturn=[]
for lin in lins:
top=lin.split(' ')
linreturn.append([top[0] + ' ' + top[1] + ' ' + top[2] + ' ' + top[3],top[4] + ' ' + top[5] + ' ' + top[6] + ' ' + top[7]])
return linreturn
def read_dihedrals(self, filename, t='9'):
if t=='9':
blockn=3
if t=='4':
blockn=2
block = []
blocklist = []
#Read Topology and separate it into blocks [ atoms ], [ bonds ], etc.
for i in open(filename, 'r'):
if len(i.strip('\n')) == 0: # blank line indicates the end of a block [ atoms ], [ bonds ], etc.
if len(block) > 0: blocklist.append(block);
block = []
elif len(i.strip('\n')) > 0: # read block
block.append(i.strip('\n'))
blocklist.append(block);
dihedralslist = []
for dihedral in blocklist[blockn]:
if dihedral[0] != '[' and dihedral[0] != ';':
top = dihedral.split(' ')
for i in range(top.count('')): top.remove(''); #remove blanks from array
dihedralslist.append([top[0] + ' ' + top[1] + ' ' + top[2] + ' ' + top[3],top[4] + ' ' + top[5] + ' ' + top[6] + ' ' + top[7]])
return dihedralslist
class merge_bonds:
def __init__(self,filepath,filelist,fmerged='merged_bonds.itp'):
clist=[]
for fi in filelist:
flist1 = self.read_bonds(filepath+fi)
clist=clist+flist1
#print 'processing',fi
clist=self.fix_permuted_entries(clist)
clist=self.fix_double_entries(clist)
clist=self.get_similars(clist,filepath,fname='bond_errors.dat')
#self.print_bonds(clist)
self.print2file_bonds(clist,filepath,fmerged)
def print_bonds(self,clist):
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
out='%4s%4s %2s%8s%14s' % (top[0],top[1],top[2],top[3],top[4])
print out
def print2file_bonds(self,clist,fpath,fname):
f=open(fpath+fname,'w')
print >>f, '[ bondtypes ]'
print >>f, '; i j func b0 kb'
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
out='%4s%4s %2s%8s%14s' % (top[0],top[1],top[2],top[3],top[4])
print >>f,out
def get_similars(self,clist,filepath,fname='bond_errors.dat'):
print 'fixing similar bonds - output written to',filepath+fname
#=======================================================================
# fixes:
# character identical entries with different force constants
# cag cag 1 0.1387 400330.0
# cag cag 1 0.1429 350030.0
#=======================================================================
sim_clist={}
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
sim_clist[top[0] + ' ' + top[1]]=[top[2] + ' ' + top[3] + ' ' + top[4],[lin]]
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
cur = top[0] + ' ' + top[1]
if top[2] + ' ' + top[3] + ' ' + top[4] != sim_clist[cur][0]:
sim_clist[cur][1].append([top[0] + ' ' + top[1],top[2] + ' ' + top[3] + ' ' + top[4]])
f=open(filepath+fname,'w')
for lin in sim_clist.keys():
dmean=[]
kmean=[]
if len(sim_clist[lin][1])>1:
for element in sim_clist[lin][1]:
dmean.append(float(element[1].split(' ')[1]))
kmean.append(float(element[1].split(' ')[2]))
print >>f,'\nBOND TYPE ',sim_clist[lin][1][0][0]
print >>f,' distances ',np.array(dmean)
print >>f,' mean',np.array(dmean).mean(),'+\-',np.array(dmean).std()
print >>f,' forceconstants',np.array(kmean)
print >>f,' mean',np.array(kmean).mean(),'+\-',np.array(kmean).std()
#replacing old bond with new averaged bond parameters
sim_clist[lin][0] = '1 '+str(np.round(np.array(dmean).mean(),4))+' '+str(np.round(np.array(kmean).mean(),0))
f.close()
#creating new clist with averaged bond parameters
new_clist=[]
for i in sim_clist.keys():
new_clist.append([i,sim_clist[i][0]])
new_clist.sort()
return new_clist
def fix_permuted_entries(self,clist):
print 'fixing permuted bonds'
#=======================================================================
# fixes:
# character identical permuted entries like
# cag osg 1 0.1373 311620.0
# osg cag 1 0.1373 311620.0
#=======================================================================
perm_clist=[]
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
order=[top[0],top[1]]
order_ref=[top[1],top[0]]
order_ref.sort()
if order!=order_ref:
perm_clist.append([top[1] + ' ' + top[0],top[2] + ' ' + top[3] + ' ' + top[4]])
else:
perm_clist.append([top[0] + ' ' + top[1],top[2] + ' ' + top[3] + ' ' + top[4]])
return perm_clist
def fix_double_entries(self,clist):
print 'fixing double bonds'
#=======================================================================
# fixes:
# character identical entries like
# cag cag 1 0.1429 350030.0
# cag cag 1 0.1429 350030.0
#=======================================================================
keys = {}
for e in clist:
ie=e[0]+' '+e[1]
keys[ie] = 1
lins=keys.keys()
lins.sort()
#splits list up again and converts it back into input format: ['cag cfg ceg hg','9 180.0 27.82360 2']
linreturn=[]
for lin in lins:
top=lin.split(' ')
linreturn.append([top[0] + ' ' + top[1],top[2] + ' ' + top[3] + ' ' + top[4]])
return linreturn
def read_bonds(self, filename):
block = []
blocklist = []
#Read Topology and separate it into blocks [ atoms ], [ bonds ], etc.
for i in open(filename, 'r'):
if len(i.strip('\n')) == 0: # blank line indicates the end of a block [ atoms ], [ bonds ], etc.
if len(block) > 0: blocklist.append(block);
block = []
elif len(i.strip('\n')) > 0: # read block
block.append(i.strip('\n'))
blocklist.append(block);
bondslist = []
for bond in blocklist[0]:
if bond[0] != '[' and bond[0] != ';':
top = bond.split(' ')
for i in range(top.count('')): top.remove(''); #remove blanks from array
bondslist.append([top[0] + ' ' + top[1],top[2] + ' ' + top[3] + ' ' + top[4]])
return bondslist
class merge_angles:
def __init__(self,filepath,filelist,fmerged='merged_angles.itp'):
clist=[]
for fi in filelist:
flist1 = self.read_angles(filepath+fi)
clist=clist+flist1
#print 'processing',fi
clist=self.fix_permuted_entries(clist)
clist=self.fix_double_entries(clist)
clist.sort()
clist=self.get_similars(clist,filepath,fname='angle_errors.dat')
#self.print_angles(clist)
self.print2file_angles(clist,filepath,fmerged)
def fix_permuted_entries(self,clist):
print 'fixing permuted angles'
#=======================================================================
# fixes:
# character identical permuted entries like
# ssg c3g h1g 1 109.340 449.030 ; TTT
# h1g c3g ssg 1 109.340 449.030 ; TTT
#=======================================================================
perm_clist=[]
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
order=[top[0],top[2]]
order_ref=[top[2],top[0]]
order_ref.sort()
if order!=order_ref:
perm_clist.append([top[2] + ' ' + top[1] + ' ' + top[0], top[3] + ' ' + top[4] + ' ' + top[5]])
else:
perm_clist.append([top[0] + ' ' + top[1] + ' ' + top[2], top[3] + ' ' + top[4] + ' ' + top[5]])
return perm_clist
def fix_double_entries(self,clist):
print 'fixing double angles'
#=======================================================================
# fixes:
# character identical entries like
# ssg c3g h1g 1 109.340 449.030 ; TTT
# ssg c3g h1g 1 109.340 449.030 ; TTT
#=======================================================================
keys = {}
for e in clist:
ie=e[0]+' '+e[1]
keys[ie] = 1
lins=keys.keys()
lins.sort()
#splits list up again and converts it back into input format: ['cag cfg ceg','9 180.0 27.82360']
linreturn=[]
for lin in lins:
top=lin.split(' ')
linreturn.append([top[0] + ' ' + top[1] + ' ' + top[2], top[3] + ' ' + top[4] + ' ' + top[5]])
return linreturn
def get_similars(self,clist,filepath,fname='angle_errors.dat'):
print 'fixing similar angles - output written to',filepath+fname
#=======================================================================
# fixes:
# character identical entries with different force constants
# ssg c3g h1g 1 109.340 449.030 ; TTT
# ssg c3g h1g 1 29.340 142.030 ; TTT
#=======================================================================
sim_clist={}
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
sim_clist[top[0] + ' ' + top[1] + ' ' + top[2]]=[top[3] + ' ' + top[4] + ' ' + top[5],[lin]]
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
cur = top[0] + ' ' + top[1] + ' ' + top[2]
if top[3] + ' ' + top[4] + ' ' + top[5] != sim_clist[cur][0]:
sim_clist[cur][1].append([top[0] + ' ' + top[1] + ' ' + top[2], top[3] + ' ' + top[4] + ' ' + top[5]])
f=open(filepath+fname,'w')
for lin in sim_clist.keys():
dmean=[]
kmean=[]
if len(sim_clist[lin][1])>1:
for element in sim_clist[lin][1]:
dmean.append(float(element[1].split(' ')[1]))
kmean.append(float(element[1].split(' ')[2]))
print >>f,'\nAngle TYPE ',sim_clist[lin][1][0][0]
print >>f,' distances ',np.array(dmean)
print >>f,' mean',np.array(dmean).mean(),'+\-',np.array(dmean).std()
print >>f,' forceconstants',np.array(kmean)
print >>f,' mean',np.array(kmean).mean(),'+\-',np.array(kmean).std()
#replacing old bond with new averaged bond parameters
sim_clist[lin][0] = '1 '+str(np.round(np.array(dmean).mean(),4))+' '+str(np.round(np.array(kmean).mean(),0))
f.close()
#creating new clist with averaged bond parameters
new_clist=[]
for i in sim_clist.keys():
new_clist.append([i,sim_clist[i][0]])
new_clist.sort()
return new_clist
def read_angles(self, filename):
block = []
blocklist = []
#Read Topology and separate it into blocks [ atoms ], [ bonds ], etc.
for i in open(filename, 'r'):
if len(i.strip('\n')) == 0: # blank line indicates the end of a block [ atoms ], [ bonds ], etc.
if len(block) > 0: blocklist.append(block);
block = []
elif len(i.strip('\n')) > 0: # read block
block.append(i.strip('\n'))
blocklist.append(block);
angleslist = []
for angle in blocklist[1]:
if angle[0] != '[' and angle[0] != ';':
top = angle.split(' ')
for i in range(top.count('')): top.remove(''); #remove blanks from array
angleslist.append([top[0] + ' ' + top[1] + ' ' + top[2] , top[3] + ' ' + top[4] + ' ' +top[5]])
return angleslist
def print_angles(self,clist):
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
out='%4s%4s%4s %8s%14s%14s' % (top[0],top[1],top[2],top[3],top[4],top[5])
print out
def print2file_angles(self,clist,fpath,fname):
f=open(fpath+fname,'w')
print >>f, '[ angletypes ]'
print >>f, '; i j k func phi0 k'
for lin in clist:
clin=lin[0]+' '+lin[1]
top=clin.split(' ')
out='%4s%4s%4s %8s%14s%14s' % (top[0],top[1],top[2],top[3],top[4],top[5])
print >>f,out
class merge_atomtypes:
def __init__(self,filepath,filelist,fmerged='merged_atomtypes.atp'):
clist=[]
for fi in filelist:
flist1 = self.read_atomtypes(filepath+fi)
clist=clist+flist1
clist=self.fix_double_entries(clist)
clist.sort()
self.print2file_angles(clist,filepath,fmerged)
def read_atomtypes(self,filename):
clist=[]
for i in open(filename):
clist.append( i.strip('\n') )
return clist
def fix_double_entries(self,clist):
print 'fixing double atomtypes'
#=======================================================================
# fixes:
# character identical entries like
# n2g 14.01000 ; TTT
# n2g 14.01000 ; TTT
#=======================================================================
keys = {}
for e in clist:
keys[e] = 1
lins=keys.keys()
lins.sort()
#splits list up again and converts it back into input format: ['cag cfg ceg','9 180.0 27.82360']
return lins
def print2file_angles(self,clist,fpath,fname):
f=open(fpath+fname,'w')
for lin in clist:
print >>f,lin
class merge_nonbonded:
def __init__(self,filepath,filelist,fmerged='merged_nbonds.itp'):
clist=[]
for fi in filelist:
flist1 = self.read_atomtypes(filepath+fi)
clist=clist+flist1
#print 'processing',fi
clist=self.fix_double_entries(clist)
clist.sort()
self.print2file_angles(clist,filepath,fmerged)
def read_atomtypes(self,filename):
clist=[]
for i in open(filename):
if i.find('[')<0:
clist.append( i.strip('\n') )
return clist
def fix_double_entries(self,clist):
print 'fixing double nonbonded parameters'
#=======================================================================
# fixes:
# character identical entries like
# ohg 8 16.0 0.0000 A 3.066470e-01 8.803140e-01
# ohg 8 16.0 0.0000 A 3.066470e-01 8.803140e-01
#=======================================================================
keys = {}
for e in clist:
keys[e] = 1
lins=keys.keys()
lins.sort()
return lins
def print2file_angles(self,clist,fpath,fname):
f=open(fpath+fname,'w')
print >>f, '[ atomtypes ]'
for lin in clist:
print >>f,lin
def main():
fpath='./'
print 'working in directory',fpath
f=open(fpath+'dihedral_errors.dat','w')
print >>f,''
f.close()
merge_dihedrals('./',['res_ffbonded.itp'],dihetype='9',fmerged='merged_dihedrals.itp')
print ''
merge_dihedrals('./',['res_ffbonded.itp'],dihetype='4',fmerged='merged_impropers.itp')
print ''
merge_bonds('./',['res_ffbonded.itp'],fmerged='merged_bonds.itp')
print ''
merge_angles('./',['res_ffbonded.itp'],fmerged='merged_angles.itp')
print ''
merge_atomtypes('./',['res_atomtypes.atp'],fmerged='merged_atomtypes.atp')
print ''
merge_nonbonded('./',['res_ffnonbonded.itp'],fmerged='merged_nbonds.itp')
if __name__ == '__main__':
main()
|
gpl-3.0
| 7,514,140,246,321,269,000 | 42.216867 | 240 | 0.452003 | false | 3.362287 | false | false | false |
andreas-p/admin4
|
modPg/Function.py
|
1
|
3307
|
# The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
from _objects import SchemaObject
from _pgsql import pgQuery
from wh import xlt, YesNo
import logger
class Function(SchemaObject):
typename=xlt("Function")
shortname=xlt("Function")
refreshOid="pro.oid"
allGrants='X'
favtype='f'
relkind='P'
@staticmethod
def FindQuery(schemaName, schemaOid, patterns):
sql=pgQuery("pg_proc p")
sql.AddCol("'P' as kind")
sql.AddCol("nspname")
sql.AddCol("proname as name")
sql.AddCol("n.oid as nspoid")
sql.AddCol("p.oid")
sql.AddJoin("pg_namespace n ON n.oid=pronamespace")
SchemaObject.AddFindRestrictions(sql, schemaName, schemaOid, 'proname', patterns)
return sql
@staticmethod
def InstancesQuery(parentNode):
sql=pgQuery("pg_proc pro")
sql.AddCol("pro.oid, pg_get_userbyid(proowner) AS owner, proacl as acl, proname as name, pro.*, nspname, ns.oid as nspoid, lanname, description")
if parentNode.GetServer().version >= 8.4:
sql.AddCol("pg_get_function_arguments(pro.oid) as arguments, pg_get_function_result(pro.oid) as result")
sql.AddJoin("pg_language lang ON lang.oid=prolang")
sql.AddLeft("pg_namespace ns ON ns.oid=pronamespace")
sql.AddLeft("pg_description des ON (des.objoid=pro.oid AND des.objsubid=0)")
sql.AddWhere("pronamespace", parentNode.parentNode.GetOid())
sql.AddOrder("proname")
return sql
def __init__(self, parentNode, info):
super(Function, self).__init__(parentNode, info)
args=self.info.get('arguments')
if args!= None:
self.name="%s(%s)" % (self.name, args)
def GetIcon(self):
icons=[]
icons.append("Function")
if self.GetOid() in self.GetDatabase().favourites:
icons.append('fav')
return self.GetImageId(icons)
def GetSql(self):
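    # The function body is fetched lazily via pg_get_functiondef() and cached in
    # self.info the first time the SQL is requested.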
definition=self.info.get('definition')
if not definition:
definition=self.GetCursor().ExecuteSingle("SELECT pg_get_functiondef(%d)" % self.GetOid())
self.info['definition']=definition
return "%(def)s\n%(grant)s" % {
'object': self.ObjectSql(),
'def': definition, 'grant': self.GrantCommentSql() }
def GetProperties(self):
if not len(self.properties):
args=self.info.get('arguments')
if args == None:
logger.error("PGSQL < 8.4; no function args/returns")
args=""
self.info['arguments']=""
self.info['result']=""
self.info['definition']=None
result=self.info.get('result', "")
self.properties = [
(xlt("Name"), "%s(%s)" % (self.info['name'], args)),
(xlt("Namespace"), self.info['nspname']),
(xlt("Language"), self.info['lanname']),
(xlt("Strict"), YesNo(self.info['proisstrict'])),
( "OID" , self.info['oid']),
(xlt("Returns"), result),
(xlt("Owner"), self.info['owner']),
(xlt("ACL"), self.info['acl'])
]
self.AddProperty(xlt("Description"), self.info['description'])
return self.properties
nodeinfo= [ { "class" : Function, "parents": ["Schema"], "sort": 60, "collection": "Functions", "pages": ["SqlPage"] } ]
|
apache-2.0
| 7,796,791,559,803,237,000 | 31.742574 | 149 | 0.617478 | false | 3.350557 | false | false | false |
Wintermute0110/advanced-emulator-launcher
|
resources/rom_audit.py
|
1
|
46115
|
# -*- coding: utf-8 -*-
#
# Advanced Emulator Launcher
#
# Copyright (c) 2016-2017 Wintermute0110 <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# --- Python standard library ---
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
# --- Modules/packages in this plugin ---
from constants import *
from utils import *
# -------------------------------------------------------------------------------------------------
# Data structures
# -------------------------------------------------------------------------------------------------
# DTD "http://www.logiqx.com/Dats/datafile.dtd"
def audit_new_rom_logiqx():
return {
'name' : '',
'cloneof' : '',
'year' : '',
'manufacturer' : ''
}
# HyperList doesn't include Plot
def audit_new_rom_HyperList():
return {
'name' : '',
'description' : '',
'cloneof' : '',
'crc' : '',
'manufacturer' : '',
'year' : '',
'genre' : '',
'rating' : '',
'enabled' : ''
}
def audit_new_rom_GameDB():
return {
'name' : '',
'description' : '',
'year' : '',
'rating' : '',
'manufacturer' : '',
'genre' : '',
'player' : '',
'story' : ''
}
def audit_new_rom_AEL_Offline():
return {
'ROM' : '',
'title' : '',
'year' : '',
'genre' : '',
'publisher' : '',
'developer' : '',
'rating' : '',
'nplayers' : '',
'score' : '',
'plot' : ''
}
def audit_new_LB_game():
return {
'Name' : '',
'ReleaseYear' : '',
'Overview' : '',
'MaxPlayers' : '',
'Cooperative' : '',
'VideoURL' : '',
'DatabaseID' : '',
'CommunityRating' : '',
'Platform' : '',
'Genres' : '',
'Publisher' : '',
'Developer' : '',
'ReleaseDate' : '',
'ESRB' : '',
'WikipediaURL' : '',
'DOS' : '',
'StartupFile' : '',
'StartupMD5' : '',
'SetupFile' : '',
'SetupMD5' : '',
'StartupParameters' : '',
}
def audit_new_LB_platform():
return {
'Name' : '',
'Emulated' : '',
'ReleaseDate' : '',
'Developer' : '',
'Manufacturer' : '',
'Cpu' : '',
'Memory' : '',
'Graphics' : '',
'Sound' : '',
'Display' : '',
'Media' : '',
'MaxControllers' : '',
'Notes' : '',
'Category' : '',
'UseMameFiles' : '',
}
def audit_new_LB_gameImage():
return {
'DatabaseID' : '',
'FileName' : '',
'Type' : '',
'CRC32' : '',
'Region' : '',
}
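# Parses a LaunchBox Metadata.xml file and fills the three dictionaries in place:
# games keyed by Name, platforms keyed by Name, game images keyed by FileName.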
def audit_load_LB_metadata_XML(filename_FN, games_dic, platforms_dic, gameimages_dic):
if not filename_FN.exists():
        log_error("Cannot load file '{0}'".format(filename_FN.getPath()))
return
# --- Parse using cElementTree ---
log_verb('audit_load_LB_metadata_XML() Loading "{0}"'.format(filename_FN.getPath()))
try:
xml_tree = ET.parse(filename_FN.getPath())
except ET.ParseError, e:
log_error('(ParseError) Exception parsing XML categories.xml')
log_error('(ParseError) {0}'.format(str(e)))
return
xml_root = xml_tree.getroot()
for xml_element in xml_root:
if xml_element.tag == 'Game':
game = audit_new_LB_game()
for xml_child in xml_element:
xml_tag = xml_child.tag
xml_text = xml_child.text if xml_child.text is not None else ''
if xml_tag not in game:
log_info('Unknown <Game> child tag <{0}>'.format(xml_tag))
return
game[xml_tag] = text_unescape_XML(xml_text)
games_dic[game['Name']] = game
elif xml_element.tag == 'Platform':
platform = audit_new_LB_platform()
for xml_child in xml_element:
xml_tag = xml_child.tag
xml_text = xml_child.text if xml_child.text is not None else ''
if xml_tag not in platform:
log_info('Unknown <Platform> child tag <{0}>'.format(xml_tag))
return
platform[xml_tag] = text_unescape_XML(xml_text)
platforms_dic[platform['Name']] = platform
elif xml_element.tag == 'PlatformAlternateName':
pass
elif xml_element.tag == 'Emulator':
pass
elif xml_element.tag == 'EmulatorPlatform':
pass
elif xml_element.tag == 'GameAlternateName':
pass
elif xml_element.tag == 'GameImage':
game_image = audit_new_LB_gameImage()
for xml_child in xml_element:
xml_tag = xml_child.tag
xml_text = xml_child.text if xml_child.text is not None else ''
if xml_tag not in game_image:
log_info('Unknown <GameImage> child tag <{0}>'.format(xml_tag))
return
game_image[xml_tag] = text_unescape_XML(xml_text)
gameimages_dic[game_image['FileName']] = game_image
else:
log_info('Unknwon main tag <{0}>'.format(xml_element.tag))
return
log_verb('audit_load_LB_metadata_XML() Loaded {0} games ({1} bytes)'.format(len(games_dic), sys.getsizeof(games_dic)))
log_verb('audit_load_LB_metadata_XML() Loaded {0} platforms'.format(len(platforms_dic)))
log_verb('audit_load_LB_metadata_XML() Loaded {0} game images'.format(len(gameimages_dic)))
# -------------------------------------------------------------------------------------------------
# Functions
# -------------------------------------------------------------------------------------------------
#
# Loads offline scraper information XML file.
#
def audit_load_OfflineScraper_XML(xml_file):
__debug_xml_parser = False
games = {}
# --- Check that file exists ---
if not os.path.isfile(xml_file):
log_error("Cannot load file '{}'".format(xml_file))
return games
# --- Parse using cElementTree ---
log_debug('audit_load_OfflineScraper_XML() Loading "{}"'.format(xml_file))
try:
xml_tree = ET.parse(xml_file)
    except ET.ParseError as e:
        log_error('(ParseError) Exception parsing XML file')
log_error('(ParseError) {}'.format(str(e)))
return games
xml_root = xml_tree.getroot()
for game_element in xml_root:
if __debug_xml_parser:
log_debug('=== Root child tag "{}" ==='.format(game_element.tag))
if game_element.tag == 'game':
# Default values
game = audit_new_rom_AEL_Offline()
# ROM name is an attribute of <game>
game['ROM'] = game_element.attrib['ROM']
if __debug_xml_parser: log_debug('Game name = "{}"'.format(game['ROM']))
# Parse child tags of category
for game_child in game_element:
# By default read strings
xml_text = game_child.text if game_child.text is not None else ''
xml_text = text_unescape_XML(xml_text)
xml_tag = game_child.tag
if __debug_xml_parser: log_debug('Tag "{}" --> "{}"'.format(xml_tag, xml_text))
game[xml_tag] = xml_text
games[game['ROM']] = game
return games
#
# Loads a No-Intro Parent-Clone XML DAT file. Creates a data structure like
# roms_nointro = {
# 'rom_name_A' : { 'name' : 'rom_name_A', 'cloneof' : '' | 'rom_name_parent},
# 'rom_name_B' : { 'name' : 'rom_name_B', 'cloneof' : '' | 'rom_name_parent},
# }
#
def audit_load_NoIntro_XML_file(xml_FN):
nointro_roms = {}
# --- If file does not exist return empty dictionary ---
if not xml_FN.exists():
        log_error('Does not exist "{0}"'.format(xml_FN.getPath()))
return nointro_roms
# --- Parse using cElementTree ---
log_verb('Loading XML "{0}"'.format(xml_FN.getOriginalPath()))
try:
xml_tree = ET.parse(xml_FN.getPath())
except ET.ParseError as e:
        log_error('(ParseError) Exception parsing XML file')
log_error('(ParseError) {0}'.format(str(e)))
return nointro_roms
except IOError as e:
log_error('(IOError) {0}'.format(str(e)))
return nointro_roms
xml_root = xml_tree.getroot()
for root_element in xml_root:
if root_element.tag == 'game':
nointro_rom = audit_new_rom_logiqx()
rom_name = root_element.attrib['name']
nointro_rom['name'] = rom_name
if 'cloneof' in root_element.attrib:
nointro_rom['cloneof'] = root_element.attrib['cloneof']
nointro_roms[rom_name] = nointro_rom
return nointro_roms
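#
# Illustrative usage (a sketch, not called anywhere in this module; the DAT path below is
# made up for the example). FileName is the same filename wrapper used elsewhere in this file.
#
#   dat_FN = FileName('/path/to/Nintendo - Game Boy (Parent-Clone).dat')
#   nointro_roms = audit_load_NoIntro_XML_file(dat_FN)
#   for rom_name in nointro_roms:
#       if nointro_roms[rom_name]['cloneof']:
#           log_debug('{0} is a clone of {1}'.format(rom_name, nointro_roms[rom_name]['cloneof']))
#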
def audit_load_GameDB_XML(xml_FN):
__debug_xml_parser = 0
games = {}
# --- Check that file exists and load ---
if not xml_FN.exists():
        log_error('Does not exist "{0}"'.format(xml_FN.getPath()))
return games
log_verb('Loading XML "{0}"'.format(xml_FN.getPath()))
try:
xml_tree = ET.parse(xml_FN.getPath())
except ET.ParseError as e:
        log_error('(ParseError) Exception parsing XML file')
log_error('(ParseError) {0}'.format(str(e)))
return games
xml_root = xml_tree.getroot()
for game_element in xml_root:
if __debug_xml_parser:
log_debug('=== Root child tag "{0}" ==='.format(game_element.tag))
if game_element.tag == 'game':
# Default values
game = audit_new_rom_GameDB()
# ROM name is an attribute of <game>
game['name'] = game_element.attrib['name']
if __debug_xml_parser: log_debug('Game name = "{0}"'.format(game['name']))
# Parse child tags of category
for game_child in game_element:
# By default read strings
xml_text = game_child.text if game_child.text is not None else ''
xml_text = text_unescape_XML(xml_text)
xml_tag = game_child.tag
if __debug_xml_parser: log_debug('Tag "{0}" --> "{1}"'.format(xml_tag, xml_text))
game[xml_tag] = xml_text
key = game['name']
games[key] = game
return games
def audit_load_Tempest_INI(file_FN):
games = {}
# Read_status FSM values
# 0 -> Looking for '[game_name]' tag
    # 1 -> Reading field_name=field_value lines
read_status = 0
__debug_INI_parser = False
# --- Check that file exists ---
if not file_FN.exists():
        log_error('Does not exist "{0}"'.format(file_FN.getPath()))
return games
    log_verb('Loading INI "{0}"'.format(file_FN.getPath()))
try:
f = open(file_FN.getPath(), 'rt')
except IOError:
        log_info('audit_load_Tempest_INI() IOError opening "{0}"'.format(file_FN.getPath()))
return {}
for file_line in f:
stripped_line = file_line.strip().decode(errors = 'replace')
if __debug_INI_parser: print('Line "' + stripped_line + '"')
if read_status == 0:
m = re.search(r'\[([^\]]+)\]', stripped_line)
if m:
game = audit_new_rom_GameDB()
game_key = m.group(1)
game['name'] = m.group(1)
if __debug_INI_parser: print('Found game [{0}]'.format(game['name']))
read_status = 1
elif read_status == 1:
line_list = stripped_line.split("=")
if len(line_list) == 1:
read_status = 0
games[game_key] = game
if __debug_INI_parser: print('Added game key "{0}"'.format(game_key))
else:
if __debug_INI_parser: print('Line list -> ' + str(line_list))
field_name = line_list[0]
field_value = line_list[1]
if field_name == 'Publisher': game['manufacturer'] = field_value
elif field_name == 'Developer': game['dev'] = field_value
elif field_name == 'Released': game['year'] = field_value
elif field_name == 'Systems': pass
elif field_name == 'Genre': game['genre'] = field_value
elif field_name == 'Perspective': pass
elif field_name == 'Score': game['score'] = field_value
elif field_name == 'Controls': pass
elif field_name == 'Players': game['player'] = field_value
elif field_name == 'Esrb': game['rating'] = field_value
elif field_name == 'Url': pass
elif field_name == 'Description': game['story'] = field_value
elif field_name == 'Goodname': pass
elif field_name == 'NoIntro': pass
elif field_name == 'Tosec': pass
else:
                    raise NameError('Unknown Tempest INI field "{0}"'.format(field_name))
else:
raise CriticalError('Unknown read_status FSM value')
f.close()
log_info('audit_load_Tempest_INI() Number of games {0}'.format(len(games)))
return games
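#
# Illustrative sketch of the INI layout this parser expects (field names are taken from the
# code above; the game name and values are invented for the example):
#
#   [Some Game Name]
#   Publisher=Example Soft
#   Developer=Example Dev
#   Released=1992
#   Genre=Platform
#   Players=1
#   Esrb=E - Everyone
#   Description=Made-up description text.
#
# Any line without an '=' separator (for example an empty line) closes the current entry,
# stores the game and returns the FSM to looking for the next '[game_name]' header.
#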
def audit_load_HyperList_XML(xml_FN):
__debug_xml_parser = 0
games = {}
# --- Check that file exists and load ---
if not xml_FN.exists():
        log_error('Does not exist "{0}"'.format(xml_FN.getPath()))
return games
log_verb('Loading XML "{0}"'.format(xml_FN.getPath()))
try:
xml_tree = ET.parse(xml_FN.getPath())
except ET.ParseError as e:
        log_error('(ParseError) Exception parsing XML file')
log_error('(ParseError) {0}'.format(str(e)))
return games
except IOError as e:
log_error('(IOError) {0}'.format(str(e)))
return games
xml_root = xml_tree.getroot()
for game_element in xml_root:
if __debug_xml_parser:
log_debug('=== Root child tag "{0}" ==='.format(game_element.tag))
if game_element.tag == 'game':
# Default values
game = audit_new_rom_HyperList()
# ROM name is an attribute of <game>
game['name'] = game_element.attrib['name']
if __debug_xml_parser: log_debug('Game name = "{0}"'.format(game['name']))
# Parse child tags of category
for game_child in game_element:
# By default read strings
xml_text = game_child.text if game_child.text is not None else ''
xml_text = text_unescape_XML(xml_text)
xml_tag = game_child.tag
if __debug_xml_parser: log_debug('Tag "{0}" --> "{1}"'.format(xml_tag, xml_text))
game[xml_tag] = xml_text
key = game['name']
games[key] = game
return games
def audit_make_NoIntro_PClone_dic(nointro_dic):
log_info('Making PClone dictionary ...')
main_pclone_dic = {}
for machine_name in nointro_dic:
machine = nointro_dic[machine_name]
if machine['cloneof']:
parent_name = machine['cloneof']
# >> If parent already in main_pclone_dic then add clone to parent list.
# >> If parent not there, then add parent first and then add clone.
if parent_name not in main_pclone_dic: main_pclone_dic[parent_name] = []
main_pclone_dic[parent_name].append(machine_name)
else:
# >> Machine is a parent. Add to main_pclone_dic if not already there.
if machine_name not in main_pclone_dic: main_pclone_dic[machine_name] = []
return main_pclone_dic
def audit_make_NoIntro_Parents_dic(nointro_dic):
log_info('Making Parents dictionary ...')
main_clone_to_parent_dic = {}
for machine_name in nointro_dic:
machine = nointro_dic[machine_name]
if machine['cloneof']:
parent_name = machine['cloneof']
main_clone_to_parent_dic[machine_name] = parent_name
return main_clone_to_parent_dic
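#
# Worked example for the two helpers above (hypothetical ROM names):
#
#   nointro_dic = {
#       'Game (USA)'         : {'name' : 'Game (USA)',         'cloneof' : ''},
#       'Game (Europe)'      : {'name' : 'Game (Europe)',      'cloneof' : 'Game (USA)'},
#       'Game (USA) (Rev 1)' : {'name' : 'Game (USA) (Rev 1)', 'cloneof' : 'Game (USA)'},
#   }
#   audit_make_NoIntro_PClone_dic(nointro_dic)  --> {'Game (USA)' : ['Game (Europe)', 'Game (USA) (Rev 1)']}
#   audit_make_NoIntro_Parents_dic(nointro_dic) --> {'Game (Europe)' : 'Game (USA)', 'Game (USA) (Rev 1)' : 'Game (USA)'}
#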
# -------------------------------------------------------------------------------------------------
# No-Intro/Redump audit
# -------------------------------------------------------------------------------------------------
#
# Creates and returns Parent/Clone MD5 index dictionary.
# This dictionary will be saved in the database file roms_base_noext_PClone_index.json.
#
# unknown_ROMs_are_parents = True
# roms_pclone_index_by_id = {
# 'parent_id_1' : ['clone_id_1', 'clone_id_2', 'clone_id_3'],
# 'parent_id_2' : ['clone_id_1', 'clone_id_2', 'clone_id_3'],
# ... ,
# 'unknown_rom_id_1' : [], # Unknown ROMs never have clones
# 'unknown_rom_id_2' : [],
# ...
# }
#
# unknown_ROMs_are_parents = False
# roms_pclone_index_by_id = {
# 'parent_id_1' : ['clone_id_1', 'clone_id_2', 'clone_id_3'],
# 'parent_id_2' : ['clone_id_1', 'clone_id_2', 'clone_id_3'],
# ... ,
# UNKNOWN_ROMS_PARENT_ID : ['unknown_id_1', 'unknown_id_2', 'unknown_id_3']
# }
#
def audit_generate_DAT_PClone_index(roms, roms_nointro, unknown_ROMs_are_parents):
roms_pclone_index_by_id = {}
# --- Create a dictionary to convert ROMbase_noext names into IDs ---
names_to_ids_dic = {}
for rom_id in roms:
rom = roms[rom_id]
ROMFileName = FileName(rom['filename'])
rom_name = ROMFileName.getBase_noext()
# log_debug('{0} --> {1}'.format(rom_name, rom_id))
# log_debug('{0}'.format(rom))
names_to_ids_dic[rom_name] = rom_id
# --- Build PClone dictionary using ROM base_noext names ---
for rom_id in roms:
rom = roms[rom_id]
ROMFileName = FileName(rom['filename'])
rom_nointro_name = ROMFileName.getBase_noext()
# log_debug('rom_id {0}'.format(rom_id))
# log_debug(' nointro_status "{0}"'.format(rom['nointro_status']))
# log_debug(' filename "{0}"'.format(rom['filename']))
# log_debug(' ROM_base_noext "{0}"'.format(ROMFileName.getBase_noext()))
# log_debug(' rom_nointro_name "{0}"'.format(rom_nointro_name))
if rom['nointro_status'] == AUDIT_STATUS_UNKNOWN:
if unknown_ROMs_are_parents:
# Unknown ROMs are parents
if rom_id not in roms_pclone_index_by_id:
roms_pclone_index_by_id[rom_id] = []
else:
# Unknown ROMs are clones
                # Also, if the virtual parent ROM of all Unknown clones does not exist yet, create it
                if UNKNOWN_ROMS_PARENT_ID not in roms_pclone_index_by_id:
                    roms_pclone_index_by_id[UNKNOWN_ROMS_PARENT_ID] = []
                roms_pclone_index_by_id[UNKNOWN_ROMS_PARENT_ID].append(rom_id)
elif rom['nointro_status'] == AUDIT_STATUS_EXTRA:
# Extra ROMs are parents.
if rom_id not in roms_pclone_index_by_id:
roms_pclone_index_by_id[rom_id] = []
else:
nointro_rom = roms_nointro[rom_nointro_name]
# ROM is a parent
if nointro_rom['cloneof'] == '':
if rom_id not in roms_pclone_index_by_id:
roms_pclone_index_by_id[rom_id] = []
# ROM is a clone
else:
parent_name = nointro_rom['cloneof']
parent_id = names_to_ids_dic[parent_name]
clone_id = rom['id']
                if parent_id not in roms_pclone_index_by_id:
                    roms_pclone_index_by_id[parent_id] = []
                roms_pclone_index_by_id[parent_id].append(clone_id)
return roms_pclone_index_by_id
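#
# Illustrative call (a sketch, not executed here). roms is the AEL ROMs dictionary keyed by
# ROM id and roms_nointro is the dictionary returned by audit_load_NoIntro_XML_file():
#
#   roms_pclone_index = audit_generate_DAT_PClone_index(roms, roms_nointro, True)
#   parent_roms = audit_generate_parent_ROMs_dic(roms, roms_pclone_index)
#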
#
# Returns a dictionary with parent ROMs to be stored in database roms_base_noext_parents.json
# If the parent of the Unknown ROMs is detected in the Parent dictionary then create fake
# metadata for it.
#
def audit_generate_parent_ROMs_dic(roms, roms_pclone_index):
p_roms = {}
# --- Build parent ROM dictionary ---
for rom_id in roms_pclone_index:
        # >> roms_pclone_index may contain the fake ROM id. Skip it if so because the fake
# >> ROM is not in roms dictionary (KeyError exception)
if rom_id == UNKNOWN_ROMS_PARENT_ID:
rom = fs_new_rom()
rom['id'] = UNKNOWN_ROMS_PARENT_ID
rom['m_name'] = '[Unknown ROMs]'
rom['m_plot'] = 'Special virtual ROM parent of all Unknown ROMs'
rom['nointro_status'] = NOINTRO_STATUS_NONE
p_roms[UNKNOWN_ROMS_PARENT_ID] = rom
else:
            # >> Make a copy of the dictionary, otherwise the original dictionary in roms will be modified!
# >> Clean parent ROM name tags from ROM Name
p_roms[rom_id] = dict(roms[rom_id])
return p_roms
def audit_generate_filename_PClone_index(roms, roms_nointro, unknown_ROMs_are_parents):
roms_pclone_index_by_id = {}
# --- Create a dictionary 'rom_base_name' : 'romID' ---
rom_ID_bname_dic = {}
for romID in roms:
rom = roms[romID]
base_name = audit_get_ROM_base_name(rom['filename'])
rom_ID_bname_dic[romID] = base_name
# --- Create a parent/clone list based on the baseName of the ROM ---
# parent_bname : [parent_ID, clone_ID_1, clone_ID_2, ...]
pclone_bname_dict = {}
for id in rom_ID_bname_dic:
base_name = rom_ID_bname_dic[id]
        # >> If base_name already exists, add this ROM ID to that group
if base_name in pclone_bname_dict:
pclone_bname_dict[base_name].append(id)
# >> If not, create a new entry
else:
IDs = []
IDs.append(id)
pclone_bname_dict[base_name] = IDs
# --- Build filename-based PClone dictionary ---
    # NOTE To avoid problems with artwork substitution, make sure the list of
    #      clones is sorted, so the output of the program is always the same
    #      for the same input. Otherwise, because dictionary iteration order is
    #      not guaranteed, the order of this list may vary from execution to
    #      execution, and that is bad!
    #      For now clones are sorted alphabetically by ID until something better is coded.
for base_name in pclone_bname_dict:
id_group = pclone_bname_dict[base_name]
parent_id = id_group[0]
clone_list_id = sorted(id_group[1:])
roms_pclone_index_by_id[parent_id] = clone_list_id
return roms_pclone_index_by_id
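#
# Illustrative grouping (hypothetical base names). After audit_get_ROM_base_name() below is
# applied to every ROM filename, ROMs sharing a base name form one group; the first ROM ID
# in each group becomes the parent and the remaining IDs (sorted) become its clones:
#
#   pclone_bname_dict = {'Game' : [parent_ID, clone_ID_1, clone_ID_2], 'Other Game' : [other_ID]}
#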
# -------------------------------------------------------------------------------------------------
# NARS (NARS Advanced ROM Sorting) stuff
# -------------------------------------------------------------------------------------------------
#
# Get baseName from filename (no extension, no tags).
#
def audit_get_ROM_base_name(romFileName):
# >> re.search() returns a MatchObject
    regSearch = re.search(r"[^\(\)]*", romFileName)
if regSearch is None:
raise NameError('audit_get_ROM_base_name() regSearch is None')
regExp_result = regSearch.group()
return regExp_result.strip()
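#
# Example (hypothetical filename): audit_get_ROM_base_name('Sonic (USA) (Rev 1).md') returns
# 'Sonic', because the regular expression matches everything up to the first parenthesis and
# the result is then stripped of trailing whitespace.
#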
# -------------------------------------------------------------------------------------------------
# Retroarch System directory BIOS audit
# -------------------------------------------------------------------------------------------------
# Ordered as they show in the BIOS check report.
Retro_core_dic = {
'atari800' : 'Atari 8-bit computer systems and 5200 (Atari800)',
'prosystem' : 'Atari 7800 (ProSystem)',
'mednafen_lynx' : 'Atari Lynx (Beetle Handy)',
'handy' : 'Atari Lynx (Handy)',
'hatari' : 'Atari ST/STE/TT/Falcon (Hatari)',
'o2em' : 'Odyssey2 / Videopac+ (O2EM)',
'fmsx' : 'MSX (fMSX)',
'mednafen_pce_fast' : 'PC Engine/PCE-CD (Beetle PCE FAST)',
'mednafen_supergrafx' : 'PC Engine SuperGrafx (Beetle SGX)',
'mednafen_pcfx' : 'PC-FX (Beetle PC-FX)',
'fceumm' : 'NES / Famicom (FCEUmm)',
'nestopia' : 'NES / Famicom (Nestopia UE)',
'gambatte' : 'Game Boy / Game Boy Color (Gambatte)',
'gpsp' : 'Game Boy Advance (gpSP)',
'mednafen_gba' : 'Game Boy Advance (Beetle GBA)',
'mgba' : 'Game Boy Advance (mGBA)',
'tempgba' : 'Game Boy Advance (TempGBA)',
'vba_next' : 'Game Boy Advance (VBA Next)',
'dolphin' : 'GameCube / Wii (Dolphin)',
'parallel_n64' : 'Nintendo 64 (ParaLLEl N64)',
'pokemini' : 'Pokémon Mini (PokeMini)',
'bsnes_accuracy' : 'SNES / Super Famicom (bsnes Accuracy)',
'bsnes_balanced' : 'SNES / Super Famicom (bsnes Balanced)',
'bsnes_performance' : 'SNES / Super Famicom (bsnes Performance)',
'bsnes_mercury_accuracy' : 'SNES / Super Famicom (bsnes-mercury Accuracy)',
'bsnes_mercury_balanced' : 'SNES / Super Famicom (bsnes-mercury Balanced)',
'bsnes_mercury_performance' : 'SNES / Super Famicom (bsnes-mercury Performance)',
'reicast' : 'Sega Dreamcast (Reicast)',
'redream' : 'Sega Dreamcast (Redream)',
'genesis_plus_gx' : 'Sega MS/GG/MD/CD (Genesis Plus GX)',
'picodrive' : 'Sega MS/MD/CD/32X (PicoDrive)',
'mednafen_saturn' : 'Sega Saturn (Beetle Saturn)',
'yabause' : 'Sega Saturn (Yabause)',
'px68k' : 'Sharp X68000 (Portable SHARP X68000 Emulator)',
'mednafen_psx' : 'PlayStation (Beetle PSX)',
'mednafen_psx_hw' : 'PlayStation (Beetle PSX HW)',
'pcsx_rearmed' : 'PlayStation (PCSX ReARMed)',
'pcsx1' : 'PlayStation (PCSX1)',
'ppsspp' : 'PSP (PPSSPP)',
'psp1' : 'psp1',
'4do' : '3DO (4DO)',
}
# See https://github.com/libretro/libretro-database/blob/master/dat/BIOS.dat
# See https://github.com/libretro/libretro-database/blob/master/dat/BIOS%20-%20Non-Merged.dat
Libretro_BIOS_list = [
# --- Atari 5200 ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/atari800_libretro.info
{'filename' : '5200.rom', 'size' : 2048, 'md5': '281f20ea4320404ec820fb7ec0693b38',
'mandatory' : True, 'cores' : ['atari800']},
# --- Atari 7800 ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/prosystem_libretro.info
{'filename' : '7800 BIOS (E).rom', 'size' : 16384, 'md5': '397bb566584be7b9764e7a68974c4263',
'mandatory' : True, 'cores' : ['prosystem']},
{'filename' : '7800 BIOS (U).rom', 'size' : 4096, 'md5': '0763f1ffb006ddbe32e52d497ee848ae',
'mandatory' : True, 'cores' : ['prosystem']},
# --- Atari Lynx ---
{'filename' : 'lynxboot.img', 'size' : 512, 'md5': 'fcd403db69f54290b51035d82f835e7b',
'mandatory' : False, 'cores' : ['mednafen_lynx', 'handy']},
# --- Atari ST ---
{'filename' : 'tos.img', 'size' : -1, 'md5': 'c1c57ce48e8ee4135885cee9e63a68a2',
'mandatory' : True, 'cores' : ['hatari']},
# --- Id Software - Doom ---
# --- Magnavox - Odyssey2 ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/o2em_libretro.info
{'filename' : 'o2rom.bin', 'size' : 1024, 'md5': '562d5ebf9e030a40d6fabfc2f33139fd',
'mandatory' : True, 'cores' : ['o2em']},
# --- Microsoft - MSX ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/fmsx_libretro.info
{'filename' : 'MSX.ROM', 'size' : 32768, 'md5': 'aa95aea2563cd5ec0a0919b44cc17d47',
'mandatory' : True, 'cores' : ['fmsx']},
{'filename' : 'MSX2.ROM', 'size' : 32768, 'md5': 'ec3a01c91f24fbddcbcab0ad301bc9ef',
'mandatory' : True, 'cores' : ['fmsx']},
{'filename' : 'MSX2EXT.ROM', 'size' : 16384, 'md5': '2183c2aff17cf4297bdb496de78c2e8a',
'mandatory' : True, 'cores' : ['fmsx']},
{'filename' : 'MSX2P.ROM', 'size' : 32768, 'md5': '6d8c0ca64e726c82a4b726e9b01cdf1e',
'mandatory' : True, 'cores' : ['fmsx']},
{'filename' : 'MSX2PEXT.ROM', 'size' : 16384, 'md5': '7c8243c71d8f143b2531f01afa6a05dc',
'mandatory' : True, 'cores' : ['fmsx']},
# --- NEC - PC Engine and Supergrafx ---
{'filename' : 'syscard3.pce', 'size' : 262144, 'md5': '38179df8f4ac870017db21ebcbf53114',
'mandatory' : True, 'cores' : ['mednafen_pce_fast', 'mednafen_supergrafx']},
{'filename' : 'syscard2.pce', 'size' : -1, 'md5': '0',
'mandatory' : False, 'cores' : ['mednafen_pce_fast', 'mednafen_supergrafx']},
{'filename' : 'syscard1.pce', 'size' : -1, 'md5': '0',
'mandatory' : False, 'cores' : ['mednafen_pce_fast', 'mednafen_supergrafx']},
{'filename' : 'gexpress.pce', 'size' : -1, 'md5': '0',
'mandatory' : False, 'cores' : ['mednafen_pce_fast', 'mednafen_supergrafx']},
# --- NEC - PC-FX ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/mednafen_pcfx_libretro.info
{'filename' : 'pcfx.rom', 'size' : 1048576, 'md5': '08e36edbea28a017f79f8d4f7ff9b6d7',
'mandatory' : True, 'cores' : ['mednafen_pcfx']},
# {'filename' : 'fx-scsi.rom', 'size' : 524288, 'md5': '430e9745f9235c515bc8e652d6ca3004',
# 'mandatory' : True, 'cores' : [ ]},
# {'filename' : 'pcfxbios.bin', 'size' : 1048576, 'md5': '08e36edbea28a017f79f8d4f7ff9b6d7',
# 'mandatory' : True, 'cores' : [ ]},
# {'filename' : 'pcfxv101.bin', 'size' : 1048576, 'md5': 'e2fb7c7220e3a7838c2dd7e401a7f3d8',
# 'mandatory' : True, 'cores' : [ ]},
# {'filename' : 'pcfxga.rom', 'size' : 1048576, 'md5': '5885bc9a64bf80d4530b9b9b978ff587',
# 'mandatory' : True, 'cores' : [ ]},
# --- Nintendo - Famicom Disk System ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/fceumm_libretro.info
# https://github.com/libretro/libretro-super/blob/master/dist/info/nestopia_libretro.info
{'filename' : 'disksys.rom', 'size' : 8192, 'md5': 'ca30b50f880eb660a320674ed365ef7a',
'mandatory' : True, 'cores' : ['fceumm', 'nestopia']},
# --- Nintendo - Gameboy ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/gambatte_libretro.info
{'filename' : 'gb_bios.bin', 'size' : -1, 'md5': '32fbbd84168d3482956eb3c5051637f5',
'mandatory' : False, 'cores' : ['gambatte']},
{'filename' : 'gbc_bios.bin', 'size' : -1, 'md5': 'dbfce9db9deaa2567f6a84fde55f9680',
'mandatory' : False, 'cores' : ['gambatte']},
# --- Nintendo - Game Boy Advance ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/gpsp_libretro.info
# https://github.com/libretro/libretro-super/blob/master/dist/info/mednafen_gba_libretro.info
# https://github.com/libretro/libretro-super/blob/master/dist/info/mgba_libretro.info
# https://github.com/libretro/libretro-super/blob/master/dist/info/tempgba_libretro.info
# https://github.com/libretro/libretro-super/blob/master/dist/info/vba_next_libretro.info
{'filename' : 'gba_bios.bin', 'size' : -1, 'md5': 'a860e8c0b6d573d191e4ec7db1b1e4f6',
'mandatory' : False, 'cores' : ['gpsp', 'mednafen_gba', 'mgba', 'tempgba', 'vba_next']},
# --- Nintendo - Gameboy Color ---
# --- Nintendo - GameCube ---
# Dolphin files must be in a special directory, not in the system directory.
# https://github.com/libretro/libretro-super/blob/master/dist/info/dolphin_libretro.info
{'filename' : 'gc-ntsc-10.bin', 'size' : 2097152 , 'md5': 'fc924a7c879b661abc37cec4f018fdf3',
'mandatory' : True, 'cores' : ['dolphin']},
{'filename' : 'gc-pal-10.bin', 'size' : 2097152 , 'md5': '0cdda509e2da83c85bfe423dd87346cc',
'mandatory' : True, 'cores' : ['dolphin']},
{'filename' : 'gc-pal-12.bin', 'size' : 2097152 , 'md5': 'db92574caab77a7ec99d4605fd6f2450',
'mandatory' : True, 'cores' : ['dolphin']},
{'filename' : 'gc-dvd-20010608.bin', 'size' : 131072 , 'md5': '561532ad496f644897952d2cef5bb431',
'mandatory' : True, 'cores' : ['dolphin']},
{'filename' : 'gc-dvd-20010831.bin', 'size' : 131072 , 'md5': 'b953eb1a8fc9922b3f7051c1cdc451f1',
'mandatory' : True, 'cores' : ['dolphin']},
{'filename' : 'gc-dvd-20020402.bin', 'size' : 131072 , 'md5': '413154dd0e2c824c9b18b807fd03ec4e',
'mandatory' : True, 'cores' : ['dolphin']},
{'filename' : 'gc-dvd-20020823.bin', 'size' : 131072 , 'md5': 'c03f6bbaf644eb9b3ee261dbe199eb42',
'mandatory' : True, 'cores' : ['dolphin']},
# --- Nintendo - Nintendo 64DD ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/parallel_n64_libretro.info
{'filename' : '64DD_IPL.bin', 'size' : 4194304, 'md5': '8d3d9f294b6e174bc7b1d2fd1c727530',
'mandatory' : False, 'cores' : ['parallel_n64']},
# --- Nintendo - Nintendo DS ---
    # >> Could not find these BIOSes in the INFO files
# {'filename' : 'bios7.bin', 'size' : 16384, 'md5': 'df692a80a5b1bc90728bc3dfc76cd948',
# 'mandatory' : True, 'cores' : []},
# {'filename' : 'bios9.bin', 'size' : 4096, 'md5': 'a392174eb3e572fed6447e956bde4b25',
# 'mandatory' : True, 'cores' : []},
# {'filename' : 'firmware.bin', 'size' : 262144, 'md5': 'e45033d9b0fa6b0de071292bba7c9d13',
# 'mandatory' : True, 'cores' : []},
# --- Nintendo - Nintendo Entertainment System ---
# --- Nintendo - Pokemon Mini ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/pokemini_libretro.info
{'filename' : 'bios.min', 'size' : 4096, 'md5': '1e4fb124a3a886865acb574f388c803d',
'mandatory' : True, 'cores' : ['pokemini']},
# --- Nintendo - Super Nintendo Entertainment System ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/bsnes_accuracy_libretro.info
{'filename' : 'dsp1.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp1.program.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp1b.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp1b.program.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp2.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp2.program.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp3.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp3.program.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp4.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'dsp4.program.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'cx4.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'st010.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'st010.program.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'st011.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'st011.program.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'st018.data.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'st018.program.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
{'filename' : 'sgb.boot.rom', 'size' : -1, 'md5': '',
'mandatory' : False, 'cores' : ['bsnes_accuracy', 'bsnes_balanced', 'bsnes_performance',
'bsnes_mercury_accuracy', 'bsnes_mercury_balanced', 'bsnes_mercury_performance']},
# --- Phillips - Videopac+ ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/o2em_libretro.info
{'filename' : 'c52.bin', 'size' : 1024, 'md5': 'f1071cdb0b6b10dde94d3bc8a6146387',
'mandatory' : True, 'cores' : ['o2em']},
{'filename' : 'g7400.bin', 'size' : 1024, 'md5': 'c500ff71236068e0dc0d0603d265ae76',
'mandatory' : True, 'cores' : ['o2em']},
{'filename' : 'jopac.bin', 'size' : 1024, 'md5': '279008e4a0db2dc5f1c048853b033828',
'mandatory' : True, 'cores' : ['o2em']},
# --- Sega - Dreamcast ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/reicast_libretro.info
# https://github.com/libretro/libretro-super/blob/master/dist/info/redream_libretro.info
{'filename' : 'dc/dc_boot.bin', 'size' : 2097152, 'md5': 'e10c53c2f8b90bab96ead2d368858623',
'mandatory' : True, 'cores' : ['reicast', 'redream']},
{'filename' : 'dc/dc_flash.bin', 'size' : 131072, 'md5': '0a93f7940c455905bea6e392dfde92a4',
'mandatory' : True, 'cores' : ['reicast', 'redream']},
# --- Sega - Game Gear ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/genesis_plus_gx_libretro.info
{'filename' : 'bios.gg', 'size' : 1024, 'md5': '672e104c3be3a238301aceffc3b23fd6',
'mandatory' : False, 'cores' : ['genesis_plus_gx']},
# --- Sega - Master System ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/genesis_plus_gx_libretro.info
{'filename' : 'bios_E.sms', 'size' : 8192, 'md5': '840481177270d5642a14ca71ee72844c',
'mandatory' : False, 'cores' : ['genesis_plus_gx']},
{'filename' : 'bios_J.sms', 'size' : 8192, 'md5': '24a519c53f67b00640d0048ef7089105',
'mandatory' : False, 'cores' : ['genesis_plus_gx']},
{'filename' : 'bios_U.sms', 'size' : 8192, 'md5': '840481177270d5642a14ca71ee72844c',
'mandatory' : False, 'cores' : ['genesis_plus_gx']},
# --- Sega - Mega Drive - Genesis ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/genesis_plus_gx_libretro.info
{'filename' : 'areplay.bin', 'size' : 32768, 'md5': 'a0028b3043f9d59ceeb03da5b073b30d',
'mandatory' : False, 'cores' : ['genesis_plus_gx']},
# https://github.com/libretro/libretro-super/blob/master/dist/info/genesis_plus_gx_libretro.info
# https://github.com/libretro/libretro-super/blob/master/dist/info/picodrive_libretro.info
{'filename' : 'bios_CD_E.bin', 'size' : 131072, 'md5': 'e66fa1dc5820d254611fdcdba0662372',
'mandatory' : True, 'cores' : ['genesis_plus_gx', 'picodrive']},
{'filename' : 'bios_CD_U.bin', 'size' : 131072, 'md5': '2efd74e3232ff260e371b99f84024f7f',
'mandatory' : True, 'cores' : ['genesis_plus_gx', 'picodrive']},
{'filename' : 'bios_CD_J.bin', 'size' : 131072, 'md5': '278a9397d192149e84e820ac621a8edd',
'mandatory' : True, 'cores' : ['genesis_plus_gx', 'picodrive']},
# https://github.com/libretro/libretro-super/blob/master/dist/info/genesis_plus_gx_libretro.info
{'filename' : 'ggenie.bin', 'size' : 32768, 'md5': 'b5d5ff1147036b06944b4d2cac2dd1e1',
'mandatory' : False, 'cores' : ['genesis_plus_gx']},
{'filename' : 'sk.bin', 'size' : 2097152, 'md5': '4ea493ea4e9f6c9ebfccbdb15110367e',
'mandatory' : False, 'cores' : ['genesis_plus_gx']},
{'filename' : 'sk2chip.bin', 'size' : 262144, 'md5': 'b4e76e416b887f4e7413ba76fa735f16',
'mandatory' : False, 'cores' : ['genesis_plus_gx']},
# --- Sega Saturn ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/mednafen_saturn_libretro.info
{'filename' : 'sega_101.bin', 'size' : 524288, 'md5': '85ec9ca47d8f6807718151cbcca8b964',
'mandatory' : True, 'cores' : ['mednafen_saturn']},
{'filename' : 'mpr-17933.bin', 'size' : 524288, 'md5': '3240872c70984b6cbfda1586cab68dbe',
'mandatory' : True, 'cores' : ['mednafen_saturn']},
{'filename' : 'mpr-18811-mx.ic1', 'size' : 2097152, 'md5': '255113ba943c92a54facd25a10fd780c',
'mandatory' : True, 'cores' : ['mednafen_saturn']},
{'filename' : 'mpr-19367-mx.ic1', 'size' : 2097152, 'md5': '1cd19988d1d72a3e7caa0b73234c96b4',
'mandatory' : True, 'cores' : ['mednafen_saturn']},
# https://github.com/libretro/libretro-super/blob/master/dist/info/yabause_libretro.info
{'filename' : 'saturn_bios.bin', 'size' : 524288, 'md5': 'af5828fdff51384f99b3c4926be27762',
'mandatory' : False, 'cores' : ['yabause']},
# --- Sharp - X68000 ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/px68k_libretro.info
{'filename' : 'keropi/iplrom.dat', 'size' : 131072, 'md5': '7fd4caabac1d9169e289f0f7bbf71d8e',
'mandatory' : True, 'cores' : ['px68k']},
{'filename' : 'keropi/cgrom.dat', 'size' : 786432, 'md5': 'cb0a5cfcf7247a7eab74bb2716260269',
'mandatory' : True, 'cores' : ['px68k']},
{'filename' : 'keropi/iplrom30.dat', 'size' : -1, 'md5': '0',
'mandatory' : False, 'cores' : ['px68k']},
# --- Sony PlayStation ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/pcsx_rearmed_libretro.info
# https://github.com/libretro/libretro-super/blob/master/dist/info/pcsx1_libretro.info
{'filename' : 'scph5500.bin', 'size' : 524288, 'md5': '8dd7d5296a650fac7319bce665a6a53c',
'mandatory' : True, 'cores' : ['mednafen_psx', 'mednafen_psx_hw', 'pcsx_rearmed', 'pcsx1']},
{'filename' : 'scph5501.bin', 'size' : 524288, 'md5': '490f666e1afb15b7362b406ed1cea246',
'mandatory' : True, 'cores' : ['mednafen_psx', 'mednafen_psx_hw', 'pcsx_rearmed']},
{'filename' : 'scph5502.bin', 'size' : 524288, 'md5': '32736f17079d0b2b7024407c39bd3050',
'mandatory' : True, 'cores' : ['mednafen_psx', 'mednafen_psx_hw', 'pcsx_rearmed']},
# --- Sony PlayStation Portable ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/ppsspp_libretro.info
{'filename' : 'PPSSPP/ppge_atlas.zim', 'size' : 784968, 'md5': 'a93fc411c1ce7d001a2a812643c70085',
'mandatory' : True, 'cores' : ['ppsspp', 'psp1']},
# --- The 3DO Company - 3DO ---
# https://github.com/libretro/libretro-super/blob/master/dist/info/4do_libretro.info
{'filename' : 'panafz10.bin', 'size' : 1048576, 'md5': '51f2f43ae2f3508a14d9f56597e2d3ce',
'mandatory' : True, 'cores' : ['4do']},
# {'filename' : 'goldstar.bin', 'size' : 1048576, 'md5': '8639fd5e549bd6238cfee79e3e749114',
# 'mandatory' : True, 'cores' : []},
# {'filename' : 'panafz1.bin', 'size' : 1048576, 'md5': 'f47264dd47fe30f73ab3c010015c155b',
# 'mandatory' : True, 'cores' : []},
# {'filename' : 'sanyotry.bin', 'size' : 1048576, 'md5': '35fa1a1ebaaeea286dc5cd15487c13ea',
# 'mandatory' : True, 'cores' : []},
]
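#
# Minimal illustrative helper (an addition for exposition only, not used by the code above).
# It reports which files from Libretro_BIOS_list exist in a given Retroarch system directory.
# The function name and report format are assumptions made for this sketch.
#
def audit_check_retro_BIOS_sketch(system_dir):
    report_list = []
    for bios in Libretro_BIOS_list:
        bios_path = os.path.join(system_dir, bios['filename'])
        status = 'Found' if os.path.isfile(bios_path) else 'Missing'
        report_list.append('{0:7s} {1}'.format(status, bios['filename']))
    return report_list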
|
gpl-2.0
| 6,399,298,034,993,082,000 | 47.286911 | 122 | 0.563777 | false | 3.043427 | false | false | false |
glaubitz/fs-uae-debian
|
launcher/amitools/fs/Repacker.py
|
1
|
2618
|
from __future__ import absolute_import
from __future__ import print_function
from .ADFSVolume import ADFSVolume
from amitools.fs.blkdev.BlkDevFactory import BlkDevFactory
class Repacker:
def __init__(self, in_image_file, in_options=None):
self.in_image_file = in_image_file
self.in_options = in_options
self.in_blkdev = None
self.out_blkdev = None
self.in_volume = None
self.out_volume = None
def create_in_blkdev(self):
f = BlkDevFactory()
self.in_blkdev = f.open(self.in_image_file, read_only=True, options=self.in_options)
return self.in_blkdev
def create_in_volume(self):
    if self.in_blkdev is None:
return None
self.in_volume = ADFSVolume(self.in_blkdev)
self.in_volume.open()
return self.in_volume
def create_in(self):
    if self.create_in_blkdev() is None:
      return False
    if self.create_in_volume() is None:
return False
return True
def create_out_blkdev(self, image_file, force=True, options=None):
    if self.in_blkdev is None:
return None
# clone geo from input
    if options is None:
options = self.in_blkdev.get_chs_dict()
f = BlkDevFactory()
self.out_blkdev = f.create(image_file, force=force, options=options)
return self.out_blkdev
def create_out_volume(self, blkdev=None):
    if blkdev is not None:
      self.out_blkdev = blkdev
    if self.out_blkdev is None:
      return None
    if self.in_volume is None:
return None
# clone input volume
iv = self.in_volume
name = iv.get_volume_name()
dos_type = iv.get_dos_type()
meta_info = iv.get_meta_info()
boot_code = iv.get_boot_code()
self.out_volume = ADFSVolume(self.out_blkdev)
self.out_volume.create(name, meta_info=meta_info, dos_type=dos_type, boot_code=boot_code)
return self.out_volume
def repack(self):
self.repack_node_dir(self.in_volume.get_root_dir(), self.out_volume.get_root_dir())
def repack_node_dir(self, in_root, out_root):
entries = in_root.get_entries()
for e in entries:
self.repack_node(e, out_root)
def repack_node(self, in_node, out_dir):
name = in_node.get_file_name().get_name()
meta_info = in_node.get_meta_info()
# sub dir
if in_node.is_dir():
sub_dir = out_dir.create_dir(name, meta_info, False)
for child in in_node.get_entries():
self.repack_node(child, sub_dir)
sub_dir.flush()
# file
elif in_node.is_file():
data = in_node.get_file_data()
out_file = out_dir.create_file(name, data, meta_info, False)
out_file.flush()
in_node.flush()
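# Illustrative usage sketch (not part of the original module; the image file names below are
# made up for the example):
#
#   rp = Repacker("source.adf")
#   if rp.create_in():
#     rp.create_out_blkdev("repacked.adf")
#     rp.create_out_volume()
#     rp.repack()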
|
gpl-2.0
| 4,303,926,271,777,007,600 | 30.166667 | 93 | 0.643621 | false | 3.04065 | false | false | false |
axeltidemann/self_dot
|
communication.py
|
1
|
1564
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2014 Oeyvind Brandtsegg and Axel Tidemann
#
# This file is part of [self.]
#
# [self.] is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# [self.] is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with [self.]. If not, see <http://www.gnu.org/licenses/>.
'''Very simple communication module for [self.]
@author: Axel Tidemann
@contact: [email protected]
@license: GPL
Talk to [self.] over ØMQ sockets.
'''
import sys
import zmq
# Setup so it can be accessed from processes which don't have a zmq context, i.e. for one-shot messaging.
# Do not use this in contexts where timing is important; instead, create and reuse a proper socket similar to this one.
def send(message, context=None, host='localhost', port=5566):
print 'This send() should only be used in simple circumstances, i.e. not in something that runs in performance-critical code!'
context = context or zmq.Context()
sender = context.socket(zmq.PUSH)
sender.connect('tcp://{}:{}'.format(host, port))
sender.send_json(message)
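# A matching receiver sketch (an illustrative addition, not part of the original module).
# send() connects a PUSH socket to the given port, so a listening process binds a PULL
# socket on the same port and reads JSON messages; defaults mirror send().
def receive(context=None, port=5566):
    context = context or zmq.Context()
    receiver = context.socket(zmq.PULL)
    receiver.bind('tcp://*:{}'.format(port))
    return receiver.recv_json()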
if __name__ == '__main__':
if len(sys.argv) > 1:
send(' '.join(sys.argv[1:]))
|
gpl-3.0
| -1,690,744,494,526,749,700 | 34.545455 | 130 | 0.695652 | false | 3.522523 | false | false | false |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/research_resources_v30.py
|
1
|
4928
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
from orcid_api_v3.models.research_resource_group_v30 import ResearchResourceGroupV30 # noqa: F401,E501
class ResearchResourcesV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_modified_date': 'LastModifiedDateV30',
'group': 'list[ResearchResourceGroupV30]',
'path': 'str'
}
attribute_map = {
'last_modified_date': 'last-modified-date',
'group': 'group',
'path': 'path'
}
def __init__(self, last_modified_date=None, group=None, path=None): # noqa: E501
"""ResearchResourcesV30 - a model defined in Swagger""" # noqa: E501
self._last_modified_date = None
self._group = None
self._path = None
self.discriminator = None
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if group is not None:
self.group = group
if path is not None:
self.path = path
@property
def last_modified_date(self):
"""Gets the last_modified_date of this ResearchResourcesV30. # noqa: E501
:return: The last_modified_date of this ResearchResourcesV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this ResearchResourcesV30.
:param last_modified_date: The last_modified_date of this ResearchResourcesV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def group(self):
"""Gets the group of this ResearchResourcesV30. # noqa: E501
:return: The group of this ResearchResourcesV30. # noqa: E501
:rtype: list[ResearchResourceGroupV30]
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this ResearchResourcesV30.
:param group: The group of this ResearchResourcesV30. # noqa: E501
:type: list[ResearchResourceGroupV30]
"""
self._group = group
@property
def path(self):
"""Gets the path of this ResearchResourcesV30. # noqa: E501
:return: The path of this ResearchResourcesV30. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this ResearchResourcesV30.
:param path: The path of this ResearchResourcesV30. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResearchResourcesV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResearchResourcesV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
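# Illustrative usage of the generated model (not part of the generated code; the path value
# below is invented for the example):
#
#   rr = ResearchResourcesV30(group=[], path='/0000-0001-2345-6789/research-resources')
#   rr.to_dict() --> {'last_modified_date': None, 'group': [], 'path': '/0000-0001-2345-6789/research-resources'}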
|
mit
| 6,569,534,095,213,337,000 | 28.866667 | 119 | 0.581981 | false | 4.022857 | false | false | false |
oldhawaii/oldhawaii-metadata
|
www/oldhawaii_metadata/apps/users/forms.py
|
1
|
6532
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import redirect
from flask import request
from flask import url_for
from flask.ext.wtf import Form as BaseForm
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from wtforms import fields
from wtforms import validators
from .confirmable import user_requires_confirmation
from .models import User
from .utilities import get_message
from .utilities import get_redirect
from .utilities import is_safe_redirect_url
_default_form_field_labels = {
'email': 'Email Address',
'password': 'Password',
'password_confirm': 'Password Confirmation',
'remember_me': 'Remember Me',
}
class ValidatorMixin(object):
def __call__(self, form, field):
if self.message and self.message.isupper():
self.message = get_message(self.message)
return super(ValidatorMixin, self).__call__(form, field)
class EqualTo(ValidatorMixin, validators.EqualTo):
pass
class Required(ValidatorMixin, validators.Required):
pass
class Email(ValidatorMixin, validators.Email):
pass
class Length(ValidatorMixin, validators.Length):
pass
email_required = Required(message='EMAIL_ADDRESS_NOT_PROVIDED')
email_validator = Email(message='EMAIL_ADDRESS_INVALID')
password_required = Required(message='PASSWORD_NOT_PROVIDED')
password_length = Length(min=8, max=128, message='PASSWORD_LENGTH_INVALID')
def valid_user_email(form, field):
form.user = User.query.filter_by(email=field.data).first()
if form.user is None:
raise validators.ValidationError(get_message('USER_DOES_NOT_EXIST'))
def get_form_field_label(form_field_label):
""" Modify here if you want i18n. Probably need to turn defaults into
lazy_gettext calls, etc. """
return _default_form_field_labels.get(form_field_label, form_field_label)
class Form(BaseForm):
def __init__(self, *args, **kwargs):
super(Form, self).__init__(*args, **kwargs)
class NextFormMixin(object):
next = fields.HiddenField()
def __init__(self, *args, **kwargs):
super(NextFormMixin, self).__init__(*args, **kwargs)
if not self.next.data:
self.next.data = get_redirect() or ''
def redirect(self, endpoint, **values):
if is_safe_redirect_url(self.next.data):
return redirect(self.next.data)
redirect_url = get_redirect()
return redirect(redirect_url or url_for(endpoint, **values))
class EmailFormMixin(object):
email = fields.StringField(
get_form_field_label('email'),
validators=[email_required, email_validator])
class PasswordFormMixin(object):
password = fields.PasswordField(
get_form_field_label('password'), validators=[password_required])
class NewPasswordFormMixin(object):
password = fields.PasswordField(
get_form_field_label('password'),
validators=[password_required, password_length])
class PasswordConfirmFormMixin(object):
password_confirm = fields.PasswordField(
get_form_field_label('password_confirm'),
validators=[EqualTo('password',
message='PASSWORD_CONFIRMATION_INVALID')])
class ForgotPasswordForm(Form):
user = None
email = fields.StringField(
get_form_field_label('email'),
validators=[email_required, email_validator, valid_user_email])
def __init__(self, *args, **kwargs):
super(ForgotPasswordForm, self).__init__(*args, **kwargs)
if request.method == 'GET':
self.email.data = request.args.get('email', None)
def validate(self):
if not super(ForgotPasswordForm, self).validate():
return False
if user_requires_confirmation(self.user):
self.email.errors.append(
get_message('EMAIL_ADDRESS_NOT_CONFIRMED'))
return False
return True
class LoginForm(Form,
EmailFormMixin,
PasswordFormMixin,
NextFormMixin):
user = None
remember_me = fields.BooleanField(get_form_field_label('remember_me'))
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
def validate(self):
if not super(LoginForm, self).validate():
return False
try:
self.user = User.query.filter(User.email == self.email.data).one()
except (MultipleResultsFound, NoResultFound):
self.email.errors.append(get_message('PASSWORD_INVALID'))
return False
if self.user is None:
self.email.errors.append(get_message('PASSWORD_INVALID'))
return False
elif not self.user.is_valid_password(self.password.data):
self.email.errors.append(get_message('PASSWORD_INVALID'))
return False
elif user_requires_confirmation(self.user):
self.email.errors.append(
get_message('EMAIL_ADDRESS_NOT_CONFIRMED'))
return False
return True
class RegisterForm(Form,
EmailFormMixin,
PasswordFormMixin,
NewPasswordFormMixin,
PasswordConfirmFormMixin):
user = None
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
def validate(self):
if not super(RegisterForm, self).validate():
return False
user = User.query.filter_by(email=self.email.data).first()
if user:
self.email.errors.append(get_message('EMAIL_ADDRESS_EXISTS'))
return False
return True
class ResetPasswordForm(Form,
NewPasswordFormMixin,
PasswordConfirmFormMixin):
pass
class SendConfirmationForm(Form):
user = None
email = fields.StringField(
get_form_field_label('email'),
validators=[email_required, email_validator, valid_user_email])
def __init__(self, *args, **kwargs):
super(SendConfirmationForm, self).__init__(*args, **kwargs)
if request.method == 'GET':
self.email.data = request.args.get('email', None)
def validate(self):
if not super(SendConfirmationForm, self).validate():
return False
if self.user.confirmed_at is not None:
self.email.errors.append(
get_message('EMAIL_ADDRESSS_ALREADY_CONFIRMED'))
return False
return True
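# Illustrative view-level usage of LoginForm (a sketch only; the endpoint name and the actual
# login call are assumptions, not part of this module):
#
#   form = LoginForm(request.form)
#   if form.validate_on_submit():
#       # form.user was resolved inside validate(); log that user in here.
#       return form.redirect('index')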
# vim: filetype=python
|
mit
| -5,542,365,095,312,385,000 | 28.160714 | 78 | 0.641457 | false | 4.067248 | false | false | false |
metamarcdw/metamarket
|
qtui/send_chanmsg_dialog.py
|
1
|
3520
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'send_chanmsg_dialog.ui'
#
# Created: Wed May 11 01:26:04 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_send_chanmsg_dialog(object):
def setupUi(self, send_chanmsg_dialog):
send_chanmsg_dialog.setObjectName(_fromUtf8("send_chanmsg_dialog"))
send_chanmsg_dialog.resize(400, 300)
self.gridLayout = QtGui.QGridLayout(send_chanmsg_dialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(send_chanmsg_dialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.plainTextEdit = QtGui.QPlainTextEdit(self.groupBox)
self.plainTextEdit.setLineWrapMode(QtGui.QPlainTextEdit.WidgetWidth)
self.plainTextEdit.setObjectName(_fromUtf8("plainTextEdit"))
self.gridLayout_2.addWidget(self.plainTextEdit, 1, 0, 1, 1)
self.subjectLineEdit = QtGui.QLineEdit(self.groupBox)
self.subjectLineEdit.setObjectName(_fromUtf8("subjectLineEdit"))
self.gridLayout_2.addWidget(self.subjectLineEdit, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox)
self.buttonBox = QtGui.QDialogButtonBox(send_chanmsg_dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(True)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(send_chanmsg_dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), send_chanmsg_dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), send_chanmsg_dialog.reject)
QtCore.QMetaObject.connectSlotsByName(send_chanmsg_dialog)
send_chanmsg_dialog.setTabOrder(self.plainTextEdit, self.subjectLineEdit)
send_chanmsg_dialog.setTabOrder(self.subjectLineEdit, self.buttonBox)
def retranslateUi(self, send_chanmsg_dialog):
send_chanmsg_dialog.setWindowTitle(_translate("send_chanmsg_dialog", "Send Channel Message", None))
self.groupBox.setTitle(_translate("send_chanmsg_dialog", "Enter Message:", None))
self.subjectLineEdit.setPlaceholderText(_translate("send_chanmsg_dialog", "Subject", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
send_chanmsg_dialog = QtGui.QDialog()
ui = Ui_send_chanmsg_dialog()
ui.setupUi(send_chanmsg_dialog)
send_chanmsg_dialog.show()
sys.exit(app.exec_())
|
mit
| -8,277,862,675,880,958,000 | 45.315789 | 114 | 0.716761 | false | 3.740701 | false | false | false |
emanlove/robotframework-selenium2library
|
src/SeleniumLibrary/keywords/screenshot.py
|
1
|
10147
|
# Copyright 2008-2011 Nokia Networks
# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.utils import get_link_path
from SeleniumLibrary.base import LibraryComponent, keyword
from SeleniumLibrary.utils.path_formatter import _format_path
DEFAULT_FILENAME_PAGE = "selenium-screenshot-{index}.png"
DEFAULT_FILENAME_ELEMENT = "selenium-element-screenshot-{index}.png"
EMBED = "EMBED"
class ScreenshotKeywords(LibraryComponent):
@keyword
def set_screenshot_directory(self, path: str) -> str:
"""Sets the directory for captured screenshots.
``path`` argument specifies the absolute path to a directory where
the screenshots should be written to. If the directory does not
exist, it will be created. The directory can also be set when
`importing` the library. If it is not configured anywhere,
screenshots are saved to the same directory where Robot Framework's
log file is written.
If ``path`` equals to EMBED (case insensitive) and
`Capture Page Screenshot` or `capture Element Screenshot` keywords
filename argument is not changed from the default value, then
the page or element screenshot is embedded as Base64 image to
the log.html.
The previous value is returned and can be used to restore
the original value later if needed.
Returning the previous value is new in SeleniumLibrary 3.0.
The persist argument was removed in SeleniumLibrary 3.2 and
EMBED is new in SeleniumLibrary 4.2.
"""
if path is None:
path = None
elif path.upper() == EMBED:
path = EMBED
else:
path = os.path.abspath(path)
self._create_directory(path)
previous = self._screenshot_root_directory
self._screenshot_root_directory = path
return previous
@keyword
def capture_page_screenshot(self, filename: str = DEFAULT_FILENAME_PAGE) -> str:
"""Takes a screenshot of the current page and embeds it into a log file.
``filename`` argument specifies the name of the file to write the
screenshot into. The directory where screenshots are saved can be
set when `importing` the library or by using the `Set Screenshot
Directory` keyword. If the directory is not configured, screenshots
are saved to the same directory where Robot Framework's log file is
written.
If ``filename`` equals to EMBED (case insensitive), then screenshot
is embedded as Base64 image to the log.html. In this case file is not
created in the filesystem.
Starting from SeleniumLibrary 1.8, if ``filename`` contains marker
``{index}``, it will be automatically replaced with an unique running
index, preventing files to be overwritten. Indices start from 1,
and how they are represented can be customized using Python's
[https://docs.python.org/3/library/string.html#format-string-syntax|
format string syntax].
An absolute path to the created screenshot file is returned or if
``filename`` equals to EMBED, word `EMBED` is returned.
Support for EMBED is new in SeleniumLibrary 4.2
Examples:
| `Capture Page Screenshot` | |
| `File Should Exist` | ${OUTPUTDIR}/selenium-screenshot-1.png |
| ${path} = | `Capture Page Screenshot` |
| `File Should Exist` | ${OUTPUTDIR}/selenium-screenshot-2.png |
| `File Should Exist` | ${path} |
| `Capture Page Screenshot` | custom_name.png |
| `File Should Exist` | ${OUTPUTDIR}/custom_name.png |
| `Capture Page Screenshot` | custom_with_index_{index}.png |
| `File Should Exist` | ${OUTPUTDIR}/custom_with_index_1.png |
| `Capture Page Screenshot` | formatted_index_{index:03}.png |
| `File Should Exist` | ${OUTPUTDIR}/formatted_index_001.png |
| `Capture Page Screenshot` | EMBED |
| `File Should Not Exist` | EMBED |
"""
if not self.drivers.current:
self.info("Cannot capture screenshot because no browser is open.")
return
if self._decide_embedded(filename):
return self._capture_page_screen_to_log()
return self._capture_page_screenshot_to_file(filename)
def _capture_page_screenshot_to_file(self, filename):
path = self._get_screenshot_path(filename)
self._create_directory(path)
if not self.driver.save_screenshot(path):
raise RuntimeError(f"Failed to save screenshot '{path}'.")
self._embed_to_log_as_file(path, 800)
return path
def _capture_page_screen_to_log(self):
screenshot_as_base64 = self.driver.get_screenshot_as_base64()
self._embed_to_log_as_base64(screenshot_as_base64, 800)
return EMBED
@keyword
def capture_element_screenshot(
self, locator: str, filename: str = DEFAULT_FILENAME_ELEMENT
) -> str:
"""Captures a screenshot from the element identified by ``locator`` and embeds it into log file.
See `Capture Page Screenshot` for details about ``filename`` argument.
See the `Locating elements` section for details about the locator
syntax.
An absolute path to the created element screenshot is returned.
Support for capturing the screenshot from an element has limited support
among browser vendors. Please check the browser vendor driver documentation
does the browser support capturing a screenshot from an element.
New in SeleniumLibrary 3.3. Support for EMBED is new in SeleniumLibrary 4.2.
Examples:
| `Capture Element Screenshot` | id:image_id | |
| `Capture Element Screenshot` | id:image_id | ${OUTPUTDIR}/id_image_id-1.png |
| `Capture Element Screenshot` | id:image_id | EMBED |
"""
if not self.drivers.current:
self.info(
"Cannot capture screenshot from element because no browser is open."
)
return
element = self.find_element(locator, required=True)
if self._decide_embedded(filename):
return self._capture_element_screen_to_log(element)
return self._capture_element_screenshot_to_file(element, filename)
def _capture_element_screenshot_to_file(self, element, filename):
path = self._get_screenshot_path(filename)
self._create_directory(path)
if not element.screenshot(path):
raise RuntimeError(f"Failed to save element screenshot '{path}'.")
self._embed_to_log_as_file(path, 400)
return path
def _capture_element_screen_to_log(self, element):
self._embed_to_log_as_base64(element.screenshot_as_base64, 400)
return EMBED
@property
def _screenshot_root_directory(self):
return self.ctx.screenshot_root_directory
@_screenshot_root_directory.setter
def _screenshot_root_directory(self, value):
self.ctx.screenshot_root_directory = value
def _decide_embedded(self, filename):
filename = filename.lower()
if (
filename == DEFAULT_FILENAME_PAGE
and self._screenshot_root_directory == EMBED
):
return True
if (
filename == DEFAULT_FILENAME_ELEMENT
and self._screenshot_root_directory == EMBED
):
return True
if filename == EMBED.lower():
return True
return False
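# Behaviour sketch added for clarity (not part of the original source):
#   _decide_embedded("EMBED") is True for any casing of EMBED;
#   the default page/element filenames only embed when the screenshot root
#   directory has been set to EMBED; any other filename is written to disk.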
def _get_screenshot_path(self, filename):
if self._screenshot_root_directory != EMBED:
directory = self._screenshot_root_directory or self.log_dir
else:
directory = self.log_dir
filename = filename.replace("/", os.sep)
index = 0
while True:
index += 1
formatted = _format_path(filename, index)
path = os.path.join(directory, formatted)
# filename didn't contain {index} or unique path was found
if formatted == filename or not os.path.exists(path):
return path
def _create_directory(self, path):
target_dir = os.path.dirname(path)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
def _embed_to_log_as_base64(self, screenshot_as_base64, width):
# The base64 image is shown on its own row and thus the previous row is closed on
# purpose. Depending on Robot's log structure like this is a bit risky.
self.info(
'</td></tr><tr><td colspan="3">'
'<img alt="screenshot" class="robot-seleniumlibrary-screenshot" '
f'src="data:image/png;base64,{screenshot_as_base64}" width="{width}px">',
html=True,
)
def _embed_to_log_as_file(self, path, width):
# The image is shown on its own row and thus the previous row is closed on
# purpose. Depending on Robot's log structure like this is a bit risky.
src = get_link_path(path, self.log_dir)
self.info(
'</td></tr><tr><td colspan="3">'
f'<a href="{src}"><img src="{src}" width="{width}px"></a>',
html=True,
)
|
apache-2.0
| -5,964,730,719,708,307,000 | 42.549356 | 104 | 0.625505 | false | 4.277825 | false | false | false |
voltnor/gp438
|
src/ned/proc2.py
|
1
|
5877
|
"""
Processing.
Author: Dave Hale, Colorado School of Mines
Version: 2012.05.20
---
Receiver stations: 954 - 1295 ( 954 <=> 0.000)
Source stations: 1003 - ???? (1003 <=> 7.350)
"""
from imports import *
s1 = Sampling(4001,0.002,0.000) # time sampling
s2 = Sampling(342,0.015,0.000) # receiver sampling (first group at 954)
s3 = Sampling(215,0.015,0.735) # shot sampling (first shot at 1003)
#s3 = Sampling(1,0.015,0.735)
n1,n2,n3 = s1.count,s2.count,s3.count
d1,d2,d3 = s1.delta,s2.delta,s3.delta
f1,f2,f3 = s1.first,s2.first,s3.first
#shotDir = "/data/seis/csm/fc2012/"
#segdDir = "/data/seis/csm/fc2012/segd/test139/"
shotDir = "/data/seis/csm/fc2013/segd/141/"
segdDir = "/gpfc/ckohnke/fc2013/segd/141/"
#############################################################################
def main(args):
#process()
display()
def process():
f = readData(shotDir+"shotsp.dat")
#lowpass(35.0,f)
tpow(f)
balance(f)
#g = copy(f)
#for i3 in range(n3):
# plot(g[i3],title="Shot at "+str(s3.getValue(i3)))
muteAirWave(f)
taperEdges(f)
removeSlowWaves(f)
#muteFirstBreak(f)
#balance(f)
#for i3 in range(n3):
# plot(f[i3],title="Shot at "+str(s3.getValue(i3)))
writeData(f,shotDir+"shotsq.dat")
def display():
f = readData(shotDir+"shotsq.dat")
sf = SimpleFrame()
ip = sf.addImagePanels(f)
ip.setPercentiles(1,99)
#ip.setClips(-2.5,2.5)
def balance(f):
mf = MedianFinder(n1)
for i3 in range(n3):
for i2 in range(n2):
ma = mf.findMedian(abs(f[i3][i2]))
if ma==0.0:
ma = 0.00001
div(f[i3][i2],ma,f[i3][i2])
def taperEdges(f):
t1 = 50
h = fillfloat(1.0,n1,n2)
for i2 in range(n2):
for i1 in range(0,t1+t1):
h[i2][i1] = max(0.0,float(i1-t1)/t1)
for i1 in range(n1-t1-t1,n1):
h[i2][i1] = max(0.0,float(n1-t1-i1)/t1)
for i3 in range(n3):
mul(h,f[i3],f[i3])
def muteAirWave(f):
vel = 0.330 # km/s
lmute = 0.2/d1
nmute = 1+2*lmute
for i3 in range(n3):
for i2 in range(n2):
f32 = f[i3][i2]
offset = s2.getValue(i2)-s3.getValue(i3)
imute = s1.indexOfNearest(abs(offset)/vel)
i1min = max(0,imute-lmute)
i1max = min(n1-1,imute+lmute)
for i1 in range(i1min,i1max+1):
f32[i1] = 0.0
def muteFirstBreak(f):
vel = 4.000 # km/s
kmute = s1.indexOfNearest(0.3)
for i3 in range(n3):
for i2 in range(n2):
f32 = f[i3][i2]
offset = s2.getValue(i2)-s3.getValue(i3)
imute = s1.indexOfNearest(abs(offset)/vel)
for i1 in range(0,kmute+imute):
f32[i1] = 0.0
def muteNearOffsets(f):
lkill = 3
for i3 in range(n3):
i2 = s2.indexOfNearest(s3.getValue(i3))
i2min = i2-lkill
i2max = i2+lkill
for i2 in range(i2min,i2max+1):
#scale = max(0.0,1.0-sin(0.5*PI*(i2-i2min)/lkill))
scale = 0.0
mul(scale,f[i3][i2],f[i3][i2])
"""
refracted shear?
shot 116
321-93: 0.456 s
155-102: 0.795 km
vel = 1.75
"""
def removeSlowWaves(f):
#vgr = 1.1 # ground-roll velocity
vgr = 0.1 # ground-roll velocity
vrs = 2.3 # refracted shear wave?
slopeFilter(1.0/vrs,1.0/vgr,f)
def slopeFilter(pmin,pmax,f):
ci = CubicInterpolator(
CubicInterpolator.Method.LINEAR,4,
[pmin-0.1,pmin,pmax,pmax+0.1],[1,0,0,1])
fft = Fft(s1,s2)
fft.setComplex(False)
fft.setCenter2(True)
fft.setPadding1(200)
fft.setPadding2(100)
sw = fft.getFrequencySampling1()
sk = fft.getFrequencySampling2()
nw,nk = sw.count,sk.count
h = fillfloat(1.0,nw,nk)
for ik in range(nk):
k = sk.getValue(ik)
for iw in range(nw):
w = sw.getValue(iw)
if w!=0.0:
h[ik][iw] = min(1.0,ci.interpolate(abs(k/w)))
h = cmplx(h,zerofloat(nw,nk))
for i3 in range(n3):
g = copy(f[i3])
g = fft.applyForward(g)
cmul(h,g,g)
g = fft.applyInverse(g)
copy(g,f[i3])
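# Note added for clarity (not part of the original script): slopeFilter works in
# the f-k domain, where an event with apparent velocity v maps to slope |k/w| = 1/v.
# The interpolator above is 0 for slopes between pmin and pmax and ramps back to 1
# outside that band, so removeSlowWaves rejects slownesses between 1/vrs and 1/vgr,
# i.e. slow events such as ground roll, while keeping faster arrivals.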
def readData(fileName,bo=ByteOrder.LITTLE_ENDIAN):
f = zerofloat(n1,n2,n3)
ais = ArrayInputStream(fileName,bo)
ais.readFloats(f)
ais.close()
return f
def writeData(f,fileName,bo=ByteOrder.LITTLE_ENDIAN):
n3 = len(f)
print "writing",n3," shot records to",fileName
aos = ArrayOutputStream(fileName,bo)
for i3 in range(n3):
print " writing i3 =",i3
aos.writeFloats(f[i3])
print " closing ..."
aos.close()
print " done"
def tpow(f):
t = rampfloat(f1,d1,0.0,n1,n2) # time
mul(t,t,t) # time squared
for f3 in f:
mul(t,f3,f3)
def gain(f,hw=40.0):
ref = RecursiveExponentialFilter(hw)
for f3 in f:
if max(abs(f3))>0.0:
g = mul(f3,f3)
ref.apply1(g,g)
div(f3,add(0.0001,sqrt(g)),f3)
def lowpass(f3db,f):
bf = ButterworthFilter(f3db*d1,6,ButterworthFilter.Type.LOW_PASS)
bf.apply1ForwardReverse(f,f)
def plot(f,title=None):
print "plot f: min =",min(f),"max =",max(f)
sp = SimplePlot(SimplePlot.Origin.UPPER_LEFT)
#sp.setSize(750,1000)
sp.setSize(900,900)
sp.setVLabel("Time (s)")
if s2.delta==1.0:
sp.setHLabel("Station")
else:
sp.setHLabel("Offset (km)")
sp.setVLimits(0.0,4.0)
if title:
sp.setTitle(title)
pv = sp.addPixels(s1,s2,f)
#pv.setColorModel(ColorMap.BLUE_WHITE_RED)
pv.setPercentiles(1,99)
#pv.setClips(-2.5,2.5)
def plotAmp(f,title=None):
fft = Fft(s1)
sf = fft.getFrequencySampling1()
ff = zerofloat(sf.count,s2.count)
for i2 in range(s2.count):
ff[i2] = cabs(fft.applyForward(f[i2]))
sp = SimplePlot(SimplePlot.Origin.UPPER_LEFT)
#sp.setSize(750,1000)
sp.setSize(900,900)
sp.setVLabel("Frequency (Hz)")
if s2.delta==1.0:
sp.setHLabel("Station")
else:
sp.setHLabel("Offset (km)")
sp.setVLimits(0.0,120.0)
if title:
sp.setTitle(title)
pv = sp.addPixels(sf,s2,ff)
pv.setColorModel(ColorMap.JET)
pv.setPercentiles(1,99)
#pv.setClips(-2.5,2.5)
#############################################################################
class RunMain(Runnable):
def run(self):
main(sys.argv)
SwingUtilities.invokeLater(RunMain())
|
epl-1.0
| -1,206,655,710,301,369,000 | 24.663755 | 77 | 0.614259 | false | 2.32109 | false | false | false |
nacx/kahuna
|
kahuna/session.py
|
1
|
2458
|
#!/usr/bin/env jython
import atexit
import logging
from config import Config
from java.util import Properties
from org.jclouds import ContextBuilder
from org.jclouds.abiquo import AbiquoApiMetadata, AbiquoContext
from org.jclouds.logging.slf4j.config import SLF4JLoggingModule
from org.jclouds.sshj.config import SshjSshClientModule
log = logging.getLogger('kahuna')
class ContextLoader:
""" Sets the context to call Abiquo's API.
This class must be the first one to be instantiated when we want to
start a session with Abiquo's API. Just initialize it and call the
load() method.
"""
def __init__(self, overrides=None):
""" Sets the properties and context builders """
self.__context = None
self.__config = Config()
self.__endpoint = "http://" + self.__config.address + "/api"
if overrides:
log.debug("Overriding default config values")
for property in sorted(overrides.iterkeys()):
setattr(self.__config, property, overrides[property])
def __del__(self):
""" Closes the context before destroying """
if self.__context:
log.debug("Disconnecting from %s" % self.__endpoint)
self.__context.close()
def load(self):
""" Creates and configures the context """
if not self.__context: # Avoid loading the same context twice
props = self._load_config()
log.debug("Connecting to %s as %s" % (self.__endpoint,
self.__config.user))
self.__context = ContextBuilder.newBuilder(AbiquoApiMetadata()) \
.endpoint(self.__endpoint) \
.credentials(self.__config.user, self.__config.password) \
.modules([SshjSshClientModule(), SLF4JLoggingModule()]) \
.overrides(props) \
.buildView(AbiquoContext)
api_version = self.__context.getApiContext() \
.getProviderMetadata().getApiMetadata().getVersion()
log.debug("Using Abiquo version: %s" % api_version)
# Close context automatically when exiting
atexit.register(self.__del__)
return self.__context
def _load_config(self):
""" Returns the default jclouds client configuration """
props = Properties()
[props.put(name, value)
for (name, value) in self.__config.client_config]
return props
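# Usage sketch (added for illustration, not part of the original module; assumes a
# valid kahuna Config providing address, user and password):
#
#   loader = ContextLoader()
#   context = loader.load()                # AbiquoContext; closed automatically at exit
#   api_context = context.getApiContext()  # same call used in load() for the version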
|
mit
| 7,743,021,286,421,032,000 | 37.40625 | 77 | 0.616355 | false | 4.312281 | true | false | false |
JohnVinyard/zounds
|
zounds/spectral/weighting.py
|
1
|
2857
|
import numpy as np
from .frequencyadaptive import FrequencyAdaptive
class FrequencyWeighting(object):
def __init__(self):
super(FrequencyWeighting, self).__init__()
def __numpy_ufunc__(self, *args, **kwargs):
raise NotImplementedError()
def _wdata(self, scale):
return np.ones(len(scale))
def weights(self, other):
"""
Compute weights, given a scale or time-frequency representation
:param other: A time-frequency representation, or a scale
:return: a numpy array of weights
"""
try:
return self._wdata(other)
except AttributeError:
frequency_dim = other.dimensions[-1]
return self._wdata(frequency_dim.scale)
def _get_factors(self, arr):
for i, d in enumerate(arr.dimensions):
try:
weights = self._wdata(d.scale)
expanded = d.weights(weights, arr, i)
return expanded
except AttributeError as e:
pass
raise ValueError('arr must have a frequency dimension')
def __array_ufunc__(self, ufunc, method, *args, **kwargs):
if ufunc == np.multiply or ufunc == np.divide:
if args[0] is self:
first_arg = self._get_factors(args[1])
second_arg = args[1]
else:
first_arg = args[0]
second_arg = self._get_factors(args[0])
return getattr(ufunc, method)(first_arg, second_arg, **kwargs)
else:
return NotImplemented
class AWeighting(FrequencyWeighting):
"""
An A-weighting (https://en.wikipedia.org/wiki/A-weighting) that can be
applied to a frequency axis via multiplication.
Examples:
>>> from zounds import ArrayWithUnits, GeometricScale
>>> from zounds import FrequencyDimension, AWeighting
>>> import numpy as np
>>> scale = GeometricScale(20, 20000, 0.05, 10)
>>> raw = np.ones(len(scale))
>>> arr = ArrayWithUnits(raw, [FrequencyDimension(scale)])
>>> arr * AWeighting()
ArrayWithUnits([ 1. , 18.3172567 , 31.19918106, 40.54760374,
47.15389876, 51.1554151 , 52.59655479, 52.24516649,
49.39906912, 42.05409205])
"""
def __init__(self):
super(AWeighting, self).__init__()
def _wdata(self, scale):
center_frequencies = np.array(list(scale.center_frequencies)) ** 2
a = (12200 ** 2) * (center_frequencies ** 2)
b = center_frequencies + (20.6 ** 2)
c = center_frequencies + (107.7 ** 2)
d = center_frequencies + (737.9 ** 2)
e = center_frequencies + (12200 ** 2)
f = a / (b * np.sqrt(c * d) * e)
result = 2.0 + (20 * np.log10(f))
return 1 + (result - np.min(result))
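# Note added for clarity (not in the original file): _wdata implements the standard
# A-weighting response
#   R_A(f) = 12200^2 f^4 / ((f^2 + 20.6^2) sqrt((f^2 + 107.7^2)(f^2 + 737.9^2)) (f^2 + 12200^2))
#   A(f)   = 2.00 + 20 log10(R_A(f))   [dB]
# and then shifts the curve so its minimum maps to 1, giving strictly positive
# multiplicative weights for the frequency axis.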
|
mit
| -8,583,206,622,282,662,000 | 34.271605 | 79 | 0.563878 | false | 3.804261 | false | false | false |
jirivrany/riskflow123d-post
|
flowIni/transport.py
|
1
|
15980
|
#!/opt/python/bin/python
'''
@author: Jiri Vrany
A module for handling flow123d transport output.
Parses the transport_out pos file, takes only the non-zero concentration values
and saves them to a JSON file; it also saves the list of times (even if all
concentrations at a given time were zero).
'''
from multiprocessing import Process, Queue, cpu_count
from iniparse import INIConfig
import os
import time
import getopt
import sys
import shutil
import flow
FNAME_TIME = 'times'
FNAME_ELEMS = 'elements_concentration'
FNAME_SUMA = 'conc_suma'
FNAME_EXT = {'json':'json', 'pickle':'pck'}
def worker(input_queue, done_queue, substances=False):
'''
Worker process - takes data from the input queue, saves results to disk
and puts the computation time on the output queue
:param: input_queue / multiprocessing Queue
:param: done_queue / multiprocessing Queue
'''
for reseni in iter(input_queue.get, 'STOP'):
start_time = time.time()
#grabs host from queue
if substances:
work_on_multiple_substances(reseni)
else:
work_on_single_substance(reseni)
done_queue.put(time.time() - start_time)
def read_transport(fname, suma=False, substances=False):
"""
Read a Flow .pos file.
@param: suma - set True if sum of concentration has to be computed too
"""
try:
with open(fname, "r") as mshfile:
data = mshfile.readlines()
except IOError:
print 'Error - failed to open solution pos file %s ' % fname
return False
else:
#in result times, elements, elems_suma
if substances:
result = parse_multiple_substances(data, suma)
else:
result = parse_single_substances(data, suma)
if suma:
return result[0], result[1], result[2]
else:
return result[0], result[1]
def parse_single_substances(data_lines, suma=False):
'''
parses transport data for the classic task (only one substance)
'''
elements = {}
times = []
elems_suma = {}
readmode = 0
curent_time = 0
for line in data_lines:
line = line.strip()
if line.startswith('$'):
if line == '$ElementData':
readmode = 1
counter = 0
else:
readmode = 0
elif readmode:
if counter < 9:
counter += 1
columns = line.split()
if len(columns) > 1 and counter > 7:
key = int(columns[0])
val = float(columns[1])
if val > 0:
if elements.has_key(key):
elements[key][curent_time] = val
if suma:
elems_suma[key] += val
else:
elements[key] = {curent_time:val}
if suma:
elems_suma[key] = val
elif len(columns) == 1 and counter == 4:
curent_time = float(columns[0])
times.append(curent_time)
if suma:
return times, elements, elems_suma
else:
return times, elements
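# Shape of the returned data (illustrative note, not part of the original module):
#   times      -> list of every simulation time found, e.g. [0.5, 1.0, 1.5, ...]
#   elements   -> {element_id: {time: concentration}}  with zero values skipped
#   elems_suma -> {element_id: sum of its concentrations over time}  (when suma=True)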
def parse_multiple_substances(data_lines, suma=False):
'''
parses transport data for a multiple-substances task;
at each simulation time there is one result per substance
'''
all_subs = {}
times = set()
all_sumas = {}
readmode = 0
current_time = 0
current_sub = ''
for line in data_lines:
line = line.strip()
if line.startswith('$'):
if line == '$ElementData':
readmode = 1
counter = 0
else:
readmode = 0
elif readmode:
if counter < 9:
counter += 1
columns = line.split()
if len(columns) > 1 and counter > 7:
key = int(columns[0])
val = float(columns[1])
if val > 0:
if all_subs[current_sub].has_key(key):
all_subs[current_sub][key][current_time] = val
if suma:
all_sumas[current_sub][key] += val
else:
all_subs[current_sub][key] = {current_time:val}
if suma:
all_sumas[current_sub][key] = val
elif len(columns) == 1 and counter == 4:
#4th row after element is simulation time
current_time = float(columns[0])
times.add(current_time)
elif len(columns) == 1 and counter == 2:
#2nd row after the element is the substance name
current_sub = columns[0][1:-1]
if current_sub not in all_subs:
all_subs[current_sub] = {}
if suma and current_sub not in all_sumas:
all_sumas[current_sub] = {}
times = sorted(times)
if suma:
return times, all_subs, all_sumas
else:
return times, all_subs
def parse_task_dirs(dirname, search_for='ini', substance_name=False):
'''
dispatcher for parsing function
'''
if substance_name:
return _parse_for_substances(dirname, search_for, substance_name)
else:
return _parse_for_simple(dirname, search_for)
def _parse_for_simple(dirname, search_for):
'''
walk through dirname -r
find file of search_for type file
'''
inifiles = set()
for root, dirs, files in os.walk(dirname):
#no subdirs means a basic problem, so we can search here;
#for monte and sensitivity we need only the subdirs with tasks
if len(dirs) == 0 or root != dirname:
for fname in files:
if fname.lower().endswith(search_for):
found = os.path.join(root, fname)
inifiles.add(found)
elif fname == search_for:
found = os.path.join(root, fname)  # 'dirs' is a list, so join root and fname directly
inifiles.add(found)
return inifiles
def _parse_for_substances(dirname, search_for, substance_name):
'''
walk through dirname -r
find file of search_for type file
and look only to substances dir
'''
inifiles = set()
for root, dirs, files in os.walk(dirname):
#no subdirs means a basic problem, so we can search here;
#for monte and sensitivity we need only the subdirs with tasks
if len(dirs) == 0 or root != dirname:
for fname in files:
nstr = '{0}{1}{0}'.format(os.sep, substance_name)
if fname.lower().endswith(search_for):
found = os.path.join(root, fname)
if nstr in found:
inifiles.add(found)
elif fname == search_for:
found = os.path.join(root, fname)  # 'dirs' is a list, so join root and fname directly
if nstr in found:
inifiles.add(found)
return inifiles
def get_name_from_ini_file(ininame):
'''
Quickly open the ini file and find the filename of the solution
'''
try:
file_handler = open(ininame,'r')
except IOError:
print 'failed to open %s' % ininame
else:
pars = INIConfig(file_handler)
return pars['Transport']['Transport_out']
def create_ini_file_for_substance(ininame, substance):
'''
copy inifile to subfolder
'''
dir_name, file_name = os.path.split(ininame)
dir_name = os.path.join(dir_name, substance)
file_name = substance + '_' + file_name
if not os.path.exists(dir_name):
os.makedirs(dir_name)
new_file_name = os.path.join(dir_name, file_name)
shutil.copy2(ininame, new_file_name)
flow.change_paths_in_file(new_file_name, '..')
def get_result_files(dirname, substances=False):
'''
Search dirname for solution files
test if solution file exists
'''
res = []
inifiles = parse_task_dirs(dirname)
for inif in inifiles:
dir_name, _fin = os.path.split(inif)
res.append(os.path.join(dir_name, get_name_from_ini_file(inif)))
if substances:
return zip(inifiles, res)
return res
def read_process_substances(source, fformat='json'):
'''
Read solution data from task dirs, remove zeros
save non-zero concentration elements and times to pickle file
'''
for reseni in source:
work_on_multiple_substances(reseni)
def read_process_all(source, fformat='json'):
'''
Read solution data from task dirs, remove zeros
save non-zero concentration elements and times to pickle file
'''
for reseni in source:
work_on_single_substance(reseni)
def work_on_multiple_substances(reseni):
'''
parse one transport file for data with multiple substances
'''
inifile = reseni[0]
posfile = reseni[1]
klic, _sou = os.path.split(posfile)
result = read_transport(posfile, True, True)
if result:
times, elements, suma = result
for subst in elements.keys():
names = subst.split('_')
sub_name = names[0]
create_ini_file_for_substance(inifile, sub_name)
fname = os.path.join(klic, sub_name, FNAME_ELEMS)
save_vysledek(fname, elements[subst])
fname = os.path.join(klic, sub_name, FNAME_SUMA)
save_vysledek(fname, suma[subst])
fname = os.path.join(klic, sub_name, FNAME_TIME)
save_vysledek(fname, times)
#multiple processing hack
fname = os.path.join(klic, FNAME_ELEMS+'.json')
with open(fname, 'w') as done_file:
done_file.write('{"_comment" : "data are saved in nested substances subdirectories",\n"completed" : "true"}')
def work_on_single_substance(reseni):
'''
parse one transport file, for data with only one substance
'''
jmena = os.path.split(reseni)
klic = jmena[0]
result = read_transport(reseni, True)
if result:
times, elements, suma = result
fname = os.path.join(klic, FNAME_ELEMS)
save_vysledek(fname, elements)
fname = os.path.join(klic, FNAME_SUMA)
save_vysledek(fname, suma)
fname = os.path.join(klic, FNAME_TIME)
save_vysledek(fname, times)
return 'zpracovano %s' % klic
def save_vysledek(filename, vysledek, fformat = 'json'):
'''
wrapper for file format
save result vysledek to a filename, using file format
@param: fformat - json, pickle
'''
if not filename.endswith(FNAME_EXT[fformat]):
filename = filename + '.' + FNAME_EXT[fformat]
globals()['__save_'+fformat](filename, vysledek)
def __save_json(filename, vysledek):
'''
save result vysledek to a filename, using JSON format
'''
import json
try:
fout = open(filename,'wb')
fout.write(json.dumps(vysledek, fout))
fout.close()
except IOError:
print "failed to write data in %s" % filename
def __save_pickle(filename, vysledek):
'''
save result vysledek to a filename, using pickle
'''
import cPickle
try:
fout = open(filename,'wb')
cPickle.dump(vysledek, fout)
fout.close()
except IOError:
print "failed to write data in %s" % filename
def load_vysledek(filename, fformat = 'json'):
'''
wrapper for file format
load result vysledek from filename, using file format
@param: fformat - json, pickle
'''
if not filename.endswith(FNAME_EXT[fformat]):
filename = filename + '.' + FNAME_EXT[fformat]
return globals()['__load_'+fformat](filename)
def __load_pickle(filename):
'''
load result vysledek from a filename, using pickle
:return: vysledek
:rtype: dict
'''
import cPickle
pars = open(filename, 'rb')
vysledek = cPickle.load(pars)
return vysledek
def __load_json(filename):
'''
load result vysledek from a filename, using json
:return: vysledek
:rtype: dict
'''
import json
pars = open(filename, 'rb')
vysledek = json.load(pars)
return vysledek
def dict_to_csv(dct):
'''
converts dict to a csv
:param: dictionary of values
:return: csv string
'''
rslt = ''
for el_id, sol in dct.items():
rslt += str(el_id)
rslt += ';'
for val in sol.values():
rslt += str(val)
rslt += ';'
rslt += '\n'
return rslt
def __test_vysledek_save():
'''
testing func.
'''
pokus = '../../data/post/Sensitivity'
rslts = get_result_files(pokus)
read_process_all(rslts, 'json')
def __test_vysledek_load():
'''
testing func.
'''
inpt = '../../data/post/Sensitivity/001/elements_concentration'
data = load_vysledek(inpt)
print data
print data['19506']
def main_multiprocess(dirname, substances=False):
'''
main loop for multiprocess run
'''
rslts = get_result_files(dirname, substances)
nr_of_proc = cpu_count()
# Create queues
task_queue = Queue()
done_queue = Queue()
#populate queue with data
for result in rslts:
task_queue.put(result)
#Start worker processes
for i in range(nr_of_proc):
Process(target=worker, args=(task_queue, done_queue, substances)).start()
# Get and print results
sumtime = 0
print 'Unordered results:'
for i in range(len(rslts)):
rtime = done_queue.get()
print '\t', rtime
sumtime += rtime
# Tell child processes to stop
for i in range(nr_of_proc):
task_queue.put('STOP')
print "Stopping Process #%s" % i
print 'Total runtime %s sec' % sumtime
def usage():
'''
shows help
'''
print 'Tool for flow123d transport_out data compression.'
print 'Recursively search given directory for files, and write output in json format'
print 'usage: transport -s dirname for single process, with single substance'
print 'usage: transport -u dirname for single process, with multiple substances'
print 'usage: transport -m dirname for multiprocess (multicore CPU is a big advantage for this)'
print 'usage: transport -c dirname for multiprocess with multiple substances'
def main():
'''
getopt main procedure
'''
try:
opts, args = getopt.getopt(sys.argv[1:], "s:m:h:u:c:", ["single=", "multi=", "help", "msubst=", "subpro="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
if len(opts) == 0:
usage()
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--single"):
rslts = get_result_files(arg)
read_process_all(rslts, 'json')
elif opt in ("-u", "--msubst"):
rslts = get_result_files(arg, True)
read_process_substances(rslts, 'json')
elif opt in ("-m", "--multi"):
main_multiprocess(arg)
elif opt in ("-c", "--subpro"):
main_multiprocess(arg, True)
else:
usage()
sys.exit()
if __name__ == "__main__":
main()
|
gpl-2.0
| -4,437,923,285,083,970,600 | 29.382129 | 125 | 0.545432 | false | 3.823881 | false | false | false |
Eveler/libs
|
__Python__/ufms_blanks/templates/editor.py
|
1
|
21334
|
# -*- coding: utf-8 -*-
import json
from json.decoder import WHITESPACE
import logging
from traceback import print_exc
try:
# from PyQt5.Qt import (QMainWindow, QApplication, QFileDialog, QToolBar, QWidget, QVBoxLayout, QTextEdit, QTimer,
# QLabel, QColor, QByteArray, QBuffer, QPixmap, QBoxLayout, QPainter, QPen,
# QPrintPreviewWidget, QPrinter, QPrintDialog)
from PyQt5.Qt import *
except ImportError:
# from PyQt4.Qt import (QMainWindow, QApplication, QFileDialog, QToolBar, QWidget, QVBoxLayout, QTextEdit, QTimer,
# QLabel, QColor, QByteArray, QBuffer, QPixmap, QBoxLayout, QPainter, QPen,
# QPrintPreviewWidget, QPrinter, QPrintDialog)
# from PyQt4.Qt import *
pass
__author__ = 'Savenko'
class ReportDict(dict):
def __init__(self):
super(dict, self).__init__()
def __getattr__(self, item):
if isinstance(self[item], dict) and not isinstance(self[item], ReportDict):
obj = ReportDict()
for k, v in self[item].items():
obj.__setattr__(k, v)
return obj
else:
return self[item]
def __setattr__(self, key, value):
if isinstance(value, dict) and not isinstance(value, ReportDict):
obj = ReportDict()
for k, v in value.items():
obj.__setattr__(k, v)
self[key] = obj
else:
self[key] = value
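# Illustrative usage (added, not from the original source): ReportDict exposes
# nested dicts through attribute access, e.g.
#   d = ReportDict()
#   d.page = {"size": "A4"}   # the value is stored as a nested ReportDict
#   d.page.size               # -> "A4"
#   d["page"]["size"]         # plain dict access still works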
class Report(ReportDict):
def __init__(self):
super(ReportDict, self).__init__()
# self["report"] = {'page': {"size": "A4"}}
page = [ReportDict()]
page[0].size = "A4"
page[0].orientation = "Portrait"
# image = QPixmap("D:\\devel\\python\\ufms_blanks\\templates\\ScanImage001bw.png")
# image = image.convertToFormat(QImage.Format_ARGB32)
# image = image.scaledToHeight(image.height()*96/600)
# img = _MyDict()
# img.x = 0
# img.y = 0
# ba = QByteArray()
# buf = QBuffer(ba)
# buf.open(QBuffer.WriteOnly)
# image.save(buf, 'PNG')
# img.data = str(ba.toBase64().data())
# page[0].Image = img
report = ReportDict()
report.pages = page
self.report = report
class _ReportDecoder(json.JSONDecoder):
def decode(self, s, _w=WHITESPACE.match):
ret = json.JSONDecoder.decode(self, s, _w)
if "report" in ret:
rep = Report()
rep.report = ret["report"]
return rep
return ret
class ReportPreview(QWidget):
def __init__(self, parent=None, report=''):
from icons import icon_fit, icon_zoom_in, icon_zoom_out, icon_print, icon_pdf
super(QWidget, self).__init__(parent)
self.set_data()
if report:
self.set_report(report)
else:
self.report = report
layout = QVBoxLayout(self)
self.setLayout(layout)
toolbar = QToolBar(self)
toolbar.addAction(icon_fit, QObject().tr(u"Вместить"), self.fit)
toolbar.addSeparator()
toolbar.addAction(icon_zoom_in, QObject().tr(u"Увеличить"), self.zoom_in)
toolbar.addAction(icon_zoom_out, QObject().tr(u"Уменьшить"), self.zoom_out)
toolbar.addSeparator()
toolbar.addAction(icon_print, QObject().tr(u"Распечатать"), self.print)
toolbar.addAction(icon_pdf, QObject().tr(u"Сохранить в PDF"), self.print2pdf)
layout.addWidget(toolbar)
self.printer = QPrinter()
self.preview = QPrintPreviewWidget(self.printer, self)
self.preview.paintRequested.connect(self.generate)
layout.addWidget(self.preview)
def fit(self):
self.preview.fitToWidth()
def zoom_in(self):
self.preview.zoomIn()
def zoom_out(self):
self.preview.zoomOut()
def print(self):
d = QPrintDialog(self.printer, self)
if d.exec_() == QPrintDialog.Accepted:
self.preview.print_()
def print2pdf(self):
file_name = QFileDialog.getSaveFileName(self, "Сохранить в", filter="PDF (*.pdf)")
if file_name[0]:
self.printer.setOutputFileName(file_name[0])
self.preview.print_()
self.printer.setOutputFileName(None)
def set_report(self, report):
self.report = report
assert self.report, "report is null"
self.preview.updatePreview()
def set_data(self, data={}):
self.data = data
def _generate_json(self, painter, report):
def clear_preview():
# item = self.preview.layout().takeAt(0)
# while item:
# item = self.preview.layout().takeAt(0)
pass
# clear_preview()
# dpi = 96
# A4 by default, 210 x 297 mm
# w, h = 210 * dpi / 25, 297 * dpi / 25
i = 0
for page in report.report.pages:
# if i >= self.preview.layout().count():
# lb = QLabel(self.preview)
# self.preview.layout().addWidget(lb)
# lb = self.preview.layout().itemAt(i)
# i += 1
# if lb:
# lb = lb.widget()
# report = _MyDict()
# report.page = page
# if report.page.size == "A3":
# w, h = 297 * dpi / 25, 420 * dpi / 25
# pix = QPixmap(w, h)
# pix.fill(QColor(255, 255, 255))
# painter = QPainter()
# for key, val in report.page.items():
# if key != "size" and key != "orientation":
# try:
# painter.begin(pix)
# if key == "Image":
# x = val.x
# y = val.y
# ba = QByteArray().fromBase64(val.data)
# p = QPixmap()
# p.loadFromData(ba)
# painter.drawPixmap(x, y, p)
# else:
# params = ['"%s"' % v if type(v) == str else "%s" % v for v in val]
# to_eval = "painter.draw%s(%s)" % (key, ','.join(params))
# eval(to_eval)
# except ValueError or TypeError as e:
# clear_preview()
# lb = QLabel(self.preview)
# lb.clear()
# lb.setText(str(e))
# self.preview.layout().addWidget(lb)
# print(e)
# except Exception as e:
# print(e)
# finally:
# painter.end()
# lb.setPixmap(pix)
if i > 0:
self.printer.newPage()
i += 1
self.printer.setPageSize(eval("QPrinter().%s" % page['size'] if page['size'] else "A4"))
self.printer.setOrientation(QPrinter.Portrait)
# setPageMargins(left, top, right, bottom)
self.printer.setPageMargins(20, 15, 15, 15, QPrinter.Millimeter)
rep = ReportDict()
rep.page = page
for key, val in rep.page.items():
if key != "size" and key != "orientation":
try:
if key == "Image":
x = val.x
y = val.y
ba = QByteArray().fromBase64(val.data)
p = QPixmap()
p.loadFromData(ba)
painter.drawPixmap(x, y, p)
else:
params = ['"%s"' % v if type(v) == str else "%s" % v for v in val]
to_eval = "painter.draw%s(%s)" % (key, ','.join(params))
print(to_eval)
eval(to_eval)
except ValueError or TypeError as e:
print(e)
except Exception as e:
print(e)
def _generate_xml(self, painter, report, **kwargs):
# init some variables
class TestData(str):
def __init__(self, name='TestData'):
super(TestData, self).__init__()
self.__str__ = self.__repr__
self._name = name
def __getattr__(self, item):
# return (str(item) + u': Тестовые данные ') * 10
return TestData(item)
def __getitem__(self, item):
# return (str(item) + u': Тестовые данные ') * 10
return TestData(item)
def __repr__(self):
return "%s: %s" % (str(self._name), u' Тестовые данные' * 10)
def __call__(self, *args, **kwargs):
return "%s: %s" % (str(self._name), u' Тестовые данные' * 10)
if report.tag == "template":
dpi = int(report.get("dpi", 96))
inch = self.printer.resolution()
cm = inch / 2.54
mm = cm / 10
else:
if "dpi" in kwargs['kwargs']:
dpi = kwargs['kwargs']["dpi"]
inch = kwargs['kwargs']["inch"]
cm = kwargs['kwargs']["cm"]
mm = kwargs['kwargs']["mm"]
if "page_right_px" in kwargs['kwargs']:
page_right_px = kwargs['kwargs']['page_right_px']
page_bottom_px = kwargs['kwargs']['page_bottom_px']
margin_right = kwargs['kwargs']['margin_right']
margin_bottom = kwargs['kwargs']['margin_bottom']
args = kwargs['kwargs']
for key in args:
if key in ('TestData', 'kwargs', 'self', 'report', 'painter', 'args', 'key'):
continue
vars()[key] = args[key]
if self.data:
data = self.data
else:
data = TestData()
# In the cases of loops we need to skip children nodes as they analyze them by himself
skip_children = False
# page_count = 0
self.err_str = ''
for child in report:
try:
if child.tag == "newPage":
self.printer.newPage()
if child.tag in ("newPage", "page"):
self.printer.setPageSize(eval("QPrinter().%s" % child.get("size", "A4")))
self.printer.setOrientation(eval("QPrinter.%s" % child.get("orientation", "Portrait")))
# setPageMargins(left, top, right, bottom)
margin_left = float(child.get("margin_left", 20))
margin_right = float(child.get("margin_right", 15))
margin_top = float(child.get("margin_top", 15))
margin_bottom = float(child.get("margin_bottom", 15))
self.printer.setPageMargins(margin_left, margin_top, margin_right, margin_bottom,
QPrinter.Millimeter)
page_right_px = self.printer.pageLayout().pageSize().sizePixels(self.printer.resolution()).width()
page_bottom_px = self.printer.pageLayout().pageSize().sizePixels(self.printer.resolution()).height()
# page_left_mm = self.printer.pageLayout().pageSize().size(QPageSize().Millimeter).width()
# page_bottom_mm = self.printer.pageLayout().pageSize().sizePixels(self.printer.resolution()).height()
elif child.tag == "assign":
# to_eval = "variables['%s'] = %s" % (child.get("name"), child.get("value", ''))
to_eval = "%s = %s" % (child.get("name"), child.get("value", ''))
logging.debug(to_eval)
# assign(child.get("name"), child.get("value", ''))
vars()[child.get("name")] = eval(child.get("value", ''))
elif child.tag == "script":
to_eval = "%s" % child.text
logging.debug(to_eval)
# eval(to_eval)
exec(to_eval)
elif child.tag == "for":
to_eval = "for %s in %s: self._generate_xml(painter, child, kwargs=locals())" % \
(child.get("var"), child.get("in", ''))
logging.debug(to_eval)
# eval(to_eval)
exec(to_eval)
skip_children = True
elif child.tag == "while":
to_eval = """
%s = eval(%s)
while %s: self._generate_xml(painter, child, kwargs=locals())
""" % (child.get("init_var", ''), child.get("init_val", ''), child.get("cond"))
logging.debug(to_eval)
# eval(to_eval)
exec(to_eval)
skip_children = True
elif child.tag == "if":
to_eval = "%s" % child.get("cond")
logging.debug('if ' + to_eval)
if eval(to_eval):
self._generate_xml(painter, child, kwargs=locals())
skip_children = True
else:
to_eval = "painter.%s(%s)" % (child.tag, child.get("params", ''))
logging.debug(to_eval)
eval(to_eval)
except Exception:
from sys import exc_info
logging.error('to_eval = "%s"' % to_eval, exc_info=True)
tp, val, tb = exc_info()
self.err_str += "%s: %s\n" % \
(str(tp).replace('<class ', '').replace("'>:", ''), str(val).replace('\n', '\n<br/>'))
print_exc()
if not skip_children:
self._generate_xml(painter, child, kwargs=locals())
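# Example template (hypothetical, added for illustration; tag names follow the
# handlers above, the drawText call and its parameters are assumptions):
#
#   <template dpi="96">
#     <page size="A4" orientation="Portrait"/>
#     <drawText params='100, 100, "Hello"'/>
#   </template>
#
# Any tag without a dedicated handler is evaluated as painter.<tag>(<params>),
# so <drawText .../> becomes painter.drawText(100, 100, "Hello").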
def generate(self, printer):
self.err_str = ''
from xml.etree import ElementTree as ET
if (self.report is None) or (not self.report):
return
assert self.report, "Report is not set"
painter = QPainter()
try:
painter.begin(printer)
if isinstance(self.report, ReportDict):
self._generate_json(painter, self.report)
elif ET.iselement(self.report):
self._generate_xml(painter, self.report)
else:
raise ValueError("Cannot parse report")
except Exception:
tp, val, tb = exc_info()
self.err_str = "%s: %s" % \
(str(tp).replace('<class ', '').replace("'>", "'"), str(val).replace('\n', '\n<br/>'))
print_exc()
finally:
painter.end()
def error_string(self):
return self.err_str
class MainWnd(QMainWindow):
def __init__(self):
self.file_name = ''
from icons import icon_new, icon_open, icon_floppy, icon_update
super(QMainWindow, self).__init__()
self.title = u'[*] Редактор шаблонов'
self.setWindowTitle(self.title)
self.setMinimumWidth(640)
self.setMinimumHeight(480)
lb = QLabel(u"стр.")
self.statusBar().addPermanentWidget(lb)
self.cur_line = QLabel()
self.statusBar().addPermanentWidget(self.cur_line)
lb = QLabel(u'кол.')
self.statusBar().addPermanentWidget(lb)
self.cur_col = QLabel()
self.statusBar().addPermanentWidget(self.cur_col)
lb = QWidget()
# lb.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
# lb.setMinimumWidth(100)
self.statusBar().addPermanentWidget(lb, 1)
self.timer = QTimer(self)
self.timer.timeout.connect(self.preview_update)
wgt = QWidget(self)
self.setCentralWidget(wgt)
layout = QBoxLayout(QBoxLayout.LeftToRight, wgt)
wgt.setLayout(layout)
wgt = QWidget(self)
la = QVBoxLayout(wgt)
self.text_edit = QTextEdit(self)
self.text_edit.textChanged.connect(self.text_changed)
self.text_edit.cursorPositionChanged.connect(self.show_pos)
self.new()
# layout.addWidget(self.text_edit)
la.addWidget(self.text_edit)
self.l_err = QLabel(wgt)
la.addWidget(self.l_err)
layout.addWidget(wgt)
# self.preview = QWidget(self)
# la = QVBoxLayout(self.preview)
# self.preview.setLayout(la)
# sa = QScrollArea(self)
# sa.setWidgetResizable(True)
# sa.setWidget(self.preview)
# layout.addWidget(sa)
self.preview = ReportPreview(self)
layout.addWidget(self.preview)
self.toolbar = QToolBar("Main", self)
self.toolbar.addAction(icon_new, u"Новый", self.new)
self.toolbar.addAction(icon_open, u"Открыть", self.open)
self.toolbar.addAction(icon_floppy, u"Сохранить", self.save)
self.toolbar.addSeparator()
self.toolbar.addAction(icon_update, u"Обновить", self.preview_update)
self.addToolBar(self.toolbar)
rep = Report()
print(rep)
self.text_edit.setPlainText(json.dumps(rep, indent=4))
def show_pos(self):
cursor = self.text_edit.textCursor()
self.cur_col.setText(str(cursor.columnNumber()))
line_num = 0
pos = cursor.position()
lines = self.text_edit.toPlainText().split('\n')
lines_len = 0
for line in lines:
if pos <= lines_len:
break
lines_len += len(line) + 1
line_num += 1
if line_num > len(lines):
line_num -= 1
if cursor.columnNumber() == 0:
line_num += 1
self.cur_line.setText(str(line_num))
def set_err(self, text=''):
tp, val, tb = exc_info()
text = text or ("%s: %s" % (str(tp).replace('<class ', '').replace("'>:", ''), val))
self.l_err.setText('<p style="color: red">%s</p>' % text)
print("ERROR: %s" % text)
def new(self):
try:
self.text_edit.textChanged.disconnect()
except:
self.set_err("disconnect() failed between 'text_changed' and all its connections")
self.text_edit.clear()
self.file_name = ''
# self.setWindowTitle(self.title)
self.setWindowModified(False)
self.text_edit.textChanged.connect(self.text_changed)
def open(self):
file_name = QFileDialog.getOpenFileName(self, u"Открыть", '', "Файлы шаблонов (*.utpl);;Все файлы (*.*)")
if file_name:
try:
self.text_edit.textChanged.disconnect()
except:
self.set_err("disconnect() failed between 'text_changed' and all its connections")
try:
self.text_edit.setPlainText(open(file_name[0], 'r').read(-1))
self.file_name = file_name[0]
self.setWindowModified(False)
self.setWindowTitle("%s - %s" % (self.file_name, self.title))
self.text_edit.textChanged.connect(self.text_changed)
self.preview_update()
except:
self.set_err()
def save(self):
if not self.file_name:
file_name = QFileDialog.getSaveFileName(self, u"Сохранить", '', "Файлы шаблонов (*.utpl);;Все файлы (*.*)")
else:
file_name = [self.file_name]
if file_name[0]:
try:
res = open(file_name[0], "w").write(self.text_edit.toPlainText())
if res:
self.file_name = file_name[0]
self.setWindowTitle("%s - %s" % (self.file_name, self.title))
self.setWindowModified(False)
except:
self.set_err()
def text_changed(self):
# self.setWindowTitle(self.title + " *")
self.setWindowModified(True)
if self.timer.isActive():
self.timer.stop()
# self.preview_update()
self.timer.start(500)
def preview_update(self):
self.timer.stop()
self.l_err.clear()
try:
try:
self.report = json.loads(self.text_edit.toPlainText(), cls=_ReportDecoder)
except ValueError or TypeError as e:
# self.set_err(str(e))
from xml.etree import ElementTree as ET
self.report = ET.fromstring(self.text_edit.toPlainText())
if "report" in self.report:
self.preview.set_report(self.report)
elif self.report.tag == "template":
self.preview.set_report(self.report)
if self.preview.error_string():
self.set_err(self.preview.error_string())
except Exception as e:
self.set_err()
if __name__ == "__main__":
from sys import argv, exc_info
app = QApplication(argv)
from icons import icon_editor
app.setWindowIcon(icon_editor)
wnd = MainWnd()
app.setActiveWindow(wnd)
wnd.showMaximized()
app.exec_()
|
gpl-3.0
| 5,254,551,033,503,943,000 | 37.737615 | 122 | 0.506158 | false | 3.837848 | true | false | false |
CrowdEmotion/crowdemotion-api-clients-examples
|
python/crowdemotion_api_client_python/apis/research_api.py
|
1
|
24008
|
# coding: utf-8
"""
CloudEmotion API v1
CrowdEmotion API
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ResearchApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def research_get(self, **kwargs):
"""
Find all Research
<p>Returns all the Research created by an admin user.</p> <p><strong>Permissions:</strong> ✓ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int skip: The number of results to skip.
:param int limit: The maximum number of results to return.
:param str where: JSON formatted string condition.
:param str sort: Attribute used to sort results.
:return: list[Research]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.research_get_with_http_info(**kwargs)
else:
(data) = self.research_get_with_http_info(**kwargs)
return data
def research_get_with_http_info(self, **kwargs):
"""
Find all Research
<p>Returns all the Research created by an admin user.</p> <p><strong>Permissions:</strong> ✓ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int skip: The number of results to skip.
:param int limit: The maximum number of results to return.
:param str where: JSON formatted string condition.
:param str sort: Attribute used to sort results.
:return: list[Research]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['skip', 'limit', 'where', 'sort']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method research_get" % key
)
params[key] = val
del params['kwargs']
resource_path = '/research'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'skip' in params:
query_params['skip'] = params['skip']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'where' in params:
query_params['where'] = params['where']
if 'sort' in params:
query_params['sort'] = params['sort']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['api_key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Research]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def research_post(self, body, **kwargs):
"""
Create a Research Project
<p>New research projects can only be created with an admin account.</p> <p><strong>Permissions:</strong> ✗ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_post(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ResearchCreation body: Request body (required)
:return: Research
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.research_post_with_http_info(body, **kwargs)
else:
(data) = self.research_post_with_http_info(body, **kwargs)
return data
def research_post_with_http_info(self, body, **kwargs):
"""
Create a Research Project
<p>New research projects can only be created with an admin account.</p> <p><strong>Permissions:</strong> ✗ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_post_with_http_info(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ResearchCreation body: Request body (required)
:return: Research
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method research_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `research_post`")
resource_path = '/research'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['api_key']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Research',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def research_research_id_delete(self, research_id, **kwargs):
"""
Delete Research Project
<p><strong>Permissions:</strong> ✗ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_research_id_delete(research_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int research_id: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.research_research_id_delete_with_http_info(research_id, **kwargs)
else:
(data) = self.research_research_id_delete_with_http_info(research_id, **kwargs)
return data
def research_research_id_delete_with_http_info(self, research_id, **kwargs):
"""
Delete Research Project
<p><strong>Permissions:</strong> ✗ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_research_id_delete_with_http_info(research_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int research_id: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['research_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method research_research_id_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'research_id' is set
if ('research_id' not in params) or (params['research_id'] is None):
raise ValueError("Missing the required parameter `research_id` when calling `research_research_id_delete`")
resource_path = '/research/{research_id}'.replace('{format}', 'json')
path_params = {}
if 'research_id' in params:
path_params['research_id'] = params['research_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['api_key']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def research_research_id_get(self, research_id, **kwargs):
"""
Find a Research Project
<p><strong>Permissions:</strong> ✗ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_research_id_get(research_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int research_id: ID of Research Project to be found. (required)
:return: Research
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.research_research_id_get_with_http_info(research_id, **kwargs)
else:
(data) = self.research_research_id_get_with_http_info(research_id, **kwargs)
return data
def research_research_id_get_with_http_info(self, research_id, **kwargs):
"""
Find a Research Project
<p><strong>Permissions:</strong> ✗ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_research_id_get_with_http_info(research_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int research_id: ID of Research Project to be found. (required)
:return: Research
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['research_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method research_research_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'research_id' is set
if ('research_id' not in params) or (params['research_id'] is None):
raise ValueError("Missing the required parameter `research_id` when calling `research_research_id_get`")
resource_path = '/research/{research_id}'.replace('{format}', 'json')
path_params = {}
if 'research_id' in params:
path_params['research_id'] = params['research_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['api_key']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Research',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def research_research_id_put(self, research_id, body, **kwargs):
"""
Edit Research Project details
<p><strong>Permissions:</strong> ✗ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_research_id_put(research_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int research_id: (required)
:param ResearchCreation body: Request body (required)
:return: Research
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.research_research_id_put_with_http_info(research_id, body, **kwargs)
else:
(data) = self.research_research_id_put_with_http_info(research_id, body, **kwargs)
return data
def research_research_id_put_with_http_info(self, research_id, body, **kwargs):
"""
Edit Research Project details
<p><strong>Permissions:</strong> ✗ Respondent ✗ Customer ✓ Manager</p>
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.research_research_id_put_with_http_info(research_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int research_id: (required)
:param ResearchCreation body: Request body (required)
:return: Research
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['research_id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method research_research_id_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'research_id' is set
if ('research_id' not in params) or (params['research_id'] is None):
raise ValueError("Missing the required parameter `research_id` when calling `research_research_id_put`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `research_research_id_put`")
resource_path = '/research/{research_id}'.replace('{format}', 'json')
path_params = {}
if 'research_id' in params:
path_params['research_id'] = params['research_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['api_key']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Research',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
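# --- Hedged usage sketch (editor's addition, not generated code) ---
# The enclosing swagger-codegen API class is normally instantiated with an
# ApiClient carrying the `api_key` credential; the class and model names below
# (`ResearchApi`, `ResearchCreation`) are assumptions based on typical generated
# output and are not confirmed by this file.
#
#     api = ResearchApi(api_client)
#     research = api.research_research_id_get(42)                          # synchronous
#     updated = api.research_research_id_put(42, ResearchCreation())       # edit details
#     api.research_research_id_put(42, body, callback=lambda r: pprint(r)) # asynchronous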
|
mit
| 6,895,661,252,991,409,000 | 39.727891 | 150 | 0.552447 | false | 4.750645 | true | false | false |
tymofij/adofex
|
transifex/resources/tests/lib/base.py
|
1
|
5917
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from mock import patch
import os
import logging
from django.conf import settings
from django.utils.hashcompat import md5_constructor
from transifex.txcommon.tests import base
from transifex.resources.formats.compilation import \
NormalDecoratorBuilder as Decorator
from transifex.resources.formats.utils.hash_tag import hash_tag
from transifex.resources.models import SourceEntity, Translation
from transifex.resources.formats.compilation import Mode
class FormatsBaseTestCase(base.BaseTestCase):
"""Base class for tests on supported formats."""
def setUp(self):
super(FormatsBaseTestCase, self).setUp()
def compare_to_actual_file(self, handler, actual_file):
template = handler.template
compiler = handler.CompilerClass(handler.resource)
compiler._tdecorator = Decorator(escape_func=handler._escape)
compiler._examine_content(handler.template)
compiler.language = handler.language
sources = [
(idx, "%s" % hash_tag(s.source_entity, ""))
for idx, s in enumerate(handler.stringset)
]
translations = dict([
(idx, s.translation)
for idx, s in enumerate(handler.stringset)
])
with patch.object(compiler, '_get_source_strings') as smock:
with patch.object(compiler, '_tset', create=True) as tmock:
smock.return_value = sources
tmock.return_value = translations
compiler._compile(handler.template)
template = compiler.compiled_template
with open(actual_file, 'r') as f:
actual_content = f.read()
        self.assertEqual(template, actual_content)
def get_translation(self, t, compiler):
if not t:
return ""
return t
def get_content_from_file(self, filename, encoding=False):
"""Get content from a file as required by handler's
bind_content() method"""
        with open(filename, 'r') as f:
            content = f.read()
if encoding:
content = content.decode(encoding)
return content
def _save_source(self, handler, resource, source_file,
source_entity_count, source_translation_count):
"""Save source translations
handler: Handler instance for i18n_type
resource: a Resource instance
source_file: path to source file
source_entity_count: expected count for source entities saved
source_translation_count: expected count for translations in
resource.source_language
Returns a handler
"""
l = resource.source_language
handler.set_language(l)
handler.bind_resource(resource)
handler.bind_content(self.get_content_from_file(source_file))
handler.parse_file(is_source=True)
handler.save2db(is_source=True)
self.assertEqual(SourceEntity.objects.filter(resource=resource
).count(), source_entity_count)
self.assertEqual(len(Translation.objects.filter(
source_entity__resource=resource, language=l)),
source_translation_count)
return handler
def _save_translation(self, handler, resource, target_lang,
translation_file, translation_count):
"""
Save translations from a translation file for a resource
handler: Handler instance for i18n_type
resource: a Resource instance
target_lang: target language instance
translation_file: path to translation file
translation_count: expected count for translations saved in
target_lang for resource
Returns a handler
"""
handler.bind_resource(resource)
handler.bind_content(self.get_content_from_file(translation_file))
handler.set_language(target_lang)
handler.parse_file()
handler.save2db()
self.assertEqual(len(Translation.objects.filter(
source_entity__resource=resource,
language=target_lang)), translation_count)
return handler
def _mark_translation_as_reviewed(self, resource, source_strings, language,
expected_reviewed_count):
"""
Mark translation strings as reviewed
resource: A Resource instance
source_strings: A list containing source strings
        language: Language for translations to be reviewed
expected_reviewed_count: Expected number of translations marked as
reviewed
"""
Translation.objects.filter(source_entity__in=resource.source_entities.filter(
string__in=source_strings), language=language).update(reviewed=True)
self.assertEqual(Translation.objects.filter(
source_entity__resource=resource, reviewed=True
).count(), expected_reviewed_count)
def _check_compilation(self, handler, resource, language, compiled_file,
mode=Mode.DEFAULT):
"""
Verify compilation with a compiled_file's content
handler: A Handler instance
resource: A Resource instance
language: Language in which the resource will be compiled
compiled_file: path to a compiled file
mode: Compilation Mode instance
"""
if isinstance(mode, str):
if mode == 'REVIEWED':
mode = Mode.REVIEWED
elif mode == 'TRANSLATED':
mode = Mode.TRANSLATED
else:
mode = Mode.DEFAULT
handler.bind_resource(resource)
handler.set_language(language)
compiled_template = handler.compile(mode=mode)
        with open(compiled_file, 'r') as f:
            expected_compiled_template = f.read()
self.assertEqual(compiled_template, expected_compiled_template)
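# --- Hedged usage sketch (editor's addition) ---
# A concrete per-format test would drive the helpers above roughly like this;
# the handler class, fixture paths and language attribute are assumptions, while
# the resource fixture comes from base.BaseTestCase.
#
#     class POFormatTestCase(FormatsBaseTestCase):
#         def test_save_and_compile(self):
#             handler = self._save_source(POHandler(), self.resource,
#                                         'tests/files/ok.po', 10, 10)
#             self._save_translation(handler, self.resource, self.language_ar,
#                                    'tests/files/ok_ar.po', 10)
#             self._check_compilation(handler, self.resource, self.language_ar,
#                                     'tests/files/ok_ar_compiled.po')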
|
gpl-3.0
| -7,842,665,203,933,623,000 | 38.446667 | 85 | 0.641372 | false | 4.555042 | false | false | false |
lizardsystem/lizard-htmlreport
|
lizard_htmlreport/migrations/0003_auto__del_field_generatedreport_dataset__add_field_generatedreport_dat.py
|
1
|
8524
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeneratedReport.dataset'
db.delete_column('lizard_htmlreport_generatedreport', 'dataset_id')
# Adding field 'GeneratedReport.data_set'
db.add_column('lizard_htmlreport_generatedreport', 'data_set', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_security.DataSet'], null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding field 'GeneratedReport.dataset'
db.add_column('lizard_htmlreport_generatedreport', 'dataset', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_security.DataSet'], null=True, blank=True), keep_default=False)
# Deleting field 'GeneratedReport.data_set'
db.delete_column('lizard_htmlreport_generatedreport', 'data_set_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_htmlreport.generatedreport': {
'Meta': {'object_name': 'GeneratedReport'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']"}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'document_pdf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'document_rtf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'generated_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_htmlreport.ReportTemplate']"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'lizard_htmlreport.reporttemplate': {
'Meta': {'object_name': 'ReportTemplate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
}
}
complete_apps = ['lizard_htmlreport']
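# Editor's note (assumption): with South installed, this schema change is applied
# and rolled back from the project root with
#     ./manage.py migrate lizard_htmlreport
#     ./manage.py migrate lizard_htmlreport 0002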
|
gpl-3.0
| 7,115,927,551,136,286,000 | 68.868852 | 203 | 0.564641 | false | 3.677308 | false | false | false |
sckott/pytaxize
|
pytaxize/gn/gni.py
|
1
|
1679
|
import sys
import requests
import json
from pytaxize.refactor import Refactor
class NoResultException(Exception):
pass
def parse(names):
"""
Uses the Global Names Index to parse scientific names
:param names: List of scientific names.
Usage::
from pytaxize import gn
gn.gni.parse(names = ['Cyanistes caeruleus','Helianthus annuus'])
"""
url = "http://gni.globalnames.org/parsers.json"
names = "|".join(names)
params = {"names": names}
out = Refactor(url, payload=params, request="get").json()
return out
def search(search_term="ani*", per_page=30, page=1):
"""
Search for names against the Global names index
:param search_term: Search term
:param per_page: Items to return per page
:param page: Page to return
Usage::
from pytaxize import gn
gn.gni.search(search_term = 'ani*')
"""
url = "http://gni.globalnames.org/name_strings.json"
params = {"search_term": search_term, "per_page": per_page, "page": page}
out = Refactor(url, payload=params, request="get").json()
return out
def details(id=17802847, all_records=1):
"""
Usage::
from pytaxize import gn
gn.gni.details(id = 17802847)
"""
url = "http://gni.globalnames.org/name_strings/"
mylist = [url, str(id), ".json"]
url2 = "".join(mylist)
params = {"all_records": all_records}
    try:
        out = Refactor(url2, payload=params, request="get").json()
    except ValueError:
        # a non-JSON (e.g. empty) response means GNI found nothing for this id
        raise NoResultException("GNI didn't return a result (id: %s)" % id)
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
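# --- Hedged usage sketch (editor's addition) ---
# Typical interactive use of this module, assuming network access to
# gni.globalnames.org:
#
#     from pytaxize.gn import gni
#     gni.parse(names=['Cyanistes caeruleus', 'Helianthus annuus'])
#     gni.search(search_term='ani*', per_page=5)
#     gni.details(id=17802847)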
|
mit
| -9,037,382,266,054,791,000 | 23.333333 | 77 | 0.62299 | false | 3.47619 | false | false | false |
neudesk/neucloud
|
openstack_dashboard/dashboards/admin/flavors/tables.py
|
1
|
3841
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
class DeleteFlavor(tables.DeleteAction):
data_type_singular = _("Flavor")
data_type_plural = _("Flavors")
def delete(self, request, obj_id):
api.nova.flavor_delete(request, obj_id)
class CreateFlavor(tables.LinkAction):
name = "create"
verbose_name = _("Create Flavor")
url = "horizon:admin:flavors:create"
classes = ("ajax-modal", "btn-create")
class UpdateFlavor(tables.LinkAction):
name = "update"
verbose_name = _("Edit Flavor")
url = "horizon:admin:flavors:update"
classes = ("ajax-modal", "btn-edit")
class ViewFlavorExtras(tables.LinkAction):
name = "extras"
verbose_name = _("View Extra Specs")
url = "horizon:admin:flavors:extras:index"
classes = ("btn-edit",)
class ModifyAccess(tables.LinkAction):
name = "projects"
verbose_name = _("Modify Access")
url = "horizon:admin:flavors:update"
classes = ("ajax-modal", "btn-edit")
def get_link_url(self, flavor):
step = 'update_flavor_access'
base_url = reverse(self.url, args=[flavor.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class FlavorFilterAction(tables.FilterAction):
def filter(self, table, flavors, filter_string):
"""Really naive case-insensitive search."""
q = filter_string.lower()
def comp(flavor):
return q in flavor.name.lower()
return filter(comp, flavors)
def get_size(flavor):
return _("%sMB") % flavor.ram
def get_swap_size(flavor):
return _("%sMB") % (flavor.swap or 0)
class FlavorsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Flavor Name'))
vcpus = tables.Column('vcpus', verbose_name=_('VCPUs'))
ram = tables.Column(get_size,
verbose_name=_('RAM'),
attrs={'data-type': 'size'})
disk = tables.Column('disk', verbose_name=_('Root Disk'))
ephemeral = tables.Column('OS-FLV-EXT-DATA:ephemeral',
verbose_name=_('Ephemeral Disk'))
swap = tables.Column(get_swap_size,
verbose_name=_('Swap Disk'),
attrs={'data-type': 'size'})
flavor_id = tables.Column('id', verbose_name=_('ID'))
public = tables.Column("is_public",
verbose_name=_("Public"),
empty_value=False,
filters=(filters.yesno, filters.capfirst))
class Meta:
name = "flavors"
verbose_name = _("Flavors")
table_actions = (FlavorFilterAction, CreateFlavor, DeleteFlavor)
row_actions = (UpdateFlavor,
ModifyAccess,
ViewFlavorExtras,
DeleteFlavor)
|
apache-2.0
| -8,237,139,014,634,401,000 | 31.82906 | 78 | 0.631606 | false | 4.009395 | false | false | false |
olix0r/pub
|
lib/client/register_key.py
|
1
|
1804
|
import os, sys
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.plugin import IPlugin
from twisted.python.filepath import FilePath
from twisted.python.usage import UsageError
from zope.interface import implements
from jersey import log
from pub.client import cli
from pub.crypto import Key
from pub.iface import KeyAlreadyExists
class Options(cli.Options):
optParameters = [
["comment", "c", "", "Key comment.",],
]
def getSynopsis(self):
return "{0} {1} [options] entity-id key-file".format(
self.parent.getSynopsis(),
self.parent.subCommand)
def parseArgs(self, id, keyFile):
self["id"] = id
try:
self["key"] = Key.fromFile(keyFile)
        except Exception:
raise UsageError("Invalid key file: {0}".format(keyFile))
class Command(cli.Command):
_keyFmt = "\n".join([
"Entity ID: {0.entityId}",
"Key ID: {0.id}",
"Key Type: {0.type}",
"Comment: {0.comment}",
])
@inlineCallbacks
def execute(self):
log.debug("Registering an entity")
try:
ent = yield self.pub.getEntity(self.config["id"])
pubKey = yield ent.registerKey(
self.config["key"], self.config["comment"])
except KeyAlreadyExists:
print >>sys.stderr, "Key already exists: {0}".format(
self.config["key"].id)
else:
log.debug("Registered a key")
print self._keyFmt.format(pubKey)
class Loader(cli.CommandFactory):
implements(IPlugin)
command = Command
options = Options
name = "register-key"
shortcut = "R"
description = "Register an entity"
loader = Loader()
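# --- Hedged usage sketch (editor's addition) ---
# As a twisted plugin this command is normally reached through the pub CLI front
# end; the executable name is an assumption, and the option spelling follows the
# Options class above (shortcut "R", parameter "comment"):
#
#     pub register-key <entity-id> /path/to/key.pub -c "workstation key"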
|
bsd-3-clause
| 432,916,004,201,920,600 | 21.835443 | 69 | 0.587583 | false | 3.964835 | false | false | false |
emccode/HeliosBurn
|
heliosburn/django/hbproject/api/views/auth.py
|
1
|
1878
|
import json
import hashlib
import logging
import os
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from api.models import db_model
logger = logging.getLogger(__name__)
@csrf_exempt
def login(request):
"""
    Authenticates the JSON-supplied 'username' and 'password' against the user in the database; returns an 'X-Auth-Token' header on success.
"""
if request.method != 'POST':
r = HttpResponse('Invalid method. Only POST method accepted.', status=405)
r['Allow'] = 'POST'
return r
try:
in_json = json.loads(request.body)
assert "username" in in_json
assert "password" in in_json
except AssertionError:
return HttpResponseBadRequest("argument mismatch")
    except ValueError:
return HttpResponseBadRequest("invalid JSON")
dbc = db_model.connect()
user = dbc.hbuser.find_one({"username": in_json['username']})
if user is None:
# not returning "user not found" to avoid attackers to guess valid users
return HttpResponse(status=401)
else:
m = hashlib.sha512()
m.update(in_json['password'])
password_hash = m.hexdigest()
if user['password'] == password_hash:
m = hashlib.sha512()
m.update(os.urandom(64))
token_string = m.hexdigest()
from api.models import redis_wrapper
r = redis_wrapper.init_redis()
            r.set(token_string, user['username'], settings.TOKEN_TTL)  # token expires after settings.TOKEN_TTL seconds
r = HttpResponse()
r['X-Auth-Token'] = token_string
logger.info("login success for user '%s'" % in_json['username'])
return r
else:
logger.info("login failed for user '%s'" % in_json['username'])
return HttpResponse(status=401)
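# --- Hedged usage sketch (editor's addition) ---
# The view expects a JSON body and returns the session token in the X-Auth-Token
# response header; the URL prefix depends on the project's urls.py and is an
# assumption here:
#
#     curl -i -X POST http://localhost:8000/api/auth/login \
#          -d '{"username": "admin", "password": "secret"}'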
|
mit
| 5,699,081,220,727,780,000 | 33.145455 | 105 | 0.625666 | false | 4.220225 | false | false | false |
ferriman/SSandSP
|
raspberrypi/egocentricPanel.py
|
1
|
1082
|
import time
from twython import TwythonStreamer
import subprocess
def say(words):
devnull = open("/dev/null","w")
subprocess.call(["espeak","-v", "en-rp",words],stderr=devnull)
def showinpanel():
devnull = open("/dev/null","w")
subprocess.call(["sudo","./rpi-rgb-led-matrix-master/led-matrix","-p","2","-D","1","-t","23","twitter4.ppm"],stderr=devnull)
# Search terms
TERMS = '@DIYProjectLog'
# Twitter application authentication
APP_KEY = ''
APP_SECRET = ''
OAUTH_TOKEN = ''
OAUTH_TOKEN_SECRET = ''
# Setup callbacks from Twython Streamer
class TalkingTwitter(TwythonStreamer):
def on_success(self, data):
if 'text' in data:
print data['text'].encode('utf-8')
print
#say(data['text'].encode('utf-8'))
#say("You have been mentioned in Twitter")
showinpanel()
print "Egocentric panel is listening..."
# Create streamer
try:
stream = TalkingTwitter(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
stream.statuses.filter(track=TERMS)
except KeyboardInterrupt:
print "Bye Bye!"
|
gpl-3.0
| 366,088,186,372,964,700 | 26.74359 | 125 | 0.655268 | false | 3.229851 | false | false | false |
HexaMonkey/hexamonkey
|
test/resources/parser/test_msgpack.py
|
1
|
1831
|
import msgpack, math
with open("test_msgpack.msgpack", 'w') as f:
def write(str):
f.write(str)
def dump(item, **kwargs):
msgpack.dump(item, f, **kwargs)
def dumpList(list):
for item in list:
dump(item)
write(b"\xdc\x00\x24");
dumpList([None,
False,
True,
42,
-5,
150,
33000,
2000000])
write(b"\xcf\x00\x07\x1a\xfd\x49\x8d\x00\x00") # issue with msgpack module with large integers
dumpList([-60,
-3000,
-200000,
-20000000000L])
dump(math.pi, use_single_float = True);
dump(math.pi, use_single_float = False);
dump("Hello world!");
string = "Hello world!";
write(b"\xd9\x0c");
write(string);
write(b"\xda\x00\x0c");
write(string);
write(b"\xdb\x00\x00\x00\x0c");
write(string);
string = "\x00"*8
write(b"\xc4\x08");
write(string);
write(b"\xc5\x00\x08");
write(string);
write(b"\xc6\x00\x00\x00\x08");
write(string);
list = [1,2,3,4,5,6,7,None];
dump(list);
write(b"\xdc\x00\x08");
dumpList(list);
write(b"\xdd\x00\x00\x00\x08");
dumpList(list);
write(b"\x84");
dumpList(list);
write(b"\xde\x00\x04");
dumpList(list);
write(b"\xdf\x00\x00\x00\x04");
dumpList(list);
write(b"\xd4\x2a");
write("\x00");
write(b"\xd5\x2a");
write("\x00"*2);
write(b"\xd6\x2a");
write("\x00"*4);
write(b"\xd7\x2a");
write("\x00"*8);
write(b"\xd8\x2a");
write("\x00"*16);
string = "\x00"*8
write(b"\xc7\x08\x2a");
write(string);
write(b"\xc8\x00\x08\x2a");
write(string);
write(b"\xc9\x00\x00\x00\x08\x2a");
write(string);
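# --- Hedged read-back check (editor's addition, not part of the original generator) ---
# Streams the freshly written file through msgpack.Unpacker and reports how many
# top-level objects decode cleanly; hand-crafted trailing bytes may stop the loop early.
with open("test_msgpack.msgpack", 'rb') as check:
    unpacker = msgpack.Unpacker(check)
    count = 0
    try:
        for obj in unpacker:
            count += 1
    except Exception:
        pass
    print "decoded %d top-level msgpack objects" % count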
|
gpl-2.0
| -3,161,185,039,400,671,000 | 18.902174 | 98 | 0.506827 | false | 2.778452 | false | false | false |
optimamodel/Optima
|
server/webapp/dbmodels.py
|
1
|
7577
|
import os
#from flask_restful_swagger import swagger
from sqlalchemy import text
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.dialects.postgresql import JSON
import optima as op
from .dbconn import db, redis
#@swagger.model
class UserDb(db.Model):
__tablename__ = 'users'
id = db.Column(UUID(True), server_default=text("uuid_generate_v1mc()"), primary_key=True)
username = db.Column(db.String(255))
name = db.Column(db.String(60))
email = db.Column(db.String(200))
password = db.Column(db.String(200))
country = db.Column(db.String(60))
organization = db.Column(db.String(60))
position = db.Column(db.String(60))
is_admin = db.Column(db.Boolean, server_default=text('FALSE'))
projects = db.relationship('ProjectDb', backref='user', lazy='dynamic')
def __init__(self, name, email, password, username, country, organization,
position, is_admin=False):
self.name = name
self.email = email
self.password = password
self.username = username
self.country = country
self.organization = organization
self.position = position
self.is_admin = is_admin
def get_id(self):
return self.id
def is_active(self): # pylint: disable=R0201
return True
def is_anonymous(self): # pylint: disable=R0201
return False
def is_authenticated(self): # pylint: disable=R0201
return True
#@swagger.model
class PyObjectDb(db.Model):
__tablename__ = 'objects'
id = db.Column(
UUID(True), server_default=text("uuid_generate_v1mc()"), primary_key=True)
user_id = db.Column(UUID(True), db.ForeignKey('users.id'))
type = db.Column(db.Text, default=None)
name = db.Column(db.Text, default=None)
attr = db.Column(JSON)
def load(self):
print(">> PyObjectDb.load " + self.id.hex)
redis_entry = redis.get(self.id.hex)
print(redis_entry)
if redis_entry is None:
print('WARNING, object %s not found' % self.id.hex)
return None
else:
return op.loadstr(redis_entry)
def save_obj(self, obj):
print(">> PyObjectDb.save " + self.id.hex)
redis.set(self.id.hex, op.dumpstr(obj))
def cleanup(self):
print(">> PyObjectDb.cleanup " + self.id.hex)
redis.delete(self.id.hex)
def as_portfolio_file(self, loaddir, filename=None):
portfolio = self.load()
filename = os.path.join(loaddir, portfolio.name + ".prt")
op.saveobj(filename, portfolio)
return portfolio.name + ".prt"
#@swagger.model
class ProjectDb(db.Model):
__tablename__ = 'projects'
id = db.Column(UUID(True), server_default=text("uuid_generate_v1mc()"), primary_key=True)
user_id = db.Column(UUID(True), db.ForeignKey('users.id'))
results = db.relationship('ResultsDb', backref='project')
def __init__(self, user_id):
self.user_id = user_id
def load(self):
print(">> ProjectDb.load " + self.id.hex)
redis_entry = redis.get(self.id.hex)
project = op.loadproj(redis_entry, fromdb=True)
return project
def save_obj(self, obj):
print(">> ProjectDb.save " + self.id.hex)
redis.set(self.id.hex, op.dumpstr(obj))
def as_file(self, loaddir, filename=None):
project = self.load()
filename = os.path.join(loaddir, project.name + ".prj")
op.saveobj(filename, project)
return project.name + ".prj"
def delete_dependent_objects(self, synchronize_session=False):
str_project_id = str(self.id)
# Pull out all results rows with Project UID matching str_project_id.
result_records = db.session.query(ResultsDb).filter_by(project_id=str_project_id)
# Call the cleanup for each record (i.e., deleting the Redis entries).
for result_record in result_records:
result_record.cleanup()
# Now delete the Postgres results entries.
result_records.delete(synchronize_session)
# Pull out all undo_stacks rows with Project UID matching str_project_id.
undo_stack_records = db.session.query(UndoStackDb).filter_by(project_id=str_project_id)
# Call the cleanup for each record (i.e., deleting the Redis entries).
for undo_stack_record in undo_stack_records:
undo_stack_record.cleanup()
# Now delete the Postgres undo_stacks entries.
undo_stack_records.delete(synchronize_session)
db.session.flush()
def recursive_delete(self, synchronize_session=False):
str_project_id = str(self.id)
# delete all relevant entries explicitly
self.delete_dependent_objects(synchronize_session=synchronize_session)
# db.session.query(ProjectDataDb).filter_by(id=str_project_id).delete(synchronize_session)
db.session.query(ProjectDb).filter_by(id=str_project_id).delete(synchronize_session)
db.session.flush()
class ResultsDb(db.Model):
DEFAULT_CALCULATION_TYPE = 'calibration' # 'calibration' or 'optimization'
# todo make enum when all types are known
__tablename__ = 'results'
id = db.Column(UUID(True), server_default=text("uuid_generate_v1mc()"), primary_key=True)
parset_id = db.Column(UUID(True))
project_id = db.Column(UUID(True), db.ForeignKey('projects.id', ondelete='SET NULL'))
calculation_type = db.Column(db.Text)
def __init__(self, parset_id, project_id, calculation_type, id=None):
self.parset_id = parset_id
self.project_id = project_id
self.calculation_type = calculation_type
if id:
self.id = id
def load(self):
print(">> ResultsDb.load result-" + self.id.hex)
return op.loadstr(redis.get("result-" + self.id.hex))
def save_obj(self, obj):
print(">> ResultsDb.save result-" + self.id.hex)
redis.set("result-" + self.id.hex, op.dumpstr(obj))
def cleanup(self):
print(">> ResultsDb.cleanup result-" + self.id.hex)
redis.delete("result-" + self.id.hex)
class WorkLogDb(db.Model): # pylint: disable=R0903
__tablename__ = "work_log"
work_status = db.Enum('started', 'completed', 'cancelled', 'error', 'blocked', name='work_status')
id = db.Column(UUID(True), server_default=text("uuid_generate_v1mc()"), primary_key=True)
task_id = db.Column(db.String(128), default=None)
start_time = db.Column(db.DateTime(timezone=True), server_default=text('now()'))
stop_time = db.Column(db.DateTime(timezone=True), default=None)
status = db.Column(work_status, default='started')
error = db.Column(db.Text, default=None)
class UndoStackDb(db.Model):
__tablename__ = 'undo_stacks'
id = db.Column(UUID(True), server_default=text("uuid_generate_v1mc()"), primary_key=True)
project_id = db.Column(UUID(True), db.ForeignKey('projects.id', ondelete='SET NULL'))
def __init__(self, project_id, id=None):
self.project_id = project_id
if id:
self.id = id
def load(self):
print(">> UndoStackDb.load undo-stack-" + self.id.hex)
return op.loadstr(redis.get("undo-stack-" + self.id.hex))
def save_obj(self, obj):
print(">> UndoStackDb.save undo-stack-" + self.id.hex)
redis.set("undo-stack-" + self.id.hex, op.dumpstr(obj))
def cleanup(self):
print(">> UndoStackDb.cleanup undo-stack-" + self.id.hex)
redis.delete("undo-stack-" + self.id.hex)
|
lgpl-3.0
| 8,831,208,499,364,056,000 | 34.577465 | 102 | 0.634024 | false | 3.516009 | false | false | false |
fredmorcos/attic
|
projects/plantmaker/archive/20100520/src/utils.py
|
1
|
1766
|
import math
from schedule import Schedule
def parseSolutions(solutions, plant, orderList):
parsedSolutions = []
for solution in solutions:
solutionItems = solution.items()
schedule = Schedule()
for item in solutionItems:
if "enter" in item[0]:
parsedItem = item[0].split("-")
order = orderList.orderFromID(int(parsedItem[0]))
machineName = parsedItem[2]
time = item[1]
if not (time == 0 and order.currentMachine != "") or \
(time == 0 and order.currentMachine == machineName):
schedule.schedule.append([order, machineName, time])
if "finish" in item[0]:
parsedItem = item[0].split("-")
order = orderList.orderFromID(int(parsedItem[0]))
schedule.finishTimes.append([order, item[1]])
schedule.sort()
schedule.finishTimes.sort(lambda a, b: cmp(a[1], b[1]))
parsedSolutions.append(schedule)
return parsedSolutions
def bestSolution(solutions):
maxFitness = 0
maxFitnessIndex = 0
for i, solution in enumerate(solutions):
if i == 0:
maxFitness = solution.fitness
else:
if solution.fitness > maxFitness:
maxFitness = solution.fitness
maxFitnessIndex = i
return solutions[maxFitnessIndex]
def normalizeValues(plant, orderList):
min = plant.craneMoveTime
if plant.zincBreakTime < min:
min = plant.zincBreakTime
for o in orderList.orders:
for r in o.recipe.recipe:
if r[1] < min and r[1] != 0:
min = r[1]
min = float(abs(min))
plant.craneMoveTime = int(math.ceil(plant.craneMoveTime / min))
if plant.zincBreakTime < min:
plant.zincBreakTime = int(math.ceil(plant.zincBreakTime / min))
for o in orderList.orders:
o.deadline = int(math.ceil(o.deadline / min))
for r in o.recipe.recipe:
r[1] = int(math.ceil(r[1] / min))
return int(min)
|
isc
| 1,680,595,651,206,730,000 | 26.169231 | 65 | 0.686297 | false | 2.978078 | false | false | false |
TresysTechnology/setools
|
tests/policyrep/mls.py
|
1
|
30650
|
# Copyright 2015, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SETools. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
from unittest.mock import Mock
from setools import SELinuxPolicy
from setools.policyrep import qpol
from setools.policyrep.exception import MLSDisabled, InvalidLevel, InvalidLevelDecl, InvalidRange, \
InvalidSensitivity, InvalidCategory, NoStatement
from setools.policyrep.mls import sensitivity_factory, category_factory, level_factory, \
range_factory, level_decl_factory
class SensitivityTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = SELinuxPolicy("tests/policyrep/mls.conf")
def mock_sens_factory(self, sens, aliases=[]):
"""Factory function for Sensitivity objects, using a mock qpol object."""
mock_sens = Mock(qpol.qpol_level_t)
mock_sens.name.return_value = sens
mock_sens.isalias.return_value = False
mock_sens.value.return_value = int(sens[1:])
mock_sens.alias_iter = lambda x: iter(aliases)
return sensitivity_factory(self.p.policy, mock_sens)
def test_000_mls_disabled(self):
"""Sensitivity factory on MLS-disabled policy."""
mock_p = Mock(qpol.qpol_policy_t)
mock_p.capability.return_value = False
self.assertRaises(MLSDisabled, sensitivity_factory, mock_p, None)
def test_001_lookup(self):
"""Sensitivity factory policy lookup."""
sens = sensitivity_factory(self.p.policy, "s1")
self.assertEqual("s1", sens.qpol_symbol.name(self.p.policy))
def test_002_lookup_invalid(self):
"""Sensitivity factory policy invalid lookup."""
with self.assertRaises(InvalidSensitivity):
sensitivity_factory(self.p.policy, "INVALID")
def test_003_lookup_object(self):
"""Sensitivity factory policy lookup of Sensitivity object."""
sens1 = sensitivity_factory(self.p.policy, "s1")
sens2 = sensitivity_factory(self.p.policy, sens1)
self.assertIs(sens2, sens1)
def test_010_string(self):
"""Sensitivity basic string rendering."""
sens = self.mock_sens_factory("s0")
self.assertEqual("s0", str(sens))
def test_020_statement(self):
"""Sensitivity basic statement rendering."""
sens = self.mock_sens_factory("s0")
self.assertEqual("sensitivity s0;", sens.statement())
def test_021_statement_alias(self):
"""Sensitivity one alias statement rendering."""
sens = self.mock_sens_factory("s0", ["name1"])
self.assertEqual("sensitivity s0 alias name1;", sens.statement())
def test_022_statement_alias(self):
"""Sensitivity two alias statement rendering."""
sens = self.mock_sens_factory("s0", ["name1", "name2"])
self.assertEqual("sensitivity s0 alias { name1 name2 };", sens.statement())
def test_030_value(self):
"""Sensitivity value."""
sens = self.mock_sens_factory("s17")
self.assertEqual(17, sens._value)
def test_031_equal(self):
"""Sensitivity equal."""
sens1 = self.mock_sens_factory("s0")
sens2 = self.mock_sens_factory("s0")
self.assertEqual(sens1, sens2)
def test_032_equal_str(self):
"""Sensitivity equal to string."""
sens = self.mock_sens_factory("s17")
self.assertEqual("s17", sens)
def test_033_not_equal(self):
"""Sensitivity not equal."""
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s23")
self.assertNotEqual(sens1, sens2)
def test_034_not_equal_str(self):
"""Sensitivity not equal to string."""
sens = self.mock_sens_factory("s17")
self.assertNotEqual("s0", sens)
def test_035_lt(self):
"""Sensitivity less-than."""
# less
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s23")
self.assertTrue(sens1 < sens2)
# equal
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s17")
self.assertFalse(sens1 < sens2)
# greater
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s0")
self.assertFalse(sens1 < sens2)
def test_036_le(self):
"""Sensitivity less-than-or-equal."""
# less
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s23")
self.assertTrue(sens1 <= sens2)
# equal
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s17")
self.assertTrue(sens1 <= sens2)
# greater
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s0")
self.assertFalse(sens1 <= sens2)
def test_037_ge(self):
"""Sensitivity greater-than-or-equal."""
# less
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s23")
self.assertFalse(sens1 >= sens2)
# equal
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s17")
self.assertTrue(sens1 >= sens2)
# greater
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s0")
self.assertTrue(sens1 >= sens2)
def test_038_gt(self):
"""Sensitivity greater-than."""
# less
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s23")
self.assertFalse(sens1 > sens2)
# equal
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s17")
self.assertFalse(sens1 > sens2)
# greater
sens1 = self.mock_sens_factory("s17")
sens2 = self.mock_sens_factory("s0")
self.assertTrue(sens1 > sens2)
class CategoryTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = SELinuxPolicy("tests/policyrep/mls.conf")
def mock_cat_factory(self, cat, aliases=[]):
"""Factory function for Category objects, using a mock qpol object."""
mock_cat = Mock(qpol.qpol_cat_t)
mock_cat.name.return_value = cat
mock_cat.isalias.return_value = False
mock_cat.value.return_value = int(cat[1:])
mock_cat.alias_iter = lambda x: iter(aliases)
return category_factory(self.p.policy, mock_cat)
def test_000_mls_disabled(self):
"""Category factory on MLS-disabled policy."""
mock_p = Mock(qpol.qpol_policy_t)
mock_p.capability.return_value = False
self.assertRaises(MLSDisabled, category_factory, mock_p, None)
def test_001_lookup(self):
"""Category factory policy lookup."""
cat = category_factory(self.p.policy, "c1")
self.assertEqual("c1", cat.qpol_symbol.name(self.p.policy))
def test_002_lookup_invalid(self):
"""Category factory policy invalid lookup."""
with self.assertRaises(InvalidCategory):
category_factory(self.p.policy, "INVALID")
def test_003_lookup_object(self):
"""Category factory policy lookup of Category object."""
cat1 = category_factory(self.p.policy, "c1")
cat2 = category_factory(self.p.policy, cat1)
self.assertIs(cat2, cat1)
    def test_010_string(self):
"""Category basic string rendering."""
cat = self.mock_cat_factory("c0")
self.assertEqual("c0", str(cat))
def test_020_statement(self):
"""Category basic statement rendering."""
cat = self.mock_cat_factory("c0")
self.assertEqual("category c0;", cat.statement())
def test_021_statement_alias(self):
"""Category one alias statement rendering."""
cat = self.mock_cat_factory("c0", ["name1"])
self.assertEqual("category c0 alias name1;", cat.statement())
def test_022_statement_alias(self):
"""Category two alias statement rendering."""
cat = self.mock_cat_factory("c0", ["name1", "name2"])
self.assertEqual("category c0 alias { name1 name2 };", cat.statement())
def test_030_value(self):
"""Category value."""
cat = self.mock_cat_factory("c17")
self.assertEqual(17, cat._value)
class LevelDeclTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = SELinuxPolicy("tests/policyrep/mls.conf")
def mock_decl_factory(self, sens, cats=[]):
"""Factory function for LevelDecl objects, using a mock qpol object."""
mock_decl = Mock(qpol.qpol_level_t)
mock_decl.name.return_value = sens
mock_decl.isalias.return_value = False
mock_decl.value.return_value = int(sens[1:])
mock_decl.cat_iter = lambda x: iter(cats)
return level_decl_factory(self.p.policy, mock_decl)
def test_000_mls_disabled(self):
"""Level declaration factory on MLS-disabled policy."""
mock_p = Mock(qpol.qpol_policy_t)
mock_p.capability.return_value = False
self.assertRaises(MLSDisabled, level_decl_factory, mock_p, None)
def test_001_lookup(self):
"""Level declaration factory policy lookup."""
decl = level_decl_factory(self.p.policy, "s1")
self.assertEqual("s1", decl.qpol_symbol.name(self.p.policy))
def test_002_lookup_invalid(self):
"""Level declaration factory policy invalid lookup."""
with self.assertRaises(InvalidLevelDecl):
level_decl_factory(self.p.policy, "INVALID")
def test_003_lookup_object(self):
"""Level declaration factory policy lookup of LevelDecl object."""
level1 = level_decl_factory(self.p.policy, "s1")
level2 = level_decl_factory(self.p.policy, level1)
self.assertIs(level2, level1)
def test_010_string(self):
"""Level declaration basic string rendering."""
decl = self.mock_decl_factory("s0")
self.assertEqual("s0", str(decl))
def test_011_string_single_cat(self):
"""Level declaration string rendering with one category"""
decl = self.mock_decl_factory("s0", ["c0"])
self.assertEqual("s0:c0", str(decl))
def test_012_string_multiple_cat(self):
"""Level declaration string rendering with multiple categories"""
decl = self.mock_decl_factory("s0", ["c0", "c3"])
self.assertEqual("s0:c0,c3", str(decl))
def test_013_string_cat_set(self):
"""Level declaration string rendering with category set"""
decl = self.mock_decl_factory("s0", ["c0", "c1", "c2", "c3"])
self.assertEqual("s0:c0.c3", str(decl))
def test_014_string_complex(self):
"""Level declaration string rendering with complex category set"""
decl = self.mock_decl_factory("s0", ["c0", "c1", "c2", "c3", "c5", "c7", "c8", "c9"])
self.assertEqual("s0:c0.c3,c5,c7.c9", str(decl))
def test_020_statement(self):
"""Level declaration basic statement rendering."""
decl = self.mock_decl_factory("s0")
self.assertEqual("level s0;", decl.statement())
def test_021_statement_single_cat(self):
"""Level declaration statement rendering with one category"""
decl = self.mock_decl_factory("s0", ["c0"])
self.assertEqual("level s0:c0;", decl.statement())
def test_022_statement_multiple_cat(self):
"""Level declaration statement rendering with multiple categories"""
decl = self.mock_decl_factory("s0", ["c0", "c3"])
self.assertEqual("level s0:c0,c3;", decl.statement())
    def test_023_statement_cat_set(self):
"""Level declaration statement rendering with category set"""
decl = self.mock_decl_factory("s0", ["c0", "c1", "c2", "c3"])
self.assertEqual("level s0:c0.c3;", decl.statement())
    def test_024_statement_complex(self):
"""Level declaration statement rendering with complex category set"""
decl = self.mock_decl_factory("s0", ["c0", "c1", "c2", "c3", "c5", "c7", "c8", "c9"])
self.assertEqual("level s0:c0.c3,c5,c7.c9;", decl.statement())
def test_030_equal(self):
"""Level declaration equal."""
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
self.assertEqual(decl1, decl2)
def test_031_equal_str(self):
"""Level declaration equal to string."""
decl = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
self.assertEqual("s17:c0.c3", decl)
def test_032_not_equal(self):
"""Level declaration not equal."""
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s23")
self.assertNotEqual(decl1, decl2)
def test_033_not_equal_str(self):
"""Level declaration not equal to string."""
decl = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
self.assertNotEqual("s0:c0.c2", decl)
def test_034_lt(self):
"""Level declaration less-than."""
# less
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s23", ["c7", "c8", "c9"])
self.assertTrue(decl1 < decl2)
# equal
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
self.assertFalse(decl1 < decl2)
# greater
decl1 = self.mock_decl_factory("s24")
decl2 = self.mock_decl_factory("s23", ["c7", "c8", "c9"])
self.assertFalse(decl1 < decl2)
def test_035_le(self):
"""Level declaration less-than-or-equal."""
# less
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s23", ["c7", "c8", "c9"])
self.assertTrue(decl1 <= decl2)
# equal
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
self.assertTrue(decl1 <= decl2)
# greater
decl1 = self.mock_decl_factory("s24")
decl2 = self.mock_decl_factory("s23", ["c7", "c8", "c9"])
self.assertFalse(decl1 <= decl2)
def test_036_ge(self):
"""Level declaration greater-than-or-equal."""
# less
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s23", ["c7", "c8", "c9"])
self.assertFalse(decl1 >= decl2)
# equal
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
self.assertTrue(decl1 >= decl2)
# greater
decl1 = self.mock_decl_factory("s24")
decl2 = self.mock_decl_factory("s23", ["c7", "c8", "c9"])
self.assertTrue(decl1 >= decl2)
def test_037_gt(self):
"""Level declaration greater-than."""
# less
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s23", ["c7", "c8", "c9"])
self.assertFalse(decl1 > decl2)
# equal
decl1 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
decl2 = self.mock_decl_factory("s17", ["c0", "c1", "c2", "c3"])
self.assertFalse(decl1 > decl2)
# greater
decl1 = self.mock_decl_factory("s24")
decl2 = self.mock_decl_factory("s23", ["c7", "c8", "c9"])
self.assertTrue(decl1 > decl2)
class LevelTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = SELinuxPolicy("tests/policyrep/mls.conf")
def mock_level_factory(self, sens, cats=[]):
"""Factory function Level objects, using a mock qpol object."""
mock_level = Mock(qpol.qpol_mls_level_t)
mock_level.sens_name.return_value = sens
mock_level.cat_iter = lambda x: iter(cats)
return level_factory(self.p.policy, mock_level)
def test_000_mls_disabled(self):
"""Level factory on MLS-disabled policy."""
mock_p = Mock(qpol.qpol_policy_t)
mock_p.capability.return_value = False
self.assertRaises(MLSDisabled, level_factory, mock_p, None)
def test_001_lookup_no_cats(self):
"""Level lookup with no categories."""
levelobj = level_factory(self.p.policy, "s2")
self.assertEqual("s2", levelobj.qpol_symbol.sens_name(self.p.policy))
self.assertEqual(str(levelobj), "s2")
def test_002_lookup_cat_range(self):
"""Level lookup with category range."""
levelobj = level_factory(self.p.policy, "s1:c0.c13")
self.assertEqual(str(levelobj), "s1:c0.c13")
def test_003_lookup_complex_cats(self):
"""Level lookup with complex category set."""
levelobj = level_factory(self.p.policy, "s2:c0.c5,c7,c9.c11,c13")
self.assertEqual(str(levelobj), "s2:c0.c5,c7,c9.c11,c13")
def test_004_lookup_bad1(self):
"""Level lookup with garbage."""
self.assertRaises(InvalidLevel, level_factory, self.p.policy, "FAIL")
def test_005_lookup_bad2(self):
"""Level lookup with : in garbage."""
self.assertRaises(InvalidLevel, level_factory, self.p.policy, "FAIL:BAD")
def test_006_lookup_bad_cat(self):
"""Level lookup with invalid category."""
self.assertRaises(InvalidLevel, level_factory, self.p.policy, "s0:FAIL")
def test_007_lookup_bad_cat_range(self):
"""Level lookup with backwards category range."""
self.assertRaises(InvalidLevel, level_factory, self.p.policy, "s0:c4.c0")
def test_008_lookup_cat_range_error(self):
"""Level lookup with category range parse error."""
self.assertRaises(InvalidLevel, level_factory, self.p.policy, "s0:c0.c2.c4")
def test_009_lookup_cat_not_assoc(self):
"""Level lookup with category not associated with sensitivity."""
# c4 is not associated with s0.
self.assertRaises(InvalidLevel, level_factory, self.p.policy, "s0:c0,c4")
def test_00a_lookup_object(self):
"""Level factory policy lookup of Level object."""
level1 = level_factory(self.p.policy, "s0")
level2 = level_factory(self.p.policy, level1)
self.assertIs(level2, level1)
def test_010_equal(self):
"""Level equal."""
level1 = self.mock_level_factory("s0", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s0", ["c0", "c1", "c2", "c3"])
self.assertEqual(level1, level2)
def test_011_equal_str(self):
"""Level equal to string."""
level = self.mock_level_factory("s2", ["c0", "c1", "c2", "c3"])
self.assertEqual("s2:c0.c3", level)
def test_012_not_equal(self):
"""Level not equal."""
level1 = self.mock_level_factory("s0", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s0")
self.assertNotEqual(level1, level2)
def test_013_not_equal_str(self):
"""Level not equal to string."""
level = self.mock_level_factory("s0", ["c0", "c2"])
self.assertNotEqual("s0:c0.c2", level)
def test_014_dom(self):
"""Level dominate (ge)."""
# equal
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 >= level2)
# sens dominate
level1 = self.mock_level_factory("s2", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 >= level2)
# cat set dominate
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3", "c4"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 >= level2)
# sens domby
level1 = self.mock_level_factory("s0", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 >= level2)
# cat set domby
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 >= level2)
# incomp
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c7", "c8", "c9"])
self.assertFalse(level1 >= level2)
def test_015_domby(self):
"""Level dominate-by (le)."""
# equal
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 <= level2)
# sens dominate
level1 = self.mock_level_factory("s2", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 <= level2)
# cat set dominate
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3", "c4"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 <= level2)
# sens domby
level1 = self.mock_level_factory("s0", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 <= level2)
# cat set domby
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 <= level2)
# incomp
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c7", "c8", "c9"])
self.assertFalse(level1 <= level2)
def test_016_proper_dom(self):
"""Level proper dominate (gt)."""
# equal
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 > level2)
# sens dominate
level1 = self.mock_level_factory("s2", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 > level2)
# cat set dominate
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3", "c4"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 > level2)
# sens domby
level1 = self.mock_level_factory("s0", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 > level2)
# cat set domby
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 > level2)
# incomp
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c7", "c8", "c9"])
self.assertFalse(level1 > level2)
def test_017_proper_domby(self):
"""Level proper dominate-by (lt)."""
# equal
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 < level2)
# sens dominate
level1 = self.mock_level_factory("s2", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 < level2)
# cat set dominate
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3", "c4"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 < level2)
# sens domby
level1 = self.mock_level_factory("s0", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 < level2)
# cat set domby
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertTrue(level1 < level2)
# incomp
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c7", "c8", "c9"])
self.assertFalse(level1 < level2)
def test_018_incomp(self):
"""Level incomparable (xor)."""
# equal
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 ^ level2)
# sens dominate
level1 = self.mock_level_factory("s2", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 ^ level2)
# cat set dominate
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3", "c4"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 ^ level2)
# sens domby
level1 = self.mock_level_factory("s0", ["c0", "c1", "c2", "c3"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 ^ level2)
# cat set domby
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c0", "c1", "c2", "c3"])
self.assertFalse(level1 ^ level2)
# incomp
level1 = self.mock_level_factory("s1", ["c0", "c1", "c2"])
level2 = self.mock_level_factory("s1", ["c7", "c8", "c9"])
self.assertTrue(level1 ^ level2)
def test_020_level_statement(self):
"""Level has no statement."""
level = self.mock_level_factory("s1")
with self.assertRaises(NoStatement):
level.statement()
class RangeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = SELinuxPolicy("tests/policyrep/mls.conf")
def test_000_mls_disabled(self):
"""Range factory on MLS-disabled policy."""
mock_p = Mock(qpol.qpol_policy_t)
mock_p.capability.return_value = False
self.assertRaises(MLSDisabled, range_factory, mock_p, None)
def test_001_range_lookup_single_level(self):
"""Range lookup with single-level range."""
rangeobj = range_factory(self.p.policy, "s0")
self.assertEqual(str(rangeobj), "s0")
def test_002_range_lookup_single_level_redundant(self):
"""Range lookup with single-level range (same range listed twice)."""
rangeobj = range_factory(self.p.policy, "s1-s1")
self.assertEqual(str(rangeobj), "s1")
def test_003_range_lookup_simple(self):
"""Range lookup with simple range."""
rangeobj = range_factory(self.p.policy, "s0-s1:c0.c10")
self.assertEqual(str(rangeobj), "s0 - s1:c0.c10")
def test_004_range_lookup_no_cats(self):
"""Range lookup with no categories."""
rangeobj = range_factory(self.p.policy, "s0-s1")
self.assertEqual(str(rangeobj), "s0 - s1")
def test_005_range_lookup_complex(self):
"""Range lookup with complex category set."""
rangeobj = range_factory(self.p.policy, "s0:c0.c2-s2:c0.c5,c7,c9.c11,c13")
self.assertEqual(str(rangeobj), "s0:c0.c2 - s2:c0.c5,c7,c9.c11,c13")
def test_006_range_lookup_non_dom(self):
"""Range lookup with non-dominating high level."""
self.assertRaises(InvalidRange, range_factory, self.p.policy, "s1-s0")
def test_007_range_lookup_invalid_range_low(self):
"""Range lookup with an invalid range (low)."""
# c13 is not associated with s0.
self.assertRaises(InvalidRange, range_factory, self.p.policy, "s0:c13-s2:c13")
def test_008_range_lookup_invalid_range_high(self):
"""Range lookup with an invalid range (high)."""
# c13 is not associated with s0.
self.assertRaises(InvalidRange, range_factory, self.p.policy, "s0-s0:c13")
def test_009_lookup_object(self):
"""Range factory policy lookup of Range object."""
range1 = range_factory(self.p.policy, "s0")
range2 = range_factory(self.p.policy, range1)
self.assertIs(range2, range1)
def test_020_equal(self):
"""Range equality."""
rangeobj1 = range_factory(self.p.policy, "s0:c0.c2-s2:c0.c5,c7,c9.c11,c13")
rangeobj2 = range_factory(self.p.policy, "s0:c0.c2-s2:c0.c5,c7,c9.c11,c13")
self.assertEqual(rangeobj1, rangeobj2)
def test_021_equal(self):
"""Range equal to string."""
rangeobj = range_factory(self.p.policy, "s0:c0.c2-s2:c0.c5,c7,c9.c11,c13")
self.assertEqual("s0:c0.c2-s2:c0.c5,c7,c9.c11,c13", rangeobj)
self.assertEqual("s0:c0.c2- s2:c0.c5,c7,c9.c11,c13", rangeobj)
self.assertEqual("s0:c0.c2 -s2:c0.c5,c7,c9.c11,c13", rangeobj)
self.assertEqual("s0:c0.c2 - s2:c0.c5,c7,c9.c11,c13", rangeobj)
def test_022_contains(self):
"""Range contains a level."""
rangeobj = range_factory(self.p.policy, "s0:c1-s2:c0.c10")
# too low
level1 = level_factory(self.p.policy, "s0")
self.assertNotIn(level1, rangeobj)
# low level
level2 = level_factory(self.p.policy, "s0:c1")
self.assertIn(level2, rangeobj)
# mid
level3 = level_factory(self.p.policy, "s1:c1,c5")
self.assertIn(level3, rangeobj)
# high level
level4 = level_factory(self.p.policy, "s2:c0.c10")
self.assertIn(level4, rangeobj)
# too high
level5 = level_factory(self.p.policy, "s2:c0.c11")
self.assertNotIn(level5, rangeobj)
def test_030_range_statement(self):
"""Range has no statement."""
rangeobj = range_factory(self.p.policy, "s0")
with self.assertRaises(NoStatement):
rangeobj.statement()
|
lgpl-2.1
| -5,423,022,946,715,485,000 | 38.244558 | 100 | 0.592365 | false | 3.164034 | true | false | false |
olivierverdier/sfepy
|
sfepy/physics/potentials.py
|
1
|
3446
|
"""
Classes for constructing potentials of atoms and molecules.
"""
from sfepy.base.base import *
from sfepy.linalg import norm_l2_along_axis
class CompoundPotential(Container):
"""
Sum of several potentials.
"""
def __init__(self, objs=None):
Container.__init__(self, objs=objs)
self.update_expression()
def insert(self, ii, obj):
Container.insert(self, ii, obj)
self.update_expression()
def append(self, obj):
Container.append(self, obj)
self.update_expression()
def update_expression(self):
self.expression = []
for pot in self:
aux = [pot.sign, pot.name, pot.centre]
self.expression.append(aux)
def __mul__(self, other):
out = CompoundPotential()
for name, pot in self.iteritems():
out.append(pot * other)
return out
def __rmul__(self, other):
return self * other
def __add__(self, other):
if isinstance(other, PotentialBase):
out = self.copy()
out.append(other)
elif isinstance(other, CompoundPotential):
out = CompoundPotential(self._objs + other._objs)
else:
raise ValueError('cannot add CompoundPotential with %s!' % other)
return out
def __radd__(self, other):
return self + other
def __sub__(self, other):
if isinstance(other, PotentialBase):
out = self + (-other)
elif isinstance(other, CompoundPotential):
out = self + (-other)
else:
raise ValueError('cannot subtract CompoundPotential with %s!' \
% other)
return out
def __rsub__(self, other):
return -self + other
def __pos__(self):
return self
def __neg__(self):
return -1.0 * self
def __call__(self, coors):
val = 0.0
for pot in self:
val += pot(coors)
return val
class PotentialBase(Struct):
"""
Base class for potentials.
"""
def __mul__(self, other):
try:
mul = as_float_or_complex(other)
except ValueError:
raise ValueError('cannot multiply PotentialBase with %s!' % other)
out = self.copy(name=self.name)
out.sign = mul * self.sign
return out
def __rmul__(self, other):
return self * other
def __add__(self, other):
if isinstance(other, PotentialBase):
out = CompoundPotential([self, other])
else:
out = NotImplemented
return out
def __sub__(self, other):
if isinstance(other, PotentialBase):
out = CompoundPotential([self, -1.0 * other])
else:
out = NotImplemented
return out
def __pos__(self):
return self
def __neg__(self):
out = -1.0 * self
return out
class Potential(PotentialBase):
"""
Single potential.
"""
def __init__(self, name, function, centre=None, dim=3):
self.name = name
self.function = function
if centre is None:
centre = nm.array([0.0] * dim, dtype=nm.float64)
self.centre = nm.asarray(centre, dtype=nm.float64)
self.sign = 1.0
def __call__(self, coors):
r = norm_l2_along_axis(coors - self.centre)
pot = self.sign * self.function(r)
return pot
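# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how Potential and CompoundPotential compose via
# the arithmetic operators defined above. The callables, centres and
# coordinates below are made-up assumptions, not values from sfepy itself.
if __name__ == '__main__':
    import numpy as nm
    v_nucleus = Potential('nucleus', lambda r: -1.0 / (r + 1e-10),
                          centre=[0.0, 0.0, 0.0])
    v_shift = Potential('shift', lambda r: 0.5 * r, centre=[1.0, 0.0, 0.0])
    # Scaling keeps a Potential; subtraction builds a CompoundPotential.
    total = 2.0 * v_nucleus - v_shift
    coors = nm.array([[0.1, 0.0, 0.0], [1.5, 0.0, 0.0]])
    print(total(coors))  # summed potential evaluated at each point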
|
bsd-3-clause
| -7,074,944,824,542,105,000 | 21.522876 | 78 | 0.54585 | false | 4.025701 | false | false | false |
surgebiswas/poker
|
PokerBots_2017/Johnny/keras/applications/xception.py
|
2
|
10377
|
# -*- coding: utf-8 -*-
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Do note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function
is also different (same as Inception V3).
Also do note that this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers.
# Reference
- [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
from ..models import Model
from ..layers import Dense, Input, BatchNormalization, Activation, merge
from ..layers import Conv2D, SeparableConv2D, MaxPooling2D, GlobalAveragePooling2D
from ..engine.topology import get_source_inputs
from ..utils.data_utils import get_file
from .. import backend as K
from .imagenet_utils import decode_predictions, _obtain_input_shape
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
def Xception(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
classes=1000):
"""Instantiate the Xception architecture,
optionally loading weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
dimension ordering `(width, height, channels)`.
You should set `image_dim_ordering="tf"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.backend() != 'tensorflow':
raise RuntimeError('The Xception model is only available with '
'the TensorFlow backend.')
if K.image_dim_ordering() != 'tf':
warnings.warn('The Xception model is only available for the '
'input dimension ordering "tf" '
'(width, height, channels). '
'However your settings specify the default '
'dimension ordering "th" (channels, width, height). '
'You should set `image_dim_ordering="tf"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "tf" dimension ordering.')
K.set_image_dim_ordering('tf')
old_dim_ordering = 'th'
else:
old_dim_ordering = None
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=299,
min_size=71,
dim_ordering=K.image_dim_ordering(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(32, 3, 3, subsample=(2, 2), bias=False, name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, 3, 3, bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(128, 1, 1, subsample=(2, 2),
border_mode='same', bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(128, 3, 3, border_mode='same', bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(128, 3, 3, border_mode='same', bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same', name='block2_pool')(x)
x = merge([x, residual], mode='sum')
residual = Conv2D(256, 1, 1, subsample=(2, 2),
border_mode='same', bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(256, 3, 3, border_mode='same', bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(256, 3, 3, border_mode='same', bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same', name='block3_pool')(x)
x = merge([x, residual], mode='sum')
residual = Conv2D(728, 1, 1, subsample=(2, 2),
border_mode='same', bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same', name='block4_pool')(x)
x = merge([x, residual], mode='sum')
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = Activation('relu', name=prefix + '_sepconv1_act')(x)
x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name=prefix + '_sepconv1')(x)
x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
x = Activation('relu', name=prefix + '_sepconv2_act')(x)
x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name=prefix + '_sepconv2')(x)
x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
x = Activation('relu', name=prefix + '_sepconv3_act')(x)
x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name=prefix + '_sepconv3')(x)
x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
x = merge([x, residual], mode='sum')
residual = Conv2D(1024, 1, 1, subsample=(2, 2),
border_mode='same', bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(728, 3, 3, border_mode='same', bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(1024, 3, 3, border_mode='same', bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), border_mode='same', name='block13_pool')(x)
x = merge([x, residual], mode='sum')
x = SeparableConv2D(1536, 3, 3, border_mode='same', bias=False, name='block14_sepconv1')(x)
x = BatchNormalization(name='block14_sepconv1_bn')(x)
x = Activation('relu', name='block14_sepconv1_act')(x)
x = SeparableConv2D(2048, 3, 3, border_mode='same', bias=False, name='block14_sepconv2')(x)
x = BatchNormalization(name='block14_sepconv2_bn')(x)
x = Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='xception')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if old_dim_ordering:
K.set_image_dim_ordering(old_dim_ordering)
return model
def preprocess_input(x):
x /= 255.
x -= 0.5
x *= 2.
return x
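# --- Hedged usage sketch (not part of the original module) ---
# Rough illustration of classifying one image with the Keras 1.x API this
# module targets; the image path 'elephant.jpg' is an assumption.
if __name__ == '__main__':
    import numpy as np
    from keras.preprocessing import image
    model = Xception(include_top=True, weights='imagenet')
    img = image.load_img('elephant.jpg', target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds, top=3))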
|
mit
| 1,069,713,095,376,036,900 | 43.536481 | 151 | 0.620989 | false | 3.53681 | false | false | false |
c3nav/c3nav
|
src/c3nav/mapdata/utils/index.py
|
1
|
1897
|
import operator
from functools import reduce
from django.core import checks
from shapely import speedups
if speedups.available:
speedups.enable()
try:
import rtree
except OSError:
rtree_index = False
class Index:
def __init__(self):
self.objects = {}
def insert(self, value, geometry):
self.objects[value] = geometry
def delete(self, value):
self.objects.pop(value)
def intersection(self, geometry):
return self.objects.values()
else:
rtree_index = True
class Index:
def __init__(self):
self._index = rtree.index.Index()
self._bounds = {}
def insert(self, value, geometry):
try:
geoms = geometry.geoms
except AttributeError:
self._bounds.setdefault(value, []).append(geometry.bounds)
self._index.insert(value, geometry.bounds)
else:
for geom in geoms:
self.insert(value, geom)
def delete(self, value):
for bounds in self._bounds.pop(value):
self._index.delete(value, bounds)
def intersection(self, geometry):
try:
geoms = geometry.geoms
except AttributeError:
return set(self._index.intersection(geometry.bounds))
else:
return reduce(operator.__or__, (self.intersection(geom) for geom in geoms), set())
@checks.register()
def check_svg_renderer(app_configs, **kwargs):
errors = []
if not rtree_index:
errors.append(
checks.Warning(
'The libspatialindex_c library is missing. This will slow down c3nav in future versions.',
obj='rtree.index.Index',
id='c3nav.mapdata.W002',
)
)
return errors
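# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the Index API above; the shapely boxes are made-up
# geometries, not real c3nav map data.
if __name__ == '__main__':
    from shapely.geometry import box
    index = Index()
    index.insert(1, box(0, 0, 10, 10))
    index.insert(2, box(20, 20, 30, 30))
    # With rtree this is the set of candidate values whose bounds intersect
    # the query; the fallback implementation simply returns everything.
    print(index.intersection(box(5, 5, 6, 6)))
    index.delete(1)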
|
apache-2.0
| 5,152,785,575,294,487,000 | 26.1 | 106 | 0.555087 | false | 4.453052 | false | false | false |
atugushev/django-simple-settings
|
setup.py
|
1
|
1443
|
import os
import re
from setuptools import setup
VERSION = re.search(
r"VERSION\s*=\s*['\"](.*)['\"]",
open(os.path.join(os.path.dirname(__file__), 'simple_settings', '__init__.py')).read()
).group(1)
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-simple-settings',
version=VERSION,
packages=['simple_settings'],
install_requires=['Django>=1.3'],
include_package_data=True,
license='MIT License',
description='A very simple settings configurable in Django Admin Panel.',
long_description=README,
url='https://github.com/alikus/django-simple-settings',
author='Albert Tugushev',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
],
)
|
mit
| 4,488,668,001,902,991,000 | 33.357143 | 90 | 0.619543 | false | 3.797368 | false | true | false |
haoyutan/MSA-Framework
|
modules/msa/msa/contrib/master/authentication.py
|
1
|
1199
|
from rest_framework import exceptions
from rest_framework.authentication import (
get_authorization_header, BaseAuthentication,
)
from django.utils.translation import ugettext_lazy as _  # used by the _() error messages below
from .models import MicroService
class MicroServiceSecretAuthentication(BaseAuthentication):
def authenticate(self, request):
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != self.authenticate_header(request):
return None
if len(auth) == 1:
msg = _('Invalid token header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid token header. Token string should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
return self.authenticate_credentials(auth[1])
def authenticate_credentials(self, secret):
try:
micro_service = MicroService.objects.get(secret=secret)
except MicroService.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid secret.')
return (micro_service, secret)
def authenticate_header(self, request):
# MSS stands for 'Micro Service Secret'
return b'mss'
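# --- Hedged usage sketch (not part of the original module) ---
# One way this authentication class could be wired into a DRF view; the view
# name and response payload are illustrative assumptions.
from rest_framework.response import Response
from rest_framework.views import APIView
class WhoAmIView(APIView):
    authentication_classes = (MicroServiceSecretAuthentication,)
    def get(self, request):
        # request.user is the MicroService returned by authenticate_credentials()
        return Response({'micro_service': str(request.user)})
# Clients authenticate with the header:  Authorization: mss <secret>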
|
mit
| -5,470,964,627,476,039,000 | 32.305556 | 84 | 0.673895 | false | 4.815261 | false | false | false |
ornotermes/WebLights
|
effects/Christmas.py
|
1
|
1135
|
#
#+ Copyright (c) 2014, 2015 Rikard Lindstrom <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def run():
while True:
for s in [0,2]:
for d in [0, 1]:
for b in range(0, 50):
f = abs( d - b/50.0 )
c = s
for i in range(0, strip.length):
if stop: return
if(c == 0):
strip.rgb(1, 0.8, 0.5)
if(c == 1):
strip.rgb(f, 0, 0)
if(c == 2):
strip.rgb(1, 0.8, 0.5)
if(c == 3):
strip.rgb(0, f, 0)
c+=1
if(c >= 4): c = 0
strip.show()
|
gpl-3.0
| 7,070,983,920,460,769,000 | 28.102564 | 71 | 0.62467 | false | 2.971204 | false | false | false |
docileninja/CTF-Tools
|
scheduler/scheduler.py
|
1
|
1263
|
import requests as r
import threading
from functools import wraps
import os
import json
_tasks = []
ips = []
omitted = [] #list of names of skipped exploits
def info(s):
print('[*] {}'.format(s))
def warn(s):
print('[-] {}'.format(s))
def schedule(func):
@wraps(func)
def task_func(ip):
flag = func(ip)
if flag:
			info('task {} retrieved flag: {} from ip: {}'.format(func.__qualname__, flag, ip))
success, message = submit(flag)
if success:
info('task {} successfully submitted flag'.format(func.__qualname__))
else:
warn('task {} failed to submit flag {} because {}'.format(func.__qualname__, flag, message))
else:
			warn('task {} failed to retrieve flag from ip: {}'.format(func.__qualname__, ip))
_tasks.append(task_func)
return task_func
def submit(flag):
res = r.post('https://live.cyberstakesonline.com/liveapi/2/submit',
{'apikey': '8abb3ebb021c734590d41c42afd498854d848644f8cc38e11d',
'flag': flag})
res = json.loads(res.text)
return (res['status'] == '1', res['message'])
def launch(interval=240):
threading.Timer(interval, launch, args=(interval,)).start()
for task in _tasks:
if task.__qualname__ in omitted:
continue
for ip in ips:
			threading.Thread(target=task, args=(ip,)).start()  # start(), not run(), so tasks execute concurrently
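# --- Hedged usage sketch (not part of the original module) ---
# How an exploit would typically be registered with this scheduler; the port,
# path and flag handling below are illustrative assumptions.
@schedule
def example_exploit(ip):
	try:
		res = r.get('http://{}:8080/flag'.format(ip), timeout=5)
		return res.text.strip() or None
	except r.RequestException:
		return None
if __name__ == '__main__':
	ips.extend(['10.0.0.2', '10.0.0.3'])
	launch(interval=240)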
|
gpl-2.0
| -6,046,373,756,138,118,000 | 25.3125 | 96 | 0.656374 | false | 2.985816 | false | false | false |
Universal-Model-Converter/UMC3.0a
|
data/Python/x86/Lib/site-packages/OpenGL/GLU/glunurbs.py
|
1
|
10429
|
"""Implementation of GLU Nurbs structure and callback methods
Same basic pattern as seen with the gluTess* functions, just need to
add some bookkeeping to the structure class so that we can keep the
Python function references alive during the calling process.
"""
from OpenGL.raw import GLU as simple
from OpenGL import platform, converters, wrapper
from OpenGL.GLU import glustruct
from OpenGL.lazywrapper import lazy
from OpenGL import arrays, error
import ctypes
import weakref
from OpenGL.platform import PLATFORM
import OpenGL
__all__ = (
'GLUnurbs',
'gluNewNurbsRenderer',
'gluNurbsCallback',
'gluNurbsCallbackData',
'gluNurbsCallbackDataEXT',
'gluNurbsCurve',
'gluNurbsSurface',
'gluPwlCurve',
)
# /usr/include/GL/glu.h 242
class GLUnurbs(glustruct.GLUStruct, simple.GLUnurbs):
"""GLU Nurbs structure with oor and callback storage support
IMPORTANT NOTE: the texture coordinate callback receives a raw ctypes
data-pointer, as without knowing what type of evaluation is being done
(1D or 2D) we cannot safely determine the size of the array to convert
it. This is a limitation of the C implementation. To convert to regular
data-pointer, just call yourNurb.ptrAsArray( ptr, size, arrays.GLfloatArray )
with the size of data you expect.
"""
FUNCTION_TYPE = PLATFORM.functionTypeFor(PLATFORM.GLU)
CALLBACK_FUNCTION_REGISTRARS = {
# mapping from "which" to a function that should take 3 parameters,
# the nurb, the which and the function pointer...
}
CALLBACK_TYPES = {
# mapping from "which" GLU enumeration to a ctypes function type
simple.GLU_NURBS_BEGIN: FUNCTION_TYPE(
None, simple.GLenum
),
simple.GLU_NURBS_BEGIN_DATA: FUNCTION_TYPE(
None, simple.GLenum, ctypes.POINTER(simple.GLvoid)
),
simple.GLU_NURBS_VERTEX: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLfloat)
),
simple.GLU_NURBS_VERTEX_DATA: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLfloat), ctypes.POINTER(simple.GLvoid)
),
simple.GLU_NURBS_NORMAL: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLfloat)
),
simple.GLU_NURBS_NORMAL_DATA: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLfloat), ctypes.POINTER(simple.GLvoid)
),
simple.GLU_NURBS_COLOR: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLfloat)
),
simple.GLU_NURBS_COLOR_DATA: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLfloat), ctypes.POINTER(simple.GLvoid)
),
simple.GLU_NURBS_TEXTURE_COORD: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLfloat)
),
simple.GLU_NURBS_TEXTURE_COORD_DATA: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLfloat), ctypes.POINTER(simple.GLvoid)
),
simple.GLU_NURBS_END:FUNCTION_TYPE(
None
),
simple.GLU_NURBS_END_DATA: FUNCTION_TYPE(
None, ctypes.POINTER(simple.GLvoid)
),
simple.GLU_NURBS_ERROR:FUNCTION_TYPE(
None, simple.GLenum,
),
}
WRAPPER_METHODS = {
simple.GLU_NURBS_BEGIN: None,
simple.GLU_NURBS_BEGIN_DATA: '_justOOR',
simple.GLU_NURBS_VERTEX: '_vec3',
simple.GLU_NURBS_VERTEX_DATA: '_vec3',
simple.GLU_NURBS_NORMAL: '_vec3',
simple.GLU_NURBS_NORMAL_DATA: '_vec3',
simple.GLU_NURBS_COLOR: '_vec4',
simple.GLU_NURBS_COLOR_DATA: '_vec4',
simple.GLU_NURBS_TEXTURE_COORD: '_tex',
simple.GLU_NURBS_TEXTURE_COORD_DATA: '_tex',
simple.GLU_NURBS_END: None,
simple.GLU_NURBS_END_DATA: '_justOOR',
simple.GLU_NURBS_ERROR: None,
}
def _justOOR( self, function ):
"""Just do OOR on the last argument..."""
def getOOR( *args ):
args = args[:-1] + (self.originalObject(args[-1]),)
return function( *args )
return getOOR
def _vec3( self, function, size=3 ):
"""Convert first arg to size-element array, do OOR on arg2 if present"""
def vec( *args ):
vec = self.ptrAsArray(args[0],size,arrays.GLfloatArray)
if len(args) > 1:
oor = self.originalObject(args[1])
return function( vec, oor )
else:
return function( vec )
return vec
def _vec4( self, function ):
"""Size-4 vector version..."""
return self._vec3( function, 4 )
def _tex( self, function ):
"""Texture coordinate callback
NOTE: there is no way for *us* to tell what size the array is, you will
get back a raw data-point, not an array, as you do for all other callback
types!!!
"""
def oor( *args ):
if len(args) > 1:
oor = self.originalObject(args[1])
return function( args[0], oor )
else:
return function( args[0] )
return oor
# XXX yes, this is a side-effect...
simple.gluNewNurbsRenderer.restype = ctypes.POINTER( GLUnurbs )
def _callbackWithType( funcType ):
"""Get gluNurbsCallback function with set last arg-type"""
result = platform.copyBaseFunction(
simple.gluNurbsCallback
)
result.argtypes = [ctypes.POINTER(GLUnurbs), simple.GLenum, funcType]
assert result.argtypes[-1] == funcType
return result
for (c,funcType) in GLUnurbs.CALLBACK_TYPES.items():
cb = _callbackWithType( funcType )
GLUnurbs.CALLBACK_FUNCTION_REGISTRARS[ c ] = cb
assert funcType == GLUnurbs.CALLBACK_TYPES[c]
assert cb.argtypes[-1] == funcType
try:
del c,cb, funcType
except NameError, err:
pass
def gluNurbsCallback( nurb, which, CallBackFunc ):
"""Dispatch to the nurb's addCallback operation"""
return nurb.addCallback( which, CallBackFunc )
@lazy( simple.gluNewNurbsRenderer )
def gluNewNurbsRenderer( baseFunction ):
"""Return a new nurbs renderer for the system (dereferences pointer)"""
newSet = baseFunction()
new = newSet[0]
#new.__class__ = GLUnurbs # yes, I know, ick
return new
@lazy( simple.gluNurbsCallbackData )
def gluNurbsCallbackData( baseFunction, nurb, userData ):
"""Note the Python object for use as userData by the nurb"""
return baseFunction(
nurb, nurb.noteObject( userData )
)
MAX_ORDER = 8
def checkOrder( order,knotCount,name ):
"""Check that order is valid..."""
if order < 1:
raise error.GLUError(
"""%s should be 1 or more, is %s"""%( name,order,)
)
elif order > MAX_ORDER:
raise error.GLUError(
"""%s should be %s or less, is %s"""%( name, MAX_ORDER, order)
)
elif knotCount < (2*order):
raise error.GLUError(
"""Knotcount must be at least 2x %s is %s should be at least %s"""%( name, knotCount, 2*order)
)
def checkKnots( knots, name ):
"""Check that knots are in ascending order"""
if len(knots):
knot = knots[0]
for next in knots[1:]:
if next < knot:
raise error.GLUError(
"""%s has decreasing knot %s after %s"""%( name, next, knot )
)
@lazy( simple.gluNurbsCallbackDataEXT )
def gluNurbsCallbackDataEXT( baseFunction,nurb, userData ):
"""Note the Python object for use as userData by the nurb"""
return baseFunction(
nurb, nurb.noteObject( userData )
)
@lazy( simple.gluNurbsCurve )
def gluNurbsCurve( baseFunction, nurb, knots, control, type ):
"""Pythonic version of gluNurbsCurve
Calculates knotCount, stride, and order automatically
"""
knots = arrays.GLfloatArray.asArray( knots )
knotCount = arrays.GLfloatArray.arraySize( knots )
control = arrays.GLfloatArray.asArray( control )
try:
length,step = arrays.GLfloatArray.dimensions( control )
except ValueError, err:
raise error.GLUError( """Need a 2-dimensional control array""" )
order = knotCount - length
if OpenGL.ERROR_CHECKING:
checkOrder( order, knotCount, 'order of NURBS curve')
checkKnots( knots, 'knots of NURBS curve')
return baseFunction(
nurb, knotCount, knots, step, control, order, type,
)
@lazy( simple.gluNurbsSurface )
def gluNurbsSurface( baseFunction, nurb, sKnots, tKnots, control, type ):
"""Pythonic version of gluNurbsSurface
Calculates knotCount, stride, and order automatically
"""
sKnots = arrays.GLfloatArray.asArray( sKnots )
sKnotCount = arrays.GLfloatArray.arraySize( sKnots )
tKnots = arrays.GLfloatArray.asArray( tKnots )
tKnotCount = arrays.GLfloatArray.arraySize( tKnots )
control = arrays.GLfloatArray.asArray( control )
try:
length,width,step = arrays.GLfloatArray.dimensions( control )
except ValueError, err:
raise error.GLUError( """Need a 3-dimensional control array""" )
sOrder = sKnotCount - length
tOrder = tKnotCount - width
sStride = width*step
tStride = step
if OpenGL.ERROR_CHECKING:
checkOrder( sOrder, sKnotCount, 'sOrder of NURBS surface')
checkOrder( tOrder, tKnotCount, 'tOrder of NURBS surface')
checkKnots( sKnots, 'sKnots of NURBS surface')
checkKnots( tKnots, 'tKnots of NURBS surface')
if not (sKnotCount-sOrder)*(tKnotCount-tOrder) == length*width:
raise error.GLUError(
"""Invalid NURB structure""",
nurb, sKnotCount, sKnots, tKnotCount, tKnots,
sStride, tStride, control,
sOrder,tOrder,
type
)
result = baseFunction(
nurb, sKnotCount, sKnots, tKnotCount, tKnots,
sStride, tStride, control,
sOrder,tOrder,
type
)
return result
@lazy( simple.gluPwlCurve )
def gluPwlCurve( baseFunction, nurb, data, type ):
"""gluPwlCurve -- piece-wise linear curve within GLU context
data -- the data-array
type -- determines number of elements/data-point
"""
data = arrays.GLfloatArray.asArray( data )
if type == simple.GLU_MAP1_TRIM_2:
divisor = 2
elif type == simple.GLU_MAP_TRIM_3:
divisor = 3
else:
raise ValueError( """Unrecognised type constant: %s"""%(type))
size = arrays.GLfloatArray.arraySize( data )
size = int(size//divisor)
return baseFunction( nurb, size, data, divisor, type )
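# --- Hedged usage sketch (not part of the original module) ---
# These wrappers are meant to be called inside a live OpenGL context between
# the GLU curve delimiters; the knots and control points below are made-up
# assumptions, so the calls are left commented out.
#
#   from OpenGL.GL import GL_MAP1_VERTEX_3
#   from OpenGL.GLU import gluBeginCurve, gluEndCurve
#   nurb = gluNewNurbsRenderer()
#   gluBeginCurve(nurb)
#   gluNurbsCurve(nurb,
#                 [0, 0, 0, 0, 1, 1, 1, 1],                        # knots
#                 [[-1, 0, 0], [0, 1, 0], [1, 0, 0], [1, -1, 0]],  # control points
#                 GL_MAP1_VERTEX_3)
#   gluEndCurve(nurb)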
|
mit
| -4,510,473,603,208,233,000 | 35.337979 | 106 | 0.636111 | false | 3.512631 | false | false | false |
odty101/MediaCollector
|
MediaCollector/MediaCollector/settings.py
|
1
|
3199
|
"""
Django settings for MediaCollector project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2944(*7enb^_l^5h848)%cy7iu@dkle(*ak+m_dnt+v*g2q5-x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'Movies.apps.MoviesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lineage'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MediaCollector.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MediaCollector.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/movies/'
|
mit
| 8,056,945,365,130,964,000 | 24.798387 | 91 | 0.687402 | false | 3.515385 | false | false | false |
ErickMurillo/aprocacaho
|
aprocacaho/settings.py
|
1
|
3231
|
"""
Django settings for aprocacaho project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
from local_settings import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r&4=i=5tb!0!vzsl0*9h!cg2dysp_c)%42by=5c%a+yovj7-c)'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'organizacion',
'productores',
'lugar',
'configuracion',
'sorl.thumbnail',
'smart_selects',
'multiselectfield',
'ckeditor',
'import_export',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'aprocacaho.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'aprocacaho.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_media"),
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
|
mit
| -6,922,736,693,737,744,000 | 28.642202 | 91 | 0.692355 | false | 3.511957 | false | false | false |
asterisk/asterisk_rest_libraries
|
lib/javascript.py
|
1
|
5168
|
#!/usr/bin/env python
"""
Copyright (C) 2013 Digium, Inc.
Erin Spiceland <[email protected]>
See http://www.asterisk.org for more information about
the Asterisk project. Please do not directly contact
any of the maintainers of this project for assistance;
the project provides a web site, mailing lists and IRC
channels for your use.
This program is free software, distributed under the terms
detailed in the the LICENSE file at the top of the source tree.
"""
import re
from utils import get_file_content, wrap_line
LANGUAGE = 'javascript'
FILE_EXTENSION = 'js'
CODE_WRAP_MARKERS = [
# 1st element used in regex to identify wrappable lines
# Remember to comment characters that have special meanings in regex.
# 2nd element (indent_marker) used in regex to identify index of indention
# 3rd element (indent_suffix) placed between whitespace indentation and
# wrapped line
# 4th element (indent_offset) is number of chars to add to indentation
# index before content
('\/\*', '/*', ' * ', 0),
('this.\w+ = function', '', '', 2)
]
def make_filename(name):
"""Manipulate a string to form the name without file extension for each
module in the package.
'name' will usually be one word, like 'channels'. Whether singular
or plural should not be assumed
"""
name = re.sub('s$', '', name)
return name.lower()
def make_class_name(name):
"""Manipulate a string to form the name without file extension for each
module in the package.
'name' will usually be one word, like 'channels'. Whether singular
or plural should not be assumed
"""
name = re.sub('s$', '', name)
name = name[0].upper() + name[1:]
name = re.sub('/', '::', name)
return name
def make_method_name(name, class_name):
"""Manipulate a string to form the name without file extension for each
module in the package.
'name' will usually be one or more words in camelCase, like
'muteChannel'. Whether singular or plural should not be assumed.
"""
return name
def make_param_string(method):
"""Make the string containing the parameter definition for each method
in a class
"""
t_attr = get_file_content('%s/templates/method_params_attr.proto'
% method.lang)
if method.param_obj is None:
return ''
for p in method.param_obj:
if p['name'] == "%sId" % (method.file_name):
continue
param_name = "%s%s" % (p['name'],
p['dataType'][0].upper() + p['dataType'][1:])
if 'allowMultiple' in p and p['allowMultiple']:
param_name = param_name + "Array"
attr = re.sub('\{ATTR_NAME\}', param_name, t_attr)
attr = re.sub('\{ATTR_ORIG_NAME\}', p['name'], attr)
method.param_lines.append(attr)
method.method_params.append(param_name)
return ', '.join(method.method_params)
def make_method_comment(class_desc, method_desc):
"""Use the class and method descriptions in the Swagger resource files
to create a comment for the method.
"""
method_comments = []
if class_desc:
method_comments.append(class_desc)
if method_desc and method_desc != class_desc:
method_comments.append(method_desc)
return '\t\t/* %s */' % ('; '.join(method_comments))
def make_api_call_params(method):
"""Format the parameters to the call() method in asterisk_rest_api, the
util module which handles HTTP requests to Asterisk."""
params = ["'path': '%s'" % (method.path),
"'http_method': '%s'" % (method.http_method)]
if method.method_params:
params.append("'parameters': params")
if method.required_id:
params.append("'object_id': this.object_id")
return '{\n\t\t\t' + ',\n\t\t\t'.join(params) + '\n\t\t}'
def wrap(codestring):
"""Wrap code created by AsteriskPy to a certain width.
Define lines to wrap and string to glean indent index from
in the CODE_WRAP_MARKERS list at the top of this file.
For many languages, this function may not need to be changed much
at all.
    In JavaScript, we want to indent at exactly the index of the code marker
    we use, continuing wrapped block comments with ' * '. Use tabs. Wrap to
    70 characters since use of tabs may increase visible line length.
"""
width = 70
code_lines = codestring.split('\n')
wrapped_code_lines = []
for line in code_lines:
if len(line) < width:
wrapped_code_lines.append(line)
continue
matched = None
for each in CODE_WRAP_MARKERS:
match = re.search('^\s+(%s)' % (each[0]), line)
if match is not None:
matched = True
new_line = wrap_line(line, width, each[1], indent_char='\t',
indent_suffix=each[2],
indent_offset=each[3])
wrapped_code_lines.append(new_line)
if matched is None:
wrapped_code_lines.append(line)
return '\n'.join(wrapped_code_lines)
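# --- Hedged usage sketch (not part of the original module) ---
# Quick illustration of the naming helpers above; 'Channels'/'muteChannel'
# are example resource names, not values taken from the Swagger files.
if __name__ == '__main__':
    print(make_filename('Channels'))                    # -> 'channel'
    print(make_class_name('channels'))                  # -> 'Channel'
    print(make_method_name('muteChannel', 'Channel'))   # -> 'muteChannel'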
|
bsd-3-clause
| 2,504,914,704,043,261,400 | 30.512195 | 78 | 0.623839 | false | 3.79442 | false | false | false |
kxz/interstat
|
interstat/formatters.py
|
1
|
6274
|
"""Interstat's core single-line and whole-file formatters."""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from datetime import datetime
from itertools import tee
import re
from jinja2 import Environment, ChoiceLoader, FileSystemLoader, PackageLoader
from jinja2.utils import urlize
# I'm mildly disgusted that the __name__ import works, but we need it
# later, so let's not look a gift horse in the mouth.
from . import __name__ as PACKAGE_NAME
from .formats import formats
#: A list of available message types.
MESSAGE_TYPES = ['privmsg', 'action', 'notice', 'nick', 'join',
'part', 'quit', 'kick', 'topic', 'mode']
#: A regex matching locations in an mIRC-formatted string where the
#: rendering may change.
FORMATTING_BOUNDARIES = re.compile(r"""
\x02 | # Bold
\x03(?: # Color
([0-9]{1,2})(?: # Optional foreground number (from 0 or 00 to 99)
,([0-9]{1,2}) # Optional background number (from 0 or 00 to 99)
)?
)? |
\x0F | # Normal (revert to default formatting)
\x16 | # Reverse video (sometimes rendered as italics)
\x1F | # Underline
^ | $ # Beginning and end of string, for convenience
# This *must* go at the end, otherwise it'll
# take precedence over a control code at the
# start of a string.
""", re.VERBOSE)
#: A list mapping mIRC color codes (from 0 to 15) to CSS colors.
MIRC_COLORS = ['white', 'black', 'navy', 'green',
'red', 'maroon', 'purple', 'olive',
'yellow', 'lime', 'teal', 'cyan',
'royalblue', 'pink', 'gray', 'lightgray']
def _pairwise(iterable):
"""Yield successive overlapping pairs from *iterable*."""
a, b = tee(iterable) # pylint: disable=invalid-name
next(b, None)
return zip(a, b)
def _toggle(mapping, key, value):
"""If *key* is set in *mapping*, delete its value. Otherwise, set
*key* to *value*."""
if key in mapping:
del mapping[key]
else:
mapping[key] = value
def mirc_color(numeric):
"""Return a CSS color corresponding to an mIRC color numeric."""
try:
numeric = int(numeric)
except ValueError:
numeric = 0
# The modulo simulates the apparent behavior of a number of clients,
# while handily eliminating out-of-bounds errors.
return MIRC_COLORS[numeric % len(MIRC_COLORS)]
def line_as_html(message):
"""Given a *message* containing mIRC formatting codes, return an
HTML rendering."""
html = ''
style = dict()
matches = FORMATTING_BOUNDARIES.finditer(message)
for first, second in _pairwise(matches):
control_code = first.group(0)[:1]
if control_code == '\x02':
_toggle(style, 'font-weight', 'bold')
elif control_code == '\x03':
if first.group(1):
style['color'] = mirc_color(first.group(1))
if first.group(2):
style['background-color'] = mirc_color(first.group(2))
else:
style.pop('color', None)
style.pop('background-color', None)
elif control_code == '\x0F':
style.clear()
elif control_code == '\x16':
_toggle(style, 'font-style', 'italic')
elif control_code == '\x1F':
_toggle(style, 'text-decoration', 'underline')
text = urlize(message[first.end():second.start()])
if text: # Don't output empty <span> tags.
if style:
css = '; '.join('{}: {}'.format(k, v)
for k, v in sorted(style.items()))
html += '<span style="{}">{}</span>'.format(css, text)
else:
html += text
return html
def file_as_messages(log_file, format_name):
"""Yield message dicts from an IRC log file, parsed according to the
given log format, suitable for passing into Interstat templates."""
try:
rules = formats[format_name]
except KeyError:
raise ValueError('unknown log format: {}'.format(format_name))
for i, line in enumerate(log_file):
match = rules['line'].match(line)
if match is None:
# Just don't bother with lines we can't get a timestamp for.
continue
message = {}
message['id'] = 'L{}'.format(i + 1)
message['timestamp'] = datetime.strptime(
match.group('timestamp'), rules['timestamp'])
line = match.group('line')
for message_type in MESSAGE_TYPES:
match = rules[message_type].match(line)
if match is not None:
message['type'] = message_type
message.update(match.groupdict())
break
else:
message['type'] = 'misc'
message['content'] = line
message['template'] = 'message/{}.html'.format(message['type'])
yield message
def file_as_html(log_file, format_name, template_dir=None, **kwargs):
"""Return an HTML rendering of an IRC log file, parsed according to
the given log format.
Custom HTML templates are first looked for in *template_dir*, if
given, before falling back to the defaults. Any remaining keyword
arguments, with the exception of ``messages`` (which is reserved),
are passed directly to the renderer for use as template variables.
By default, the Interstat CLI passes ``title`` and ``stylesheet``.
"""
kwargs['messages'] = file_as_messages(log_file, format_name)
# Tell Jinja where to look for templates.
loader_choices = [PackageLoader(PACKAGE_NAME)]
if template_dir is not None:
loader_choices.insert(0, FileSystemLoader(template_dir))
env = Environment(loader=ChoiceLoader(loader_choices),
keep_trailing_newline=True)
# Import down here to avoid circularity issues.
from .filters import colorhash, ircformat
env.filters['colorhash'] = colorhash
env.filters['ircformat'] = ircformat
# pylint: disable=no-member
return env.get_template('log.html').render(**kwargs)
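# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the public entry point; the log path and the
# 'irssi' format name are assumptions (valid names come from interstat.formats).
if __name__ == '__main__':
    with open('channel.log') as log_file:
        html = file_as_html(log_file, 'irssi', title='#channel')
    with open('channel.html', 'w') as out:
        out.write(html)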
|
mit
| 7,675,466,667,183,133,000 | 37.024242 | 77 | 0.591967 | false | 3.95836 | false | false | false |
sklam/llvmlite
|
llvmlite/binding/executionengine.py
|
1
|
5466
|
from __future__ import print_function, absolute_import
from ctypes import byref, POINTER, c_char_p, c_bool, c_uint, c_void_p
from . import ffi, targets
# Just check these weren't optimized out of the DLL.
ffi.lib.LLVMPY_LinkInJIT
ffi.lib.LLVMPY_LinkInMCJIT
def create_jit_compiler(module, opt=2):
"""Create an ExecutionEngine for a module
"""
engine = ffi.LLVMExecutionEngineRef()
with ffi.OutputString() as outerr:
if ffi.lib.LLVMPY_CreateJITCompiler(byref(engine), module, opt,
outerr):
raise RuntimeError(str(outerr))
return ExecutionEngine(engine, module=module)
def create_mcjit_compiler(module, target_machine):
"""
Create a MCJIT ExecutionEngine from the given *module* and
*target_machine*.
"""
with ffi.OutputString() as outerr:
engine = ffi.lib.LLVMPY_CreateMCJITCompiler(
module, target_machine, outerr)
if not engine:
raise RuntimeError(str(outerr))
target_machine._owned = True
return ExecutionEngine(engine, module=module)
def create_jit_compiler_with_tm(module, target_machine):
"""
Create a JIT ExecutionEngine from the given *module* and
*target_machine*.
"""
with ffi.OutputString() as outerr:
engine = ffi.lib.LLVMPY_CreateJITCompilerWithTM(
module, target_machine, outerr)
if not engine:
raise RuntimeError(str(outerr))
target_machine._owned = True
return ExecutionEngine(engine, module=module)
class ExecutionEngine(ffi.ObjectRef):
"""An ExecutionEngine owns all Modules associated with it.
Deleting the engine will remove all associated modules.
It is an error to delete the associated modules.
"""
def __init__(self, ptr, module):
"""
Module ownership is transferred to the EE
"""
self._modules = set([module])
self._td = None
module._owned = True
ffi.ObjectRef.__init__(self, ptr)
def get_pointer_to_global(self, gv):
# XXX getPointerToGlobal is deprecated for MCJIT,
# getGlobalValueAddress should be used instead.
ptr = ffi.lib.LLVMPY_GetPointerToGlobal(self, gv)
if ptr is None:
raise ValueError("Cannot find given global value %r" % (gv.name))
return ptr
get_pointer_to_function = get_pointer_to_global
def add_global_mapping(self, gv, addr):
# XXX unused?
ffi.lib.LLVMPY_AddGlobalMapping(self, gv, addr)
def add_module(self, module):
"""
Ownership of module is transferred to the execution engine
"""
if module in self._modules:
raise KeyError("module already added to this engine")
ffi.lib.LLVMPY_AddModule(self, module)
module._owned = True
self._modules.add(module)
def finalize_object(self):
ffi.lib.LLVMPY_FinalizeObject(self)
def remove_module(self, module):
"""
Ownership of module is returned
"""
with ffi.OutputString() as outerr:
if ffi.lib.LLVMPY_RemoveModule(self, module, outerr):
raise RuntimeError(str(outerr))
self._modules.remove(module)
module._owned = False
@property
def target_data(self):
"""
The TargetData for this execution engine.
"""
if self._td is not None:
return self._td
ptr = ffi.lib.LLVMPY_GetExecutionEngineTargetData(self)
self._td = targets.TargetData(ptr)
self._td._owned = True
return self._td
def _dispose(self):
# The modules will be cleaned up by the EE
for mod in self._modules:
mod.detach()
if self._td is not None:
self._td.detach()
self._modules.clear()
self._capi.LLVMPY_DisposeExecutionEngine(self)
# ============================================================================
# FFI
ffi.lib.LLVMPY_CreateJITCompiler.argtypes = [
POINTER(ffi.LLVMExecutionEngineRef),
ffi.LLVMModuleRef,
c_uint,
POINTER(c_char_p),
]
ffi.lib.LLVMPY_CreateJITCompiler.restype = c_bool
ffi.lib.LLVMPY_CreateJITCompilerWithTM.argtypes = [
ffi.LLVMModuleRef,
ffi.LLVMTargetMachineRef,
POINTER(c_char_p),
]
ffi.lib.LLVMPY_CreateJITCompilerWithTM.restype = ffi.LLVMExecutionEngineRef
ffi.lib.LLVMPY_CreateMCJITCompiler.argtypes = [
ffi.LLVMModuleRef,
ffi.LLVMTargetMachineRef,
POINTER(c_char_p),
]
ffi.lib.LLVMPY_CreateMCJITCompiler.restype = ffi.LLVMExecutionEngineRef
ffi.lib.LLVMPY_RemoveModule.argtypes = [
ffi.LLVMExecutionEngineRef,
ffi.LLVMModuleRef,
POINTER(c_char_p),
]
ffi.lib.LLVMPY_RemoveModule.restype = c_bool
ffi.lib.LLVMPY_AddModule.argtypes = [
ffi.LLVMExecutionEngineRef,
ffi.LLVMModuleRef
]
ffi.lib.LLVMPY_GetPointerToGlobal.argtypes = [ffi.LLVMExecutionEngineRef,
ffi.LLVMValueRef]
ffi.lib.LLVMPY_GetPointerToGlobal.restype = c_void_p
ffi.lib.LLVMPY_AddGlobalMapping.argtypes = [ffi.LLVMExecutionEngineRef,
ffi.LLVMValueRef,
c_void_p]
ffi.lib.LLVMPY_FinalizeObject.argtypes = [ffi.LLVMExecutionEngineRef]
ffi.lib.LLVMPY_GetExecutionEngineTargetData.argtypes = [
ffi.LLVMExecutionEngineRef
]
ffi.lib.LLVMPY_GetExecutionEngineTargetData.restype = ffi.LLVMTargetDataRef
|
bsd-2-clause
| 5,138,272,336,224,354,000 | 29.536313 | 78 | 0.637578 | false | 3.619868 | false | false | false |
Zavteq/fixofx
|
lib/ofx/document.py
|
1
|
3703
|
#coding: utf-8
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ofx.document - abstract OFX document.
#
import ofx
import xml.sax.saxutils as sax
class Document:
def as_xml(self, original_format=None, date_format=None):
"""Formats this document as an OFX 2.0 XML document."""
xml = ""
# NOTE: Encoding in OFX, particularly in OFX 1.02,
# is kind of a mess. The OFX 1.02 spec talks about "UNICODE"
# as a supported encoding, which the OFX 2.0 spec has
# back-rationalized to "UTF-8". The "US-ASCII" encoding is
# given as "USASCII". Yet the 1.02 spec acknowledges that
# not everyone speaks English nor uses UNICODE, so they let
# you throw any old encoding in there you'd like. I'm going
# with the idea that if the most common encodings are named
# in an OFX file, they should be translated to "real" XML
# encodings, and if no encoding is given, UTF-8 (which is a
# superset of US-ASCII) should be assumed; but if a named
# encoding other than USASCII or 'UNICODE' is given, that
# should be preserved. I'm also adding a get_encoding()
# method so that we can start to survey what encodings
# we're actually seeing, and use that to maybe be smarter
# about this in the future.
#forcing encoding to utf-8
encoding = "UTF-8"
header_dict = self.parse_dict["header"]
if 'OLDFILEUID' not in header_dict:
OLDFILEUID = 'NONE'
else:
OLDFILEUID = header_dict['OLDFILEUID']
if 'NEWFILEUID' not in header_dict:
NEWFILEUID = 'NONE'
else:
NEWFILEUID = header_dict['NEWFILEUID']
xml += """<?xml version="1.0" encoding="%s"?>\n""" % encoding
xml += """<?OFX OFXHEADER="200" VERSION="200" """ + \
"""SECURITY="%s" OLDFILEUID="%s" NEWFILEUID="%s"?>\n""" % \
(self.parse_dict["header"]["SECURITY"],
OLDFILEUID,
NEWFILEUID)
if original_format is not None:
xml += """<!-- Converted from: %s -->\n""" % original_format
if date_format is not None:
xml += """<!-- Date format was: %s -->\n""" % date_format
taglist = self.parse_dict["body"]["OFX"].asList()
if len(taglist) == 1 and isinstance(taglist[0], list):
xml += self._format_xml(taglist[0])
else:
xml += self._format_xml(taglist)
return xml
def _format_xml(self, mylist, indent=0):
xml = ""
indentstring = " " * indent
tag = mylist.pop(0)
if len(mylist) > 0 and isinstance(mylist[0], list):
xml += "%s<%s>\n" % (indentstring, tag)
for value in mylist:
xml += self._format_xml(value, indent=indent + 2)
xml += "%s</%s>\n" % (indentstring, tag)
elif len(mylist) > 0:
# Unescape then reescape so we don't wind up with '&lt;', oy.
value = sax.escape(sax.unescape(mylist[0]))
xml += "%s<%s>%s</%s>\n" % (indentstring, tag, value, tag)
return xml
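# --- Hedged usage sketch (not part of the original module) ---
# _format_xml turns the parser's nested tag lists into indented OFX XML; the
# sample tag list below is a made-up illustration, not real parser output.
if __name__ == '__main__':
    doc = Document()
    taglist = ['STATUS', ['CODE', '0'], ['SEVERITY', 'INFO']]
    print(doc._format_xml(taglist))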
|
apache-2.0
| -1,877,798,710,501,073,700 | 38.393617 | 77 | 0.591142 | false | 3.703 | false | false | false |
rhyolight/nupic.son
|
app/melange/request/access.py
|
1
|
13682
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for checking access to pages."""
from django.utils import translation
from melange.logic import user as user_logic
from melange.models import profile as profile_model
from melange.request import exception
from melange.request import links
from soc.models import program as program_model
_MESSAGE_NOT_PROGRAM_ADMINISTRATOR = translation.ugettext(
'You need to be a program administrator to access this page.')
_MESSAGE_NOT_DEVELOPER = translation.ugettext(
'This page is only accessible to developers.')
_MESSAGE_HAS_PROFILE = translation.ugettext(
'This page is accessible only to users without a profile.')
_MESSAGE_NO_PROFILE = translation.ugettext(
'Active profile is required to access this page.')
_MESSAGE_NO_URL_PROFILE = translation.ugettext(
'Active profile for %s is required to access this page.')
_MESSAGE_PROGRAM_NOT_EXISTING = translation.ugettext(
'Requested program does not exist.')
_MESSAGE_PROGRAM_NOT_ACTIVE = translation.ugettext(
'Requested program is not active at this moment.')
_MESSAGE_STUDENTS_DENIED = translation.ugettext(
'This page is not accessible to users with student profiles.')
_MESSAGE_NOT_USER_IN_URL = translation.ugettext(
'You are not logged in as the user in the URL.')
_MESSAGE_NOT_ORG_ADMIN_FOR_ORG = translation.ugettext(
'You are not organization administrator for %s')
_MESSAGE_INACTIVE_BEFORE = translation.ugettext(
'This page is inactive before %s.')
_MESSAGE_INACTIVE_OUTSIDE = translation.ugettext(
'This page is inactive before %s and after %s.')
_MESSAGE_INVALID_URL_ORG_STATUS = translation.ugettext(
'This page is not accessible to organizations with status %s.')
def ensureLoggedIn(data):
"""Ensures that the user is logged in.
Args:
data: request_data.RequestData for the current request.
Raises:
exception.LoginRequired: If the user is not logged in.
"""
if not data.gae_user:
raise exception.LoginRequired()
def ensureLoggedOut(data):
"""Ensures that the user is logged out.
Args:
data: request_data.RequestData for the current request.
Raises:
exception.Redirect: If the user is logged in this
exception will redirect them to the logout page.
"""
if data.gae_user:
raise exception.Redirect(links.LINKER.logout(data.request))
class AccessChecker(object):
"""Interface for page access checkers."""
def checkAccess(self, data, check):
"""Ensure that the user's request should be satisfied.
Implementations of this method must not effect mutations of the
passed parameters (or anything else).
Args:
data: A request_data.RequestData describing the current request.
check: An access_checker.AccessChecker object.
Raises:
exception.LoginRequired: Indicating that the user is not logged
in, but must log in to access the resource specified in their
request.
exception.Redirect: Indicating that the user is to be redirected
to another URL.
exception.UserError: Describing what was erroneous about the
user's request and describing an appropriate response.
exception.ServerError: Describing some problem that arose during
request processing and describing an appropriate response.
"""
raise NotImplementedError()
class AllAllowedAccessChecker(AccessChecker):
"""AccessChecker that allows all requests for access."""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
pass
ALL_ALLOWED_ACCESS_CHECKER = AllAllowedAccessChecker()
# TODO(nathaniel): There's some ninja polymorphism to be addressed here -
# RequestData doesn't actually have an "is_host" attribute, but its two
# major subclasses (the GCI-specific and GSoC-specific RequestData classes)
# both do, so this "works" but isn't safe or sanely testable.
class ProgramAdministratorAccessChecker(AccessChecker):
"""AccessChecker that ensures that the user is a program administrator."""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if data.is_developer:
# NOTE(nathaniel): Developers are given all the powers of
# program administrators.
return
elif not data.gae_user:
raise exception.LoginRequired()
elif not user_logic.isHostForProgram(data.ndb_user, data.program.key()):
raise exception.Forbidden(message=_MESSAGE_NOT_PROGRAM_ADMINISTRATOR)
PROGRAM_ADMINISTRATOR_ACCESS_CHECKER = ProgramAdministratorAccessChecker()
# TODO(nathaniel): Eliminate this or make it a
# "SiteAdministratorAccessChecker" - there should be no aspects of Melange
# that require developer action or are limited only to developers.
class DeveloperAccessChecker(AccessChecker):
"""AccessChecker that ensures that the user is a developer."""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if not data.is_developer:
raise exception.Forbidden(message=_MESSAGE_NOT_DEVELOPER)
DEVELOPER_ACCESS_CHECKER = DeveloperAccessChecker()
class ConjuctionAccessChecker(AccessChecker):
"""Aggregated access checker that holds a collection of other access
checkers and ensures that access is granted only if each of those checkers
grants access individually."""
def __init__(self, checkers):
"""Initializes a new instance of the access checker.
Args:
checkers: list of AccessChecker objects to be examined by this checker.
"""
self._checkers = checkers
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
for checker in self._checkers:
checker.checkAccess(data, check)
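# Illustrative sketch (not from the original file): composing checkers so that
# a page requires, say, an active program and a non-student profile. Both
# singletons referenced below are instantiated further down in this module, so
# a real composition like this would live near the end of the file; the
# variable name is hypothetical.
#
#   PROGRAM_ACTIVE_NON_STUDENT_CHECKER = ConjuctionAccessChecker([
#       PROGRAM_ACTIVE_ACCESS_CHECKER,
#       NON_STUDENT_PROFILE_ACCESS_CHECKER,
#   ])
#
# Keeping per-page rules as declarative lists of small checkers is what lets
# each individual checker stay single-purpose.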
class NonStudentUrlProfileAccessChecker(AccessChecker):
"""AccessChecker that ensures that the URL user has a non-student profile."""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if data.url_ndb_profile.status != profile_model.Status.ACTIVE:
raise exception.Forbidden(
message=_MESSAGE_NO_URL_PROFILE % data.kwargs['user'])
if data.url_ndb_profile.is_student:
raise exception.Forbidden(message=_MESSAGE_STUDENTS_DENIED)
NON_STUDENT_URL_PROFILE_ACCESS_CHECKER = NonStudentUrlProfileAccessChecker()
class NonStudentProfileAccessChecker(AccessChecker):
"""AccessChecker that ensures that the currently logged-in user
has a non-student profile."""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if (not data.ndb_profile
or data.ndb_profile.status != profile_model.Status.ACTIVE):
raise exception.Forbidden(message=_MESSAGE_NO_PROFILE)
if data.ndb_profile.is_student:
raise exception.Forbidden(message=_MESSAGE_STUDENTS_DENIED)
NON_STUDENT_PROFILE_ACCESS_CHECKER = NonStudentProfileAccessChecker()
class ProgramActiveAccessChecker(AccessChecker):
"""AccessChecker that ensures that the program is currently active.
A program is considered active when the current point of time comes after
its start date and before its end date. Additionally, its status has to
be set to visible.
"""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if not data.program:
raise exception.NotFound(message=_MESSAGE_PROGRAM_NOT_EXISTING)
if (data.program.status != program_model.STATUS_VISIBLE
or not data.timeline.programActive()):
raise exception.Forbidden(message=_MESSAGE_PROGRAM_NOT_ACTIVE)
PROGRAM_ACTIVE_ACCESS_CHECKER = ProgramActiveAccessChecker()
class IsUrlUserAccessChecker(AccessChecker):
"""AccessChecker that ensures that the logged in user is the user whose
identifier is set in URL data.
"""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
key_id = data.kwargs.get('user')
if not key_id:
raise exception.BadRequest('The request does not contain user data.')
ensureLoggedIn(data)
if not data.ndb_user or data.ndb_user.key.id() != key_id:
raise exception.Forbidden(message=_MESSAGE_NOT_USER_IN_URL)
IS_URL_USER_ACCESS_CHECKER = IsUrlUserAccessChecker()
class IsUserOrgAdminForUrlOrg(AccessChecker):
"""AccessChecker that ensures that the logged in user is organization
administrator for the organization whose identifier is set in URL data.
"""
# TODO(daniel): remove this when all organizations moved to NDB
def __init__(self, is_ndb=False):
"""Initializes a new instance of this access checker.
Args:
is_ndb: a bool used to specify if the access checker will be used
for old db organizations or newer ndb organizations.
"""
self._is_ndb = is_ndb
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if not self._is_ndb:
if not data.profile:
raise exception.Forbidden(message=_MESSAGE_NO_PROFILE)
# good ol' db
if data.url_org.key() not in data.profile.org_admin_for:
raise exception.Forbidden(
message=_MESSAGE_NOT_ORG_ADMIN_FOR_ORG % data.url_org.key().name())
else:
if not data.ndb_profile:
raise exception.Forbidden(message=_MESSAGE_NO_PROFILE)
if data.url_ndb_org.key not in data.ndb_profile.admin_for:
raise exception.Forbidden(
message=_MESSAGE_NOT_ORG_ADMIN_FOR_ORG %
data.url_ndb_org.key.id())
IS_USER_ORG_ADMIN_FOR_ORG = IsUserOrgAdminForUrlOrg()
IS_USER_ORG_ADMIN_FOR_NDB_ORG = IsUserOrgAdminForUrlOrg(is_ndb=True)
class HasProfileAccessChecker(AccessChecker):
"""AccessChecker that ensures that the logged in user has an active profile
for the program specified in the URL.
"""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if (not data.ndb_profile
or data.ndb_profile.status != profile_model.Status.ACTIVE):
raise exception.Forbidden(message=_MESSAGE_NO_PROFILE)
HAS_PROFILE_ACCESS_CHECKER = HasProfileAccessChecker()
class UrlOrgStatusAccessChecker(AccessChecker):
"""AccessChecker that ensures that the organization specified in the URL
has the required status.
"""
def __init__(self, statuses):
"""Initializes a new instance of this access checker.
Args:
statuses: List of org_model.Status options with the allowed statuses.
"""
self.statuses = statuses
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if data.url_ndb_org.status not in self.statuses:
raise exception.Forbidden(
message=_MESSAGE_INVALID_URL_ORG_STATUS % data.url_ndb_org.status)
class HasNoProfileAccessChecker(AccessChecker):
"""AccessChecker that ensures that the logged in user does not have a profile
for the program specified in the URL.
"""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
ensureLoggedIn(data)
if data.ndb_profile:
raise exception.Forbidden(message=_MESSAGE_HAS_PROFILE)
HAS_NO_PROFILE_ACCESS_CHECKER = HasNoProfileAccessChecker()
class OrgSignupStartedAccessChecker(AccessChecker):
"""AccessChecker that ensures that organization sign-up period has started
for the program specified in the URL.
"""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if not data.timeline.afterOrgSignupStart():
active_from = data.timeline.orgSignupStart()
raise exception.Forbidden(message=_MESSAGE_INACTIVE_BEFORE % active_from)
ORG_SIGNUP_STARTED_ACCESS_CHECKER = OrgSignupStartedAccessChecker()
class OrgSignupActiveAccessChecker(AccessChecker):
"""AccessChecker that ensures that organization sign-up period is active
for the program specified in the URL.
"""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if not data.timeline.orgSignup():
raise exception.Forbidden(message=_MESSAGE_INACTIVE_OUTSIDE % (
data.timeline.orgSignupBetween()))
ORG_SIGNUP_ACTIVE_ACCESS_CHECKER = OrgSignupActiveAccessChecker()
class OrgsAnnouncedAccessChecker(AccessChecker):
"""AccessChecker that ensures that organizations have been announced for
the program specified in the URL.
"""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if not data.timeline.orgsAnnounced():
active_from = data.timeline.orgsAnnouncedOn()
raise exception.Forbidden(message=_MESSAGE_INACTIVE_BEFORE % active_from)
class StudentSignupActiveAccessChecker(AccessChecker):
"""AccessChecker that ensures that student sign-up period is active
for the program specified in the URL.
"""
def checkAccess(self, data, check):
"""See AccessChecker.checkAccess for specification."""
if not data.timeline.studentSignup():
raise exception.Forbidden(message=_MESSAGE_INACTIVE_OUTSIDE % (
data.timeline.studentsSignupBetween()))
STUDENT_SIGNUP_ACTIVE_ACCESS_CHECKER = StudentSignupActiveAccessChecker()
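# Illustrative sketch (not part of the original file): how a request handler
# might guard itself with one of the module-level checkers. The handler class
# is hypothetical; data and check are the RequestData and access_checker
# arguments described in AccessChecker.checkAccess above.
#
#   class SubmitProposalPage(object):
#       access_checker = STUDENT_SIGNUP_ACTIVE_ACCESS_CHECKER
#
#       def dispatch(self, data, check):
#           self.access_checker.checkAccess(data, check)
#           # Reaching this point means no exception was raised, i.e. access
#           # was granted; the page can now be rendered.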
|
apache-2.0
| 930,231,751,972,041,200 | 34.172237 | 79 | 0.7371 | false | 4.035988 | false | false | false |
cgrima/rsr
|
rsr/fit.py
|
1
|
4401
|
"""
Various tools for extracting signal components from a fit of the amplitude
distribution
"""
from . import pdf
from .Classdef import Statfit
import numpy as np
import time
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit
def param0(sample, method='basic'):
"""Estimate initial parameters for HK fitting
Arguments
---------
sample : sequence
amplitudes
Keywords
--------
method : string
method to compute the initial parameters
"""
if method == 'basic':
a = np.nanmean(sample)
s = np.nanstd(sample)
mu = 1.
return {'a':a, 's':s, 'mu':mu}
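# Illustrative sketch (not part of the original file): what param0 returns for
# a synthetic amplitude sample. The numbers are indicative only.
#
#   sample = np.random.rayleigh(0.1, 1000)
#   param0(sample)   # -> {'a': ~0.125, 's': ~0.065, 'mu': 1.0}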
def lmfit(sample, fit_model='hk', bins='auto', p0 = None,
xtol=1e-4, ftol=1e-4):
"""Lmfit
Arguments
---------
sample : sequence
amplitudes between 0 and 1.
Keywords
--------
fit_model : string
name of the function (in pdf module) to use for the fit
bins : string
method to compute the bin width (inherited from numpy.histogram)
p0 : dict
Initial parameters. If None, estimated automatically.
    xtol : float
    relative tolerance on the fitted parameters for 'leastsq' convergence
    ftol : float
    relative tolerance on the sum of squared residuals for 'leastsq' convergence
Return
------
A Statfit Class
"""
start = time.time()
winsize = len(sample)
bad = False
#--------------------------------------------------------------------------
# Clean sample
#--------------------------------------------------------------------------
sample = np.array(sample)
sample = sample[np.isfinite(sample)]
if len(sample) == 0:
bad = True
sample = np.zeros(10)+1
#--------------------------------------------------------------------------
# Make the histogram
#--------------------------------------------------------------------------
# n, edges, patches = hist(sample, bins=bins, normed=True)
n, edges = np.histogram(sample, bins=bins, density=True)
# plt.clf()
x = ((np.roll(edges, -1) + edges)/2.)[0:-1]
#--------------------------------------------------------------------------
# Initial Parameters for the fit
#--------------------------------------------------------------------------
if p0 is None:
p0 = param0(sample)
prm0 = Parameters()
# (Name, Value, Vary, Min, Max, Expr)
prm0.add('a', p0['a'], True, 0, 1, None)
prm0.add('s', p0['s'], True, 0, 1, None)
prm0.add('mu', p0['mu'], True, .5, 10, None)
prm0.add('pt', np.average(sample)**2,False, 0, 1, 'a**2+2*s**2*mu')
#if fit_model == 'hk':
# # From [Dutt and Greenleaf. 1994, eq.14]
# prm0.add('a4', np.average(sample)**4,False, 0, 1,
# '8*(1+1/mu)*s**4 + 8*s**2*s**2 + a**4')
#--------------------------------------------------------------------------
# Fit
#--------------------------------------------------------------------------
pdf2use = getattr(pdf, fit_model)
# use 'lbfgs' fit if error with 'leastsq' fit
try:
p = minimize(pdf2use, prm0, args=(x, n), method='leastsq',
xtol=xtol, ftol=ftol)
except KeyboardInterrupt:
raise
    except Exception:
print('!! Error with LEASTSQ fit, use L-BFGS-B instead')
p = minimize(pdf2use, prm0, args=(x, n), method='lbfgs')
#--------------------------------------------------------------------------
# Output
#--------------------------------------------------------------------------
elapsed = time.time() - start
values = {}
# Create values dict For lmfit >0.9.0 compatibility since it is no longer
# in the minimize output
for i in p.params.keys():
values[i] = p.params[i].value
# Results
result = Statfit(sample, pdf2use, values, p.params,
p.chisqr, p.redchi, elapsed, p.nfev, p.message, p.success,
p.residual, x, n, edges, bins=bins)
# Identify bad results
if bad is True:
result.success = False
result.values['a'] = 0
result.values['s'] = 0
result.values['mu'] = 0
result.values['pt'] = 0
result.chisqr = 0
result.redchi = 0
result.message = 'No valid data in the sample'
result.residual = 0
return result
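# Illustrative sketch (not part of the original file): fitting a synthetic
# amplitude sample and reading back the estimated components. Attribute and
# key names follow the Statfit object populated above.
#
#   sample = np.random.rayleigh(0.1, 5000)
#   f = lmfit(sample, fit_model='hk')
#   if f.success:
#       print(f.values['a'], f.values['s'], f.values['mu'], f.values['pt'])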
|
mit
| -4,480,014,395,952,754,700 | 29.143836 | 81 | 0.452852 | false | 4.000909 | false | false | false |
emitrom/integra-openstack-ui
|
schedules/tables.py
|
1
|
2134
|
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard.dashboards.integra.schedules import utils
from django.utils.http import urlencode
from django.core.urlresolvers import reverse
class AddTableData(tables.LinkAction):
name = "addSchedule"
verbose_name = _("Add Schedule")
url = "horizon:integra:schedules:create"
classes = ("btn-launch", "ajax-modal")
class ScheduleTasksData(tables.LinkAction):
name = "addTask"
verbose_name = _("Schedule Tasks")
url = "horizon:integra:schedules:addTask"
classes = ("btn-launch", "ajax-modal")
def get_link_url(self, datum):
base_url = reverse(self.url)
params = urlencode({"source_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
class DeleteTableData(tables.DeleteAction):
data_type_singular = _("Schedule")
    data_type_plural = _("Schedules")
def delete(self, request, obj_id):
utils.deleteSchedule(self, obj_id)
class FilterAction(tables.FilterAction):
def filter(self, table, posts, filter_string):
filterString = filter_string.lower()
return [post for post in posts
if filterString in post.title.lower()]
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, post_id):
pass
class ScheduleTable(tables.DataTable):
id = tables.Column("id",
verbose_name=_("Id"))
name = tables.Column("name",
verbose_name=_("Name"))
description = tables.Column("description",
verbose_name=_("Description"))
priority = tables.Column("priority",
verbose_name=_("Priority"))
enabled = tables.Column("enabled",
verbose_name=_("Enabled"))
class Meta:
name = "integra"
verbose_name = _("Schedules")
#status_columns = ["status"]
row_class = UpdateRow
table_actions = (AddTableData,
FilterAction)
    row_actions = (DeleteTableData, ScheduleTasksData)
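# Illustrative sketch (not part of the original file): wiring the table into a
# Horizon view, typically in the app's views.py. The view name, template path
# and getSchedules() helper are assumptions, not part of this repository;
# `tables` is the horizon.tables module already imported at the top.
#
#   class IndexView(tables.DataTableView):
#       table_class = ScheduleTable
#       template_name = "integra/schedules/index.html"
#
#       def get_data(self):
#           return utils.getSchedules(self)  # hypothetical helper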
|
apache-2.0
| 7,110,622,473,895,393,000 | 28.232877 | 74 | 0.619963 | false | 4.135659 | false | false | false |