repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
AdaptiveApplications/carnegie | tarc_bus_locator_client/quantities-0.10.1/build/lib/quantities/units/radiation.py | 4 | 1072 |
"""
"""
from __future__ import absolute_import
from ..unitquantity import UnitQuantity
from .time import s
from .mass import kg
from .energy import J
from .electromagnetism import coulomb
Bq = becquerel = UnitQuantity(
'becquerel',
1/s,
symbol='Bq',
aliases=['becquerels']
)
Ci = curie = UnitQuantity(
'curie',
3.7e10*becquerel,
symbol='Ci',
aliases=['curies']
)
rd = rutherford = UnitQuantity(
'rutherford',
1e6*Bq,
symbol='Rd',
aliases=['rutherfords'],
doc='this unit is obsolete, in favor of 1e6 Bq'
)
Gy = gray = Sv = sievert = UnitQuantity(
'gray',
J/kg,
symbol='Gy',
aliases=['grays', 'Sv', 'sievert', 'sieverts']
)
rem = UnitQuantity(
'rem',
1e-2*sievert,
aliases=['rems']
)
rads = UnitQuantity(
'rads',
1e-2*gray,
doc='''
rad is the commonly used symbol for radian;
the rads unit of radiation is deprecated.
'''
)
R = roentgen = UnitQuantity(
'roentgen',
2.58e-4*coulomb/kg,
symbol='R',
aliases=['roentgens']
)
del UnitQuantity, s, kg, J, coulomb
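# Illustrative usage (a sketch, not part of the original module): the units
# defined above convert through quantities' rescale():
#
#     >>> import quantities as pq
#     >>> (2 * pq.Ci).rescale('Bq')   # 1 Ci == 3.7e10 Bq
#     array(74000000000.0) * Bq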
| mit | 8,086,113,623,462,072,000 | 17.807018 | 51 | 0.613806 | false |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/SQLAlchemy-0.7.0-py2.7-linux-x86_64.egg/sqlalchemy/pool.py | 8 | 33408 |
# sqlalchemy/pool.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import weakref, time, traceback
from sqlalchemy import exc, log, event, events, interfaces, util
from sqlalchemy.util import queue as sqla_queue
from sqlalchemy.util import threading, memoized_property, \
chop_traceback
proxies = {}
def manage(module, **params):
"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \*\*params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
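# Illustrative usage (a sketch, not part of the original module): wrapping the
# stdlib sqlite3 module so that its connect() calls are transparently pooled.
#
#     import sqlite3
#     from sqlalchemy import pool
#
#     sqlite = pool.manage(sqlite3, poolclass=pool.QueuePool)
#     conn = sqlite.connect(':memory:')   # checked out from a QueuePool
#     conn.close()                        # returned to the pool, not closed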
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.itervalues():
manager.close()
proxies.clear()
class Pool(log.Identified):
"""Abstract base class for connection pools."""
def __init__(self,
creator, recycle=-1, echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
_dispatch=None):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
no arguments.
:param recycle: If set to a value other than -1, number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`unique_connection` method is provided to bypass the
threadlocal behavior installed into :meth:`connect`.
:param reset_on_return: If true, reset the database state of
connections returned to the pool. This is typically a
ROLLBACK to release locks and transaction resources.
Disable at your own peril. Defaults to True.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to event.listen()
upon construction. Provided here so that event listeners
can be assigned via ``create_engine`` before dialect-level
listeners are applied.
:param listeners: Deprecated. A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool. This has been superseded by
:func:`~sqlalchemy.event.listen`.
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._use_threadlocal = use_threadlocal
self._reset_on_return = reset_on_return
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
util.warn_deprecated(
"The 'listeners' argument to Pool (and "
"create_engine()) is deprecated. Use event.listen().")
for l in listeners:
self.add_listener(l)
dispatch = event.dispatcher(events.PoolEvents)
@util.deprecated(2.7, "Pool.add_listener is deprecated. Use event.listen()")
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is different from :meth:`.Pool.connect` only if the
``use_threadlocal`` flag has been set to ``True``.
"""
return _ConnectionFairy(self).checkout()
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def recreate(self):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open. It is advised to not reuse the pool once dispose()
is called, and to instead use a new pool constructed by the
recreate() method.
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy(self).checkout()
try:
rec = self._threadconns.current()
if rec:
return rec.checkout()
except AttributeError:
pass
agent = _ConnectionFairy(self)
self._threadconns.current = weakref.ref(agent)
return agent.checkout()
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal:
try:
del self._threadconns.current
except AttributeError:
pass
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord(object):
finalize_callback = None
def __init__(self, pool):
self.__pool = pool
self.connection = self.__connect()
self.info = {}
pool.dispatch.first_connect.exec_once(self.connection, self)
pool.dispatch.connect(self.connection, self)
def close(self):
if self.connection is not None:
self.__pool.logger.debug("Closing connection %r", self.connection)
try:
self.connection.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
self.__pool.logger.debug("Exception closing connection %r",
self.connection)
def invalidate(self, e=None):
if e is not None:
self.__pool.logger.info(
"Invalidate connection %r (reason: %s:%s)",
self.connection, e.__class__.__name__, e)
else:
self.__pool.logger.info(
"Invalidate connection %r", self.connection)
self.__close()
self.connection = None
def get_connection(self):
if self.connection is None:
self.connection = self.__connect()
self.info.clear()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
elif self.__pool._recycle > -1 and \
time.time() - self.starttime > self.__pool._recycle:
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling",
self.connection)
self.__close()
self.connection = self.__connect()
self.info.clear()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
return self.connection
def __close(self):
try:
self.__pool.logger.debug("Closing connection %r", self.connection)
self.connection.close()
except (SystemExit, KeyboardInterrupt):
raise
except Exception, e:
self.__pool.logger.debug(
"Connection %r threw an error on close: %s",
self.connection, e)
def __connect(self):
try:
self.starttime = time.time()
connection = self.__pool._creator()
self.__pool.logger.debug("Created new connection %r", connection)
return connection
except Exception, e:
self.__pool.logger.debug("Error on connect(): %s", e)
raise
def _finalize_fairy(connection, connection_record, pool, ref, echo):
_refs.discard(connection_record)
if ref is not None and \
connection_record.fairy is not ref:
return
if connection is not None:
try:
if pool._reset_on_return:
connection.rollback()
# Immediately close detached instances
if connection_record is None:
connection.close()
except Exception, e:
if connection_record is not None:
connection_record.invalidate(e=e)
if isinstance(e, (SystemExit, KeyboardInterrupt)):
raise
if connection_record is not None:
connection_record.fairy = None
if echo:
pool.logger.debug("Connection %r being returned to pool",
connection)
if connection_record.finalize_callback:
connection_record.finalize_callback(connection)
del connection_record.finalize_callback
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, connection_record)
pool._return_conn(connection_record)
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DB-API connection and provides return-on-dereference
support."""
__slots__ = '_pool', '__counter', 'connection', \
'_connection_record', '__weakref__', \
'_detached_info', '_echo'
def __init__(self, pool):
self._pool = pool
self.__counter = 0
self._echo = _echo = pool._should_log_debug()
try:
rec = self._connection_record = pool._do_get()
conn = self.connection = self._connection_record.get_connection()
rec.fairy = weakref.ref(
self,
lambda ref:_finalize_fairy(conn, rec, pool, ref, _echo)
)
_refs.add(rec)
except:
# helps with endless __getattr__ loops later on
self.connection = None
self._connection_record = None
raise
if self._echo:
self._pool.logger.debug("Connection %r checked out from pool" %
self.connection)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
return self.connection is not None
@property
def info(self):
"""An info collection unique to this DB-API connection."""
try:
return self._connection_record.info
except AttributeError:
if self.connection is None:
raise exc.InvalidRequestError("This connection is closed")
try:
return self._detached_info
except AttributeError:
self._detached_info = value = {}
return value
def invalidate(self, e=None):
"""Mark this connection as invalidated.
The connection will be immediately closed. The containing
ConnectionRecord will create a new connection when next used.
"""
if self.connection is None:
raise exc.InvalidRequestError("This connection is closed")
if self._connection_record is not None:
self._connection_record.invalidate(e=e)
self.connection = None
self._close()
def cursor(self, *args, **kwargs):
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def checkout(self):
if self.connection is None:
raise exc.InvalidRequestError("This connection is closed")
self.__counter += 1
if not self._pool.dispatch.checkout or self.__counter != 1:
return self
# Pool listeners can trigger a reconnection on checkout
attempts = 2
while attempts > 0:
try:
self._pool.dispatch.checkout(self.connection,
self._connection_record,
self)
return self
except exc.DisconnectionError, e:
self._pool.logger.info(
"Disconnection detected on checkout: %s", e)
self._connection_record.invalidate(e)
self.connection = self._connection_record.get_connection()
attempts -= 1
self._pool.logger.info("Reconnection attempts exhausted on checkout")
self.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
_refs.remove(self._connection_record)
self._connection_record.fairy = None
self._connection_record.connection = None
self._pool._do_return_conn(self._connection_record)
self._detached_info = \
self._connection_record.info.copy()
self._connection_record = None
def close(self):
self.__counter -= 1
if self.__counter == 0:
self._close()
def _close(self):
_finalize_fairy(self.connection, self._connection_record,
self._pool, None, self._echo)
self.connection = None
self._connection_record = None
class SingletonThreadPool(Pool):
"""A Pool that maintains one connection per thread.
Maintains one connection per thread, never moving a connection to a
thread other than the one in which it was created.
Options are the same as those of :class:`.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
def __init__(self, creator, pool_size=5, **kw):
kw['use_threadlocal'] = True
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._all_conns = set()
self.size = pool_size
def recreate(self):
self.logger.info("Pool recreating")
return SingletonThreadPool(self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
def dispose(self):
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self):
while len(self._all_conns) > self.size:
c = self._all_conns.pop()
c.close()
def status(self):
return "SingletonThreadPool id:%d size: %d" % \
(id(self), len(self._all_conns))
def _do_return_conn(self, conn):
pass
def _do_get(self):
try:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
self._all_conns.add(c)
if len(self._all_conns) > self.size:
self._cleanup()
return c
class QueuePool(Pool):
"""A :class:`.Pool` that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`.Engine` objects, unless the SQLite dialect is in use.
"""
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
**kw):
"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
no arguments.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is pool_size +
`max_overflow`, and the total number of "sleeping"
connections the pool will allow is pool_size. `max_overflow`
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.
:param recycle: If set to a value other than -1, number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`unique_connection` method is provided to bypass the
threadlocal behavior installed into :meth:`connect`.
:param reset_on_return: If true, reset the database state of
connections returned to the pool. This is typically a
ROLLBACK to release locks and transaction resources.
Disable at your own peril. Defaults to True.
:param listeners: A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool.
"""
Pool.__init__(self, creator, **kw)
self._pool = sqla_queue.Queue(pool_size)
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
self._overflow_lock = self._max_overflow > -1 and \
threading.Lock() or None
def recreate(self):
self.logger.info("Pool recreating")
return QueuePool(self._creator, pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
timeout=self._timeout,
recycle=self._recycle, echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
def _do_return_conn(self, conn):
try:
self._pool.put(conn, False)
except sqla_queue.Full:
conn.close()
if self._overflow_lock is None:
self._overflow -= 1
else:
self._overflow_lock.acquire()
try:
self._overflow -= 1
finally:
self._overflow_lock.release()
def _do_get(self):
try:
wait = self._max_overflow > -1 and \
self._overflow >= self._max_overflow
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
if self._max_overflow > -1 and \
self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %d" %
(self.size(), self.overflow(), self._timeout))
if self._overflow_lock is not None:
self._overflow_lock.acquire()
if self._max_overflow > -1 and \
self._overflow >= self._max_overflow:
if self._overflow_lock is not None:
self._overflow_lock.release()
return self._do_get()
try:
con = self._create_connection()
self._overflow += 1
finally:
if self._overflow_lock is not None:
self._overflow_lock.release()
return con
def dispose(self):
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return "Pool size: %d Connections in pool: %d "\
"Current Overflow: %d Current Checked out "\
"connections: %d" % (self.size(),
self.checkedin(),
self.overflow(),
self.checkedout())
def size(self):
return self._pool.maxsize
def checkedin(self):
return self._pool.qsize()
def overflow(self):
return self._overflow
def checkedout(self):
return self._pool.maxsize - self._pool.qsize() + self._overflow
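# Illustrative usage (a sketch): constructing a QueuePool directly around a
# DB-API creator; pool_size + max_overflow bounds the total open connections.
#
#     import sqlite3
#     from sqlalchemy import pool
#
#     p = pool.QueuePool(lambda: sqlite3.connect(':memory:'),
#                        pool_size=5, max_overflow=10, timeout=30)
#     conn = p.connect()   # blocks up to `timeout` seconds when exhausted
#     conn.close()         # puts the connection back on the queue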
class NullPool(Pool):
"""A Pool which does not pool connections.
Instead it literally opens and closes the underlying DB-API connection
for each connection open/close.
Reconnect-related functions such as ``recycle`` and connection
invalidation are not supported by this Pool implementation, since
no connections are held persistently.
:class:`.NullPool` is used by the SQLite dialect automatically
when a file-based database is used (as of SQLAlchemy 0.7).
See :ref:`sqlite_toplevel`.
"""
def status(self):
return "NullPool"
def _do_return_conn(self, conn):
conn.close()
def _do_get(self):
return self._create_connection()
def recreate(self):
self.logger.info("Pool recreating")
return NullPool(self._creator,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
def dispose(self):
pass
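# Illustrative usage (a sketch): selecting NullPool via create_engine; every
# connect()/close() cycle opens and closes a real DB-API connection.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.pool import NullPool
#
#     engine = create_engine('sqlite:///file.db', poolclass=NullPool)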
class StaticPool(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are not
currently supported by this Pool implementation but may be implemented
in a future release.
"""
@memoized_property
def _conn(self):
return self._creator()
@memoized_property
def connection(self):
return _ConnectionRecord(self)
def status(self):
return "StaticPool"
def dispose(self):
if '_conn' in self.__dict__:
self._conn.close()
self._conn = None
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch)
def _create_connection(self):
return self._conn
def _do_return_conn(self, conn):
pass
def _do_get(self):
return self.connection
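# Illustrative usage (a sketch): every checkout wraps the same memoized DB-API
# connection, which suits SQLite ':memory:' databases living in one process.
#
#     import sqlite3
#     from sqlalchemy import pool
#
#     p = pool.StaticPool(lambda: sqlite3.connect(':memory:'))
#     conn = p.connect()   # always the single underlying connection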
class AssertionPool(Pool):
"""A :class:`.Pool` that allows at most one checked out connection at any given
time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
:class:`.AssertionPool` also logs a traceback of where
the original connection was checked out, and reports
this in the assertion error raised (new in 0.7).
"""
def __init__(self, *args, **kw):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop('store_traceback', True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self):
return "AssertionPool"
def _do_return_conn(self, conn):
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert conn is self._conn
def dispose(self):
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self):
self.logger.info("Pool recreating")
return AssertionPool(self._creator, echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch)
def _do_get(self):
if self._checked_out:
if self._checkout_traceback:
suffix = ' at:\n%s' % ''.join(
chop_traceback(self._checkout_traceback))
else:
suffix = ''
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
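# Illustrative behaviour (a sketch): a second simultaneous checkout fails fast,
# with the stored traceback pointing at the first checkout site.
#
#     p = pool.AssertionPool(lambda: sqlite3.connect(':memory:'))
#     c1 = p.connect()
#     c2 = p.connect()   # raises AssertionError: connection is already checked out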
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in self.pools.keys():
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
self._create_pool_mutex.acquire()
try:
if key not in self.pools:
pool = self.poolclass(lambda:
self.module.connect(*args, **kw), **self.kw)
self.pools[key] = pool
return pool
else:
return self.pools[key]
finally:
self._create_pool_mutex.release()
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
return tuple(
list(args) +
[(k, kw[k]) for k in sorted(kw)]
)
| gpl-2.0 | -6,588,580,423,723,602,000 | 33.872651 | 84 | 0.586177 | false |
GoogleCloudPlatform/python-compat-runtime | appengine-compat/exported_appengine_sdk/google/appengine/ext/vmruntime/initialize.py | 1 | 3059 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Functions that prepare GAE user code for running in a GCE VM."""
import json
import logging
import logging.handlers
import math
import sys
import traceback
from google.appengine import api
from google.appengine.api import app_logging
from google.appengine.api.logservice import logservice
from google.appengine.ext.vmruntime import background_thread
from google.appengine.runtime import request_environment
from google.appengine.runtime import runtime
APP_LOG_FILE = '/var/log/app_engine/app.log.json'
MAX_LOG_BYTES = 128 * 1024 * 1024
LOG_BACKUP_COUNT = 3
class JsonFormatter(logging.Formatter):
"""Class for logging to the cloud logging api with json metadata."""
def format(self, record):
"""Format the record as json the cloud logging agent understands.
Args:
record: A logging.LogRecord to format.
Returns:
A json string to log.
"""
float_frac_sec, float_sec = math.modf(record.created)
data = {'thread': record.thread,
'timestamp': {
'seconds': int(float_sec),
'nanos': int(float_frac_sec * 1000000000)}}
if record.exc_info:
data['message'] = '%s\n%s' % (record.getMessage(),
traceback.format_exc(
record.exc_info))
data['severity'] = 'CRITICAL'
else:
data['message'] = record.getMessage()
data['severity'] = record.levelname
return json.dumps(data)
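# Illustrative output (a sketch): a formatted record serializes to a JSON line
# such as the following, which the cloud logging agent tails from APP_LOG_FILE:
#
#     {"thread": 139875, "timestamp": {"seconds": 1435000000, "nanos": 250000000},
#      "message": "starting request", "severity": "INFO"}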
def InitializeFileLogging():
"""Helper called from CreateAndRunService() to set up syslog logging."""
logging.basicConfig()
logger = logging.getLogger()
logger.handlers = []
file_handler = logging.handlers.RotatingFileHandler(
APP_LOG_FILE, maxBytes=MAX_LOG_BYTES, backupCount=LOG_BACKUP_COUNT)
file_handler.setFormatter(JsonFormatter())
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)
def InitializeApiLogging():
"""Helper called from CreateAndRunService() to set up api logging."""
logservice.logs_buffer = lambda: request_environment.current_request.errors
logger = logging.getLogger()
app_log_handler = app_logging.AppLogsHandler()
logger.addHandler(app_log_handler)
def InitializeThreadingApis():
"""Helper to monkey-patch various threading APIs."""
runtime.PatchStartNewThread()
sys.modules[api.__name__ + '.background_thread'] = background_thread
api.background_thread = background_thread
| apache-2.0 | 5,319,909,203,128,097,000 | 22.898438 | 77 | 0.698921 | false |
mogotest/selenium | selenium/src/py/lib/docutils/readers/pep.py | 5 | 1666 |
# Author: David Goodger
# Contact: [email protected]
# Revision: $Revision: 3892 $
# Date: $Date: 2005-09-20 22:04:53 +0200 (Tue, 20 Sep 2005) $
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
from docutils.readers import standalone
from docutils.transforms import peps, references, misc, frontmatter
from docutils.parsers import rst
class Reader(standalone.Reader):
supported = ('pep',)
"""Contexts this reader supports."""
settings_spec = (
'PEP Reader Option Defaults',
'The --pep-references and --rfc-references options (for the '
'reStructuredText parser) are on by default.',
())
config_section = 'pep reader'
config_section_dependencies = ('readers', 'standalone reader')
def get_transforms(self):
transforms = standalone.Reader.get_transforms(self)
# We have PEP-specific frontmatter handling.
transforms.remove(frontmatter.DocTitle)
transforms.remove(frontmatter.SectionSubTitle)
transforms.remove(frontmatter.DocInfo)
transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes])
return transforms
settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
inliner_class = rst.states.Inliner
def __init__(self, parser=None, parser_name=None):
"""`parser` should be ``None``."""
if parser is None:
parser = rst.Parser(rfc2822=1, inliner=self.inliner_class())
standalone.Reader.__init__(self, parser, '')
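# Illustrative usage (a sketch, assuming a PEP source string `pep_text`): the
# reader plugs into the docutils publisher by name.
#
#     from docutils.core import publish_string
#     html = publish_string(source=pep_text, reader_name='pep',
#                           writer_name='pep_html')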
| apache-2.0 | -8,956,217,481,703,280,000 | 31.32 | 75 | 0.654862 | false |
Timurdov/bionic | bionic/Lib/site-packages/django/contrib/sessions/backends/cache.py | 102 | 2499 |
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import caches
from django.utils.six.moves import xrange
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return KEY_PREFIX + self._get_or_create_session_key()
def load(self):
try:
session_data = self._cache.get(self.cache_key, None)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
session_data = None
if session_data is not None:
return session_data
self.create()
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in xrange(10000):
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError(
"Unable to create a new session key. "
"It is likely that the cache is unavailable.")
def save(self, must_create=False):
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.cache_key,
self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
return (KEY_PREFIX + session_key) in self._cache
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(KEY_PREFIX + session_key)
@classmethod
def clear_expired(cls):
pass
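# Illustrative usage (a sketch): reading a session back through the cache.
#
#     from django.contrib.sessions.backends.cache import SessionStore
#
#     s = SessionStore()
#     s['cart'] = [42]
#     s.save()                                  # stored under KEY_PREFIX + key
#     SessionStore(session_key=s.session_key)['cart']   # -> [42]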
| apache-2.0 | -6,142,576,335,898,407,000 | 32.77027 | 79 | 0.593037 | false |
mushtaqak/edx-platform | cms/djangoapps/contentstore/features/transcripts.py | 46 | 8895 |
# disable missing docstring
# pylint: disable=missing-docstring
import os
from lettuce import world, step
from django.conf import settings
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from splinter.request_handler.request_handler import RequestHandler
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
# We should wait 300 ms for event handler invocation + 200 ms for safety.
DELAY = 0.5
ERROR_MESSAGES = {
'url_format': u'Incorrect url format.',
'file_type': u'Link types should be unique.',
'links_duplication': u'Links should be unique.',
}
STATUSES = {
'found': u'Timed Transcript Found',
'not found on edx': u'No EdX Timed Transcript',
'not found': u'No Timed Transcript',
'replace': u'Timed Transcript Conflict',
'uploaded_successfully': u'Timed Transcript Uploaded Successfully',
'use existing': u'Confirm Timed Transcript',
}
SELECTORS = {
'error_bar': '.transcripts-error-message',
'url_inputs': '.videolist-settings-item input.input',
'collapse_link': '.collapse-action.collapse-setting',
'collapse_bar': '.videolist-extra-videos',
'status_bar': '.transcripts-message-status',
}
# button type, button css selector, button message
TRANSCRIPTS_BUTTONS = {
'import': ('.setting-import', 'Import YouTube Transcript'),
'download_to_edit': ('.setting-download', 'Download Transcript for Editing'),
'disabled_download_to_edit': ('.setting-download.is-disabled', 'Download Transcript for Editing'),
'upload_new_timed_transcripts': ('.setting-upload', 'Upload New Transcript'),
'replace': ('.setting-replace', 'Yes, replace the edX transcript with the YouTube transcript'),
'choose': ('.setting-choose', 'Timed Transcript from {}'),
'use_existing': ('.setting-use-existing', 'Use Current Transcript'),
}
@step('I clear fields$')
def clear_fields(_step):
# Clear the input fields and trigger an 'input' event
script = """
$('{selector}')
.prop('disabled', false)
.removeClass('is-disabled')
.attr('aria-disabled', false)
.val('')
.trigger('input');
""".format(selector=SELECTORS['url_inputs'])
world.browser.execute_script(script)
world.wait(DELAY)
world.wait_for_ajax_complete()
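# Illustrative mapping (a sketch): a line in a .feature scenario such as
#
#     When I clear fields
#
# is matched against the @step regex above, so lettuce invokes clear_fields().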
@step('I clear field number (.+)$')
def clear_field(_step, index):
index = int(index) - 1
world.css_fill(SELECTORS['url_inputs'], '', index)
# For some reason ChromeDriver doesn't trigger an 'input' event after filling
# the field with an empty value. That's why we trigger it manually via jQuery.
world.trigger_event(SELECTORS['url_inputs'], event='input', index=index)
world.wait(DELAY)
world.wait_for_ajax_complete()
@step('I expect (.+) inputs are disabled$')
def inputs_are_disabled(_step, indexes):
index_list = [int(i.strip()) - 1 for i in indexes.split(',')]
for index in index_list:
el = world.css_find(SELECTORS['url_inputs'])[index]
assert el['disabled']
@step('I expect inputs are enabled$')
def inputs_are_enabled(_step):
for index in range(3):
el = world.css_find(SELECTORS['url_inputs'])[index]
assert not el['disabled']
@step('I do not see error message$')
def i_do_not_see_error_message(_step):
assert not world.css_visible(SELECTORS['error_bar'])
@step('I see error message "([^"]*)"$')
def i_see_error_message(_step, error):
assert world.css_has_text(SELECTORS['error_bar'], ERROR_MESSAGES[error])
@step('I do not see status message$')
def i_do_not_see_status_message(_step):
assert not world.css_visible(SELECTORS['status_bar'])
@step('I see status message "([^"]*)"$')
def i_see_status_message(_step, status):
assert not world.css_visible(SELECTORS['error_bar'])
assert world.css_has_text(SELECTORS['status_bar'], STATUSES[status])
DOWNLOAD_BUTTON = TRANSCRIPTS_BUTTONS["download_to_edit"][0]
if world.is_css_present(DOWNLOAD_BUTTON, wait_time=1) and not world.css_find(DOWNLOAD_BUTTON)[0].has_class('is-disabled'):
assert _transcripts_are_downloaded()
@step('I (.*)see button "([^"]*)"$')
def i_see_button(_step, not_see, button_type):
button = button_type.strip()
if not_see.strip():
assert world.is_css_not_present(TRANSCRIPTS_BUTTONS[button][0])
else:
assert world.css_has_text(TRANSCRIPTS_BUTTONS[button][0], TRANSCRIPTS_BUTTONS[button][1])
@step('I (.*)see (.*)button "([^"]*)" number (\d+)$')
def i_see_button_with_custom_text(_step, not_see, button_type, custom_text, index):
button = button_type.strip()
custom_text = custom_text.strip()
index = int(index.strip()) - 1
if not_see.strip():
assert world.is_css_not_present(TRANSCRIPTS_BUTTONS[button][0])
else:
assert world.css_has_text(TRANSCRIPTS_BUTTONS[button][0], TRANSCRIPTS_BUTTONS[button][1].format(custom_text), index)
@step('I click transcript button "([^"]*)"$')
def click_button_transcripts_variant(_step, button_type):
button = button_type.strip()
world.css_click(TRANSCRIPTS_BUTTONS[button][0])
world.wait_for_ajax_complete()
@step('I click transcript button "([^"]*)" number (\d+)$')
def click_button_index(_step, button_type, index):
button = button_type.strip()
index = int(index.strip()) - 1
world.css_click(TRANSCRIPTS_BUTTONS[button][0], index)
world.wait_for_ajax_complete()
@step('I remove "([^"]+)" transcripts id from store')
def remove_transcripts_from_store(_step, subs_id):
"""Remove from store, if transcripts content exists."""
filename = 'subs_{0}.srt.sjson'.format(subs_id.strip())
content_location = StaticContent.compute_location(
world.scenario_dict['COURSE'].id,
filename
)
try:
content = contentstore().find(content_location)
contentstore().delete(content.location)
print('Transcript file was removed from store.')
except NotFoundError:
print('Transcript file was NOT found and not removed.')
@step('I enter a "([^"]+)" source to field number (\d+)$')
def i_enter_a_source(_step, link, index):
index = int(index) - 1
if index != 0 and not world.css_visible(SELECTORS['collapse_bar']):
world.css_click(SELECTORS['collapse_link'])
assert world.css_visible(SELECTORS['collapse_bar'])
world.css_fill(SELECTORS['url_inputs'], link, index)
world.wait(DELAY)
world.wait_for_ajax_complete()
@step('I upload the transcripts file "([^"]*)"$')
def upload_file(_step, file_name):
path = os.path.join(TEST_ROOT, 'uploads/', file_name.strip())
world.browser.execute_script("$('form.file-chooser').show()")
world.browser.attach_file('transcript-file', os.path.abspath(path))
world.wait_for_ajax_complete()
@step('I see "([^"]*)" text in the captions')
def check_text_in_the_captions(_step, text):
world.wait_for_present('.video.is-captions-rendered')
world.wait_for(lambda _: world.css_text('.subtitles'), timeout=30)
actual_text = world.css_text('.subtitles')
assert (text in actual_text)
@step('I see value "([^"]*)" in the field "([^"]*)"$')
def check_transcripts_field(_step, values, field_name):
world.select_editor_tab('Advanced')
tab = world.css_find('#settings-tab').first
field_id = '#' + tab.find_by_xpath('.//label[text()="%s"]' % field_name.strip())[0]['for']
values_list = [i.strip() == world.css_value(field_id) for i in values.split('|')]
assert any(values_list)
world.select_editor_tab('Basic')
@step('I save changes$')
def save_changes(_step):
world.save_component()
@step('I open tab "([^"]*)"$')
def open_tab(_step, tab_name):
world.select_editor_tab(tab_name)
@step('I set value "([^"]*)" to the field "([^"]*)"$')
def set_value_transcripts_field(_step, value, field_name):
tab = world.css_find('#settings-tab').first
XPATH = './/label[text()="{name}"]'.format(name=field_name)
SELECTOR = '#' + tab.find_by_xpath(XPATH)[0]['for']
element = world.css_find(SELECTOR).first
if element['type'] == 'text':
SCRIPT = '$("{selector}").val("{value}").change()'.format(
selector=SELECTOR,
value=value
)
world.browser.execute_script(SCRIPT)
assert world.css_has_value(SELECTOR, value)
else:
assert False, 'Incorrect element type.'
world.wait_for_ajax_complete()
@step('I revert the transcript field "([^"]*)"$')
def revert_transcripts_field(_step, field_name):
world.revert_setting_entry(field_name)
def _transcripts_are_downloaded():
world.wait_for_ajax_complete()
request = RequestHandler()
DOWNLOAD_BUTTON = world.css_find(TRANSCRIPTS_BUTTONS["download_to_edit"][0]).first
url = DOWNLOAD_BUTTON['href']
request.connect(url)
return request.status_code.is_success()
| agpl-3.0 | -8,272,054,600,443,058,000 | 33.08046 | 126 | 0.659921 | false |
ioanpocol/superdesk-core | tests/publish/ninjs_formatter_test.py | 1 | 44687 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import json
from unittest import mock
from datetime import timedelta
from superdesk.utc import utcnow
from superdesk.tests import TestCase
from superdesk.publish.formatters.ninjs_formatter import NINJSFormatter
from superdesk.publish import init_app
from bson import ObjectId
@mock.patch("superdesk.publish.subscribers.SubscribersService.generate_sequence_number", lambda self, subscriber: 1)
class NinjsFormatterTest(TestCase):
def setUp(self):
self.formatter = NINJSFormatter()
init_app(self.app)
self.maxDiff = None
def test_text_formatter(self):
self.app.data.insert(
"vocabularies",
[
{
"_id": "locators",
"display_name": "Locators",
"type": "unmanageable",
"unique_field": "qcode",
"items": [
{
"is_active": True,
"name": "NSW",
"qcode": "NSW",
"state": "New South Wales",
"country": "Australia",
"world_region": "Oceania",
"group": "Australia",
}
],
}
],
)
embargo_ts = utcnow() + timedelta(days=2)
article = {
"_id": "tag:aap.com.au:20150613:12345",
"guid": "tag:aap.com.au:20150613:12345",
"_current_version": 1,
"anpa_category": [{"qcode": "a"}],
"source": "AAP",
"headline": "This is a test headline",
"byline": "joe",
"slugline": "slugline",
"subject": [
{"qcode": "02011001", "name": "international court or tribunal", "parent": None},
{"qcode": "02011002", "name": "extradition"},
],
"anpa_take_key": "take_key",
"unique_id": "1",
"body_html": "The story body",
"type": "text",
"word_count": "1",
"priority": 1,
"profile": "snap",
"state": "published",
"urgency": 2,
"pubstatus": "usable",
"creditline": "sample creditline",
"keywords": ["traffic"],
"abstract": "<p>sample <b>abstract</b></p>",
"place": [{"name": "NSW", "qcode": "NSW"}],
"embargo": embargo_ts,
"body_footer": "<p>call helpline 999 if you are planning to quit smoking</p>",
"company_codes": [{"name": "YANCOAL AUSTRALIA LIMITED", "qcode": "YAL", "security_exchange": "ASX"}],
"genre": [{"name": "Article", "qcode": "article"}],
"flags": {"marked_for_legal": True},
"extra": {"foo": "test"},
"annotations": [{"msg": "test"}],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
expected = {
"guid": "tag:aap.com.au:20150613:12345",
"version": "1",
"place": [{"code": "NSW", "name": "New South Wales"}],
"pubstatus": "usable",
"body_html": "The story body<p>call helpline 999 if you are planning to quit smoking</p>",
"type": "text",
"subject": [
{"code": "02011001", "name": "international court or tribunal"},
{"code": "02011002", "name": "extradition"},
],
"service": [{"code": "a"}],
"source": "AAP",
"headline": "This is a test headline",
"byline": "joe",
"urgency": 2,
"priority": 1,
"embargoed": embargo_ts.isoformat(),
"profile": "snap",
"slugline": "slugline",
"description_text": "sample abstract",
"description_html": "<p>sample <b>abstract</b></p>",
"keywords": ["traffic"],
"organisation": [
{
"name": "YANCOAL AUSTRALIA LIMITED",
"rel": "Securities Identifier",
"symbols": [{"ticker": "YAL", "exchange": "ASX"}],
}
],
"genre": [{"name": "Article", "code": "article"}],
"signal": [{"name": "Content Warning", "code": "cwarn", "scheme": "http://cv.iptc.org/newscodes/signal/"}],
"extra": {"foo": "test"},
"charcount": 67,
"wordcount": 13,
"readtime": 0,
"annotations": article["annotations"],
}
self.assertEqual(json.loads(doc), expected)
def test_picture_formatter(self):
article = {
"guid": "20150723001158606583",
"_current_version": 1,
"slugline": "AMAZING PICTURE",
"original_source": "AAP",
"renditions": {
"viewImage": {
"width": 640,
"href": "http://localhost:5000/api/upload/55b032041d41c8d278d21b6f/raw?_schema=http",
"mimetype": "image/jpeg",
"height": 401,
},
"original": {
"href": "https://one-api.aap.com.au/api/v3/Assets/20150723001158606583/Original/download",
"mimetype": "image/jpeg",
},
},
"byline": "MICKEY MOUSE",
"headline": "AMAZING PICTURE",
"versioncreated": "2015-07-23T00:15:00.000Z",
"ednote": "TEST ONLY",
"type": "picture",
"pubstatus": "usable",
"source": "AAP",
"description": "The most amazing picture you will ever see",
"guid": "20150723001158606583",
"body_footer": "<p>call helpline 999 if you are planning to quit smoking</p>",
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
expected = {
"byline": "MICKEY MOUSE",
"renditions": {
"original": {
"href": "https://one-api.aap.com.au/api/v3/Assets/20150723001158606583/Original/download",
"mimetype": "image/jpeg",
}
},
"headline": "AMAZING PICTURE",
"pubstatus": "usable",
"version": "1",
"versioncreated": "2015-07-23T00:15:00.000Z",
"guid": "20150723001158606583",
"description_html": "The most amazing picture you will ever see<p>call helpline 999 if you are planning to "
"quit smoking</p>",
"type": "picture",
"priority": 5,
"slugline": "AMAZING PICTURE",
"ednote": "TEST ONLY",
"source": "AAP",
}
self.assertEqual(expected, json.loads(doc))
self.assertNotIn('viewImage', json.loads(doc).get('renditions'))
def test_composite_formatter(self):
article = {
"guid": "urn:newsml:localhost:2015-07-24T15:05:00.116047:435c93c2-492c-4668-ab47-ae6e2b9b1c2c",
"groups": [
{"id": "root", "refs": [{"idRef": "main"}, {"idRef": "sidebars"}], "role": "grpRole:NEP"},
{
"id": "main",
"refs": [
{
"renditions": {},
"slugline": "Boat",
"guid": "tag:localhost:2015:515b895a-b336-48b2-a506-5ffaf561b916",
"headline": "WA:Navy steps in with WA asylum-seeker boat",
"location": "archive",
"type": "text",
"itemClass": "icls:text",
"residRef": "tag:localhost:2015:515b895a-b336-48b2-a506-5ffaf561b916",
}
],
"role": "grpRole:main",
},
{
"id": "sidebars",
"refs": [
{
"renditions": {
"original_source": {
"href": "https://one-api.aap.com.au\
/api/v3/Assets/20150723001158639795/Original/download",
"mimetype": "image/jpeg",
},
"original": {
"width": 2784,
"height": 4176,
"href": "http://localhost:5000\
/api/upload/55b078b21d41c8e974d17ec5/raw?_schema=http",
"mimetype": "image/jpeg",
"media": "55b078b21d41c8e974d17ec5",
},
"thumbnail": {
"width": 80,
"height": 120,
"href": "http://localhost:5000\
/api/upload/55b078b41d41c8e974d17ed3/raw?_schema=http",
"mimetype": "image/jpeg",
"media": "55b078b41d41c8e974d17ed3",
},
"viewImage": {
"width": 426,
"height": 640,
"href": "http://localhost:5000\
/api/upload/55b078b31d41c8e974d17ed1/raw?_schema=http",
"mimetype": "image/jpeg",
"media": "55b078b31d41c8e974d17ed1",
},
"baseImage": {
"width": 933,
"height": 1400,
"href": "http://localhost:5000\
/api/upload/55b078b31d41c8e974d17ecf/raw?_schema=http",
"mimetype": "image/jpeg",
"media": "55b078b31d41c8e974d17ecf",
},
},
"slugline": "ABC SHOP CLOSURES",
"type": "picture",
"guid": "urn:newsml:localhost:2015-07-24T15:04:29.589984:"
"af3bef9a-5002-492b-a15a-8b460e69b164",
"headline": "ABC SHOP CLOSURES",
"location": "archive",
"itemClass": "icls:picture",
"residRef": "urn:newsml:localhost:2015-07-24T15:04:29.589984:"
"af3bef9a-5002-492b-a15a-8b460e69b164",
}
],
"role": "grpRole:sidebars",
},
],
"description": "",
"operation": "update",
"sign_off": "mar",
"type": "composite",
"pubstatus": "usable",
"version_creator": "558379451d41c83ff598a3af",
"language": "en",
"guid": "urn:newsml:localhost:2015-07-24T15:05:00.116047:435c93c2-492c-4668-ab47-ae6e2b9b1c2c",
"unique_name": "#145",
"headline": "WA:Navy steps in with WA asylum-seeker boat",
"original_creator": "558379451d41c83ff598a3af",
"source": "AAP",
"_etag": "b41df79084304219524a092abf07ecba9e1bb2c5",
"slugline": "Boat",
"firstcreated": "2015-07-24T05:05:00.000Z",
"unique_id": 145,
"versioncreated": "2015-07-24T05:05:14.000Z",
"_updated": "2015-07-24T05:05:25.000Z",
"family_id": "urn:newsml:localhost:2015-07-24T15:05:00.116047:435c93c2-492c-4668-ab47-ae6e2b9b1c2c",
"_current_version": 2,
"_created": "2015-07-24T05:05:00.000Z",
"version": 2,
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
expected = {
"headline": "WA:Navy steps in with WA asylum-seeker boat",
"version": "2",
"guid": "urn:newsml:localhost:2015-07-24T15:05:00.116047:435c93c2-492c-4668-ab47-ae6e2b9b1c2c",
"associations": {
"main": {"guid": "tag:localhost:2015:515b895a-b336-48b2-a506-5ffaf561b916", "type": "text"},
"sidebars": {
"guid": "urn:newsml:localhost:2015-07-24T15:04:29.589984:af3bef9a-5002-492b-a15a-8b460e69b164",
"type": "picture",
},
},
"firstcreated": "2015-07-24T05:05:00.000Z",
"versioncreated": "2015-07-24T05:05:14.000Z",
"type": "composite",
"pubstatus": "usable",
"language": "en",
"priority": 5,
"slugline": "Boat",
"source": "AAP",
}
self.assertEqual(expected, json.loads(doc))
def test_item_with_usable_associations(self):
article = {
"_id": "urn:bar",
"guid": "urn:bar",
"_current_version": 1,
"type": "text",
"associations": {
"image": {
"_id": "urn:foo",
"guid": "urn:foo",
"pubstatus": "usable",
"headline": "Foo",
"type": "picture",
"task": {},
"copyrightholder": "Foo ltd.",
"description_text": "Foo picture",
"renditions": {
"original": {
"href": "http://example.com",
"width": 100,
"height": 80,
"mimetype": "image/jpeg",
"CropLeft": 0,
}
},
}
},
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
formatted = json.loads(doc)
self.assertIn("associations", formatted)
self.assertIn("image", formatted["associations"])
image = formatted["associations"]["image"]
self.assertEqual("urn:foo", image["guid"])
self.assertEqual("Foo", image["headline"])
self.assertEqual("usable", image["pubstatus"])
self.assertNotIn("task", image)
self.assertEqual("Foo ltd.", image["copyrightholder"])
self.assertEqual("Foo picture", image["description_text"])
rendition = image["renditions"]["original"]
self.assertEqual(100, rendition["width"])
self.assertEqual(80, rendition["height"])
self.assertEqual("image/jpeg", rendition["mimetype"])
self.assertNotIn("CropLeft", rendition)
def test_item_with_empty_associations(self):
article = {
"_id": "urn:bar",
"guid": "urn:bar",
"_current_version": 1,
"type": "text",
"associations": {"image": None},
}
_, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
formatted = json.loads(doc)
self.assertIn("associations", formatted)
self.assertNotIn("image", formatted["associations"])
def test_vidible_formatting(self):
article = {
"_id": "tag:aap.com.au:20150613:12345",
"guid": "tag:aap.com.au:20150613:12345",
"_current_version": 1,
"source": "AAP",
"headline": "This is a test headline",
"slugline": "slugline",
"unique_id": "1",
"body_html": "The story body",
"type": "text",
"state": "published",
"pubstatus": "usable",
"associations": {
"embedded5346670761": {
"uri": "56ba77bde4b0568f54a1ce68",
"alt_text": "alternative",
"copyrightholder": "Edouard",
"copyrightnotice": "Edited with Gimp",
"usageterms": "indefinite-usage",
"type": "video",
"title": "Embed title",
"company": "Press Association",
"url": "https://videos.vidible.tv/prod/2016-02/09/56ba777ce4b0b6448ed478f5_640x360.mp4",
"thumbnail": "https://cdn-ssl.vidible.tv/2016-02/09/56ba777ce4b0b6448ed478f5_60x60.jpg",
"duration": 100,
"width": 400,
"height": 200,
}
},
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
expected = {
"guid": "tag:aap.com.au:20150613:12345",
"version": "1",
"pubstatus": "usable",
"body_html": "The story body",
"type": "text",
"headline": "This is a test headline",
"slugline": "slugline",
"priority": 5,
"source": "AAP",
"charcount": 14,
"wordcount": 3,
"readtime": 0,
"associations": {
"embedded5346670761": {
"guid": "56ba77bde4b0568f54a1ce68",
"type": "video",
"version": "1",
"priority": 5,
"body_text": "alternative",
"copyrightholder": "Edouard",
"copyrightnotice": "Edited with Gimp",
"usageterms": "indefinite-usage",
"headline": "Embed title",
"organisation": [{"name": "Press Association"}],
"renditions": {
"original": {
"href": "https://videos.vidible.tv/prod/2016-02/09/56ba777ce4b0b6448ed478f5_640x360.mp4",
"duration": 100,
"width": 400,
"height": 200,
},
"thumbnail": {
"href": "https://cdn-ssl.vidible.tv/2016-02/09/56ba777ce4b0b6448ed478f5_60x60.jpg"
},
},
}
},
}
self.assertEqual(json.loads(doc), expected)
def test_copyright_holder_notice(self):
self.app.data.insert(
"vocabularies",
[
{
"_id": "rightsinfo",
"items": [
{
"is_active": True,
"name": "default",
"copyrightHolder": "copyright holder",
"copyrightNotice": "copyright notice",
"usageTerms": "",
}
],
}
],
)
article = {"_id": "urn:bar", "_current_version": 1, "guid": "urn:bar", "type": "text"}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual("copyright holder", data["copyrightholder"])
self.assertEqual("copyright notice", data["copyrightnotice"])
self.assertEqual("", data["usageterms"])
def test_body_html(self):
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"body_html": (250 * 6 - 40) * "word ",
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["charcount"], 7300)
self.assertEqual(data["wordcount"], 1460)
self.assertEqual(data["readtime"], 6)
def test_body_text(self):
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"body_text": (250 * 7 - 40) * "word ",
}
data = self._format(article)
self.assertEqual(data["charcount"], 8550)
self.assertEqual(data["wordcount"], 1710)
self.assertEqual(data["readtime"], 7)
# check japanese
article["language"] = "ja"
article["body_text"] = 5000 * "x"
data = self._format(article)
self.assertEqual(data["readtime"], 8)
article["body_text"] = 5000 * " "
data = self._format(article)
self.assertEqual(data["readtime"], 0)
def _format(self, article):
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
return json.loads(doc)
def test_empty_abstract(self):
article = {"_id": "urn:bar", "_current_version": 1, "guid": "urn:bar", "type": "text", "abstract": ""}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["description_html"], "")
self.assertEqual(data["description_text"], "")
def test_authors(self):
self.app.data.insert(
"users",
[
{
"_id": "test_id",
"username": "author 1",
"display_name": "author 1",
"is_author": True,
"job_title": "writer_code",
"biography": "bio 1",
"facebook": "johnsmith",
"twitter": "@smith_john",
"instagram": "john_s",
"picture_url": "http://example.com",
},
{
"_id": "test_id_2",
"username": "author 2",
"display_name": "author 2",
"is_author": True,
"job_title": "reporter_code",
"biography": "bio 2",
},
],
)
self.app.data.insert(
"vocabularies",
[
{
"_id": "job_titles",
"display_name": "Job Titles",
"type": "manageable",
"unique_field": "qcode",
"items": [
{"is_active": True, "name": "Writer", "qcode": "writer_code"},
{"is_active": True, "name": "Reporter", "qcode": "reporter_code"},
],
"schema": {"name": {}, "qcode": {}},
}
],
)
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"authors": [
{
"_id": ["test_id", "writer"],
"role": "writer",
"name": "Writer",
"parent": "test_id",
"sub_label": "author 1",
},
{
"_id": ["test_id_2", "writer"],
"role": "photographer",
"name": "photographer",
"parent": "test_id_2",
"sub_label": "author 2",
},
],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
expected = [
{
"name": "author 1",
"role": "writer",
"jobtitle": {"qcode": "writer_code", "name": "Writer"},
"biography": "bio 1",
"facebook": "johnsmith",
"twitter": "@smith_john",
"instagram": "john_s",
"avatar_url": "http://example.com",
},
{
"name": "author 2",
"role": "photographer",
"jobtitle": {"qcode": "reporter_code", "name": "Reporter"},
"biography": "bio 2",
},
]
self.assertEqual(data["authors"], expected)
def test_author_missing_parent(self):
"""Test that older items with missing parent don't make the formatter crashing"""
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"authors": [
{"_id": ["test_id", "writer"], "role": "writer", "name": "Writer", "sub_label": "author 1"},
{
"_id": ["test_id_2", "writer"],
"role": "photographer",
"name": "photographer",
"sub_label": "author 2",
},
],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
expected = {
"guid": "urn:bar",
"version": "1",
"type": "text",
"priority": 5,
"authors": [
{"name": "Writer", "role": "writer", "biography": ""},
{"name": "photographer", "role": "photographer", "biography": ""},
],
}
self.assertEqual(data, expected)
def test_place(self):
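        # The ninjs place name is resolved from the locators vocabulary,
        # apparently preferring country, then world_region, then group.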
self.app.data.insert(
"vocabularies",
[
{
"_id": "locators",
"display_name": "Locators",
"type": "unmanageable",
"unique_field": "qcode",
"items": [
{
"is_active": True,
"name": "JPN",
"qcode": "JPN",
"state": "",
"country": "Japan",
"world_region": "Asia",
"group": "Rest Of World",
},
{"is_active": True, "name": "SAM", "qcode": "SAM", "group": "Rest Of World"},
{
"is_active": True,
"name": "UK",
"qcode": "UK",
"state": "",
"country": "",
"world_region": "Europe",
"group": "Rest Of World",
},
],
}
],
)
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"place": [{"name": "JPN", "qcode": "JPN"}],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["place"], [{"code": "JPN", "name": "Japan"}])
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"place": [{"name": "SAM", "qcode": "SAM"}],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["place"], [{"code": "SAM", "name": "Rest Of World"}])
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"place": [{"name": "UK", "qcode": "UK"}],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["place"], [{"code": "UK", "name": "Europe"}])
def test_translations(self):
"""Check that fields are correctly translated"""
article = {
"_id": "5a68a134cc3a2d4bd6399177",
"type": "text",
"guid": "test",
"genre": [
{
"name": "Education",
"qcode": "genre_custom:Education",
"translations": {
"name": {"de": "Weiterbildung", "it": "Educazione finanziaria", "ja": "トレーニング用教材"}
},
"scheme": "genre_custom",
}
],
"language": "ja",
"headline": "test",
"body_html": "<p>test ter</p>",
"subject": [
{
"name": "Outcome orientated solutions",
"parent": "subject:01000000",
"qcode": "subject:01000002",
"translations": {
"name": {"de": "Ergebnisorientiert", "it": "Orientato ai risultati ", "ja": "アウトカム・オリエンティッド"}
},
"scheme": "subject_custom",
},
{
"name": "Austria",
"qcode": "country_custom:1001002",
"translations": {"name": {"de": "\u00d6sterreich", "it": "Austria", "ja": "オーストリア"}},
"scheme": "country_custom",
},
{
"name": "Asia ex Japan",
"qcode": "region_custom:Asia ex Japan",
"translations": {"name": {"de": "Asien exkl. Japan", "it": "Asia escl. Giappone", "ja": "日本除くアジア"}},
"scheme": "region_custom",
},
{"name": "no translations", "qcode": "test", "translations": None, "scheme": "test"},
],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
ninjs = json.loads(doc)
expected_genre = [{"code": "genre_custom:Education", "name": "トレーニング用教材", "scheme": "genre_custom"}]
self.assertEqual(ninjs["genre"], expected_genre)
expected_subject = [
{"code": "subject:01000002", "name": "アウトカム・オリエンティッド", "scheme": "subject_custom"},
{"code": "country_custom:1001002", "name": "オーストリア", "scheme": "country_custom"},
{"code": "region_custom:Asia ex Japan", "name": "日本除くアジア", "scheme": "region_custom"},
{"code": "test", "name": "no translations", "scheme": "test"},
]
self.assertEqual(ninjs["subject"], expected_subject)
def test_place_geonames(self):
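        # Geonames places pass through with only name, code and scheme kept.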
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"place": [
{
"name": "Kobeřice",
"code": "3073493",
"scheme": "geonames",
"state": "Moravskoslezský kraj",
"country": "Česko",
"state_code": "80",
"country_code": "CZ",
"location": {"lat": 49.98548, "lon": 18.05212},
}
],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
ninjs = json.loads(doc)
self.assertEqual({"name": "Kobeřice", "code": "3073493", "scheme": "geonames"}, ninjs["place"][0])
def test_custom_media(self):
"""Test that custom media are put in "groups" field and not associations (SDESK-2955)"""
self.app.data.insert(
"content_types",
[
{
"_id": ObjectId("5ba11fec0d6f1301ac3cbd13"),
"label": "custom media field multi",
"editor": {
"slugline": {"order": 2, "sdWidth": "full"},
"headline": {"order": 3, "formatOptions": ["underline", "link", "bold"]},
"custom_media_field_multi_1": {"order": 1},
},
"schema": {
"headline": {"type": "string", "required": False, "maxlength": 64, "nullable": True},
"slugline": {"type": "string", "required": False, "maxlength": 24, "nullable": True},
"custom_media_field_multi_1": {
"type": "media",
"required": False,
"enabled": True,
"nullable": True,
},
},
}
],
)
article = {
"_id": "5ba1224e0d6f13056bd82d50",
"type": "text",
"version": 1,
"profile": "5ba11fec0d6f1301ac3cbd13",
"format": "HTML",
"template": "5ba11fec0d6f1301ac3cbd15",
"headline": "custom media field multi",
"slugline": "test custom media2",
"guid": "123",
"associations": {
"custom_media_field_multi_1--1": {
"renditions": {
"original": {
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
"height": 331,
}
},
"media": "abc",
"type": "picture",
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
},
"custom_media_field_multi_1--2": {
"renditions": {
"original": {
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
"height": 331,
}
},
"media": "cde",
"type": "picture",
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
},
},
}
expected = {
"associations": {
"custom_media_field_multi_1--1": {
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
"custom_media_field_multi_1--2": {
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
},
"extra_items": {
"custom_media_field_multi_1": {
"type": "media",
"items": [
{
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
{
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
]
}
},
"guid": "123",
"headline": "custom media field multi",
"priority": 5,
"profile": "custommediafieldmulti",
"slugline": "test custom media2",
"type": "text",
"version": "1",
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
ninjs = json.loads(doc)
self.assertEqual(ninjs, expected)
def test_custom_related_items(self):
self.app.data.insert(
"content_types",
[
{
"_id": ObjectId("5ba11fec0d6f1301ac3cbd13"),
"label": "custom related content",
"editor": {
"slugline": {"order": 2, "sdWidth": "full"},
"headline": {"order": 3, "formatOptions": ["underline", "link", "bold"]},
"custom_related_content": {"order": 1},
},
"schema": {
"headline": {"type": "string", "required": False, "maxlength": 64, "nullable": True},
"slugline": {"type": "string", "required": False, "maxlength": 24, "nullable": True},
"custom_related_content": {
"type": "related_content",
"required": False,
"enabled": True,
"nullable": True,
},
},
}
],
)
article = {
"_id": "5ba1224e0d6f13056bd82d50",
"type": "text",
"version": 1,
"profile": "5ba11fec0d6f1301ac3cbd13",
"format": "HTML",
"template": "5ba11fec0d6f1301ac3cbd15",
"headline": "custom related content",
"slugline": "test custom related content",
"guid": "123",
"associations": {
"custom_related_content--1": {
"renditions": {
"original": {
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
"height": 331,
}
},
"media": "abc",
"type": "picture",
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
},
"custom_related_content--2": {
"renditions": {
"original": {
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
"height": 331,
}
},
"media": "cde",
"type": "picture",
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
},
},
}
expected = {
"associations": {
"custom_related_content--1": {
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
"custom_related_content--2": {
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
},
"extra_items": {
"custom_related_content": {
"type": "related_content",
"items": [
{
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
{
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
]
}
},
"guid": "123",
"headline": "custom related content",
"priority": 5,
"profile": "customrelatedcontent",
"slugline": "test custom related content",
"type": "text",
"version": "1",
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
ninjs = json.loads(doc)
self.assertEqual(ninjs, expected)
| agpl-3.0 | -6,270,552,180,092,691,000 | 39.49 | 120 | 0.393296 | false |
zengenti/ansible | lib/ansible/utils/module_docs_fragments/dellos9.py | 42 | 3407 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
required: false
default: 22
username:
description:
- User to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_USERNAME) will be used instead.
required: false
password:
description:
- Password to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_PASSWORD) will be used instead.
required: false
default: null
ssh_keyfile:
description:
- Path to an ssh key used to authenticate the SSH session to the remote
device. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
required: false
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
required: false
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
required: false
default: none
timeout:
description:
      - Specifies the idle timeout (in seconds) for the connection. Useful
        if the console freezes before continuing, for example when saving
        configurations.
required: false
default: 10
provider:
description:
- Convenience method that allows all I(dellos9) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
required: false
default: null
"""
| gpl-3.0 | 8,970,029,089,917,555,000 | 36.855556 | 79 | 0.704726 | false |
dag/genshi | genshi/filters/transform.py | 23 | 48218 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""A filter for functional-style transformations of markup streams.
The `Transformer` filter provides a variety of transformations that can be
applied to parts of streams that match given XPath expressions. These
transformations can be chained to achieve results that would be comparitively
tedious to achieve by writing stream filters by hand. The approach of chaining
node selection and transformation has been inspired by the `jQuery`_ Javascript
library.
.. _`jQuery`: http://jquery.com/
For example, the following transformation removes the ``<title>`` element from
the ``<head>`` of the input document:
>>> from genshi.builder import tag
>>> html = HTML('''<html>
... <head><title>Some Title</title></head>
... <body>
... Some <em>body</em> text.
... </body>
... </html>''',
... encoding='utf-8')
>>> print(html | Transformer('body/em').map(unicode.upper, TEXT)
... .unwrap().wrap(tag.u))
<html>
<head><title>Some Title</title></head>
<body>
Some <u>BODY</u> text.
</body>
</html>
The ``Transformer`` support a large number of useful transformations out of the
box, but custom transformations can be added easily.
:since: version 0.5
"""
import re
import sys
from genshi.builder import Element
from genshi.core import Stream, Attrs, QName, TEXT, START, END, _ensure, Markup
from genshi.path import Path
__all__ = ['Transformer', 'StreamBuffer', 'InjectorTransformation', 'ENTER',
'EXIT', 'INSIDE', 'OUTSIDE', 'BREAK']
class TransformMark(str):
"""A mark on a transformation stream."""
__slots__ = []
_instances = {}
def __new__(cls, val):
return cls._instances.setdefault(val, str.__new__(cls, val))
ENTER = TransformMark('ENTER')
"""Stream augmentation mark indicating that a selected element is being
entered."""
INSIDE = TransformMark('INSIDE')
"""Stream augmentation mark indicating that processing is currently inside a
selected element."""
OUTSIDE = TransformMark('OUTSIDE')
"""Stream augmentation mark indicating that a match occurred outside a selected
element."""
ATTR = TransformMark('ATTR')
"""Stream augmentation mark indicating a selected element attribute."""
EXIT = TransformMark('EXIT')
"""Stream augmentation mark indicating that a selected element is being
exited."""
BREAK = TransformMark('BREAK')
"""Stream augmentation mark indicating a break between two otherwise contiguous
blocks of marked events.
This is used primarily by the cut() transform to provide later transforms with
an opportunity to operate on the cut buffer.
"""
class PushBackStream(object):
"""Allows a single event to be pushed back onto the stream and re-consumed.
"""
def __init__(self, stream):
self.stream = iter(stream)
self.peek = None
def push(self, event):
assert self.peek is None
self.peek = event
def __iter__(self):
while True:
if self.peek is not None:
peek = self.peek
self.peek = None
yield peek
else:
try:
event = self.stream.next()
yield event
except StopIteration:
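                    # A downstream transform may have pushed an event back
                    # even though the underlying stream is exhausted.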
if self.peek is None:
raise
class Transformer(object):
"""Stream filter that can apply a variety of different transformations to
a stream.
This is achieved by selecting the events to be transformed using XPath,
then applying the transformations to the events matched by the path
expression. Each marked event is in the form (mark, (kind, data, pos)),
where mark can be any of `ENTER`, `INSIDE`, `EXIT`, `OUTSIDE`, or `None`.
The first three marks match `START` and `END` events, and any events
contained `INSIDE` any selected XML/HTML element. A non-element match
outside a `START`/`END` container (e.g. ``text()``) will yield an `OUTSIDE`
mark.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
Transformations act on selected stream events matching an XPath expression.
Here's an example of removing some markup (the title, in this case)
selected by an expression:
>>> print(html | Transformer('head/title').remove())
<html><head/><body>Some <em>body</em> text.</body></html>
Inserted content can be passed in the form of a string, or a markup event
stream, which includes streams generated programmatically via the
`builder` module:
>>> from genshi.builder import tag
>>> print(html | Transformer('body').prepend(tag.h1('Document Title')))
<html><head><title>Some Title</title></head><body><h1>Document
Title</h1>Some <em>body</em> text.</body></html>
Each XPath expression determines the set of tags that will be acted upon by
subsequent transformations. In this example we select the ``<title>`` text,
copy it into a buffer, then select the ``<body>`` element and paste the
copied text into the body as ``<h1>`` enclosed text:
>>> buffer = StreamBuffer()
>>> print(html | Transformer('head/title/text()').copy(buffer)
... .end().select('body').prepend(tag.h1(buffer)))
<html><head><title>Some Title</title></head><body><h1>Some Title</h1>Some
<em>body</em> text.</body></html>
Transformations can also be assigned and reused, although care must be
taken when using buffers, to ensure that buffers are cleared between
transforms:
>>> emphasis = Transformer('body//em').attr('class', 'emphasis')
>>> print(html | emphasis)
<html><head><title>Some Title</title></head><body>Some <em
class="emphasis">body</em> text.</body></html>
"""
__slots__ = ['transforms']
def __init__(self, path='.'):
"""Construct a new transformation filter.
:param path: an XPath expression (as string) or a `Path` instance
"""
self.transforms = [SelectTransformation(path)]
def __call__(self, stream, keep_marks=False):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
:param keep_marks: Do not strip transformer selection marks from the
stream. Useful for testing.
:return: the transformed stream
:rtype: `Stream`
"""
transforms = self._mark(stream)
for link in self.transforms:
transforms = link(transforms)
if not keep_marks:
transforms = self._unmark(transforms)
return Stream(transforms,
serializer=getattr(stream, 'serializer', None))
def apply(self, function):
"""Apply a transformation to the stream.
Transformations can be chained, similar to stream filters. Any callable
accepting a marked stream can be used as a transform.
As an example, here is a simple `TEXT` event upper-casing transform:
>>> def upper(stream):
... for mark, (kind, data, pos) in stream:
... if mark and kind is TEXT:
... yield mark, (kind, data.upper(), pos)
... else:
... yield mark, (kind, data, pos)
>>> short_stream = HTML('<body>Some <em>test</em> text</body>',
... encoding='utf-8')
>>> print(short_stream | Transformer('.//em/text()').apply(upper))
<body>Some <em>TEST</em> text</body>
"""
transformer = Transformer()
transformer.transforms = self.transforms[:]
if isinstance(function, Transformer):
transformer.transforms.extend(function.transforms)
else:
transformer.transforms.append(function)
return transformer
#{ Selection operations
def select(self, path):
"""Mark events matching the given XPath expression, within the current
selection.
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer().select('.//em').trace())
(None, ('START', (QName('body'), Attrs()), (None, 1, 0)))
(None, ('TEXT', u'Some ', (None, 1, 6)))
('ENTER', ('START', (QName('em'), Attrs()), (None, 1, 11)))
('INSIDE', ('TEXT', u'test', (None, 1, 15)))
('EXIT', ('END', QName('em'), (None, 1, 19)))
(None, ('TEXT', u' text', (None, 1, 24)))
(None, ('END', QName('body'), (None, 1, 29)))
<body>Some <em>test</em> text</body>
:param path: an XPath expression (as string) or a `Path` instance
:return: the stream augmented by transformation marks
:rtype: `Transformer`
"""
return self.apply(SelectTransformation(path))
def invert(self):
"""Invert selection so that marked events become unmarked, and vice
versa.
        Specifically, all marks are converted to null marks, and all null marks
are converted to OUTSIDE marks.
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer('//em').invert().trace())
('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0)))
('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6)))
(None, ('START', (QName('em'), Attrs()), (None, 1, 11)))
(None, ('TEXT', u'test', (None, 1, 15)))
(None, ('END', QName('em'), (None, 1, 19)))
('OUTSIDE', ('TEXT', u' text', (None, 1, 24)))
('OUTSIDE', ('END', QName('body'), (None, 1, 29)))
<body>Some <em>test</em> text</body>
:rtype: `Transformer`
"""
return self.apply(InvertTransformation())
def end(self):
"""End current selection, allowing all events to be selected.
Example:
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer('//em').end().trace())
('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0)))
('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6)))
('OUTSIDE', ('START', (QName('em'), Attrs()), (None, 1, 11)))
('OUTSIDE', ('TEXT', u'test', (None, 1, 15)))
('OUTSIDE', ('END', QName('em'), (None, 1, 19)))
('OUTSIDE', ('TEXT', u' text', (None, 1, 24)))
('OUTSIDE', ('END', QName('body'), (None, 1, 29)))
<body>Some <em>test</em> text</body>
:return: the stream augmented by transformation marks
:rtype: `Transformer`
"""
return self.apply(EndTransformation())
#{ Deletion operations
def empty(self):
"""Empty selected elements of all content.
Example:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').empty())
<html><head><title>Some Title</title></head><body>Some <em/>
text.</body></html>
:rtype: `Transformer`
"""
return self.apply(EmptyTransformation())
def remove(self):
"""Remove selection from the stream.
Example:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').remove())
<html><head><title>Some Title</title></head><body>Some
text.</body></html>
:rtype: `Transformer`
"""
return self.apply(RemoveTransformation())
#{ Direct element operations
def unwrap(self):
"""Remove outermost enclosing elements from selection.
Example:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').unwrap())
<html><head><title>Some Title</title></head><body>Some body
text.</body></html>
:rtype: `Transformer`
"""
return self.apply(UnwrapTransformation())
def wrap(self, element):
"""Wrap selection in an element.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').wrap('strong'))
<html><head><title>Some Title</title></head><body>Some
<strong><em>body</em></strong> text.</body></html>
:param element: either a tag name (as string) or an `Element` object
:rtype: `Transformer`
"""
return self.apply(WrapTransformation(element))
#{ Content insertion operations
def replace(self, content):
"""Replace selection with content.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//title/text()').replace('New Title'))
<html><head><title>New Title</title></head><body>Some <em>body</em>
text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(ReplaceTransformation(content))
def before(self, content):
"""Insert content before selection.
In this example we insert the word 'emphasised' before the <em> opening
tag:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').before('emphasised '))
<html><head><title>Some Title</title></head><body>Some emphasised
<em>body</em> text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(BeforeTransformation(content))
def after(self, content):
"""Insert content after selection.
Here, we insert some text after the </em> closing tag:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').after(' rock'))
<html><head><title>Some Title</title></head><body>Some <em>body</em>
rock text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(AfterTransformation(content))
def prepend(self, content):
"""Insert content after the ENTER event of the selection.
Inserting some new text at the start of the <body>:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//body').prepend('Some new body text. '))
<html><head><title>Some Title</title></head><body>Some new body text.
Some <em>body</em> text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(PrependTransformation(content))
def append(self, content):
"""Insert content before the END event of the selection.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//body').append(' Some new body text.'))
<html><head><title>Some Title</title></head><body>Some <em>body</em>
text. Some new body text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(AppendTransformation(content))
#{ Attribute manipulation
def attr(self, name, value):
"""Add, replace or delete an attribute on selected elements.
        If `value` evaluates to `None`, the attribute will be deleted from the
element:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em class="before">body</em> <em>text</em>.</body>'
... '</html>', encoding='utf-8')
>>> print(html | Transformer('body/em').attr('class', None))
<html><head><title>Some Title</title></head><body>Some <em>body</em>
<em>text</em>.</body></html>
Otherwise the attribute will be set to `value`:
>>> print(html | Transformer('body/em').attr('class', 'emphasis'))
<html><head><title>Some Title</title></head><body>Some <em
class="emphasis">body</em> <em class="emphasis">text</em>.</body></html>
If `value` is a callable it will be called with the attribute name and
the `START` event for the matching element. Its return value will then
be used to set the attribute:
>>> def print_attr(name, event):
... attrs = event[1][1]
... print(attrs)
... return attrs.get(name)
>>> print(html | Transformer('body/em').attr('class', print_attr))
Attrs([(QName('class'), u'before')])
Attrs()
<html><head><title>Some Title</title></head><body>Some <em
class="before">body</em> <em>text</em>.</body></html>
:param name: the name of the attribute
:param value: the value that should be set for the attribute.
:rtype: `Transformer`
"""
return self.apply(AttrTransformation(name, value))
#{ Buffer operations
def copy(self, buffer, accumulate=False):
"""Copy selection into buffer.
The buffer is replaced by each *contiguous* selection before being passed
to the next transformation. If accumulate=True, further selections will
be appended to the buffer rather than replacing it.
>>> from genshi.builder import tag
>>> buffer = StreamBuffer()
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('head/title/text()').copy(buffer)
... .end().select('body').prepend(tag.h1(buffer)))
<html><head><title>Some Title</title></head><body><h1>Some
Title</h1>Some <em>body</em> text.</body></html>
This example illustrates that only a single contiguous selection will
be buffered:
>>> print(html | Transformer('head/title/text()').copy(buffer)
... .end().select('body/em').copy(buffer).end().select('body')
... .prepend(tag.h1(buffer)))
<html><head><title>Some Title</title></head><body><h1>Some
Title</h1>Some <em>body</em> text.</body></html>
>>> print(buffer)
<em>body</em>
Element attributes can also be copied for later use:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body><em>Some</em> <em class="before">body</em>'
... '<em>text</em>.</body></html>',
... encoding='utf-8')
>>> buffer = StreamBuffer()
>>> def apply_attr(name, entry):
... return list(buffer)[0][1][1].get('class')
>>> print(html | Transformer('body/em[@class]/@class').copy(buffer)
... .end().buffer().select('body/em[not(@class)]')
... .attr('class', apply_attr))
<html><head><title>Some Title</title></head><body><em
class="before">Some</em> <em class="before">body</em><em
class="before">text</em>.</body></html>
:param buffer: the `StreamBuffer` in which the selection should be
stored
:rtype: `Transformer`
:note: Copy (and cut) copy each individual selected object into the
buffer before passing to the next transform. For example, the
XPath ``*|text()`` will select all elements and text, each
instance of which will be copied to the buffer individually
before passing to the next transform. This has implications for
how ``StreamBuffer`` objects can be used, so some
experimentation may be required.
"""
return self.apply(CopyTransformation(buffer, accumulate))
def cut(self, buffer, accumulate=False):
"""Copy selection into buffer and remove the selection from the stream.
>>> from genshi.builder import tag
>>> buffer = StreamBuffer()
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em/text()').cut(buffer)
... .end().select('.//em').after(tag.h1(buffer)))
<html><head><title>Some Title</title></head><body>Some
<em/><h1>body</h1> text.</body></html>
        Specifying accumulate=True appends all selected intervals onto the
buffer. Combining this with the .buffer() operation allows us operate
on all copied events rather than per-segment. See the documentation on
buffer() for more information.
:param buffer: the `StreamBuffer` in which the selection should be
stored
:rtype: `Transformer`
:note: this transformation will buffer the entire input stream
"""
return self.apply(CutTransformation(buffer, accumulate))
def buffer(self):
"""Buffer the entire stream (can consume a considerable amount of
memory).
Useful in conjunction with copy(accumulate=True) and
cut(accumulate=True) to ensure that all marked events in the entire
stream are copied to the buffer before further transformations are
applied.
For example, to move all <note> elements inside a <notes> tag at the
top of the document:
>>> doc = HTML('<doc><notes></notes><body>Some <note>one</note> '
... 'text <note>two</note>.</body></doc>',
... encoding='utf-8')
>>> buffer = StreamBuffer()
>>> print(doc | Transformer('body/note').cut(buffer, accumulate=True)
... .end().buffer().select('notes').prepend(buffer))
<doc><notes><note>one</note><note>two</note></notes><body>Some text
.</body></doc>
"""
return self.apply(list)
#{ Miscellaneous operations
def filter(self, filter):
"""Apply a normal stream filter to the selection. The filter is called
once for each contiguous block of marked events.
>>> from genshi.filters.html import HTMLSanitizer
>>> html = HTML('<html><body>Some text<script>alert(document.cookie)'
... '</script> and some more text</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('body/*').filter(HTMLSanitizer()))
<html><body>Some text and some more text</body></html>
:param filter: The stream filter to apply.
:rtype: `Transformer`
"""
return self.apply(FilterTransformation(filter))
def map(self, function, kind):
"""Applies a function to the ``data`` element of events of ``kind`` in
the selection.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('head/title').map(unicode.upper, TEXT))
<html><head><title>SOME TITLE</title></head><body>Some <em>body</em>
text.</body></html>
:param function: the function to apply
:param kind: the kind of event the function should be applied to
:rtype: `Transformer`
"""
return self.apply(MapTransformation(function, kind))
def substitute(self, pattern, replace, count=1):
"""Replace text matching a regular expression.
Refer to the documentation for ``re.sub()`` for details.
>>> html = HTML('<html><body>Some text, some more text and '
... '<b>some bold text</b>\\n'
... '<i>some italicised text</i></body></html>',
... encoding='utf-8')
>>> print(html | Transformer('body/b').substitute('(?i)some', 'SOME'))
<html><body>Some text, some more text and <b>SOME bold text</b>
<i>some italicised text</i></body></html>
>>> tags = tag.html(tag.body('Some text, some more text and\\n',
... Markup('<b>some bold text</b>')))
>>> print(tags.generate() | Transformer('body').substitute(
... '(?i)some', 'SOME'))
<html><body>SOME text, some more text and
<b>SOME bold text</b></body></html>
:param pattern: A regular expression object or string.
:param replace: Replacement pattern.
:param count: Number of replacements to make in each text fragment.
:rtype: `Transformer`
"""
return self.apply(SubstituteTransformation(pattern, replace, count))
def rename(self, name):
"""Rename matching elements.
>>> html = HTML('<html><body>Some text, some more text and '
... '<b>some bold text</b></body></html>',
... encoding='utf-8')
>>> print(html | Transformer('body/b').rename('strong'))
<html><body>Some text, some more text and <strong>some bold text</strong></body></html>
"""
return self.apply(RenameTransformation(name))
def trace(self, prefix='', fileobj=None):
"""Print events as they pass through the transform.
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer('em').trace())
(None, ('START', (QName('body'), Attrs()), (None, 1, 0)))
(None, ('TEXT', u'Some ', (None, 1, 6)))
('ENTER', ('START', (QName('em'), Attrs()), (None, 1, 11)))
('INSIDE', ('TEXT', u'test', (None, 1, 15)))
('EXIT', ('END', QName('em'), (None, 1, 19)))
(None, ('TEXT', u' text', (None, 1, 24)))
(None, ('END', QName('body'), (None, 1, 29)))
<body>Some <em>test</em> text</body>
:param prefix: a string to prefix each event with in the output
:param fileobj: the writable file-like object to write to; defaults to
the standard output stream
:rtype: `Transformer`
"""
return self.apply(TraceTransformation(prefix, fileobj=fileobj))
# Internal methods
def _mark(self, stream):
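        # Tag every raw event as OUTSIDE so transformations receive a
        # uniformly marked stream.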
for event in stream:
yield OUTSIDE, event
def _unmark(self, stream):
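        # Strip marks and drop internal pseudo-events (null, ATTR and
        # BREAK kinds) before the stream leaves the transformer.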
for mark, event in stream:
kind = event[0]
if not (kind is None or kind is ATTR or kind is BREAK):
yield event
class SelectTransformation(object):
"""Select and mark events that match an XPath expression."""
def __init__(self, path):
"""Create selection.
:param path: an XPath expression (as string) or a `Path` object
"""
if not isinstance(path, Path):
path = Path(path)
self.path = path
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
namespaces = {}
variables = {}
test = self.path.test()
stream = iter(stream)
next = stream.next
for mark, event in stream:
if mark is None:
yield mark, event
continue
result = test(event, namespaces, variables)
# XXX This is effectively genshi.core._ensure() for transform
# streams.
if result is True:
if event[0] is START:
yield ENTER, event
depth = 1
while depth > 0:
mark, subevent = next()
if subevent[0] is START:
depth += 1
elif subevent[0] is END:
depth -= 1
if depth == 0:
yield EXIT, subevent
else:
yield INSIDE, subevent
test(subevent, namespaces, variables, updateonly=True)
else:
yield OUTSIDE, event
elif isinstance(result, Attrs):
# XXX Selected *attributes* are given a "kind" of None to
# indicate they are not really part of the stream.
yield ATTR, (ATTR, (QName(event[1][0] + '@*'), result), event[2])
yield None, event
elif isinstance(result, tuple):
yield OUTSIDE, result
elif result:
# XXX Assume everything else is "text"?
yield None, (TEXT, unicode(result), (None, -1, -1))
else:
yield None, event
class InvertTransformation(object):
"""Invert selection so that marked events become unmarked, and vice versa.
    Specifically, all input marks are converted to null marks, and all input
null marks are converted to OUTSIDE marks.
"""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
if mark:
yield None, event
else:
yield OUTSIDE, event
class EndTransformation(object):
"""End the current selection."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
yield OUTSIDE, event
class EmptyTransformation(object):
"""Empty selected elements of all content."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
yield mark, event
if mark is ENTER:
for mark, event in stream:
if mark is EXIT:
yield mark, event
break
class RemoveTransformation(object):
"""Remove selection from the stream."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
if mark is None:
yield mark, event
class UnwrapTransformation(object):
"""Remove outtermost enclosing elements from selection."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
if mark not in (ENTER, EXIT):
yield mark, event
class WrapTransformation(object):
"""Wrap selection in an element."""
def __init__(self, element):
if isinstance(element, Element):
self.element = element
else:
self.element = Element(element)
def __call__(self, stream):
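        # On the first marked event, emit the wrapping element's opening
        # events; its closing event (element[-1]) is emitted once the
        # selection ends.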
for mark, event in stream:
if mark:
element = list(self.element.generate())
for prefix in element[:-1]:
yield None, prefix
yield mark, event
start = mark
stopped = False
for mark, event in stream:
if start is ENTER and mark is EXIT:
yield mark, event
stopped = True
break
if not mark:
break
yield mark, event
else:
stopped = True
yield None, element[-1]
if not stopped:
yield mark, event
else:
yield mark, event
class TraceTransformation(object):
"""Print events as they pass through the transform."""
def __init__(self, prefix='', fileobj=None):
"""Trace constructor.
:param prefix: text to prefix each traced line with.
:param fileobj: the writable file-like object to write to
"""
self.prefix = prefix
self.fileobj = fileobj or sys.stdout
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for event in stream:
self.fileobj.write('%s%s\n' % (self.prefix, event))
yield event
class FilterTransformation(object):
"""Apply a normal stream filter to the selection. The filter is called once
for each selection."""
def __init__(self, filter):
"""Create the transform.
:param filter: The stream filter to apply.
"""
self.filter = filter
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
def flush(queue):
if queue:
for event in self.filter(queue):
yield OUTSIDE, event
del queue[:]
queue = []
for mark, event in stream:
if mark is ENTER:
queue.append(event)
for mark, event in stream:
queue.append(event)
if mark is EXIT:
break
for queue_event in flush(queue):
yield queue_event
elif mark is OUTSIDE:
stopped = False
queue.append(event)
for mark, event in stream:
if mark is not OUTSIDE:
break
queue.append(event)
else:
stopped = True
for queue_event in flush(queue):
yield queue_event
if not stopped:
yield mark, event
else:
yield mark, event
for queue_event in flush(queue):
yield queue_event
class MapTransformation(object):
"""Apply a function to the `data` element of events of ``kind`` in the
selection.
"""
def __init__(self, function, kind):
"""Create the transform.
:param function: the function to apply; the function must take one
argument, the `data` element of each selected event
:param kind: the stream event ``kind`` to apply the `function` to
"""
self.function = function
self.kind = kind
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, (kind, data, pos) in stream:
if mark and self.kind in (None, kind):
yield mark, (kind, self.function(data), pos)
else:
yield mark, (kind, data, pos)
class SubstituteTransformation(object):
"""Replace text matching a regular expression.
Refer to the documentation for ``re.sub()`` for details.
"""
def __init__(self, pattern, replace, count=0):
"""Create the transform.
:param pattern: A regular expression object, or string.
:param replace: Replacement pattern.
:param count: Number of replacements to make in each text fragment.
"""
if isinstance(pattern, basestring):
self.pattern = re.compile(pattern)
else:
self.pattern = pattern
self.count = count
self.replace = replace
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, (kind, data, pos) in stream:
if mark is not None and kind is TEXT:
new_data = self.pattern.sub(self.replace, data, self.count)
if isinstance(data, Markup):
data = Markup(new_data)
else:
data = new_data
yield mark, (kind, data, pos)
class RenameTransformation(object):
"""Rename matching elements."""
def __init__(self, name):
"""Create the transform.
:param name: New element name.
"""
self.name = QName(name)
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, (kind, data, pos) in stream:
if mark is ENTER:
data = self.name, data[1]
elif mark is EXIT:
data = self.name
yield mark, (kind, data, pos)
class InjectorTransformation(object):
"""Abstract base class for transformations that inject content into a
stream.
>>> class Top(InjectorTransformation):
... def __call__(self, stream):
... for event in self._inject():
... yield event
... for event in stream:
... yield event
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer('.//em').apply(Top('Prefix ')))
Prefix <body>Some <em>test</em> text</body>
"""
def __init__(self, content):
"""Create a new injector.
:param content: An iterable of Genshi stream events, or a string to be
injected.
"""
self.content = content
def _inject(self):
content = self.content
if hasattr(content, '__call__'):
content = content()
for event in _ensure(content):
yield None, event
class ReplaceTransformation(InjectorTransformation):
"""Replace selection with content."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
stream = PushBackStream(stream)
for mark, event in stream:
if mark is not None:
start = mark
for subevent in self._inject():
yield subevent
for mark, event in stream:
if start is ENTER:
if mark is EXIT:
break
elif mark != start:
stream.push((mark, event))
break
else:
yield mark, event
class BeforeTransformation(InjectorTransformation):
"""Insert content before selection."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
stream = PushBackStream(stream)
for mark, event in stream:
if mark is not None:
start = mark
for subevent in self._inject():
yield subevent
yield mark, event
for mark, event in stream:
if mark != start and start is not ENTER:
stream.push((mark, event))
break
yield mark, event
if start is ENTER and mark is EXIT:
break
else:
yield mark, event
class AfterTransformation(InjectorTransformation):
"""Insert content after selection."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
stream = PushBackStream(stream)
for mark, event in stream:
yield mark, event
if mark:
start = mark
for mark, event in stream:
if start is not ENTER and mark != start:
stream.push((mark, event))
break
yield mark, event
if start is ENTER and mark is EXIT:
break
for subevent in self._inject():
yield subevent
class PrependTransformation(InjectorTransformation):
"""Prepend content to the inside of selected elements."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, event in stream:
yield mark, event
if mark is ENTER:
for subevent in self._inject():
yield subevent
class AppendTransformation(InjectorTransformation):
"""Append content after the content of selected elements."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, event in stream:
yield mark, event
if mark is ENTER:
for mark, event in stream:
if mark is EXIT:
break
yield mark, event
for subevent in self._inject():
yield subevent
yield mark, event
class AttrTransformation(object):
"""Set an attribute on selected elements."""
def __init__(self, name, value):
"""Construct transform.
:param name: name of the attribute that should be set
:param value: the value to set
"""
self.name = name
self.value = value
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
callable_value = hasattr(self.value, '__call__')
for mark, (kind, data, pos) in stream:
if mark is ENTER:
if callable_value:
value = self.value(self.name, (kind, data, pos))
else:
value = self.value
if value is None:
attrs = data[1] - [QName(self.name)]
else:
attrs = data[1] | [(QName(self.name), value)]
data = (data[0], attrs)
yield mark, (kind, data, pos)
class StreamBuffer(Stream):
"""Stream event buffer used for cut and copy transformations."""
def __init__(self):
"""Create the buffer."""
Stream.__init__(self, [])
def append(self, event):
"""Add an event to the buffer.
:param event: the markup event to add
"""
self.events.append(event)
def reset(self):
"""Empty the buffer of events."""
del self.events[:]
class CopyTransformation(object):
"""Copy selected events into a buffer for later insertion."""
def __init__(self, buffer, accumulate=False):
"""Create the copy transformation.
:param buffer: the `StreamBuffer` in which the selection should be
stored
"""
if not accumulate:
buffer.reset()
self.buffer = buffer
self.accumulate = accumulate
def __call__(self, stream):
"""Apply the transformation to the marked stream.
:param stream: the marked event stream to filter
"""
stream = PushBackStream(stream)
for mark, event in stream:
if mark:
if not self.accumulate:
self.buffer.reset()
events = [(mark, event)]
self.buffer.append(event)
start = mark
for mark, event in stream:
if start is not ENTER and mark != start:
stream.push((mark, event))
break
events.append((mark, event))
self.buffer.append(event)
if start is ENTER and mark is EXIT:
break
for i in events:
yield i
else:
yield mark, event
class CutTransformation(object):
"""Cut selected events into a buffer for later insertion and remove the
selection.
"""
def __init__(self, buffer, accumulate=False):
"""Create the cut transformation.
:param buffer: the `StreamBuffer` in which the selection should be
stored
"""
self.buffer = buffer
self.accumulate = accumulate
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
attributes = []
stream = PushBackStream(stream)
broken = False
if not self.accumulate:
self.buffer.reset()
for mark, event in stream:
if mark:
                # Send a BREAK event if no unmarked event separated this
                # selection from the previous one, so later transforms can
                # tell contiguous cut segments apart.
if not self.accumulate:
if not broken and self.buffer:
yield BREAK, (BREAK, None, None)
self.buffer.reset()
self.buffer.append(event)
start = mark
if mark is ATTR:
attributes.extend([name for name, _ in event[1][1]])
for mark, event in stream:
if start is mark is ATTR:
attributes.extend([name for name, _ in event[1][1]])
# Handle non-element contiguous selection
if start is not ENTER and mark != start:
# Operating on the attributes of a START event
if start is ATTR:
kind, data, pos = event
assert kind is START
data = (data[0], data[1] - attributes)
attributes = None
stream.push((mark, (kind, data, pos)))
else:
stream.push((mark, event))
break
self.buffer.append(event)
if start is ENTER and mark is EXIT:
break
broken = False
else:
broken = True
yield mark, event
if not broken and self.buffer:
yield BREAK, (BREAK, None, None)
| bsd-3-clause | -5,640,835,119,870,386,000 | 35.254135 | 95 | 0.549774 | false |
silentfuzzle/calibre | src/chardet/sjisprober.py | 190 | 3549 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import SJISDistributionAnalysis
from jpcntx import SJISContextAnalysis
from mbcssm import SJISSMModel
import constants, sys
from constants import eStart, eError, eItsMe
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
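                # A complete character was just consumed; feed it to the
                # context and distribution analyzers. _mLastChar is a
                # two-byte window so a character can span feed() calls.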
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen :], charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen : i + 3 - charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1 : i + 1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mContextAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| gpl-3.0 | -748,416,975,761,509,100 | 40.752941 | 108 | 0.633981 | false |
AOSC-Dev/aosc-os-abbs | extra-libs/nss/autobuild/certdata2pem.py | 6 | 7075 | #!/usr/bin/python
# vim:set et sw=4:
#
# certdata2pem.py - splits certdata.txt into multiple files
#
# Copyright (C) 2009 Philipp Kern <[email protected]>
# Copyright (C) 2013 Kai Engert <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
# USA.
import base64
import os.path
import re
import sys
import textwrap
import urllib
objects = []
def printable_serial(obj):
return ".".join(map(lambda x:str(ord(x)), obj['CKA_SERIAL_NUMBER']))
# Dirty file parser.
in_data, in_multiline, in_obj = False, False, False
field, type, value, obj = None, None, None, dict()
for line in open('certdata.txt', 'r'):
# Ignore the file header.
if not in_data:
if line.startswith('BEGINDATA'):
in_data = True
continue
# Ignore comment lines.
if line.startswith('#'):
continue
# Empty lines are significant if we are inside an object.
if in_obj and len(line.strip()) == 0:
objects.append(obj)
obj = dict()
in_obj = False
continue
if len(line.strip()) == 0:
continue
if in_multiline:
if not line.startswith('END'):
if type == 'MULTILINE_OCTAL':
line = line.strip()
for i in re.finditer(r'\\([0-3][0-7][0-7])', line):
value += chr(int(i.group(1), 8))
else:
value += line
continue
obj[field] = value
in_multiline = False
continue
if line.startswith('CKA_CLASS'):
in_obj = True
line_parts = line.strip().split(' ', 2)
if len(line_parts) > 2:
field, type = line_parts[0:2]
value = ' '.join(line_parts[2:])
elif len(line_parts) == 2:
field, type = line_parts
value = None
else:
raise NotImplementedError, 'line_parts < 2 not supported.\n' + line
if type == 'MULTILINE_OCTAL':
in_multiline = True
value = ""
continue
obj[field] = value
if len(obj.items()) > 0:
objects.append(obj)
# Build up trust database.
trustmap = dict()
for obj in objects:
if obj['CKA_CLASS'] != 'CKO_NSS_TRUST':
continue
key = obj['CKA_LABEL'] + printable_serial(obj)
trustmap[key] = obj
print " added trust", key
# Build up cert database.
certmap = dict()
for obj in objects:
if obj['CKA_CLASS'] != 'CKO_CERTIFICATE':
continue
key = obj['CKA_LABEL'] + printable_serial(obj)
certmap[key] = obj
print " added cert", key
def obj_to_filename(obj):
label = obj['CKA_LABEL'][1:-1]
label = label.replace('/', '_')\
.replace(' ', '_')\
.replace('(', '=')\
.replace(')', '=')\
.replace(',', '_')
label = re.sub(r'\\x[0-9a-fA-F]{2}', lambda m:chr(int(m.group(0)[2:], 16)), label)
serial = printable_serial(obj)
return label + ":" + serial
trust_types = {
"CKA_TRUST_DIGITAL_SIGNATURE": "digital-signature",
"CKA_TRUST_NON_REPUDIATION": "non-repudiation",
"CKA_TRUST_KEY_ENCIPHERMENT": "key-encipherment",
"CKA_TRUST_DATA_ENCIPHERMENT": "data-encipherment",
"CKA_TRUST_KEY_AGREEMENT": "key-agreement",
"CKA_TRUST_KEY_CERT_SIGN": "cert-sign",
"CKA_TRUST_CRL_SIGN": "crl-sign",
"CKA_TRUST_SERVER_AUTH": "server-auth",
"CKA_TRUST_CLIENT_AUTH": "client-auth",
"CKA_TRUST_CODE_SIGNING": "code-signing",
"CKA_TRUST_EMAIL_PROTECTION": "email-protection",
"CKA_TRUST_IPSEC_END_SYSTEM": "ipsec-end-system",
"CKA_TRUST_IPSEC_TUNNEL": "ipsec-tunnel",
"CKA_TRUST_IPSEC_USER": "ipsec-user",
"CKA_TRUST_TIME_STAMPING": "time-stamping",
"CKA_TRUST_STEP_UP_APPROVED": "step-up-approved",
}
openssl_trust = {
"CKA_TRUST_SERVER_AUTH": "serverAuth",
"CKA_TRUST_CLIENT_AUTH": "clientAuth",
"CKA_TRUST_CODE_SIGNING": "codeSigning",
"CKA_TRUST_EMAIL_PROTECTION": "emailProtection",
}
for tobj in objects:
if tobj['CKA_CLASS'] == 'CKO_NSS_TRUST':
key = tobj['CKA_LABEL'] + printable_serial(tobj)
print "producing trust for " + key
trustbits = []
distrustbits = []
openssl_trustflags = []
openssl_distrustflags = []
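        # Collect the NSS trust/distrust flags for this record, mapping each to
        # its OpenSSL keyword where one exists.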
for t in trust_types.keys():
if tobj.has_key(t) and tobj[t] == 'CKT_NSS_TRUSTED_DELEGATOR':
trustbits.append(t)
if t in openssl_trust:
openssl_trustflags.append(openssl_trust[t])
if tobj.has_key(t) and tobj[t] == 'CKT_NSS_NOT_TRUSTED':
distrustbits.append(t)
if t in openssl_trust:
openssl_distrustflags.append(openssl_trust[t])
fname = obj_to_filename(tobj)
try:
obj = certmap[key]
except:
obj = None
if obj != None:
fname += ".crt"
else:
fname += ".p11-kit"
f = open(fname, 'w')
if obj != None:
f.write("# alias=%s\n"%tobj['CKA_LABEL'])
f.write("# trust=" + " ".join(trustbits) + "\n")
f.write("# distrust=" + " ".join(distrustbits) + "\n")
if openssl_trustflags:
f.write("# openssl-trust=" + " ".join(openssl_trustflags) + "\n")
if openssl_distrustflags:
f.write("# openssl-distrust=" + " ".join(openssl_distrustflags) + "\n")
f.write("-----BEGIN CERTIFICATE-----\n")
f.write("\n".join(textwrap.wrap(base64.b64encode(obj['CKA_VALUE']), 64)))
f.write("\n-----END CERTIFICATE-----\n")
else:
f.write("[p11-kit-object-v1]\n")
f.write("label: ");
f.write(tobj['CKA_LABEL']);
f.write("\n")
f.write("class: certificate\n")
f.write("certificate-type: x-509\n")
f.write("issuer: \"");
f.write(urllib.quote(tobj['CKA_ISSUER']));
f.write("\"\n")
f.write("serial-number: \"");
f.write(urllib.quote(tobj['CKA_SERIAL_NUMBER']));
f.write("\"\n")
if (tobj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NSS_NOT_TRUSTED') or (tobj['CKA_TRUST_EMAIL_PROTECTION'] == 'CKT_NSS_NOT_TRUSTED') or (tobj['CKA_TRUST_CODE_SIGNING'] == 'CKT_NSS_NOT_TRUSTED'):
f.write("x-distrusted: true\n")
f.write("\n\n")
f.close()
print " -> written as '%s', trust = %s, openssl-trust = %s, distrust = %s, openssl-distrust = %s" % (fname, trustbits, openssl_trustflags, distrustbits, openssl_distrustflags)
| gpl-2.0 | 5,802,270,283,670,229,000 | 34.199005 | 198 | 0.577809 | false |
frohoff/Empire | lib/listeners/template.py | 2 | 9688 | import base64
import random
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Template',
'Author': ['@harmj0y'],
'Description': ("Listener template"),
# categories - client_server, peer_to_peer, broadcast, third_party
'Category' : ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Listener name.',
'Required' : True,
'Value' : 'http_foreign'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://%s:%s" % (helpers.lhost(), 80)
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : 80
},
'Launcher' : {
'Description' : 'Launcher string.',
'Required' : True,
'Value' : 'powershell -noP -sta -w 1 -enc '
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 5
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 60
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
}
}
# required:
self.mainMenu = mainMenu
self.threads = {} # used to keep track of any threaded instances of this server
# optional/specific for this module
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
"""
        If there's a default response expected from the server that the client needs to
        ignore (i.e., a default HTTP page), put the generation logic here.
"""
print helpers.color("[!] default_response() not implemented for listeners/template")
return ''
def validate_options(self):
"""
Validate all options for this listener.
"""
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/template generate_launcher(): no language specified!')
return None
if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
launchURI = "%s/%s" % (host, stage0)
if language.startswith('po'):
# PowerShell
return ''
        elif language.startswith('py'):
# Python
return ''
else:
print helpers.color("[!] listeners/template generate_launcher(): invalid language specification: only 'powershell' and 'python' are current supported for this module.")
else:
print helpers.color("[!] listeners/template generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
If you want to support staging for the listener module, generate_stager must be
implemented to return the stage1 key-negotiation stager code.
"""
print helpers.color("[!] generate_stager() not implemented for listeners/template")
return ''
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
If you want to support staging for the listener module, generate_agent must be
implemented to return the actual staged agent code.
"""
print helpers.color("[!] generate_agent() not implemented for listeners/template")
return ''
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
This should be implemented for the module.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
getTask = """
function script:Get-Task {
}
"""
sendMessage = """
function script:Send-Message {
param($Packets)
if($Packets) {
}
}
"""
return updateServers + getTask + sendMessage + "\n'New agent comms registered!'"
elif language.lower() == 'python':
# send_message()
pass
else:
print helpers.color("[!] listeners/template generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module.")
else:
print helpers.color('[!] listeners/template generate_comms(): no language specified!')
def start(self, name=''):
"""
If a server component needs to be started, implement the kick off logic
here and the actual server code in another function to facilitate threading
(i.e. start_server() in the http listener).
"""
# listenerOptions = self.options
# if name and name != '':
# self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
# self.threads[name].start()
# time.sleep(1)
# # returns True if the listener successfully started, false otherwise
# return self.threads[name].is_alive()
# else:
# name = listenerOptions['Name']['Value']
# self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
# self.threads[name].start()
# time.sleep(1)
# # returns True if the listener successfully started, false otherwise
# return self.threads[name].is_alive()
return True
def shutdown(self, name=''):
"""
If a server component was started, implement the logic that kills the particular
named listener here.
"""
# if name and name != '':
# print helpers.color("[!] Killing listener '%s'" % (name))
# self.threads[name].kill()
# else:
# print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
# self.threads[self.options['Name']['Value']].kill()
pass
| bsd-3-clause | -1,838,489,458,472,246,300 | 36.405405 | 213 | 0.523637 | false |
lamastex/scalable-data-science | db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 4: Text Analysis and Entity Resolution Lab Solutions.py | 2 | 73278 | # Databricks notebook source exported at Mon, 14 Mar 2016 03:33:29 UTC
# MAGIC %md
# MAGIC **SOURCE:** This is from the Community Edition of databricks and has been added to this databricks shard at [/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x](/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x) as extra resources for the project-focussed course [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/) that is prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand), and *supported by* [Databricks](https://databricks.com/)
# MAGIC and
# MAGIC [AWS Educate](https://www.awseducate.com/microsite/CommunitiesEngageHome).
# COMMAND ----------
# MAGIC %md
# MAGIC <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/">Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License</a>.
# COMMAND ----------
# MAGIC %md
# MAGIC # **Text Analysis and Entity Resolution**
# MAGIC Entity resolution is a common, yet difficult problem in data cleaning and integration. This lab will demonstrate how we can use Apache Spark to apply powerful and scalable text analysis techniques and perform entity resolution across two datasets of commercial products.
# COMMAND ----------
# MAGIC %md
# MAGIC Entity Resolution, or "[Record linkage][wiki]" is the term used by statisticians, epidemiologists, and historians, among others, to describe the process of joining records from one data source with another that describe the same entity. Other terms with the same meaning include "entity disambiguation/linking", "duplicate detection", "deduplication", "record matching", "(reference) reconciliation", "object identification", "data/information integration", and "conflation".
# MAGIC
# MAGIC Entity Resolution (ER) refers to the task of finding records in a dataset that refer to the same entity across different data sources (e.g., data files, books, websites, databases). ER is necessary when joining datasets based on entities that may or may not share a common identifier (e.g., database key, URI, National identification number), as may be the case due to differences in record shape, storage location, and/or curator style or preference. A dataset that has undergone ER may be referred to as being cross-linked.
# MAGIC [wiki]: https://en.wikipedia.org/wiki/Record_linkage
# COMMAND ----------
labVersion = 'cs100.1x-lab3-1.0.4'
# COMMAND ----------
# MAGIC %md
# MAGIC #### Code
# MAGIC This assignment can be completed using basic Python, pySpark Transformations and actions, and the plotting library matplotlib. Other libraries are not allowed.
# MAGIC
# MAGIC #### Files
# MAGIC Data files for this assignment are from the [metric-learning](https://code.google.com/p/metric-learning/) project and can be found at:
# MAGIC `cs100/lab3`
# MAGIC
# MAGIC The directory contains the following files:
# MAGIC * **Google.csv**, the Google Products dataset
# MAGIC * **Amazon.csv**, the Amazon dataset
# MAGIC * **Google_small.csv**, 200 records sampled from the Google data
# MAGIC * **Amazon_small.csv**, 200 records sampled from the Amazon data
# MAGIC * **Amazon_Google_perfectMapping.csv**, the "gold standard" mapping
# MAGIC * **stopwords.txt**, a list of common English words
# MAGIC
# MAGIC Besides the complete data files, there are "sample" data files for each dataset - we will use these for **Part 1**. In addition, there is a "gold standard" file that contains all of the true mappings between entities in the two datasets. Every row in the gold standard file has a pair of record IDs (one Google, one Amazon) that belong to two record that describe the same thing in the real world. We will use the gold standard to evaluate our algorithms.
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 0: Preliminaries**
# MAGIC We read in each of the files and create an RDD consisting of lines.
# MAGIC For each of the data files ("Google.csv", "Amazon.csv", and the samples), we want to parse the IDs out of each record. The IDs are the first column of the file (they are URLs for Google, and alphanumeric strings for Amazon). Omitting the headers, we load these data files into pair RDDs where the *mapping ID* is the key, and the value is a string consisting of the name/title, description, and manufacturer from the record.
# MAGIC
# MAGIC The file format of an Amazon line is:
# MAGIC
# MAGIC `"id","title","description","manufacturer","price"`
# MAGIC
# MAGIC The file format of a Google line is:
# MAGIC
# MAGIC `"id","name","description","manufacturer","price"`
# COMMAND ----------
import re
DATAFILE_PATTERN = '^(.+),"(.+)",(.*),(.*),(.*)'
def removeQuotes(s):
""" Remove quotation marks from an input string
Args:
s (str): input string that might have the quote "" characters
Returns:
str: a string without the quote characters
"""
return ''.join(i for i in s if i!='"')
def parseDatafileLine(datafileLine):
""" Parse a line of the data file using the specified regular expression pattern
Args:
datafileLine (str): input string that is a line from the data file
Returns:
str: a string parsed using the given regular expression and without the quote characters
"""
match = re.search(DATAFILE_PATTERN, datafileLine)
if match is None:
print 'Invalid datafile line: %s' % datafileLine
return (datafileLine, -1)
elif match.group(1) == '"id"':
print 'Header datafile line: %s' % datafileLine
return (datafileLine, 0)
else:
product = '%s %s %s' % (match.group(2), match.group(3), match.group(4))
return ((removeQuotes(match.group(1)), product), 1)
# COMMAND ----------
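# MAGIC %md
# MAGIC As a quick sanity check, we can run `parseDatafileLine` on a hand-made line that mimics the Amazon format; the ID and all field values below are fabricated for illustration. A well-formed line parses to a pair of the form `((id, combined text), 1)`.
# COMMAND ----------
# Illustration only: a made-up line matching the '"id","title","description","manufacturer",price' layout
sampleLine = '"a000000001","sample product title","a short sample description","acme",0'
print parseDatafileLine(sampleLine)
# COMMAND ----------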
display(dbutils.fs.ls('/databricks-datasets/cs100/lab3/data-001/'))
# COMMAND ----------
# MAGIC %md **WARNING:** If *test_helper*, required in the cell below, is not installed, follow the instructions [here](https://databricks-staging-cloudfront.staging.cloud.databricks.com/public/c65da9a2fa40e45a2028cddebe45b54c/8637560089690848/4187311313936645/6977722904629137/05f3c2ecc3.html).
# COMMAND ----------
import sys
import os
from test_helper import Test
baseDir = os.path.join('databricks-datasets')
inputPath = os.path.join('cs100', 'lab3', 'data-001')
GOOGLE_PATH = 'Google.csv'
GOOGLE_SMALL_PATH = 'Google_small.csv'
AMAZON_PATH = 'Amazon.csv'
AMAZON_SMALL_PATH = 'Amazon_small.csv'
GOLD_STANDARD_PATH = 'Amazon_Google_perfectMapping.csv'
STOPWORDS_PATH = 'stopwords.txt'
def parseData(filename):
""" Parse a data file
Args:
filename (str): input file name of the data file
Returns:
RDD: a RDD of parsed lines
"""
return (sc
.textFile(filename, 4, 0)
.map(parseDatafileLine))
def loadData(path):
""" Load a data file
Args:
path (str): input file name of the data file
Returns:
RDD: a RDD of parsed valid lines
"""
filename = os.path.join(baseDir, inputPath, path)
raw = parseData(filename).cache()
failed = (raw
.filter(lambda s: s[1] == -1)
.map(lambda s: s[0]))
for line in failed.take(10):
print '%s - Invalid datafile line: %s' % (path, line)
valid = (raw
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
print '%s - Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (path,
raw.count(),
valid.count(),
failed.count())
assert failed.count() == 0
assert raw.count() == (valid.count() + 1)
return valid
googleSmall = loadData(GOOGLE_SMALL_PATH)
google = loadData(GOOGLE_PATH)
amazonSmall = loadData(AMAZON_SMALL_PATH)
amazon = loadData(AMAZON_PATH)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's examine the lines that were just loaded in the two subset (small) files - one from Google and one from Amazon
# COMMAND ----------
for line in googleSmall.take(3):
print 'google: %s: %s\n' % (line[0], line[1])
for line in amazonSmall.take(3):
print 'amazon: %s: %s\n' % (line[0], line[1])
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 1: ER as Text Similarity - Bags of Words**
# MAGIC
# MAGIC A simple approach to entity resolution is to treat all records as strings and compute their similarity with a string distance function. In this part, we will build some components for performing bag-of-words text-analysis, and then use them to compute record similarity.
# MAGIC [Bag-of-words][bag-of-words] is a conceptually simple yet powerful approach to text analysis.
# MAGIC
# MAGIC The idea is to treat strings, a.k.a. **documents**, as *unordered collections* of words, or **tokens**, i.e., as bags of words.
# MAGIC > **Note on terminology**: a "token" is the result of parsing the document down to the elements we consider "atomic" for the task at hand. Tokens can be things like words, numbers, acronyms, or other exotica like word-roots or fixed-length character strings.
# MAGIC > Bag of words techniques all apply to any sort of token, so when we say "bag-of-words" we really mean "bag-of-tokens," strictly speaking.
# MAGIC Tokens become the atomic unit of text comparison. If we want to compare two documents, we count how many tokens they share in common. If we want to search for documents with keyword queries (this is what Google does), then we turn the keywords into tokens and find documents that contain them. The power of this approach is that it makes string comparisons insensitive to small differences that probably do not affect meaning much, for example, punctuation and word order.
# MAGIC [bag-of-words]: https://en.wikipedia.org/wiki/Bag-of-words_model
# COMMAND ----------
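# MAGIC %md
# MAGIC Before building the tokenizer, here is a minimal sketch of the bag-of-words idea in plain Python: represent each string as an unordered bag of token counts, then look at the tokens the two bags share. The two sentences below are made-up examples, not drawn from the product data.
# COMMAND ----------
# A toy bag-of-words comparison (illustration only, not part of the exercises)
from collections import Counter
bagA = Counter('the quick brown fox'.split())
bagB = Counter('the lazy brown dog'.split())
sharedTokens = set(bagA) & set(bagB)
print 'Bag A: %s' % dict(bagA)
print 'Bag B: %s' % dict(bagB)
print 'Shared tokens: %s' % sorted(sharedTokens)  # the two bags share 'brown' and 'the'
# COMMAND ----------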
# MAGIC %md
# MAGIC #### **(1a) Tokenize a String**
# MAGIC Implement the function `simpleTokenize(string)` that takes a string and returns a list of non-empty tokens in the string. `simpleTokenize` should split strings using the provided regular expression. Since we want to make token-matching case insensitive, make sure all tokens are turned lower-case. Give an interpretation, in natural language, of what the regular expression, `split_regex`, matches.
# MAGIC If you need help with Regular Expressions, try the site [regex101](https://regex101.com/) where you can interactively explore the results of applying different regular expressions to strings. *Note that `\W` does not match the "_" character (underscore counts as a word character), so tokens such as `456_b` remain intact*. You should use [re.split()](https://docs.python.org/2/library/re.html#re.split) to perform the string split. Also, make sure you remove any empty tokens.
# COMMAND ----------
# ANSWER
quickbrownfox = 'A quick brown fox jumps over the lazy dog.'
split_regex = r'\W+'
def simpleTokenize(string):
""" A simple implementation of input string tokenization
Args:
string (str): input string
Returns:
list: a list of tokens
"""
return [t for t in re.split(split_regex, string.lower()) if len(t)]
print simpleTokenize(quickbrownfox) # Should give ['a', 'quick', 'brown', ... ]
# COMMAND ----------
# TEST Tokenize a String (1a)
Test.assertEquals(simpleTokenize(quickbrownfox),
['a','quick','brown','fox','jumps','over','the','lazy','dog'],
'simpleTokenize should handle sample text')
Test.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string')
Test.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'],
'simpleTokenize should handle punctuations and lowercase result')
Test.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'],
'simpleTokenize should not remove duplicates')
# COMMAND ----------
# PRIVATE_TEST Tokenize a String (1a)
Test.assertEquals(simpleTokenize(quickbrownfox),
['a','quick','brown','fox','jumps','over','the','lazy','dog'],
'simpleTokenize should handle sample text')
Test.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string')
Test.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'],
                  'simpleTokenize should handle punctuations and lowercase result')
Test.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'],
'simpleTokenize should not remove duplicates')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(1b) Removing stopwords**
# MAGIC *[Stopwords][stopwords]* are common (English) words that do not contribute much to the content or meaning of a document (e.g., "the", "a", "is", "to", etc.). Stopwords add noise to bag-of-words comparisons, so they are usually excluded.
# MAGIC Using the included file "stopwords.txt", implement `tokenize`, an improved tokenizer that does not emit stopwords.
# MAGIC [stopwords]: https://en.wikipedia.org/wiki/Stop_words
# COMMAND ----------
# ANSWER
stopfile = os.path.join(baseDir, inputPath, STOPWORDS_PATH)
stopwords = set(sc.textFile(stopfile).collect())
print 'These are the stopwords: %s' % stopwords
def tokenize(string):
""" An implementation of input string tokenization that excludes stopwords
Args:
string (str): input string
Returns:
list: a list of tokens without stopwords
"""
return [t for t in simpleTokenize(string) if t not in stopwords]
print tokenize(quickbrownfox) # Should give ['quick', 'brown', ... ]
# COMMAND ----------
# TEST Removing stopwords (1b)
Test.assertEquals(tokenize("Why a the?"), [], 'tokenize should remove all stopwords')
Test.assertEquals(tokenize("Being at the_?"), ['the_'], 'tokenize should handle non-stopwords')
Test.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'],
'tokenize should handle sample text')
# COMMAND ----------
# PRIVATE_TEST Removing stopwords (1b)
Test.assertEquals(tokenize("Why a the?"), [], 'tokenize should remove all stopwords')
Test.assertEquals(tokenize("Being at the_?"), ['the_'], 'tokenize should handle non-stopwords')
Test.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'],
'tokenize should handle sample text')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(1c) Tokenizing the small datasets**
# MAGIC Now let's tokenize the two *small* datasets. For each ID in a dataset, `tokenize` the values, and then count the total number of tokens.
# MAGIC How many tokens, total, are there in the two datasets?
# COMMAND ----------
# ANSWER
amazonRecToToken = amazonSmall.map(lambda s: (s[0], tokenize(s[1])))
googleRecToToken = googleSmall.map(lambda s: (s[0], tokenize(s[1])))
def countTokens(vendorRDD):
""" Count and return the number of tokens
Args:
vendorRDD (RDD of (recordId, tokenizedValue)): Pair tuple of record ID to tokenized output
Returns:
count: count of all tokens
"""
recordCount = vendorRDD.map(lambda s: len(s[1]))
recordSum = recordCount.reduce(lambda a, b : a + b)
return recordSum
totalTokens = countTokens(amazonRecToToken) + countTokens(googleRecToToken)
print 'There are %s tokens in the combined datasets' % totalTokens
# COMMAND ----------
# TEST Tokenizing the small datasets (1c)
Test.assertEquals(totalTokens, 22520, 'incorrect totalTokens')
# COMMAND ----------
# PRIVATE_TEST Tokenizing the small datasets (1c)
Test.assertEquals(totalTokens, 22520, 'incorrect totalTokens')
Test.assertEquals(countTokens(amazonRecToToken), 16707, 'incorrect token count for Amazon records')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(1d) Amazon record with the most tokens**
# MAGIC Which Amazon record has the biggest number of tokens?
# MAGIC In other words, you want to sort the records and get the one with the largest count of tokens.
# COMMAND ----------
# ANSWER
def findBiggestRecord(vendorRDD):
""" Find and return the record with the largest number of tokens
Args:
vendorRDD (RDD of (recordId, tokens)): input Pair Tuple of record ID and tokens
Returns:
list: a list of 1 Pair Tuple of record ID and tokens
"""
return(vendorRDD.takeOrdered(1, lambda s: -1 * len(s[1])))
biggestRecordAmazon = findBiggestRecord(amazonRecToToken)
print 'The Amazon record with ID "%s" has the most tokens (%s)' % (biggestRecordAmazon[0][0],
len(biggestRecordAmazon[0][1]))
# COMMAND ----------
# TEST Amazon record with the most tokens (1d)
Test.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon')
Test.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon')
# COMMAND ----------
# PRIVATE_TEST Amazon record with the most tokens (1d)
Test.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon')
Test.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 2: ER as Text Similarity - Weighted Bag-of-Words using TF-IDF**
# MAGIC Bag-of-words comparisons are not very good when all tokens are treated the same: some tokens are more important than others. Weights give us a way to specify which tokens to favor. With weights, when we compare documents, instead of counting common tokens, we sum up the weights of common tokens. A good heuristic for assigning weights is called "Term-Frequency/Inverse-Document-Frequency," or [TF-IDF][tfidf] for short.
# MAGIC
# MAGIC **TF**
# MAGIC
# MAGIC TF rewards tokens that appear many times in the same document. It is computed as the frequency of a token in a document, that is, if document *d* contains 100 tokens and token *t* appears in *d* 5 times, then the TF weight of *t* in *d* is *5/100 = 1/20*. The intuition for TF is that if a word occurs often in a document, then it is more important to the meaning of the document.
# MAGIC
# MAGIC **IDF**
# MAGIC
# MAGIC IDF rewards tokens that are rare overall in a dataset. The intuition is that it is more significant if two documents share a rare word than a common one. IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows:
# MAGIC * Let *N* be the total number of documents in *U*
# MAGIC * Find *n(t)*, the number of documents in *U* that contain *t*
# MAGIC * Then *IDF(t) = N/n(t)*.
# MAGIC
# MAGIC Note that *n(t)/N* is the frequency of *t* in *U*, and *N/n(t)* is the inverse frequency.
# MAGIC
# MAGIC > **Note on terminology**: Sometimes token weights depend on the document the token belongs to, that is, the same token may have a different weight when it's found in different documents. We call these weights *local* weights. TF is an example of a local weight, because it depends on the length of the source. On the other hand, some token weights only depend on the token, and are the same everywhere that token is found. We call these weights *global*, and IDF is one such weight.
# MAGIC
# MAGIC **TF-IDF**
# MAGIC
# MAGIC Finally, to bring it all together, the total TF-IDF weight for a token in a document is the product of its TF and IDF weights.
# MAGIC [tfidf]: https://en.wikipedia.org/wiki/Tf%E2%80%93idf
# COMMAND ----------
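# MAGIC %md
# MAGIC To make the weighting concrete before the exercises, here is a small worked example on a made-up two-document corpus: a token appearing in only one of the two documents gets IDF = 2/1 = 2, a token appearing in both gets IDF = 2/2 = 1, and multiplying each token's TF by its IDF gives the TF-IDF weights.
# COMMAND ----------
# A hand-computed TF-IDF sketch on a toy corpus (illustration only)
toyDocs = {'d1': ['apple', 'banana', 'apple'], 'd2': ['banana', 'cherry']}
toyN = len(toyDocs)
# n(t): the number of documents containing each token
docFreq = {}
for tokens in toyDocs.values():
    for t in set(tokens):
        docFreq[t] = docFreq.get(t, 0) + 1
toyIdfs = dict((t, float(toyN) / docFreq[t]) for t in docFreq)
print 'toy IDFs: %s' % toyIdfs  # apple and cherry get 2.0, banana gets 1.0
# TF-IDF for d1: 'apple' has TF 2/3, so its weight is (2/3) * 2.0 = 1.33...
d1 = toyDocs['d1']
toyTfIdfD1 = dict((t, d1.count(t) / float(len(d1)) * toyIdfs[t]) for t in set(d1))
print 'toy TF-IDF for d1: %s' % toyTfIdfD1
# COMMAND ----------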
# MAGIC %md
# MAGIC #### **(2a) Implement a TF function**
# MAGIC
# MAGIC Implement `tf(tokens)` that takes a list of tokens and returns a Python [dictionary](https://docs.python.org/2/tutorial/datastructures.html#dictionaries) mapping tokens to TF weights.
# MAGIC
# MAGIC The steps your function should perform are:
# MAGIC * Create an empty Python dictionary
# MAGIC * For each of the tokens in the input `tokens` list, count 1 for each occurrence and add the token to the dictionary
# MAGIC * For each of the tokens in the dictionary, divide the token's count by the total number of tokens in the input `tokens` list
# COMMAND ----------
# ANSWER
def tf(tokens):
""" Compute TF
Args:
tokens (list of str): input list of tokens from tokenize
Returns:
dictionary: a dictionary of tokens to its TF values
"""
counts = {}
length = len(tokens)
for t in tokens:
counts.setdefault(t, 0.0)
counts[t] += 1
return { t: counts[t] / length for t in counts }
print tf(tokenize(quickbrownfox)) # Should give { 'quick': 0.1666 ... }
# COMMAND ----------
# TEST Implement a TF function (2a)
tf_test = tf(tokenize(quickbrownfox))
Test.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666,
'jumps': 0.16666666666666666, 'fox': 0.16666666666666666,
'dog': 0.16666666666666666, 'quick': 0.16666666666666666},
'incorrect result for tf on sample text')
tf_test2 = tf(tokenize('one_ one_ two!'))
Test.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333},
'incorrect result for tf test')
# COMMAND ----------
# PRIVATE_TEST Implement a TF function (2a)
tf_test = tf(tokenize(quickbrownfox))
Test.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666,
'jumps': 0.16666666666666666, 'fox': 0.16666666666666666,
'dog': 0.16666666666666666, 'quick': 0.16666666666666666},
'incorrect result for tf on sample text')
tf_test2 = tf(tokenize('one_ one_ two!'))
Test.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333},
'incorrect result for tf test')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2b) Create a corpus**
# MAGIC Create a pair RDD called `corpusRDD`, consisting of a combination of the two small datasets, `amazonRecToToken` and `googleRecToToken`. Each element of the `corpusRDD` should be a pair consisting of a key from one of the small datasets (ID or URL) and the value is the associated value for that key from the small datasets.
# COMMAND ----------
# ANSWER
corpusRDD = amazonRecToToken.union(googleRecToToken)
# COMMAND ----------
# TEST Create a corpus (2b)
Test.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()')
# COMMAND ----------
# PRIVATE_TEST Create a corpus (2b)
Test.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2c) Implement an IDFs function**
# MAGIC Implement `idfs` that assigns an IDF weight to every unique token in an RDD called `corpus`. The function should return a pair RDD where the `key` is the unique token and the value is the IDF weight for the token.
# MAGIC
# MAGIC Recall that the IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows:
# MAGIC * Let *N* be the total number of documents in *U*.
# MAGIC * Find *n(t)*, the number of documents in *U* that contain *t*.
# MAGIC * Then *IDF(t) = N/n(t)*.
# MAGIC
# MAGIC The steps your function should perform are:
# MAGIC * Calculate *N*. Think about how you can calculate *N* from the input RDD.
# MAGIC * Create an RDD (*not a pair RDD*) containing the unique tokens from each document in the input `corpus`. For each document, you should only include a token once, *even if it appears multiple times in that document.*
# MAGIC * For each of the unique tokens, count how many documents it appears in, *n(t)*, and then compute the IDF for that token: *N/n(t)*
# MAGIC
# MAGIC Use your `idfs` to compute the IDF weights for all tokens in `corpusRDD` (the combined small datasets).
# MAGIC How many unique tokens are there?
# COMMAND ----------
# ANSWER
def idfs(corpus):
""" Compute IDF
Args:
corpus (RDD): input corpus
Returns:
RDD: a RDD of (token, IDF value)
"""
uniqueTokens = corpus.flatMap(lambda s: list(set(s[1])))
tokenCountPairTuple = uniqueTokens.map(lambda token: (token, 1))
tokenSumPairTuple = tokenCountPairTuple.reduceByKey(lambda a, b : a + b)
N = float(corpus.count())
return (tokenSumPairTuple.map(lambda s: (s[0], float(N/s[1]))))
idfsSmall = idfs(amazonRecToToken.union(googleRecToToken))
uniqueTokenCount = idfsSmall.count()
print 'There are %s unique tokens in the small datasets.' % uniqueTokenCount
# COMMAND ----------
# TEST Implement an IDFs function (2c)
Test.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount')
tokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0]
Test.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token')
Test.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001,
'incorrect smallest IDF value')
# COMMAND ----------
# PRIVATE_TEST Implement an IDFs function (2c)
Test.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount')
tokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0]
Test.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token')
Test.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001,
'incorrect smallest IDF value')
firstElevenTokens = set(idfsSmall.takeOrdered(11, lambda s: s[1]))
Test.assertEquals(len(firstElevenTokens - set([('software', 4.25531914893617),('new', 6.896551724137931),('features', 6.896551724137931),('use', 7.017543859649122),('complete', 7.2727272727272725),('easy', 7.6923076923076925),('create', 8.333333333333334),('system', 8.333333333333334),('cd', 8.333333333333334),('1', 8.51063829787234), ('windows', 8.51063829787234)])), 0, 'incorrect firstTenTokens')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2d) Tokens with the smallest IDF**
# MAGIC Print out the 11 tokens with the smallest IDF in the combined small dataset.
# COMMAND ----------
smallIDFTokens = idfsSmall.takeOrdered(11, lambda s: s[1])
print smallIDFTokens
# COMMAND ----------
# ANSWER
#*answer*: The 10 smallest IDFs are for: (1) software, (2) new, (3) features, (4) use, (5) complete, (6) easy, (7 tie) cd, (7 tie) system, (7 tie) create, (10 tie) windows, (10 tie) 1.
#These terms are not useful for entity resolution because they are generic terms for marketing, prices, and product categories.
# COMMAND ----------
# ANSWER
# Quiz question:
# For part (2d), do you think the terms are useful for entity resolution?
# ( ) Yes
# (*) No
#
# Why or why not?
# ( ) These terms are useful for entity resolution because they describe distinguishing tokens in product descriptions
# ( ) These terms are not useful for entity resolution because they are generic terms for marketing, prices, and product categories.
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2e) IDF Histogram**
# MAGIC Plot a histogram of IDF values. Be sure to use appropriate scaling and bucketing for the data.
# MAGIC First plot the histogram using `matplotlib`
# COMMAND ----------
import matplotlib.pyplot as plt
small_idf_values = idfsSmall.map(lambda s: s[1]).collect()
fig = plt.figure(figsize=(8,3))
plt.hist(small_idf_values, 50, log=True)
display(fig)
pass
# COMMAND ----------
from pyspark.sql import Row
# Create a DataFrame and visualize using display()
idfsToCountRow = idfsSmall.map(lambda (x, y): Row(token=x, value=y))
idfsToCountDF = sqlContext.createDataFrame(idfsToCountRow)
display(idfsToCountDF)
# COMMAND ----------
# ANSWER
# Quiz question:
# Using the plot in (2e), what conclusions can you draw from the distribution of weights?
#
# *ANSWER:* There is a long tail of rare words in the corpus (these have large IDF values).
# [explanation]
# There are gaps between IDF values because IDF is a function of a discrete variable, i.e., a document count.
# [explanation]
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2f) Implement a TF-IDF function**
# MAGIC Use your `tf` function to implement a `tfidf(tokens, idfs)` function that takes a list of tokens from a document and a Python dictionary of IDF weights and returns a Python dictionary mapping individual tokens to total TF-IDF weights.
# MAGIC
# MAGIC The steps your function should perform are:
# MAGIC * Calculate the token frequencies (TF) for `tokens`
# MAGIC * Create a Python dictionary where each token maps to the token's frequency times the token's IDF weight
# MAGIC
# MAGIC Use your `tfidf` function to compute the weights of Amazon product record 'b000hkgj8k'. To do this, we need to extract the record for that product from the tokenized small Amazon dataset and we need to convert the IDFs for the small dataset into a Python dictionary. We can do the first part by using a `filter()` transformation to extract the matching record and a `collect()` action to return the value to the driver.
# MAGIC
# MAGIC For the second part, we use the [`collectAsMap()` action](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collectAsMap) to return the IDFs to the driver as a Python dictionary.
# COMMAND ----------
# ANSWER
def tfidf(tokens, idfs):
""" Compute TF-IDF
Args:
tokens (list of str): input list of tokens from tokenize
idfs (dictionary): record to IDF value
Returns:
dictionary: a dictionary of records to TF-IDF values
"""
tfs = tf(tokens)
return { t: tfs[t] * idfs[t] for t in tfs }
rec_b000hkgj8k = amazonRecToToken.filter(lambda x: x[0] == 'b000hkgj8k').collect()[0][1]
idfsSmallWeights = idfsSmall.collectAsMap()
rec_b000hkgj8k_weights = tfidf(rec_b000hkgj8k, idfsSmallWeights)
print 'Amazon record "b000hkgj8k" has tokens and weights:\n%s' % rec_b000hkgj8k_weights
# COMMAND ----------
# TEST Implement a TF-IDF function (2f)
Test.assertEquals(rec_b000hkgj8k_weights,
{'autocad': 33.33333333333333, 'autodesk': 8.333333333333332,
'courseware': 66.66666666666666, 'psg': 33.33333333333333,
'2007': 3.5087719298245617, 'customizing': 16.666666666666664,
'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights')
# COMMAND ----------
# PRIVATE_TEST Implement a TF-IDF function (2f)
Test.assertEquals(rec_b000hkgj8k_weights, {'autocad': 33.33333333333333, 'autodesk': 8.333333333333332, 'courseware': 66.66666666666666, 'psg': 33.33333333333333, '2007': 3.5087719298245617, 'customizing': 16.666666666666664, 'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 3: ER as Text Similarity - Cosine Similarity**
# MAGIC Now we are ready to do text comparisons in a formal way. The metric of string distance we will use is called **[cosine similarity][cosine]**. We will treat each document as a vector in some high dimensional space. Then, to compare two documents we compute the cosine of the angle between their two document vectors. This is *much* easier than it sounds.
# MAGIC
# MAGIC The first question to answer is how do we represent documents as vectors? The answer is familiar: bag-of-words! We treat each unique token as a dimension, and treat token weights as magnitudes in their respective token dimensions. For example, suppose we use simple counts as weights, and we want to interpret the string "Hello, world! Goodbye, world!" as a vector. Then in the "hello" and "goodbye" dimensions the vector has value 1, in the "world" dimension it has value 2, and it is zero in all other dimensions.
# MAGIC
# MAGIC The next question is: given two vectors how do we find the cosine of the angle between them? Recall the formula for the dot product of two vectors:
# MAGIC \\[ a \cdot b = \| a \| \| b \| \cos \theta \\]
# MAGIC Here \\( a \cdot b = \sum a_i b_i \\) is the ordinary dot product of two vectors, and \\( \|a\| = \sqrt{ \sum a_i^2 } \\) is the norm of \\( a \\).
# MAGIC
# MAGIC We can rearrange terms and solve for the cosine to find it is simply the normalized dot product of the vectors. With our vector model, the dot product and norm computations are simple functions of the bag-of-words document representations, so we now have a formal way to compute similarity:
# MAGIC \\[ similarity = \cos \theta = \frac{a \cdot b}{\|a\| \|b\|} = \frac{\sum a_i b_i}{\sqrt{\sum a_i^2} \sqrt{\sum b_i^2}} \\]
# MAGIC
# MAGIC Setting aside the algebra, the geometric interpretation is more intuitive. The angle between two document vectors is small if they share many tokens in common, because they are pointing in roughly the same direction. For that case, the cosine of the angle will be large. Otherwise, if the angle is large (and they have few words in common), the cosine is small. Therefore, cosine similarity scales proportionally with our intuitive sense of similarity.
# MAGIC [cosine]: https://en.wikipedia.org/wiki/Cosine_similarity
# COMMAND ----------
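# MAGIC %md
# MAGIC As a quick numeric check of the formula, take the raw-count vector for "Hello, world! Goodbye, world!" from the discussion above and compare it against the vector for "Hello, world!". This sketch uses simple counts rather than TF-IDF weights, purely to exercise the arithmetic.
# COMMAND ----------
import math
# Raw-count bag-of-words vectors for the two example strings (illustration only)
vecA = {'hello': 1, 'world': 2, 'goodbye': 1}
vecB = {'hello': 1, 'world': 1}
dotAB = sum(vecA[t] * vecB[t] for t in vecA if t in vecB)   # 1*1 + 2*1 = 3
normA = math.sqrt(sum(v * v for v in vecA.values()))        # sqrt(6)
normB = math.sqrt(sum(v * v for v in vecB.values()))        # sqrt(2)
print 'cosine similarity = %s' % (dotAB / (normA * normB))  # 3 / sqrt(12), about 0.866
# COMMAND ----------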
# MAGIC %md
# MAGIC #### **(3a) Implement the components of a `cosineSimilarity` function**
# MAGIC Implement the components of a `cosineSimilarity` function.
# MAGIC Use the `tokenize` and `tfidf` functions, and the IDF weights from Part 2 for extracting tokens and assigning them weights.
# MAGIC The steps you should perform are:
# MAGIC * Define a function `dotprod` that takes two Python dictionaries and produces the dot product of them, where the dot product is defined as the sum of the product of values for tokens that appear in *both* dictionaries
# MAGIC * Define a function `norm` that returns the square root of the dot product of a dictionary and itself
# MAGIC * Define a function `cossim` that returns the dot product of two dictionaries divided by the norm of the first dictionary and then by the norm of the second dictionary
# COMMAND ----------
# ANSWER
import math
def dotprod(a, b):
return sum([a[t] * b[t] for t in a if t in b])
def norm(a):
return math.sqrt(dotprod(a, a))
def cossim(a, b):
return dotprod(a, b) / norm(a) / norm(b)
testVec1 = {'foo': 2, 'bar': 3, 'baz': 5 }
testVec2 = {'foo': 1, 'bar': 0, 'baz': 20 }
dp = dotprod(testVec1, testVec2)
nm = norm(testVec1)
print dp, nm
# COMMAND ----------
# TEST Implement the components of a cosineSimilarity function (3a)
Test.assertEquals(dp, 102, 'incorrect dp')
Test.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrect nm')
# COMMAND ----------
# PRIVATE_TEST Implement the components of a cosineSimilarity function (3a)
Test.assertEquals(dp, 102, 'incorrect dp')
Test.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrect nm')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3b) Implement a `cosineSimilarity` function**
# MAGIC Implement a `cosineSimilarity(string1, string2, idfsDictionary)` function that takes two strings and a dictionary of IDF weights, and computes their cosine similarity in the context of some global IDF weights.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Apply your `tfidf` function to the tokenized first and second strings, using the dictionary of IDF weights
# MAGIC * Compute and return your `cossim` function applied to the results of the two `tfidf` functions
# COMMAND ----------
# ANSWER
def cosineSimilarity(string1, string2, idfsDictionary):
""" Compute cosine similarity between two strings
Args:
string1 (str): first string
string2 (str): second string
idfsDictionary (dictionary): a dictionary of IDF values
Returns:
cossim: cosine similarity value
"""
w1 = tfidf(tokenize(string1), idfsDictionary)
w2 = tfidf(tokenize(string2), idfsDictionary)
return cossim(w1, w2)
cossimAdobe = cosineSimilarity('Adobe Photoshop',
'Adobe Illustrator',
idfsSmallWeights)
print cossimAdobe
# COMMAND ----------
# TEST Implement a cosineSimilarity function (3b)
Test.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe')
# COMMAND ----------
# PRIVATE_TEST Implement a cosineSimilarity function (3b)
Test.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3c) Perform Entity Resolution**
# MAGIC Now we can finally do some entity resolution!
# MAGIC For *every* product record in the small Google dataset, use your `cosineSimilarity` function to compute its similarity to every record in the small Amazon dataset. Then, build an RDD whose elements map `(Google URL, Amazon ID)` pairs to similarity scores between 0 and 1.
# MAGIC We'll do this computation two different ways, first we'll do it without a broadcast variable, and then we'll use a broadcast variable
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Create an RDD that is a combination of the small Google and small Amazon datasets that has as elements all pairs of elements (a, b) where a is in self and b is in other. The result will be an RDD of the form: `[ ((Google URL1, Google String1), (Amazon ID1, Amazon String1)), ((Google URL1, Google String1), (Amazon ID2, Amazon String2)), ((Google URL2, Google String2), (Amazon ID1, Amazon String1)), ... ]`
# MAGIC * Define a worker function that given an element from the combination RDD computes the cosineSimlarity for the two records in the element
# MAGIC * Apply the worker function to every element in the RDD
# MAGIC
# MAGIC Now, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`.
# COMMAND ----------
# ANSWER
crossSmall = (googleSmall
.cartesian(amazonSmall)
.cache())
def computeSimilarity(record):
""" Compute similarity on a combination record
Args:
record: a pair, (google record, amazon record)
Returns:
        tuple: a triple, (google URL, amazon ID, cosine similarity value)
"""
googleRec = record[0]
amazonRec = record[1]
googleURL = googleRec[0]
amazonID = amazonRec[0]
googleValue = googleRec[1]
amazonValue = amazonRec[1]
cs = cosineSimilarity(googleValue, amazonValue, idfsSmallWeights)
return (googleURL, amazonID, cs)
similarities = (crossSmall
.map(computeSimilarity)
.cache())
def similar(amazonID, googleURL):
""" Return similarity value
Args:
amazonID: amazon ID
googleURL: google URL
Returns:
similar: cosine similarity value
"""
return (similarities
.filter(lambda record: (record[0] == googleURL and record[1] == amazonID))
.collect()[0][2])
similarityAmazonGoogle = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561')
print 'Requested similarity is %s.' % similarityAmazonGoogle
# COMMAND ----------
# TEST Perform Entity Resolution (3c)
Test.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001,
'incorrect similarityAmazonGoogle')
# COMMAND ----------
# PRIVATE_TEST Perform Entity Resolution (3c)
Test.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001,
'incorrect similarityAmazonGoogle')
similarityAnother = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/18274317756231697680')
Test.assertTrue(abs(similarityAnother - 0.093899589276) < 0.0000001, 'incorrect another similarity test')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3d) Perform Entity Resolution with Broadcast Variables**
# MAGIC The solution in (3c) works well for small datasets, but it requires Spark to (automatically) send the `idfsSmallWeights` variable to all the workers. If we didn't `cache()` similarities, then it might have to be recreated if we run `similar()` multiple times. This would cause Spark to send `idfsSmallWeights` every time.
# MAGIC
# MAGIC Instead, we can use a broadcast variable - we define the broadcast variable in the driver and then we can refer to it in each worker. Spark saves the broadcast variable at each worker, so it is only sent once.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Define a `computeSimilarityBroadcast` function that given an element from the combination RDD computes the cosine simlarity for the two records in the element. This will be the same as the worker function `computeSimilarity` in (3c) except that it uses a broadcast variable.
# MAGIC * Apply the worker function to every element in the RDD
# MAGIC
# MAGIC Again, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`.
# COMMAND ----------
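# MAGIC %md
# MAGIC As a minimal illustration of the broadcast pattern itself (separate from the exercise below, and using a made-up lookup table rather than the IDF weights): the driver wraps a value with `sc.broadcast`, tasks read it through `.value`, and Spark ships the value to each worker only once.
# COMMAND ----------
# A tiny broadcast sketch (illustration only)
demoLookup = sc.broadcast({'a': 1, 'b': 2})
print sc.parallelize(['a', 'b', 'a']).map(lambda k: demoLookup.value[k]).collect()  # [1, 2, 1]
demoLookup.unpersist()  # release the broadcast once it is no longer needed
# COMMAND ----------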
# ANSWER
def computeSimilarityBroadcast(record):
""" Compute similarity on a combination record, using Broadcast variable
Args:
record: a pair, (google record, amazon record)
Returns:
        tuple: a triple, (google URL, amazon ID, cosine similarity value)
"""
googleRec = record[0]
amazonRec = record[1]
googleURL = googleRec[0]
amazonID = amazonRec[0]
googleValue = googleRec[1]
amazonValue = amazonRec[1]
cs = cosineSimilarity(googleValue, amazonValue, idfsSmallBroadcast.value)
return (googleURL, amazonID, cs)
idfsSmallBroadcast = sc.broadcast(idfsSmallWeights)
similaritiesBroadcast = (crossSmall
.map(computeSimilarityBroadcast)
.cache())
def similarBroadcast(amazonID, googleURL):
""" Return similarity value, computed using Broadcast variable
Args:
amazonID: amazon ID
googleURL: google URL
Returns:
similar: cosine similarity value
"""
return (similaritiesBroadcast
.filter(lambda record: (record[0] == googleURL and record[1] == amazonID))
.collect()[0][2])
similarityAmazonGoogleBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561')
print 'Requested similarity is %s.' % similarityAmazonGoogleBroadcast
# COMMAND ----------
# TEST Perform Entity Resolution with Broadcast Variables (3d)
from pyspark import Broadcast
Test.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast')
Test.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value')
Test.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001,
'incorrect similarityAmazonGoogle')
# COMMAND ----------
# PRIVATE_TEST Perform Entity Resolution with Broadcast Variables (3d)
from pyspark import Broadcast
Test.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast')
Test.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value')
Test.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001,
'incorrect similarityAmazonGoogle')
similarityAnotherBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/18274317756231697680')
Test.assertTrue(abs(similarityAnotherBroadcast - 0.093899589276) < 0.0000001,
'incorrect another similarity test')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3e) Perform a Gold Standard evaluation**
# MAGIC
# MAGIC First, we'll load the "gold standard" data and use it to answer several questions. We read and parse the Gold Standard data, where the format of each line is "Amazon Product ID","Google URL". The resulting RDD has elements of the form ("AmazonID GoogleURL", 'gold')
# COMMAND ----------
GOLDFILE_PATTERN = '^(.+),(.+)'
# Parse each line of a data file using the specified regular expression pattern
def parse_goldfile_line(goldfile_line):
""" Parse a line from the 'golden standard' data file
Args:
goldfile_line: a line of data
Returns:
        pair: ((key, 'gold'), 1) if the line parses successfully, or (line, -1) / (line, 0) for invalid / header lines
"""
match = re.search(GOLDFILE_PATTERN, goldfile_line)
if match is None:
print 'Invalid goldfile line: %s' % goldfile_line
return (goldfile_line, -1)
elif match.group(1) == '"idAmazon"':
print 'Header datafile line: %s' % goldfile_line
return (goldfile_line, 0)
else:
key = '%s %s' % (removeQuotes(match.group(1)), removeQuotes(match.group(2)))
return ((key, 'gold'), 1)
goldfile = os.path.join(baseDir, inputPath, GOLD_STANDARD_PATH)
gsRaw = (sc
.textFile(goldfile)
.map(parse_goldfile_line)
.cache())
gsFailed = (gsRaw
.filter(lambda s: s[1] == -1)
.map(lambda s: s[0]))
for line in gsFailed.take(10):
print 'Invalid goldfile line: %s' % line
goldStandard = (gsRaw
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (gsRaw.count(),
goldStandard.count(),
gsFailed.count())
assert (gsFailed.count() == 0)
assert (gsRaw.count() == (goldStandard.count() + 1))
# COMMAND ----------
# MAGIC %md
# MAGIC #### Using the "gold standard" data we can answer the following questions:
# MAGIC
# MAGIC * How many true duplicate pairs are there in the small datasets?
# MAGIC * What is the average similarity score for true duplicates?
# MAGIC * What about for non-duplicates?
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Create a new `sims` RDD from the `similaritiesBroadcast` RDD, where each element consists of a pair of the form ("AmazonID GoogleURL", cosineSimilarityScore). An example entry from `sims` is: ('b000bi7uqs http://www.google.com/base/feeds/snippets/18403148885652932189', 0.40202896125621296)
# MAGIC * Combine the `sims` RDD with the `goldStandard` RDD by creating a new `trueDupsRDD` RDD that has the just the cosine similarity scores for those "AmazonID GoogleURL" pairs that appear in both the `sims` RDD and `goldStandard` RDD. Hint: you can do this using the join() transformation.
# MAGIC * Count the number of true duplicate pairs in the `trueDupsRDD` dataset
# MAGIC * Compute the average similarity score for true duplicates in the `trueDupsRDD` datasets. Remember to use `float` for calculation
# MAGIC * Create a new `nonDupsRDD` RDD that has the just the cosine similarity scores for those "AmazonID GoogleURL" pairs from the `similaritiesBroadcast` RDD that **do not** appear in both the *sims* RDD and gold standard RDD.
# MAGIC * Compute the average similarity score for non-duplicates. Remember to use `float` for the calculation
# COMMAND ----------
# ANSWER
sims = similaritiesBroadcast.map(lambda x: ("%s %s" % (x[1], x[0]), x[2]))
trueDupsRDD = (sims
.join(goldStandard)
.map(lambda a: a[1][0]))
trueDupsCount = trueDupsRDD.count()
avgSimDups = float(trueDupsRDD.reduce(lambda a, b: a + b)) / float(trueDupsCount)
nonDupsRDD = (sims
.leftOuterJoin(goldStandard)
.filter(lambda x: (x[1][1] is None))
.map(lambda a: a[1][0]))
avgSimNon = float(nonDupsRDD.reduce(lambda a, b: a + b)) / float(sims.count() - trueDupsCount)
print 'There are %s true duplicates.' % trueDupsCount
print 'The average similarity of true duplicates is %s.' % avgSimDups
print 'And for non duplicates, it is %s.' % avgSimNon
# COMMAND ----------
# TEST Perform a Gold Standard evaluation (3e)
Test.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount')
Test.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups')
Test.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon')
# COMMAND ----------
# PRIVATE_TEST Perform a Gold Standard evaluation (3e)
Test.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount')
Test.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups')
Test.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon')
# COMMAND ----------
# ANSWER
# Quiz question:
# Based on the answers to the questions in part (3e), is cosine similarity doing a good job, qualitatively speaking, of identifying duplicates?
# (*) Yes
# ( ) No
# *answer*: Cosine similarity looks useful, because duplicates on average are over 200X more similar than non-duplicates. As long as variance isn't too high, that's a good signal.
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 4: Scalable ER**
# MAGIC In the previous parts, we built a text similarity function and used it for small scale entity resolution. Our implementation is limited by its quadratic run time complexity, and is not practical for even modestly sized datasets. In this part, we will implement a more scalable algorithm and use it to do entity resolution on the full dataset.
# MAGIC
# MAGIC #### Inverted Indices
# MAGIC To improve our ER algorithm from the earlier parts, we should begin by analyzing its running time. In particular, the algorithm above is quadratic in two ways. First, we did a lot of redundant computation of tokens and weights, since each record was reprocessed every time it was compared. Second, we made quadratically many token comparisons between records.
# MAGIC
# MAGIC The first source of quadratic overhead can be eliminated with precomputation and look-up tables, but the second source is a little more tricky. In the worst case, every token in every record in one dataset exists in every record in the other dataset, and therefore every token makes a non-zero contribution to the cosine similarity. In this case, token comparison is unavoidably quadratic.
# MAGIC
# MAGIC But in reality most records have nothing (or very little) in common. Moreover, it is typical for a record in one dataset to have at most one duplicate record in the other dataset (this is the case assuming each dataset has been de-duplicated against itself). In this case, the output is linear in the size of the input and we can hope to achieve linear running time.
# MAGIC
# MAGIC An [**inverted index**](https://en.wikipedia.org/wiki/Inverted_index) is a data structure that will allow us to avoid making quadratically many token comparisons. It maps each token in the dataset to the list of documents that contain the token. So, instead of comparing, record by record, each token to every other token to see if they match, we will use inverted indices to *look up* records that match on a particular token.
# MAGIC
# MAGIC > **Note on terminology**: In text search, a *forward* index maps documents in a dataset to the tokens they contain. An *inverted* index supports the inverse mapping.
# MAGIC
# MAGIC > **Note**: For this section, use the complete Google and Amazon datasets, not the samples
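# COMMAND ----------
# Illustrative sketch (not part of the graded answers): a tiny pure-Python
# inverted index over two made-up datasets, showing why token look-ups beat
# pairwise record comparison. All IDs and tokens below are invented.
toyAmazon = [('a1', ['adobe', 'photoshop']), ('a2', ['apple', 'iwork'])]
toyGoogle = [('g1', ['adobe', 'cs3']), ('g2', ['apple', 'server'])]

def buildToyInvertedIndex(dataset):
    # Map each token to the list of record IDs that contain it
    index = {}
    for recordId, tokens in dataset:
        for token in tokens:
            index.setdefault(token, []).append(recordId)
    return index

toyAmazonIndex = buildToyInvertedIndex(toyAmazon)
toyGoogleIndex = buildToyInvertedIndex(toyGoogle)
# Candidate pairs come only from tokens the two datasets share, instead of
# comparing every record against every other record.
for token in set(toyAmazonIndex) & set(toyGoogleIndex):
    for aId in toyAmazonIndex[token]:
        for gId in toyGoogleIndex[token]:
            print 'token %r links %s and %s' % (token, aId, gId)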
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4a) Tokenize the full dataset**
# MAGIC Tokenize each of the two full datasets for Google and Amazon.
# COMMAND ----------
# ANSWER
amazonFullRecToToken = amazon.map(lambda s: (s[0], tokenize(s[1])))
googleFullRecToToken = google.map(lambda s: (s[0], tokenize(s[1])))
print 'Amazon full dataset is %s products, Google full dataset is %s products' % (amazonFullRecToToken.count(),
googleFullRecToToken.count())
# COMMAND ----------
# TEST Tokenize the full dataset (4a)
Test.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()')
Test.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()')
# COMMAND ----------
# PRIVATE_TEST Tokenize the full dataset (4a)
Test.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()')
Test.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4b) Compute IDFs and TF-IDFs for the full datasets**
# MAGIC
# MAGIC We will reuse your code from above to compute IDF weights for the complete combined datasets.
# MAGIC The steps you should perform are:
# MAGIC * Create a new `fullCorpusRDD` that contains the tokens from the full Amazon and Google datasets.
# MAGIC * Apply your `idfs` function to the `fullCorpusRDD`
# MAGIC * Create a broadcast variable containing a dictionary of the IDF weights for the full dataset.
# MAGIC * For each of the Amazon and Google full datasets, create weight RDDs that map IDs/URLs to TF-IDF weighted token vectors.
# COMMAND ----------
# ANSWER
fullCorpusRDD = amazonFullRecToToken.union(googleFullRecToToken)
idfsFull = idfs(fullCorpusRDD)
idfsFullCount = idfsFull.count()
print 'There are %s unique tokens in the full datasets.' % idfsFullCount
# Collect the IDF weights for the full dataset as a map and broadcast them
idfsFullWeights = idfsFull.collectAsMap()
idfsFullBroadcast = sc.broadcast(idfsFullWeights)
# Pre-compute TF-IDF weights. Build mappings from record ID to weight vector.
amazonWeightsRDD = amazonFullRecToToken.map(lambda x: (x[0], tfidf(x[1], idfsFullBroadcast.value)))
googleWeightsRDD = googleFullRecToToken.map(lambda x: (x[0], tfidf(x[1], idfsFullBroadcast.value)))
print 'There are %s Amazon weights and %s Google weights.' % (amazonWeightsRDD.count(),
googleWeightsRDD.count())
# COMMAND ----------
# TEST Compute IDFs and TF-IDFs for the full datasets (4b)
Test.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount')
Test.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()')
Test.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()')
# COMMAND ----------
# PRIVATE_TEST Compute IDFs and TF-IDFs for the full datasets (4b)
Test.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount')
Test.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()')
Test.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4c) Compute Norms for the weights from the full datasets**
# MAGIC
# MAGIC We will reuse your code from above to compute norms of the TF-IDF weight vectors for the complete combined dataset.
# MAGIC The steps you should perform are:
# MAGIC * Create two collections, one for each of the full Amazon and Google datasets, where IDs/URLs map to the norm of the associated TF-IDF weighted token vectors.
# MAGIC * Convert each collection into a broadcast variable, containing a dictionary mapping IDs/URLs to the norms of the TF-IDF weight vectors for the full dataset
# COMMAND ----------
# ANSWER
amazonNorms = amazonWeightsRDD.map(lambda x: (x[0], norm(x[1]))).collectAsMap()
amazonNormsBroadcast = sc.broadcast(amazonNorms)
googleNorms = googleWeightsRDD.map(lambda x: (x[0], norm(x[1]))).collectAsMap()
googleNormsBroadcast = sc.broadcast(googleNorms)
print 'There are %s Amazon norms and %s Google norms.' % (len(amazonNorms), len(googleNorms))
# COMMAND ----------
# TEST Compute Norms for the weights from the full datasets (4c)
Test.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast')
Test.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value')
Test.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast')
Test.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value')
# COMMAND ----------
# PRIVATE_TEST Compute Norms for the weights from the full datasets (4c)
Test.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast')
Test.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value')
Test.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast')
Test.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4d) Create inverted indices from the full datasets**
# MAGIC
# MAGIC Build inverted indices of both data sources.
# MAGIC The steps you should perform are:
# MAGIC * Create an invert function that given a pair of (ID/URL, TF-IDF weighted token vector), returns a list of pairs of (token, ID/URL). Recall that the TF-IDF weighted token vector is a Python dictionary with keys that are tokens and values that are weights.
# MAGIC * Use your invert function to convert the full Amazon and Google TF-IDF weighted token vector datasets into two RDDs where each element is a pair of a token and an ID/URL that contains that token. These are inverted indices.
# COMMAND ----------
# ANSWER
def invert(record):
""" Invert (ID, tokens) to a list of (token, ID)
Args:
record: a pair, (ID, token vector)
Returns:
pairs: a list of pairs of token to ID
"""
value = record[0]
keys = record[1].keys()
pairs = []
for key in keys:
pairs.append((key, value))
return (pairs)
amazonInvPairsRDD = (amazonWeightsRDD
.flatMap(invert)
.cache())
googleInvPairsRDD = (googleWeightsRDD
.flatMap(invert)
.cache())
print 'There are %s Amazon inverted pairs and %s Google inverted pairs.' % (amazonInvPairsRDD.count(),
googleInvPairsRDD.count())
# COMMAND ----------
# TEST Create inverted indices from the full datasets (4d)
invertedPair = invert((1, {'foo': 2}))
Test.assertEquals(invertedPair[0][1], 1, 'incorrect invert result')
Test.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()')
Test.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()')
# COMMAND ----------
# PRIVATE_TEST Create inverted indices from the full datasets (4d)
invertedPair = invert((1, {'foo': 2}))
Test.assertEquals(invertedPair[0][1], 1, 'incorrect invert result')
Test.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()')
Test.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4e) Identify common tokens from the full dataset**
# MAGIC
# MAGIC We are now in a position to efficiently perform ER on the full datasets. Implement the following algorithm to build an RDD that maps a pair of (ID, URL) to a list of tokens they share in common:
# MAGIC * Using the two inverted indices (RDDs where each element is a pair of a token and an ID or URL that contains that token), create a new RDD that contains only tokens that appear in both datasets. This will yield an RDD of pairs of (token, (ID, URL)).
# MAGIC * We need a mapping from (ID, URL) to token, so create a function that will swap the elements of the RDD you just created to create this new RDD consisting of ((ID, URL), token) pairs.
# MAGIC * Finally, create an RDD consisting of pairs mapping (ID, URL) to all the tokens the pair shares in common
# COMMAND ----------
# ANSWER
def swap(record):
""" Swap (token, (ID, URL)) to ((ID, URL), token)
Args:
record: a pair, (token, (ID, URL))
Returns:
pair: ((ID, URL), token)
"""
token = record[0]
keys = (record[1][0], record[1][1])
return (keys, token)
commonTokens = (amazonInvPairsRDD.join(googleInvPairsRDD)
.map(swap)
.groupByKey()
.map(lambda rec: (rec[0], list(rec[1])))
.cache())
print 'Found %d common tokens' % commonTokens.count()
# COMMAND ----------
# TEST Identify common tokens from the full dataset (4e)
Test.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()')
# COMMAND ----------
# PRIVATE_TEST Identify common tokens from the full dataset (4e)
Test.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4f) Compute cosine similarities for the full dataset**
# MAGIC
# MAGIC Use the data structures from parts **(4b)**, **(4c)**, and **(4e)** to build a dictionary to map record pairs to cosine similarity scores.
# MAGIC The steps you should perform are:
# MAGIC * Create two broadcast dictionaries from the amazonWeights and googleWeights RDDs
# MAGIC * Create a `fastCosineSimilarity` function that takes in a record consisting of the pair ((Amazon ID, Google URL), tokens list) and computes the sum, over the tokens in the list, of the product of the token's Amazon weight and its Google weight. The sum should then be divided by the norm for the Google URL and then divided by the norm for the Amazon ID. The function should return this value in a pair with the key being the (Amazon ID, Google URL). *Make sure you use the broadcast variables you created for both the weights and norms*
# MAGIC * Apply your `fastCosineSimilarity` function to the common tokens from the full dataset
# COMMAND ----------
# ANSWER
amazonWeightsBroadcast = sc.broadcast(amazonWeightsRDD.collectAsMap())
googleWeightsBroadcast = sc.broadcast(googleWeightsRDD.collectAsMap())
def fastCosineSimilarity(record):
""" Compute Cosine Similarity using Broadcast variables
Args:
record: ((ID, URL), token)
Returns:
pair: ((ID, URL), cosine similarity value)
"""
amazonRec = record[0][0]
googleRec = record[0][1]
tokens = record[1]
s = sum([amazonWeightsBroadcast.value[amazonRec][t] * googleWeightsBroadcast.value[googleRec][t]
for t in tokens])
value = s / googleNormsBroadcast.value[googleRec] / amazonNormsBroadcast.value[amazonRec]
key = (amazonRec, googleRec)
return (key, value)
similaritiesFullRDD = (commonTokens
.map(fastCosineSimilarity)
.cache())
print similaritiesFullRDD.count()
# COMMAND ----------
# TEST Compute cosine similarities for the full dataset (4f)
similarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect()
Test.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)')
Test.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity')
Test.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()')
# COMMAND ----------
# PRIVATE_TEST Compute cosine similarities for the full dataset (4f)
similarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect()
Test.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)')
Test.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity')
Test.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 5: Analysis**
# MAGIC
# MAGIC Now we have an authoritative list of record-pair similarities, but we need a way to use those similarities to decide if two records are duplicates or not. The simplest approach is to pick a **threshold**. Pairs whose similarity is above the threshold are declared duplicates, and pairs below the threshold are declared distinct.
# MAGIC
# MAGIC To decide where to set the threshold we need to understand what kind of errors result at different levels. If we set the threshold too low, we get more **false positives**, that is, record-pairs we say are duplicates that in reality are not. If we set the threshold too high, we get more **false negatives**, that is, record-pairs that really are duplicates but that we miss.
# MAGIC
# MAGIC ER algorithms are evaluated by the common metrics of information retrieval and search called **precision** and **recall**. Precision asks of all the record-pairs marked duplicates, what fraction are true duplicates? Recall asks of all the true duplicates in the data, what fraction did we successfully find? As with false positives and false negatives, there is a trade-off between precision and recall. A third metric, called **F-measure**, takes the harmonic mean of precision and recall to measure overall goodness in a single value:
# MAGIC \\[ Fmeasure = 2 \frac{precision * recall}{precision + recall} \\]
# MAGIC
# MAGIC > **Note**: In this part, we use the "gold standard" mapping from the included file to look up true duplicates, and the results of Part 4.
# MAGIC
# MAGIC > **Note**: In this part, you will not be writing any code. We've written all of the code for you. Run each cell and then answer the quiz questions on Studio.
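# COMMAND ----------
# Illustrative sketch (not part of the graded answers): a worked
# precision/recall/F-measure calculation with made-up counts, to make the
# threshold trade-off concrete before we compute the real curves below.
# Suppose some threshold marks 50 pairs as duplicates, of which 40 really are
# duplicates (TP) and 10 are not (FP), while 20 true duplicates are missed (FN).
toyTP, toyFP, toyFN = 40, 10, 20
toyPrecision = float(toyTP) / (toyTP + toyFP)                 # 40/50 = 0.800
toyRecall = float(toyTP) / (toyTP + toyFN)                    # 40/60 = 0.667
toyF = 2 * toyPrecision * toyRecall / (toyPrecision + toyRecall)
print 'precision=%.3f recall=%.3f F-measure=%.3f' % (toyPrecision, toyRecall, toyF)
# Raising the threshold usually trades recall for precision; F-measure folds
# both into a single number.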
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(5a) Counting True Positives, False Positives, and False Negatives**
# MAGIC
# MAGIC We need functions that count True Positives (true duplicates above the threshold), and False Positives and False Negatives:
# MAGIC * We start by creating the `simsFullRDD` from our `similaritiesFullRDD` that consists of pairs of the form ((Amazon ID, Google URL), similarity score)
# MAGIC * From this RDD, we create an RDD consisting of only the similarity scores
# MAGIC * To look up the similarity scores for true duplicates, we perform a left outer join using the `goldStandard` RDD and `simsFullRDD`, and extract the similarity scores, substituting 0 for gold-standard pairs that do not appear in `simsFullRDD` (these are False Negatives)
# COMMAND ----------
# Create an RDD of ((Amazon ID, Google URL), similarity score)
simsFullRDD = similaritiesFullRDD.map(lambda x: ("%s %s" % (x[0][0], x[0][1]), x[1]))
assert (simsFullRDD.count() == 2441100)
# Create an RDD of just the similarity scores
simsFullValuesRDD = (simsFullRDD
.map(lambda x: x[1])
.cache())
assert (simsFullValuesRDD.count() == 2441100)
# Look up all similarity scores for true duplicates
# This helper function will return the similarity score for records that are in the gold standard and the simsFullRDD (True positives), and will return 0 for records that are in the gold standard but not in simsFullRDD (False Negatives).
def gs_value(record):
if (record[1][1] is None):
return 0
else:
return record[1][1]
# Join the gold standard and simsFullRDD, and then extract the similarities scores using the helper function
trueDupSimsRDD = (goldStandard
.leftOuterJoin(simsFullRDD)
.map(gs_value)
.cache())
print 'There are %s true duplicates.' % trueDupSimsRDD.count()
assert(trueDupSimsRDD.count() == 1300)
# COMMAND ----------
# MAGIC %md
# MAGIC The next step is to pick a threshold between 0 and 1 and count the True Positives (true duplicates above the threshold). However, we would like to explore many different thresholds.
# MAGIC
# MAGIC To do this, we divide the space of thresholds into 100 bins, and take the following actions:
# MAGIC * We use Spark Accumulators to implement our counting function. We define a custom accumulator type, `VectorAccumulatorParam`, along with functions to initialize the accumulator's vector to zero, and to add two vectors. Note that we have to use the += operator because you can only add to an accumulator.
# MAGIC * We create a helper function to create a list with one entry (bit) set to a value and all others set to 0.
# MAGIC * We create 101 bins for the 100 threshold values between 0 and 1.
# MAGIC * Now, for each similarity score, we can compute the false positives. We do this by adding each similarity score to the appropriate bin of the vector. Then we remove true positives from the vector by using the gold standard data.
# MAGIC * We define functions for computing false positive and negative and true positives, for a given threshold.
# COMMAND ----------
from pyspark.accumulators import AccumulatorParam
class VectorAccumulatorParam(AccumulatorParam):
# Initialize the VectorAccumulator to 0
def zero(self, value):
return [0] * len(value)
# Add two VectorAccumulator variables
def addInPlace(self, val1, val2):
for i in xrange(len(val1)):
val1[i] += val2[i]
return val1
# Return a list with entry x set to value and all other entries set to 0
def set_bit(x, value, length):
bits = []
for y in xrange(length):
if (x == y):
bits.append(value)
else:
bits.append(0)
return bits
# Pre-bin counts of false positives for different threshold ranges
BINS = 101
nthresholds = 100
def bin(similarity):
return int(similarity * nthresholds)
# fpCounts[i] = number of entries (possible false positives) where bin(similarity) == i
zeros = [0] * BINS
fpCounts = sc.accumulator(zeros, VectorAccumulatorParam())
def add_element(score):
global fpCounts
b = bin(score)
fpCounts += set_bit(b, 1, BINS)
simsFullValuesRDD.foreach(add_element)
# Remove true positives from FP counts
def sub_element(score):
global fpCounts
b = bin(score)
fpCounts += set_bit(b, -1, BINS)
trueDupSimsRDD.foreach(sub_element)
def falsepos(threshold):
fpList = fpCounts.value
return sum([fpList[b] for b in range(0, BINS) if float(b) / nthresholds >= threshold])
def falseneg(threshold):
return trueDupSimsRDD.filter(lambda x: x < threshold).count()
def truepos(threshold):
    # Relies on falsenegDict, which is built in part (5c) before this function is called
    return trueDupSimsRDD.count() - falsenegDict[threshold]
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(5b) Precision, Recall, and F-measures**
# MAGIC We define functions so that we can compute the [Precision](https://en.wikipedia.org/wiki/Precision_and_recall), [Recall](https://en.wikipedia.org/wiki/Precision_and_recall), and [F-measure](https://en.wikipedia.org/wiki/Precision_and_recall#F-measure) as a function of threshold value:
# MAGIC * Precision = true-positives / (true-positives + false-positives)
# MAGIC * Recall = true-positives / (true-positives + false-negatives)
# MAGIC * F-measure = 2 x Recall x Precision / (Recall + Precision)
# COMMAND ----------
# Precision = true-positives / (true-positives + false-positives)
# Recall = true-positives / (true-positives + false-negatives)
# F-measure = 2 x Recall x Precision / (Recall + Precision)
def precision(threshold):
tp = trueposDict[threshold]
return float(tp) / (tp + falseposDict[threshold])
def recall(threshold):
tp = trueposDict[threshold]
return float(tp) / (tp + falsenegDict[threshold])
def fmeasure(threshold):
r = recall(threshold)
p = precision(threshold)
return 2 * r * p / (r + p)
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(5c) Line Plots**
# MAGIC We can make line plots of precision, recall, and F-measure as a function of threshold value, for thresholds between 0.0 and 1.0. You can change `nthresholds` (above in part **(5a)**) to change the threshold values to plot.
# COMMAND ----------
thresholds = [float(n) / nthresholds for n in range(0, nthresholds)]
falseposDict = dict([(t, falsepos(t)) for t in thresholds])
falsenegDict = dict([(t, falseneg(t)) for t in thresholds])
trueposDict = dict([(t, truepos(t)) for t in thresholds])
precisions = [precision(t) for t in thresholds]
recalls = [recall(t) for t in thresholds]
fmeasures = [fmeasure(t) for t in thresholds]
print precisions[0], fmeasures[0]
assert (abs(precisions[0] - 0.000532546802671) < 0.0000001)
assert (abs(fmeasures[0] - 0.00106452669505) < 0.0000001)
fig = plt.figure()
plt.plot(thresholds, precisions)
plt.plot(thresholds, recalls)
plt.plot(thresholds, fmeasures)
plt.legend(['Precision', 'Recall', 'F-measure'])
display(fig)
pass
# COMMAND ----------
# Create a DataFrame and visualize using display()
graph = [(t, precision(t), recall(t), fmeasure(t)) for t in thresholds]
graphRDD = sc.parallelize(graph)
graphRow = graphRDD.map(lambda (t, x, y, z): Row(threshold=t, precision=x, recall=y, fmeasure=z))
graphDF = sqlContext.createDataFrame(graphRow)
display(graphDF)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Discussion
# MAGIC
# MAGIC State-of-the-art tools can get an F-measure of about 60% on this dataset. In this lab exercise, our best F-measure is closer to 40%. Look at some examples of errors (both False Positives and False Negatives) and think about what went wrong.
# MAGIC
# MAGIC #### There are several ways we might improve our simple classifier, including:
# MAGIC * Using additional attributes
# MAGIC * Performing better featurization of our textual data (e.g., stemming, n-grams, etc.)
# MAGIC * Using different similarity functions | unlicense | 2,912,175,164,576,989,000 | 48.680678 | 749 | 0.703294 | false |
wilvk/ansible | lib/ansible/modules/cloud/amazon/ecs_service_facts.py | 14 | 8170 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service_facts
short_description: list or describe services in ecs
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
description:
- Lists or describes services in ecs.
version_added: "2.1"
author:
- "Mark Chance (@java1guy)"
- "Darek Kaczynski (@kaczynskid)"
requirements: [ json, botocore, boto3 ]
options:
details:
description:
- Set this to true if you want detailed information about the services.
required: false
default: 'false'
choices: ['true', 'false']
cluster:
description:
- The cluster ARNS in which to list the services.
required: false
default: 'default'
service:
description:
- The service to get details for (required if details is true)
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
service: console-test-service
details: true
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
'''
RETURN = '''
services:
description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
returned: success
type: complex
contains:
clusterArn:
            description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
returned: always
type: string
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
                    description: the name of the load balancer
returned: always
type: string
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: string
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
returned: always
type: string
serviceName:
description: A user-generated string used to identify the service
returned: always
type: string
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: string
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: string
deployments:
description: list of service deployments
returned: always
type: list of complex
events:
            description: list of service events
returned: always
type: list of complex
''' # NOQA
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
class EcsServiceManager:
"""Handles ECS Services"""
def __init__(self, module):
self.module = module
# self.ecs = boto3.client('ecs')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
# def list_clusters(self):
# return self.client.list_clusters()
# {'failures': [],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
# 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default',
# 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
# {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
# 'clusters': []}
def list_services(self, cluster):
fn_args = dict()
        if cluster:
fn_args['cluster'] = cluster
response = self.ecs.list_services(**fn_args)
relevant_response = dict(services=response['serviceArns'])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
        if cluster:
fn_args['cluster'] = cluster
fn_args['services'] = services.split(",")
response = self.ecs.describe_services(**fn_args)
        relevant_response = dict(services=list(map(self.extract_service_from, response['services'])))
if 'failures' in response and len(response['failures']) > 0:
relevant_response['services_not_running'] = response['failures']
return relevant_response
def extract_service_from(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
if 'deployments' in service:
for d in service['deployments']:
if 'createdAt' in d:
d['createdAt'] = str(d['createdAt'])
if 'updatedAt' in d:
d['updatedAt'] = str(d['updatedAt'])
if 'events' in service:
for e in service['events']:
if 'createdAt' in e:
e['createdAt'] = str(e['createdAt'])
return service
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
details=dict(required=False, type='bool', default=False),
cluster=dict(required=False, type='str'),
service=dict(required=False, type='str')
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
show_details = module.params.get('details', False)
task_mgr = EcsServiceManager(module)
if show_details:
if 'service' not in module.params or not module.params['service']:
module.fail_json(msg="service must be specified for ecs_service_facts")
ecs_facts = task_mgr.describe_services(module.params['cluster'], module.params['service'])
else:
ecs_facts = task_mgr.list_services(module.params['cluster'])
ecs_facts_result = dict(changed=False, ansible_facts=ecs_facts)
module.exit_json(**ecs_facts_result)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,457,576,710,612,867,800 | 36.136364 | 326 | 0.619706 | false |
Muxi-X/muxi_site | muxiwebsite/api/signup.py | 2 | 1117 | # -*- coding: utf-8 -*-
"""
signup.py
~~~~~~~~~
    Muxi website signup API
"""
from flask import jsonify, g, request
from . import api
from muxiwebsite.models import User
from .authentication import auth
from muxiwebsite import db
import base64
@api.route('/signup/', methods=['POST'])
def signup():
"""用户注册"""
un = request.get_json().get("username")
email = request.get_json().get("email")
password = request.get_json().get("password")
    if un is None or email is None or password is None:
        return jsonify ({}), 403
    if User.query.filter_by(username=un).first() is not None:
        return jsonify ({}), 401
    if User.query.filter_by(email=email).first() is not None:
        return jsonify ({}), 402
user = User(
username = un,
email = email,
        # NOTE: stored base64-encoded to match the existing login flow; base64 is an encoding, not a hash
        password = base64.b64encode(password),
avatar_url = "http://7xrvvt.com1.z0.glb.clouddn.com/shakedog.gif",
)
db.session.add(user)
db.session.commit()
return jsonify({
"created": user.id ,
}), 200
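# Illustrative usage (hypothetical paths and values; the blueprint's actual
# mount point is configured elsewhere):
#   POST <api prefix>/signup/ with JSON
#     {"username": "alice", "email": "[email protected]", "password": "secret"}
#   -> 200 {"created": <new user id>} on success,
#      401 duplicate username, 402 duplicate email, 403 missing field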
| mit | 5,526,761,842,879,015,000 | 24.511628 | 74 | 0.616226 | false |
mlucchini/electricitymap | parsers/CA_YT.py | 1 | 4487 | import arrow
from bs4 import BeautifulSoup
import requests
timezone = 'Canada/Pacific'
def fetch_production(country_code='CA-YT', session=None):
"""Requests the last known production mix (in MW) of a given region
Arguments:
country_code -- ignored here, only information for CA-YT is returned
session (optional) -- request session passed in order to re-use an existing session
"""
"""
We are using Yukon Energy's data from
http://www.yukonenergy.ca/energy-in-yukon/electricity-101/current-energy-consumption
Generation in Yukon is done with hydro, diesel oil, and LNG.
There are two companies, Yukon Energy and ATCO aka Yukon Electric aka YECL.
Yukon Energy does most of the generation and feeds into Yukon's grid.
ATCO does operations, billing, and generation in some of the off-grid communities.
See schema of the grid at http://www.atcoelectricyukon.com/About-Us/
Per https://en.wikipedia.org/wiki/Yukon#Municipalities_by_population
of total population 35874 (2016 census), 28238 are in municipalities
that are connected to the grid - that is 78.7%.
Off-grid generation is with diesel generators, this is not reported online as of 2017-06-23
and is not included in this calculation.
Yukon Energy reports only "hydro" and "thermal" generation.
Per http://www.yukonenergy.ca/ask-janet/lng-and-boil-off-gas,
in 2016 the thermal generation was about 50% diesel and 50% LNG.
But since Yukon Energy doesn't break it down on their website,
we return all thermal as "unknown".
Per https://en.wikipedia.org/wiki/List_of_generating_stations_in_Yukon
Yukon Energy operates about 98% of Yukon's hydro capacity, the only exception is
the small 1.3 MW Fish Lake dam operated by ATCO/Yukon Electrical. That's small enough
to not matter, I think.
There is also a small 0.81 MW wind farm, its current generation is not available.
"""
requests_obj = session or requests.session()
url = 'http://www.yukonenergy.ca/consumption/chart_current.php?chart=current&width=420'
response = requests_obj.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
def find_div_by_class(soup_obj, cls):
return soup_obj.find('div', attrs={'class': cls})
def parse_mw(text):
try:
return float(text[:text.index('MW')])
except ValueError:
return 0
# date is specified like "Thursday, June 22, 2017"
source_date = find_div_by_class(soup, 'current_date').text
# time is specified like "11:55 pm" or "2:25 am"
source_time = find_div_by_class(soup, 'current_time').text
datetime_text = '{} {}'.format(source_date, source_time)
datetime_arrow = arrow.get(datetime_text, 'dddd, MMMM D, YYYY h:mm A')
datetime_datetime = arrow.get(datetime_arrow.datetime, timezone).datetime
# generation is specified like "37.69 MW - hydro"
hydro_div = find_div_by_class(soup, 'load_hydro')
hydro_text = hydro_div.div.text
hydro_generation = parse_mw(hydro_text)
hydro_cap_div = find_div_by_class(soup, 'avail_hydro')
if hydro_cap_div:
hydro_cap_text = hydro_cap_div.div.text
hydro_capacity = parse_mw(hydro_cap_text)
else:
# hydro capacity is not provided when thermal is used
hydro_capacity = None
thermal_div = find_div_by_class(soup, 'load_thermal')
if thermal_div.div:
thermal_text = thermal_div.div.text
thermal_generation = parse_mw(thermal_text)
else:
# thermal is not always used and when it's not used, it's not specified in HTML
thermal_generation = 0
data = {
'datetime': datetime_datetime,
'countryCode': country_code,
'production': {
'unknown': thermal_generation,
'hydro': hydro_generation,
# specify some sources that aren't present in Yukon as zero,
# this allows the analyzer to better estimate CO2eq
'coal': 0,
'nuclear': 0,
'geothermal': 0
},
'storage': {},
'source': 'www.yukonenergy.ca'
}
if hydro_capacity:
data.update({
'capacity': {
'hydro': hydro_capacity
}
})
return data
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
| gpl-3.0 | 7,202,155,513,057,837,000 | 34.054688 | 95 | 0.657232 | false |
stamhe/zulip | zproject/test_settings.py | 115 | 1817 | from settings import *
import os
DATABASES["default"] = {"NAME": "zulip_test",
"USER": "zulip_test",
"PASSWORD": LOCAL_DATABASE_PASSWORD,
"HOST": "localhost",
"SCHEMA": "zulip",
"ENGINE": "django.db.backends.postgresql_psycopg2",
"TEST_NAME": "django_zulip_tests",
"OPTIONS": {"connection_factory": TimeTrackingConnection },}
if "TORNADO_SERVER" in os.environ:
TORNADO_SERVER = os.environ["TORNADO_SERVER"]
else:
TORNADO_SERVER = None
# Decrease the get_updates timeout to 1 second.
# This allows CasperJS to proceed quickly to the next test step.
POLL_TIMEOUT = 1000
# Don't use the real message log for tests
EVENT_LOG_DIR = '/tmp/zulip-test-event-log'
# Print our emails rather than sending them
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
# The test suite uses EmailAuthBackend
AUTHENTICATION_BACKENDS += ('zproject.backends.EmailAuthBackend',)
TEST_SUITE = True
RATE_LIMITING = False
# Don't use rabbitmq from the test suite -- the user_profile_ids for
# any generated queue elements won't match those being used by the
# real app.
USING_RABBITMQ = False
# Disable the tutorial because it confuses the client tests.
TUTORIAL_ENABLED = False
# Disable use of memcached for caching
CACHES['database'] = {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'LOCATION': 'zulip-database-test-cache',
'TIMEOUT': 3600,
'CONN_MAX_AGE': 600,
'OPTIONS': {
'MAX_ENTRIES': 100000
}
}
LOGGING['loggers']['zulip.requests']['level'] = 'CRITICAL'
LOGGING['loggers']['zulip.management']['level'] = 'CRITICAL'
CAMO_URI = 'https://external-content.zulipcdn.net/'
CAMO_KEY = 'dummy'
| apache-2.0 | 6,958,732,118,205,763,000 | 30.877193 | 84 | 0.648872 | false |
fusionpig/ansible | lib/ansible/plugins/action/win_template.py | 117 | 1168 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.plugins.action.template import ActionModule as TemplateActionModule
# Even though TemplateActionModule inherits from ActionBase, we still need to
# directly inherit from ActionBase to appease the plugin loader.
class ActionModule(TemplateActionModule, ActionBase):
pass
| gpl-3.0 | 4,400,703,885,525,659,600 | 40.714286 | 80 | 0.778253 | false |
elkingtonmcb/rethinkdb | external/v8_3.30.33.16/tools/testrunner/local/progress.py | 41 | 10716 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import sys
import time
from . import junit_output
ABS_PATH_PREFIX = os.getcwd() + os.sep
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class ProgressIndicator(object):
def __init__(self):
self.runner = None
def Starting(self):
pass
def Done(self):
pass
def AboutToRun(self, test):
pass
def HasRun(self, test, has_unexpected_output):
pass
def PrintFailureHeader(self, test):
if test.suite.IsNegativeTest(test):
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
class SimpleProgressIndicator(ProgressIndicator):
"""Abstract base class for {Verbose,Dots}ProgressIndicator"""
def Starting(self):
print 'Running %i tests' % self.runner.total
def Done(self):
print
for failed in self.runner.failed:
self.PrintFailureHeader(failed)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
if failed.output.HasCrashed():
print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
if failed.output.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.runner.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.runner.failed)
if self.runner.crashed > 0:
print "=== %i tests CRASHED" % self.runner.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, test):
print 'Starting %s...' % test.GetLabel()
sys.stdout.flush()
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
if test.output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def HasRun(self, test, has_unexpected_output):
total = self.runner.succeeded + len(self.runner.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if has_unexpected_output:
if test.output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif test.output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
"""Abstract base class for {Color,Monochrome}ProgressIndicator"""
def __init__(self, templates):
super(CompactProgressIndicator, self).__init__()
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Done(self):
self.PrintProgress('Done')
print "" # Line break.
def AboutToRun(self, test):
self.PrintProgress(test.GetLabel())
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
stdout = test.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = test.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
print "exit code: %d" % test.output.exit_code
print "--- CRASHED ---"
if test.output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, string, length):
if length and (len(string) > (length - 3)):
return string[:(length - 3)] + "..."
else:
return string
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.runner.succeeded,
'remaining': (((self.runner.total - self.runner.remaining) * 100) //
self.runner.total),
'failed': len(self.runner.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
"\033[34m%%%(remaining) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|%%%(remaining) 4d|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
}
super(MonochromeProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
class JUnitTestProgressIndicator(ProgressIndicator):
def __init__(self, progress_indicator, junitout, junittestsuite):
self.progress_indicator = progress_indicator
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
else:
self.outfile = sys.stdout
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
self.outputter.FinishAndWrite(self.outfile)
if self.outfile != sys.stdout:
self.outfile.close()
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
fail_text = ""
if has_unexpected_output:
stdout = test.output.stdout.strip()
if len(stdout):
fail_text += "stdout:\n%s\n" % stdout
stderr = test.output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
if test.output.HasTimedOut():
fail_text += "--- TIMEOUT ---"
self.outputter.HasRunTest(
[test.GetLabel()] + self.runner.context.mode_flags + test.flags,
test.duration,
fail_text)
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, progress_indicator, json_test_results, arch, mode):
self.progress_indicator = progress_indicator
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
self.results = []
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
complete_results = []
if os.path.exists(self.json_test_results):
with open(self.json_test_results, "r") as f:
# Buildbot might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
complete_results.append({
"arch": self.arch,
"mode": self.mode,
"results": self.results,
})
with open(self.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
if not has_unexpected_output:
# Omit tests that run as expected. Passing tests of reruns after failures
# will have unexpected_output to be reported here has well.
return
self.results.append({
"name": test.GetLabel(),
"flags": test.flags,
"command": EscapeCommand(self.runner.GetCommand(test)).replace(
ABS_PATH_PREFIX, ""),
"run": test.run,
"stdout": test.output.stdout,
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
})
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
| agpl-3.0 | 1,146,058,452,391,740,200 | 30.151163 | 79 | 0.645857 | false |
ScienceStacks/SciSheets | mysite/scisheets/core/helpers/block_execution_controller.py | 2 | 7374 | """
This class interacts with the code generated for evaluating a
scisheet to control the execution of blocks of code. A block
of code (hereafter, just block) can be a formulas, prologue, or
epilogue.
"""
from Files.logger import Logger
from mysite import settings
import inspect
import os
import sys
class BlockExecutionController(object):
"""
Assists with:
1. Controlling the execution of a code block, such as
making exceptions precise by identifying the
code block and line number at which an exception occurs.
See: startBlock, endBlock, exceptionForBlock
2. Managing loop iterations.
       See: initializeLoop, startAnIteration, endAnIteration
"""
def __init__(self, scisheets_api, is_logging=False, debug=False):
"""
:param ApiFormula scisheets_api:
:param bool is_logging: creates a log file
"""
self.debug = debug
self._api = scisheets_api
self._block_linenumber = None # Where exception occurred in block
self._block_name = None
self._block_start_linenumber = None # Start of block in source
self._caller_filename = None
self._exception = None
self._exception_filename = None
if is_logging:
self._logger = Logger(settings.SCISHEETS_LOG,
"controller")
else:
self._logger = None
self._iterations = 0
self._is_first = True
self._table = None
if self._api is not None:
self._table = self._api.getTable()
def _log(self, name, details):
if self._logger is not None:
self._logger.log(name, details=details)
# TODO: Handle different file for caller
def startBlock(self, name):
"""
Called at the start of a block that is being evaluated.
:param str name: User oriented identifier of the code block
"""
if self.debug:
if name == 'V_MAX':
import pdb; pdb.set_trace()
self._block_name = name
context = inspect.getouterframes(inspect.currentframe())[1]
linenumber = context[2]
self._caller_filename = context[1]
self._block_start_linenumber = linenumber + 1
self._exception_filename = None
self._log("start/%s" % self._block_name, "")
def endBlock(self):
"""
Called at the end of a block
"""
self._log("end/%s" % self._block_name, "")
self._block_start_linenumber = None
self._caller_filename = None
self._exception_filename = None
self._block_name = None
def exceptionForBlock(self, exception):
"""
Called when an exception has occurred.
:param Exception exception:
:return str, int: block name, line number in the block
:raises RuntimeError: if not within a block
"""
if self.debug:
import pdb; pdb.set_trace()
if self._block_name is None:
self._block_name = "Unknown"
self._exception = exception
_, _, exc_tb = sys.exc_info()
self._exception_filename = exc_tb.tb_frame.f_code.co_filename
# Check for compile error
if 'lineno' in dir(self._exception):
abs_linenumber = self._exception.lineno
is_runtime_error = False
# Must be runtime error
else:
abs_linenumber = exc_tb.tb_lineno
is_runtime_error = True
# Compute the line number of the exception
if is_runtime_error and \
self._exception_filename == self._caller_filename:
self._block_linenumber = abs_linenumber \
- self._block_start_linenumber + 1
else:
self._block_linenumber = abs_linenumber
self._log("exception/%s" % self._block_name, self.formatError())
def formatError(self,
is_absolute_linenumber=False,
is_use_block_name=True):
"""
Formats the exception to include the block and line number.
:param bool is_absolute_linenumber: Forces message to be
an absolute line number
:param bool is_use_block_name: Use the block name in the message
:return str/None:
"""
if self._exception is None:
return None
if is_use_block_name:
if (not is_absolute_linenumber) \
and self._caller_filename == self._exception_filename:
if not "Computing" in str(self._exception):
msg = "Computing %s near line %d: %s" % (self._block_name,
self._block_linenumber, str(self._exception))
else:
msg = str(self._exception)
else:
msg = "In %s near line %d: %s" % (self._exception_filename,
self._block_linenumber, str(self._exception))
else:
msg = "near line %d: %s" % (self._block_linenumber, str(self._exception))
return msg
def initializeLoop(self):
"""
Initializes variables before loop begins
"""
self._iterations = 0
self._log("initializeLoop", "")
def startAnIteration(self):
"""
Beginning of a loop iteration
"""
self._iterations += 1
self._exception = None
for cv in self._api.getColumnVariables():
try:
cv.setIterationStartValue()
      except Exception as err:
        # Log and continue; a failure to snapshot one column variable
        # should not abort the whole iteration
        self._log("startAnIteration/error", str(err))
self._log("startAnIteration", "iterations=%d" % self._iterations)
def endAnIteration(self):
"""
End of a loop iteration
"""
self._log("endAnIteration", "iterations=%d" % self._iterations)
def endProgram(self, details=""):
"""
End of a loop iteration
"""
self._log("endProgram", details)
def _isEquivalentValues(self):
"""
    Checks that no namespace variable has changed since the start of the iteration.
:return bool, cv/None: True if no change; cv of first ColumnVariable that failed
"""
for cv in self._api.getColumnVariables():
if not cv.isNamespaceValueEquivalentToIterationStartValue():
return False, cv
return True, None
def isTerminateLoop(self):
"""
Determines if the loop should terminate
:return bool: terminate loop if True
"""
num_formula_columns = len(self._table.getFormulaColumns())
outcome = ""
done = None
is_first = self._is_first
if is_first:
self._is_first = False
done = False
is_not_evaluate = None
is_not_except= None
is_equiv = None
is_large = None
cv_bad = None
else:
is_not_evaluate = not self._table.getIsEvaluateFormulas()
is_not_except= self._exception is None
is_equiv, cv_bad = self._isEquivalentValues()
is_large = self._iterations >= num_formula_columns
if is_not_evaluate:
outcome = "True - not isEvaluateFormulas"
done = True
elif is_not_except and is_equiv:
outcome = "True - not exception & equivalent values"
done = True
elif is_large:
outcome = "True - iterations >= num_formula_columns"
done = True
else:
outcome = "False"
done = False
details = "%s: not_evaluate: %s; not_except: %s;" \
% (outcome, is_not_evaluate, is_not_except)
cv_msg = str(is_equiv)
if cv_bad is not None:
cv_msg = "%s,col=%s" % (is_equiv, cv_bad.getColumn().getName())
details = "%s equiv: %s; first: %s; large: %s." \
% (details, cv_msg, is_first, is_large)
self._log("isTerminateLoop", details)
return done
def getException(self):
return self._exception
def getExceptionLineNumber(self):
return self._block_linenumber
def setTable(self, table):
self._table = table
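if __name__ == '__main__':
  # Minimal usage sketch (illustrative only): exercises the intended
  # startBlock / exceptionForBlock / endBlock sequence without a real
  # scisheets API object. The block name and formula below are invented.
  controller = BlockExecutionController(None)
  controller.startBlock('example_formula')
  try:
    result = 1 / 0  # stand-in for generated formula code that raises
  except Exception as exc:
    controller.exceptionForBlock(exc)
    print controller.formatError()
  controller.endBlock()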
| apache-2.0 | 6,850,372,470,727,965,000 | 30.648069 | 84 | 0.629916 | false |
yukim/cassandra-dtest | udtencoding_test.py | 2 | 2105 | from dtest import Tester
from assertions import assert_invalid
from tools import since
import time
@since('2.1')
class TestUDTEncoding(Tester):
def udt_test(self):
""" Test (somewhat indirectly) that user queries involving UDT's are properly encoded (due to driver not recognizing UDT syntax) """
cluster = self.cluster
cluster.populate(3).start()
[node1, node2, node3] = cluster.nodelist()
time.sleep(.5)
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 3)
# create udt and insert correctly (should be successful)
cursor.execute('CREATE TYPE address (city text,zip int);')
cursor.execute('CREATE TABLE user_profiles (login text PRIMARY KEY, addresses map<text, frozen<address>>);')
cursor.execute("INSERT INTO user_profiles(login, addresses) VALUES ('tsmith', { 'home': {city: 'San Fransisco',zip: 94110 }});")
        # Note: the address literal looks like a map, which is what the driver thinks it is. The UDT is encoded server-side; below we perturb 'addresses' slightly and check that the encoder reports the errors.
# try adding a field - see if will be encoded to a udt (should return error)
assert_invalid(cursor, "INSERT INTO user_profiles(login, addresses) VALUES ('jsmith', { 'home': {street: 'El Camino Real', city: 'San Fransisco', zip: 94110 }});", "Unknown field 'street' in value of user defined type address")
# try modifying a field name - see if will be encoded to a udt (should return error)
assert_invalid(cursor, "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {cityname: 'San Fransisco', zip: 94110 }});", "Unknown field 'cityname' in value of user defined type address")
# try modifying a type within the collection - see if will be encoded to a udt (should return error)
assert_invalid(cursor, "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {city: 'San Fransisco', zip: '94110' }});", "Invalid map literal for addresses")
| apache-2.0 | 8,999,324,405,026,003,000 | 57.472222 | 235 | 0.688361 | false |
lyarwood/bugwarrior | tests/test_config.py | 2 | 3581 | # coding: utf-8
from __future__ import unicode_literals
import os
import configparser
from unittest import TestCase
import bugwarrior.config as config
from .base import ConfigTest
class TestGetConfigPath(ConfigTest):
def create(self, path):
"""
Create an empty file in the temporary directory, return the full path.
"""
fpath = os.path.join(self.tempdir, path)
if not os.path.exists(os.path.dirname(fpath)):
os.makedirs(os.path.dirname(fpath))
open(fpath, 'a').close()
return fpath
def test_default(self):
"""
If it exists, use the file at $XDG_CONFIG_HOME/bugwarrior/bugwarriorrc
"""
rc = self.create('.config/bugwarrior/bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
def test_legacy(self):
"""
Falls back on .bugwarriorrc if it exists
"""
rc = self.create('.bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
def test_xdg_first(self):
"""
If both files above exist, the one in $XDG_CONFIG_HOME takes precedence
"""
self.create('.bugwarriorrc')
rc = self.create('.config/bugwarrior/bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
def test_no_file(self):
"""
        If no bugwarriorrc exists anywhere, the path to the preferred one is
returned.
"""
self.assertEquals(
config.get_config_path(),
os.path.join(self.tempdir, '.config/bugwarrior/bugwarriorrc'))
def test_BUGWARRIORRC(self):
"""
If $BUGWARRIORRC is set, it takes precedence over everything else (even
if the file doesn't exist).
"""
rc = os.path.join(self.tempdir, 'my-bugwarriorc')
os.environ['BUGWARRIORRC'] = rc
self.create('.bugwarriorrc')
self.create('.config/bugwarrior/bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
def test_BUGWARRIORRC_empty(self):
"""
        If $BUGWARRIORRC is set but empty, it is not used and the default file
is used instead.
"""
os.environ['BUGWARRIORRC'] = ''
rc = self.create('.config/bugwarrior/bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
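# Illustrative sketch, not bugwarrior's actual implementation: the precedence
# encoded by the tests above is $BUGWARRIORRC first (even if the file does not
# exist), then the XDG location, then the legacy ~/.bugwarriorrc, falling back
# to the XDG path when neither file exists.
def _sketch_get_config_path():
    env_rc = os.environ.get('BUGWARRIORRC')
    if env_rc:
        return env_rc
    xdg_home = os.environ.get('XDG_CONFIG_HOME',
                              os.path.expanduser('~/.config'))
    xdg_rc = os.path.join(xdg_home, 'bugwarrior', 'bugwarriorrc')
    if os.path.exists(xdg_rc):
        return xdg_rc
    legacy_rc = os.path.expanduser('~/.bugwarriorrc')
    if os.path.exists(legacy_rc):
        return legacy_rc
    return xdg_rc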
class TestGetDataPath(ConfigTest):
def setUp(self):
super(TestGetDataPath, self).setUp()
self.config = configparser.RawConfigParser()
self.config.add_section('general')
def assertDataPath(self, expected_datapath):
self.assertEqual(
expected_datapath, config.get_data_path(self.config, 'general'))
def test_TASKDATA(self):
"""
TASKDATA should be respected, even when taskrc's data.location is set.
"""
datapath = os.environ['TASKDATA'] = os.path.join(self.tempdir, 'data')
self.assertDataPath(datapath)
def test_taskrc_datalocation(self):
"""
When TASKDATA is not set, data.location in taskrc should be respected.
"""
os.environ['TASKDATA'] = ''
self.assertDataPath(self.lists_path)
def test_unassigned(self):
"""
When data path is not assigned, use default location.
"""
# Empty taskrc.
with open(self.taskrc, 'w'):
pass
os.environ['TASKDATA'] = ''
self.assertDataPath(os.path.expanduser('~/.task'))
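# Illustrative counterpart for the data path (again, a sketch rather than the
# real code): TASKDATA overrides taskrc's data.location, which overrides the
# ~/.task default.
def _sketch_get_data_path(taskdata, taskrc_data_location):
    return taskdata or taskrc_data_location or os.path.expanduser('~/.task')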
class TestOracleEval(TestCase):
def test_echo(self):
self.assertEqual(config.oracle_eval("echo fööbår"), "fööbår")
| gpl-3.0 | 4,037,508,409,989,290,000 | 29.29661 | 79 | 0.612028 | false |
TangXT/GreatCatMOOC | lms/djangoapps/courseware/tests/test_model_data.py | 3 | 14099 | """
Test for lms courseware app, module data (runtime data storage for XBlocks)
"""
import json
from mock import Mock, patch
from functools import partial
from courseware.model_data import DjangoKeyValueStore
from courseware.model_data import InvalidScopeError, FieldDataCache
from courseware.models import StudentModule, XModuleUserStateSummaryField
from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField
from student.tests.factories import UserFactory
from courseware.tests.factories import StudentModuleFactory as cmfStudentModuleFactory
from courseware.tests.factories import UserStateSummaryFactory
from courseware.tests.factories import StudentPrefsFactory, StudentInfoFactory
from xblock.fields import Scope, BlockScope
from xmodule.modulestore import Location
from django.test import TestCase
from django.db import DatabaseError
from xblock.core import KeyValueMultiSaveError
def mock_field(scope, name):
field = Mock()
field.scope = scope
field.name = name
return field
def mock_descriptor(fields=None):
    # Use None as the default to avoid sharing a mutable default argument.
    fields = fields or []
descriptor = Mock()
descriptor.location = location('def_id')
descriptor.module_class.fields.values.return_value = fields
descriptor.fields.values.return_value = fields
descriptor.module_class.__name__ = 'MockProblemModule'
return descriptor
location = partial(Location, 'i4x', 'edX', 'test_course', 'problem')
course_id = 'edX/test_course/test'
# The user ids here are 1 because we make a student in the setUp functions, and
# they get an id of 1. There's an assertion in setUp to ensure that assumption
# is still true.
user_state_summary_key = partial(DjangoKeyValueStore.Key, Scope.user_state_summary, None, location('def_id'))
settings_key = partial(DjangoKeyValueStore.Key, Scope.settings, None, location('def_id'))
user_state_key = partial(DjangoKeyValueStore.Key, Scope.user_state, 1, location('def_id'))
prefs_key = partial(DjangoKeyValueStore.Key, Scope.preferences, 1, 'MockProblemModule')
user_info_key = partial(DjangoKeyValueStore.Key, Scope.user_info, 1, None)
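# For reference, each partial above fixes all but the field name of a full
# key of the form (scope, user_id, block_scope_id, field_name); e.g.
# user_state_key('a_field') is equivalent to
# DjangoKeyValueStore.Key(Scope.user_state, 1, location('def_id'), 'a_field').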
class StudentModuleFactory(cmfStudentModuleFactory):
module_state_key = location('def_id').url()
course_id = course_id
class TestInvalidScopes(TestCase):
def setUp(self):
self.user = UserFactory.create(username='user')
self.field_data_cache = FieldDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_invalid_scopes(self):
for scope in (Scope(user=True, block=BlockScope.DEFINITION),
Scope(user=False, block=BlockScope.TYPE),
Scope(user=False, block=BlockScope.ALL)):
key = DjangoKeyValueStore.Key(scope, None, None, 'field')
self.assertRaises(InvalidScopeError, self.kvs.get, key)
self.assertRaises(InvalidScopeError, self.kvs.set, key, 'value')
self.assertRaises(InvalidScopeError, self.kvs.delete, key)
self.assertRaises(InvalidScopeError, self.kvs.has, key)
self.assertRaises(InvalidScopeError, self.kvs.set_many, {key: 'value'})
class TestStudentModuleStorage(TestCase):
def setUp(self):
student_module = StudentModuleFactory(state=json.dumps({'a_field': 'a_value', 'b_field': 'b_value'}))
self.user = student_module.student
self.assertEqual(self.user.id, 1) # check our assumption hard-coded in the key functions above.
self.field_data_cache = FieldDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_get_existing_field(self):
"Test that getting an existing field in an existing StudentModule works"
self.assertEquals('a_value', self.kvs.get(user_state_key('a_field')))
def test_get_missing_field(self):
"Test that getting a missing field from an existing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, user_state_key('not_a_field'))
def test_set_existing_field(self):
"Test that setting an existing user_state field changes the value"
self.kvs.set(user_state_key('a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_set_missing_field(self):
"Test that setting a new user_state field changes the value"
self.kvs.set(user_state_key('not_a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value', 'not_a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it from the StudentModule"
self.kvs.delete(user_state_key('a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
self.assertRaises(KeyError, self.kvs.get, user_state_key('not_a_field'))
def test_delete_missing_field(self):
"Test that deleting a missing field from an existing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, user_state_key('not_a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_has_existing_field(self):
"Test that `has` returns True for existing fields in StudentModules"
self.assertTrue(self.kvs.has(user_state_key('a_field')))
def test_has_missing_field(self):
"Test that `has` returns False for missing fields in StudentModule"
self.assertFalse(self.kvs.has(user_state_key('not_a_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = user_state_key('field_a')
key2 = user_state_key('field_b')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"Test setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"Test failures when setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
# because we're patching the underlying save, we need to ensure the
# fields are in the cache
for key in kv_dict:
self.kvs.set(key, 'test_value')
with patch('django.db.models.Model.save', side_effect=DatabaseError):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
self.assertEquals(len(exception_context.exception.saved_field_names), 0)
class TestMissingStudentModule(TestCase):
def setUp(self):
self.user = UserFactory.create(username='user')
self.assertEqual(self.user.id, 1) # check our assumption hard-coded in the key functions above.
self.field_data_cache = FieldDataCache([mock_descriptor()], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_get_field_from_missing_student_module(self):
"Test that getting a field from a missing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, user_state_key('a_field'))
def test_set_field_in_missing_student_module(self):
"Test that setting a field in a missing StudentModule creates the student module"
self.assertEquals(0, len(self.field_data_cache.cache))
self.assertEquals(0, StudentModule.objects.all().count())
self.kvs.set(user_state_key('a_field'), 'a_value')
self.assertEquals(1, len(self.field_data_cache.cache))
self.assertEquals(1, StudentModule.objects.all().count())
student_module = StudentModule.objects.all()[0]
self.assertEquals({'a_field': 'a_value'}, json.loads(student_module.state))
self.assertEquals(self.user, student_module.student)
self.assertEquals(location('def_id').url(), student_module.module_state_key)
self.assertEquals(course_id, student_module.course_id)
def test_delete_field_from_missing_student_module(self):
"Test that deleting a field from a missing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, user_state_key('a_field'))
def test_has_field_for_missing_student_module(self):
"Test that `has` returns False for missing StudentModules"
self.assertFalse(self.kvs.has(user_state_key('a_field')))
class StorageTestBase(object):
"""
A base class for that gets subclassed when testing each of the scopes.
"""
# Disable pylint warnings that arise because of the way the child classes call
# this base class -- pylint's static analysis can't keep up with it.
# pylint: disable=E1101, E1102
factory = None
scope = None
key_factory = None
storage_class = None
def setUp(self):
field_storage = self.factory.create()
if hasattr(field_storage, 'student'):
self.user = field_storage.student
else:
self.user = UserFactory.create()
self.mock_descriptor = mock_descriptor([
mock_field(self.scope, 'existing_field'),
mock_field(self.scope, 'other_existing_field')])
self.field_data_cache = FieldDataCache([self.mock_descriptor], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_set_and_get_existing_field(self):
self.kvs.set(self.key_factory('existing_field'), 'test_value')
self.assertEquals('test_value', self.kvs.get(self.key_factory('existing_field')))
def test_get_existing_field(self):
"Test that getting an existing field in an existing Storage Field works"
self.assertEquals('old_value', self.kvs.get(self.key_factory('existing_field')))
def test_get_missing_field(self):
"Test that getting a missing field from an existing Storage Field raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, self.key_factory('missing_field'))
def test_set_existing_field(self):
"Test that setting an existing field changes the value"
self.kvs.set(self.key_factory('existing_field'), 'new_value')
self.assertEquals(1, self.storage_class.objects.all().count())
self.assertEquals('new_value', json.loads(self.storage_class.objects.all()[0].value))
def test_set_missing_field(self):
"Test that setting a new field changes the value"
self.kvs.set(self.key_factory('missing_field'), 'new_value')
self.assertEquals(2, self.storage_class.objects.all().count())
self.assertEquals('old_value', json.loads(self.storage_class.objects.get(field_name='existing_field').value))
self.assertEquals('new_value', json.loads(self.storage_class.objects.get(field_name='missing_field').value))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it"
self.kvs.delete(self.key_factory('existing_field'))
self.assertEquals(0, self.storage_class.objects.all().count())
def test_delete_missing_field(self):
"Test that deleting a missing field from an existing Storage Field raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, self.key_factory('missing_field'))
self.assertEquals(1, self.storage_class.objects.all().count())
def test_has_existing_field(self):
"Test that `has` returns True for an existing Storage Field"
self.assertTrue(self.kvs.has(self.key_factory('existing_field')))
def test_has_missing_field(self):
"Test that `has` return False for an existing Storage Field"
self.assertFalse(self.kvs.has(self.key_factory('missing_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = self.key_factory('existing_field')
key2 = self.key_factory('other_existing_field')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"""Test that setting many regular fields at the same time works"""
kv_dict = self.construct_kv_dict()
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"""Test that setting many regular fields with a DB error """
kv_dict = self.construct_kv_dict()
for key in kv_dict:
self.kvs.set(key, 'test value')
with patch('django.db.models.Model.save', side_effect=[None, DatabaseError]):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
exception = exception_context.exception
self.assertEquals(len(exception.saved_field_names), 1)
self.assertEquals(exception.saved_field_names[0], 'existing_field')
class TestContentStorage(StorageTestBase, TestCase):
factory = UserStateSummaryFactory
scope = Scope.user_state_summary
key_factory = user_state_summary_key
storage_class = XModuleUserStateSummaryField
class TestStudentPrefsStorage(StorageTestBase, TestCase):
factory = StudentPrefsFactory
scope = Scope.preferences
key_factory = prefs_key
storage_class = XModuleStudentPrefsField
class TestStudentInfoStorage(StorageTestBase, TestCase):
factory = StudentInfoFactory
scope = Scope.user_info
key_factory = user_info_key
storage_class = XModuleStudentInfoField
| agpl-3.0 | -996,768,137,892,174,100 | 44.334405 | 149 | 0.691326 | false |
zero323/spark | python/pyspark/mllib/random.py | 22 | 19517 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for random data generation.
"""
import sys
from functools import wraps
from pyspark.mllib.common import callMLlibFunc
__all__ = ['RandomRDDs', ]
def toArray(f):
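    """Map an RDD of MLlib Vectors returned by the wrapped function to an
    RDD of numpy arrays."""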
@wraps(f)
def func(sc, *a, **kw):
rdd = f(sc, *a, **kw)
return rdd.map(lambda vec: vec.toArray())
return func
class RandomRDDs(object):
"""
Generator methods for creating RDDs comprised of i.i.d samples from
some distribution.
.. versionadded:: 1.1.0
"""
@staticmethod
def uniformRDD(sc, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the
uniform distribution U(0.0, 1.0).
To transform the distribution in the generated RDD from U(0.0, 1.0)
to U(a, b), use
``RandomRDDs.uniformRDD(sc, n, p, seed).map(lambda v: a + (b - a) * v)``
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ `U(0.0, 1.0)`.
Examples
--------
>>> x = RandomRDDs.uniformRDD(sc, 100).collect()
>>> len(x)
100
>>> max(x) <= 1.0 and min(x) >= 0.0
True
>>> RandomRDDs.uniformRDD(sc, 100, 4).getNumPartitions()
4
>>> parts = RandomRDDs.uniformRDD(sc, 100, seed=4).getNumPartitions()
>>> parts == sc.defaultParallelism
True
"""
return callMLlibFunc("uniformRDD", sc._jsc, size, numPartitions, seed)
@staticmethod
def normalRDD(sc, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the standard normal
distribution.
To transform the distribution in the generated RDD from standard normal
to some other normal N(mean, sigma^2), use
        ``RandomRDDs.normalRDD(sc, n, p, seed).map(lambda v: mean + sigma * v)``
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ N(0.0, 1.0).
Examples
--------
>>> x = RandomRDDs.normalRDD(sc, 1000, seed=1)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - 0.0) < 0.1
True
>>> abs(stats.stdev() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalRDD", sc._jsc, size, numPartitions, seed)
@staticmethod
def logNormalRDD(sc, mean, std, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the log normal
distribution with the input mean and standard distribution.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
mean : float
mean for the log Normal distribution
std : float
std for the log Normal distribution
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
        :py:class:`pyspark.RDD`
            RDD of float comprised of i.i.d. samples ~ log N(mean, std).
Examples
--------
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> x = RandomRDDs.logNormalRDD(sc, mean, std, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - expStd) < 0.5
True
"""
return callMLlibFunc("logNormalRDD", sc._jsc, float(mean), float(std),
size, numPartitions, seed)
@staticmethod
def poissonRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Poisson
distribution with the input mean.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or lambda, for the Poisson distribution.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Pois(mean).
Examples
--------
>>> mean = 100.0
>>> x = RandomRDDs.poissonRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("poissonRDD", sc._jsc, float(mean), size, numPartitions, seed)
@staticmethod
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or 1 / lambda, for the Exponential distribution.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Exp(mean).
Examples
--------
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed)
@staticmethod
def gammaRDD(sc, shape, scale, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Gamma
distribution with the input shape and scale.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
shape : float
shape (> 0) parameter for the Gamma distribution
scale : float
scale (> 0) parameter for the Gamma distribution
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Gamma(shape, scale).
Examples
--------
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> x = RandomRDDs.gammaRDD(sc, shape, scale, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> abs(stats.stdev() - expStd) < 0.5
True
"""
return callMLlibFunc("gammaRDD", sc._jsc, float(shape),
float(scale), size, numPartitions, seed)
@staticmethod
@toArray
def uniformVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the uniform distribution U(0.0, 1.0).
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD.
seed : int, optional
Seed for the RNG that generates the seed for the generator in each partition.
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`.
Examples
--------
>>> import numpy as np
>>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect())
>>> mat.shape
(10, 10)
>>> mat.max() <= 1.0 and mat.min() >= 0.0
True
>>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions()
4
"""
return callMLlibFunc("uniformVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def normalVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the standard normal distribution.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ `N(0.0, 1.0)`.
Examples
--------
>>> import numpy as np
>>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - 0.0) < 0.1
True
>>> abs(mat.std() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def logNormalVectorRDD(sc, mean, std, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the log normal distribution.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean of the log normal distribution
std : float
Standard Deviation of the log normal distribution
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ log `N(mean, std)`.
Examples
--------
>>> import numpy as np
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> m = RandomRDDs.logNormalVectorRDD(sc, mean, std, 100, 100, seed=1).collect()
>>> mat = np.matrix(m)
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
"""
return callMLlibFunc("logNormalVectorRDD", sc._jsc, float(mean), float(std),
numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def poissonVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Poisson distribution with the input mean.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or lambda, for the Poisson distribution.
        numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`)
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Pois(mean).
Examples
--------
>>> import numpy as np
>>> mean = 100.0
>>> rdd = RandomRDDs.poissonVectorRDD(sc, mean, 100, 100, seed=1)
>>> mat = np.mat(rdd.collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(mat.std() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("poissonVectorRDD", sc._jsc, float(mean), numRows, numCols,
numPartitions, seed)
@staticmethod
@toArray
def exponentialVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Exponential distribution with the input mean.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or 1 / lambda, for the Exponential distribution.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`)
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Exp(mean).
Examples
--------
>>> import numpy as np
>>> mean = 0.5
>>> rdd = RandomRDDs.exponentialVectorRDD(sc, mean, 100, 100, seed=1)
>>> mat = np.mat(rdd.collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(mat.std() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialVectorRDD", sc._jsc, float(mean), numRows, numCols,
numPartitions, seed)
@staticmethod
@toArray
def gammaVectorRDD(sc, shape, scale, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Gamma distribution.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
shape : float
Shape (> 0) of the Gamma distribution
scale : float
Scale (> 0) of the Gamma distribution
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
        seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).
Examples
--------
>>> import numpy as np
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
"""
return callMLlibFunc("gammaVectorRDD", sc._jsc, float(shape), float(scale),
numRows, numCols, numPartitions, seed)
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("mllib.random tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 5,471,673,634,723,734,000 | 31.582638 | 100 | 0.550699 | false |
toolforger/sympy | sympy/plotting/pygletplot/tests/test_plotting.py | 109 | 2653 | from sympy.external.importtools import import_module
disabled = False
# if pyglet.gl fails to import, e.g. opengl is missing, we disable the tests
pyglet_gl = import_module("pyglet.gl", catch=(OSError,))
pyglet_window = import_module("pyglet.window", catch=(OSError,))
if not pyglet_gl or not pyglet_window:
disabled = True
from sympy import symbols, sin, cos, log
x, y, z = symbols('x, y, z')
def test_import():
from sympy.plotting.pygletplot import PygletPlot
def test_plot_2d():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(x, [x, -5, 5, 4], visible=False)
p.wait_for_calculations()
def test_plot_2d_discontinuous():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(1/x, [x, -1, 1, 2], visible=False)
p.wait_for_calculations()
def test_plot_3d():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(x*y, [x, -5, 5, 5], [y, -5, 5, 5], visible=False)
p.wait_for_calculations()
def test_plot_3d_discontinuous():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(1/x, [x, -3, 3, 6], [y, -1, 1, 1], visible=False)
p.wait_for_calculations()
def test_plot_2d_polar():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(1/x, [x, -1, 1, 4], 'mode=polar', visible=False)
p.wait_for_calculations()
def test_plot_3d_cylinder():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(
1/y, [x, 0, 6.282, 4], [y, -1, 1, 4], 'mode=polar;style=solid',
visible=False)
p.wait_for_calculations()
def test_plot_3d_spherical():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(
1, [x, 0, 6.282, 4], [y, 0, 3.141,
4], 'mode=spherical;style=wireframe',
visible=False)
p.wait_for_calculations()
def test_plot_2d_parametric():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(sin(x), cos(x), [x, 0, 6.282, 4], visible=False)
p.wait_for_calculations()
def test_plot_3d_parametric():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(sin(x), cos(x), x/5.0, [x, 0, 6.282, 4], visible=False)
p.wait_for_calculations()
def _test_plot_log():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(log(x), [x, 0, 6.282, 4], 'mode=polar', visible=False)
p.wait_for_calculations()
def test_plot_integral():
# Make sure it doesn't treat x as an independent variable
from sympy.plotting.pygletplot import PygletPlot
from sympy import Integral
p = PygletPlot(Integral(z*x, (x, 1, z), (z, 1, y)), visible=False)
p.wait_for_calculations()
| bsd-3-clause | 240,842,127,289,674,300 | 28.477778 | 76 | 0.664908 | false |
dvliman/jaikuengine | .google_appengine/lib/django-1.5/tests/regressiontests/syndication/tests.py | 47 | 13086 | from __future__ import absolute_import, unicode_literals
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from .models import Entry
class FeedTestCase(TestCase):
fixtures = ['feeddata.json']
def assertChildNodes(self, elem, expected):
actual = set([n.nodeName for n in elem.childNodes])
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
urls = 'regressiontests.syndication.urls'
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
last_build_date = rfc2822_date(d.replace(tzinfo=ltz))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).date
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': '[email protected] (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'category'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, 'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
        # Naive datetimes passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
self.assertEqual(updated[-6:], '+00:42')
def test_feed_last_modified_time(self):
response = self.client.get('/syndication/naive-dates/')
self.assertEqual(response['Last-Modified'], 'Thu, 03 Jan 2008 19:30:00 GMT')
# No last-modified when feed has no item_pubdate
response = self.client.get('/syndication/no_pubdate/')
self.assertFalse(response.has_header('Last-Modified'))
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
        Test that an ImproperlyConfigured is raised if no link could be found
for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:[email protected]'),
'mailto:[email protected]'
)
self.assertEqual(
views.add_domain('example.com', '//example.com/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
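# Illustrative sketch, not Django's actual implementation, of the add_domain()
# behaviour exercised above: fully-qualified http(s) and mailto: URLs pass
# through unchanged, protocol-relative URLs gain a scheme, and bare paths gain
# both scheme and domain.
def _sketch_add_domain(domain, url, secure=False):
    protocol = 'https' if secure else 'http'
    if url.startswith('//'):
        # Network-path reference: keep the authority, add the scheme.
        return '%s:%s' % (protocol, url)
    elif not url.startswith(('http://', 'https://', 'mailto:')):
        return '%s://%s%s' % (protocol, domain, url)
    return url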
| apache-2.0 | 6,104,464,081,741,133,000 | 40.021944 | 151 | 0.60217 | false |
AntonKhorev/BudgetSpb | main.py | 1 | 1072 | #!/usr/bin/env python3
from linker import Linker
import htmlPage
import content.index,content.db,content.fincom
# TODO put into config
spbBudgetXlsPath='../spb-budget-xls'
if __name__=='__main__':
linker=Linker('filelists',{
'csv':['csv'],
'xls':['xls'],
'db':['zip','sql','xlsx'],
})
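	# The page titles below are in Russian; in English they read:
	#   index.html  - "Saint Petersburg budget data"
	#   xls.html    - "Departmental structure of Saint Petersburg budget expenditures in csv and xls"
	#   db.html     - "Databases and tables of Saint Petersburg budget expenditures from various sources"
	#   fincom.html - "What can be found on the Finance Committee website"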
htmlPage.HtmlPage('index.html','Данные бюджета Санкт-Петербурга',content.index.content,linker).write('output/index.html')
htmlPage.HtmlPage('xls.html','Ведомственная структура расходов бюджета Санкт-Петербурга в csv и xls',htmlPage.importContent(spbBudgetXlsPath+'/index.html'),linker).write('output/xls.html')
htmlPage.HtmlPage('db.html','БД и таблицы расходов бюджета Санкт-Петербурга из разных источников',content.db.content,linker).write('output/db.html')
htmlPage.HtmlPage('fincom.html','Что можно найти на сайте Комитета финансов',content.fincom.content,linker).write('output/fincom.html')
| bsd-2-clause | 6,578,248,339,887,820,000 | 46.157895 | 189 | 0.748884 | false |
postla/e2-gui | lib/python/Plugins/SystemPlugins/WirelessAccessPoint/plugin.py | 6 | 26631 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen, ConfigList
from Components.config import config, ConfigSubsection, getConfigListEntry, ConfigSelection, ConfigIP, ConfigInteger
from Components.config import ConfigText, ConfigYesNo, NoSave, ConfigPassword, ConfigNothing, ConfigSequence
from Components.ActionMap import ActionMap
from Screens.MessageBox import MessageBox
from Components.Sources.StaticText import StaticText
from Plugins.Plugin import PluginDescriptor
from Tools.Directories import fileExists
from math import pow as math_pow
from Components.Network import iNetwork
from Components.PluginComponent import plugins
from Components.Console import Console
from os import path as os_path, system as os_system, listdir
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from enigma import eTimer
import wirelessap
debug_msg_on = False
def printDebugMsg(msg):
global debug_msg_on
if debug_msg_on:
print "[Wireless Access Point] ", msg
class fixedValue:
def __init__(self, value = ""):
self.value = value
apModeConfig = ConfigSubsection()
apModeConfig.useap = ConfigYesNo(default = False)
apModeConfig.setupmode = ConfigSelection(default = "simple", choices = [ ("simple", "Simple"), ("advanced", "Advanced") ] )
#apModeConfig.wirelessdevice = fixedValue(value = "")  # presumably created at runtime in checkWirelessDevices()
apModeConfig.branch = fixedValue(value = "br0")
apModeConfig.driver = fixedValue(value = "nl80211")
apModeConfig.wirelessmode = ConfigSelection(default = "g", choices = [ ("b", "802.11b"), ("a", "802.11a"), ("g", "802.11g") ] )
apModeConfig.channel = ConfigInteger(default = 1, limits = (1,13) )
apModeConfig.ssid = ConfigText(default = "Input SSID", visible_width = 50, fixed_size = False)
apModeConfig.beacon = ConfigInteger(default = 100, limits = (15,65535))
apModeConfig.rts_threshold = ConfigInteger(default = 2347, limits = (0,2347) )
apModeConfig.fragm_threshold = ConfigInteger(default = 2346, limits = (256,2346) )
apModeConfig.preamble = ConfigSelection(default = "0", choices = [ ("0", "Long"), ("1", "Short") ] )
apModeConfig.ignore_broadcast_ssid = ConfigSelection(default = "0", choices = [ ("0", _("disabled")), ("1", _("enabled")) ])
apModeConfig.encrypt = ConfigYesNo(default = False)
apModeConfig.method = ConfigSelection(default = "0", choices = [
("0", _("WEP")), ("1", _("WPA")), ("2", _("WPA2")),("3", _("WPA/WPA2"))])
apModeConfig.wep = ConfigYesNo(default = False)
#apModeConfig.wep_default_key = ConfigSelection(default = "0", choices = [ ("0", "0"), ("1", "1"), ("2", "2"), ("3", "3") ] )
apModeConfig.wep_default_key = fixedValue(value = "0")
apModeConfig.wepType = ConfigSelection(default = "64", choices = [
("64", _("Enable 64 bit (Input 10 hex keys)")), ("128", _("Enable 128 bit (Input 26 hex keys)"))])
apModeConfig.wep_key0 = ConfigPassword(default = "", visible_width = 50, fixed_size = False)
apModeConfig.wpa = ConfigSelection(default = "0", choices = [
("0", _("not set")), ("1", _("WPA")), ("2", _("WPA2")),("3", _("WPA/WPA2"))])
apModeConfig.wpa_passphrase = ConfigPassword(default = "", visible_width = 50, fixed_size = False)
apModeConfig.wpagrouprekey = ConfigInteger(default = 600, limits = (0,3600))
apModeConfig.wpa_key_mgmt = fixedValue(value = "WPA-PSK")
apModeConfig.wpa_pairwise = fixedValue(value = "TKIP CCMP")
apModeConfig.rsn_pairwise = fixedValue(value = "CCMP")
apModeConfig.usedhcp = ConfigYesNo(default=True)
apModeConfig.address = ConfigIP(default = [0,0,0,0])
apModeConfig.netmask = ConfigIP(default = [255,0,0,0])
apModeConfig.gateway = ConfigIP(default = [0,0,0,0])
class WirelessAccessPoint(Screen,ConfigListScreen):
skin = """
<screen position="center,center" size="590,450" title="Wireless Access Point" >
<ePixmap pixmap="skin_default/buttons/red.png" position="20,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="160,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="300,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="440,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="20,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="160,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="300,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="440,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#18188b" transparent="1" />
<widget name="config" zPosition="2" position="10,70" size="580,270" scrollbarMode="showOnDemand" transparent="1" />
<widget source="current_settings" render="Label" position="10,340" size="570,20" font="Regular;19" halign="center" valign="center" transparent="1" />
<widget source="IPAddress_text" render="Label" position="130,370" size="190,21" font="Regular;19" transparent="1" />
<widget source="Netmask_text" render="Label" position="130,395" size="190,21" font="Regular;19" transparent="1" />
<widget source="Gateway_text" render="Label" position="130,420" size="190,21" font="Regular;19" transparent="1" />
<widget source="IPAddress" render="Label" position="340,370" size="240,21" font="Regular;19" transparent="1" />
<widget source="Netmask" render="Label" position="340,395" size="240,21" font="Regular;19" transparent="1" />
<widget source="Gateway" render="Label" position="340,420" size="240,21" font="Regular;19" transparent="1" />
</screen>"""
def __init__(self,session):
Screen.__init__(self,session)
self.session = session
self["shortcuts"] = ActionMap(["ShortcutActions", "SetupActions" ],
{
"ok": self.doConfigMsg,
"cancel": self.keyCancel,
"red": self.keyCancel,
"green": self.doConfigMsg,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list,session = self.session)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Ok"))
self["key_yellow"] = StaticText(_(" "))
self["key_blue"] = StaticText(_(" "))
self["current_settings"] = StaticText(_("Current settings (interface : br0)"))
self["IPAddress_text"] = StaticText(_("IP Address"))
self["Netmask_text"] = StaticText(_("Netmask"))
self["Gateway_text"] = StaticText(_("Gateway"))
self["IPAddress"] = StaticText(_("N/A"))
self["Netmask"] = StaticText(_("N/A"))
self["Gateway"] = StaticText(_("N/A"))
self.wirelessAP = wirelessap.wirelessAP()
self.checkRunHostapd()
self.checkWirelessDevices()
self.makeConfigList()
self.loadInterfacesConfig()
self.loadHostapConfig()
self.setupCurrentEncryption()
self.createConfigEntry()
self.createConfig()
self.onClose.append(self.__onClose)
self.onLayoutFinish.append(self.checkwlanDeviceList)
self.onLayoutFinish.append(self.currentNetworkSettings)
self.checkwlanDeviceListTimer = eTimer()
self.checkwlanDeviceListTimer.callback.append(self.WirelessDeviceNotDetectedMsg)
def checkwlanDeviceList(self):
if len(self.wlanDeviceList) == 0:
self.checkwlanDeviceListTimer.start(100,True)
def WirelessDeviceNotDetectedMsg(self):
self.session.openWithCallback(self.close ,MessageBox, _("Wireless Lan Device is not detected."), MessageBox.TYPE_ERROR)
def currentNetworkSettings(self):
self["IPAddress"].setText(self.formatAddr(iNetwork.getAdapterAttribute("br0", "ip")))
self["Netmask"].setText(self.formatAddr(iNetwork.getAdapterAttribute("br0", "netmask")))
self["Gateway"].setText(self.formatAddr(iNetwork.getAdapterAttribute("br0", "gateway")))
def formatAddr(self, address = [0,0,0,0]):
if address is None:
return "N/A"
return "%d:%d:%d:%d"%(address[0],address[1],address[2],address[3])
def checkRunHostapd(self):
global apModeConfig
if fileExists("/var/run/hostapd", 0):
apModeConfig.useap.value = True
def makeConfigList(self):
global apModeConfig
self.hostapdConfigList = {}
self.hostapdConfigList["interface"] = apModeConfig.wirelessdevice
self.hostapdConfigList["bridge"] = apModeConfig.branch # "br0"
self.hostapdConfigList["driver"] = apModeConfig.driver # "nl80211"
self.hostapdConfigList["hw_mode"] = apModeConfig.wirelessmode
self.hostapdConfigList["channel"] = apModeConfig.channel
self.hostapdConfigList["ssid"] = apModeConfig.ssid
self.hostapdConfigList["beacon_int"] = apModeConfig.beacon
self.hostapdConfigList["rts_threshold"] = apModeConfig.rts_threshold
self.hostapdConfigList["fragm_threshold"] = apModeConfig.fragm_threshold
self.hostapdConfigList["preamble"] = apModeConfig.preamble
# self.hostapdConfigList["macaddr_acl"] = "" # fix to add Access Control List Editer
# self.hostapdConfigList["accept_mac_file"] = "" # fix to add Access Control List Editer
# self.hostapdConfigList["deny_mac_file"] = "" # fix to add Access Control List Editer
self.hostapdConfigList["ignore_broadcast_ssid"] = apModeConfig.ignore_broadcast_ssid
# self.hostapdConfigList["wmm_enabled"] = ""
# self.hostapdConfigList["ieee80211n"] = ""
# self.hostapdConfigList["ht_capab"] = ""
self.hostapdConfigList["wep_default_key"] = apModeConfig.wep_default_key
self.hostapdConfigList["wep_key0"] = apModeConfig.wep_key0
self.hostapdConfigList["wpa"] = apModeConfig.wpa
self.hostapdConfigList["wpa_passphrase"] = apModeConfig.wpa_passphrase
self.hostapdConfigList["wpa_key_mgmt"] = apModeConfig.wpa_key_mgmt # "WPA-PSK"
self.hostapdConfigList["wpa_pairwise"] = apModeConfig.wpa_pairwise # "TKIP CCMP"
self.hostapdConfigList["rsn_pairwise"] = apModeConfig.rsn_pairwise # "CCMP"
self.hostapdConfigList["wpa_group_rekey"] = apModeConfig.wpagrouprekey
def loadInterfacesConfig(self):
global apModeConfig
try:
fp = file('/etc/network/interfaces', 'r')
datas = fp.readlines()
fp.close()
except:
printDebugMsg("interfaces - file open failed")
# check br0 configuration
current_iface = ""
ifaceConf = {}
try:
for line in datas:
split = line.strip().split(' ')
if (split[0] == "iface"):
current_iface = split[1]
if (current_iface == "br0") and (len(split) == 4 and split[3] == "dhcp"):
apModeConfig.usedhcp.value = True
else:
apModeConfig.usedhcp.value = False
if (current_iface == "br0" or current_iface == "eth0"):
if (split[0] == "address"):
apModeConfig.address.value = map(int, split[1].split('.'))
if (split[0] == "netmask"):
apModeConfig.netmask.value = map(int, split[1].split('.'))
if (split[0] == "gateway"):
apModeConfig.gateway.value = map(int, split[1].split('.'))
except:
printDebugMsg("configuration parsing error! - /etc/network/interfaces")
def loadHostapConfig(self):
hostapdConf = { }
ret = self.wirelessAP.loadHostapConfig(hostapdConf)
if ret != 0:
printDebugMsg("configuration opening failed!!")
return
for (key,value) in hostapdConf.items():
if key == "config.wep":
apModeConfig.wep.value = int(value)
elif key in ["channel", "beacon_int", "rts_threshold", "fragm_threshold", "wpa_group_rekey"]:
self.hostapdConfigList[key].value = int(value)
elif key in self.hostapdConfigList.keys():
self.hostapdConfigList[key].value = value
if key == "channel" and int(value) not in range(14):
self.hostapdConfigList[key].value = 1
# for key in self.hostapdConfigList.keys():
# printDebugMsg("[cofigList] key : %s, value : %s"%(key, str(self.hostapdConfigList[key].value)) )
def setupCurrentEncryption(self):
		if apModeConfig.wpa.value != "0" and apModeConfig.wpa_passphrase.value: # (1,WPA), (2,WPA2), (3,WPA/WPA2)
apModeConfig.encrypt.value = True
apModeConfig.method.value = apModeConfig.wpa.value
elif apModeConfig.wep.value and apModeConfig.wep_key0.value:
apModeConfig.encrypt.value = True
apModeConfig.method.value = "0"
if len(apModeConfig.wep_key0.value) > 10:
apModeConfig.wepType.value = "128"
else:
apModeConfig.encrypt.value = False
def createConfigEntry(self):
global apModeConfig
#hostap settings
self.useApEntry = getConfigListEntry(_("Use AP Mode"), apModeConfig.useap)
self.setupModeEntry = getConfigListEntry(_("Setup Mode"), apModeConfig.setupmode)
self.wirelessDeviceEntry = getConfigListEntry(_("AP Device"), apModeConfig.wirelessdevice)
self.wirelessModeEntry = getConfigListEntry(_("AP Mode"), apModeConfig.wirelessmode)
self.channelEntry = getConfigListEntry(_("Channel (1~13)"), apModeConfig.channel)
self.ssidEntry = getConfigListEntry(_("SSID (1~32 Characters)"), apModeConfig.ssid)
self.beaconEntry = getConfigListEntry(_("Beacon (15~65535)"), apModeConfig.beacon)
self.rtsThresholdEntry = getConfigListEntry(_("RTS Threshold (0~2347)"), apModeConfig.rts_threshold)
self.fragmThresholdEntry = getConfigListEntry(_("FRAGM Threshold (256~2346)"), apModeConfig.fragm_threshold)
self.prambleEntry = getConfigListEntry(_("Preamble"), apModeConfig.preamble)
self.ignoreBroadcastSsid = getConfigListEntry(_("Ignore Broadcast SSID"), apModeConfig.ignore_broadcast_ssid)
# hostap encryption
self.encryptEntry = getConfigListEntry(_("Encrypt"), apModeConfig.encrypt)
self.methodEntry = getConfigListEntry(_("Method"), apModeConfig.method)
self.wepKeyTypeEntry = getConfigListEntry(_("KeyType"), apModeConfig.wepType)
self.wepKey0Entry = getConfigListEntry(_("WEP Key (HEX)"), apModeConfig.wep_key0)
self.wpaKeyEntry = getConfigListEntry(_("KEY (8~63 Characters)"), apModeConfig.wpa_passphrase)
self.groupRekeyEntry = getConfigListEntry(_("Group Rekey Interval"), apModeConfig.wpagrouprekey)
# interface settings
self.usedhcpEntry = getConfigListEntry(_("Use DHCP"), apModeConfig.usedhcp)
self.ipEntry = getConfigListEntry(_("IP Address"), apModeConfig.address)
self.netmaskEntry = getConfigListEntry(_("NetMask"), apModeConfig.netmask)
self.gatewayEntry = getConfigListEntry(_("Gateway"), apModeConfig.gateway)
def createConfig(self):
global apModeConfig
self.configList = []
self.configList.append( self.useApEntry )
if apModeConfig.useap.value is True:
self.configList.append( self.setupModeEntry )
self.configList.append( self.wirelessDeviceEntry )
self.configList.append( self.wirelessModeEntry )
self.configList.append( self.channelEntry )
self.configList.append( self.ssidEntry )
			if apModeConfig.setupmode.value == "advanced":
self.configList.append( self.beaconEntry )
self.configList.append( self.rtsThresholdEntry )
self.configList.append( self.fragmThresholdEntry )
self.configList.append( self.prambleEntry )
self.configList.append( self.ignoreBroadcastSsid )
self.configList.append( self.encryptEntry )
if apModeConfig.encrypt.value is True:
self.configList.append( self.methodEntry )
				if apModeConfig.method.value == "0": # wep
self.configList.append( self.wepKeyTypeEntry )
self.configList.append( self.wepKey0Entry )
else:
self.configList.append( self.wpaKeyEntry )
					if apModeConfig.setupmode.value == "advanced":
self.configList.append( self.groupRekeyEntry )
## set network interfaces
self.configList.append( self.usedhcpEntry )
if apModeConfig.usedhcp.value is False:
self.configList.append( self.ipEntry )
self.configList.append( self.netmaskEntry )
self.configList.append( self.gatewayEntry )
self["config"].list = self.configList
self["config"].l.setList(self.configList)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.newConfig()
def newConfig(self):
if self["config"].getCurrent() in [ self.encryptEntry, self.methodEntry, self.useApEntry, self.usedhcpEntry, self.setupModeEntry]:
self.createConfig()
def doConfigMsg(self):
try:
			self.session.openWithCallback(self.doConfig, MessageBox, _("Are you sure you want to set up your AP?\n\n"))
except:
printDebugMsg("doConfig failed")
def doConfig(self, ret = False):
global apModeConfig
if ret is not True:
return
if apModeConfig.useap.value is True and apModeConfig.encrypt.value is True:
if not self.checkEncrypKey():
return
if not self.checkConfig():
return
		self.configStartMsg = self.session.openWithCallback(self.ConfigFinishedMsg, MessageBox, _("Please wait for AP configuration...\n"), type = MessageBox.TYPE_INFO, enable_input = False)
if apModeConfig.useap.value is True:
self.networkRestart( nextFunc = self.makeConf )
else:
self.networkRestart( nextFunc = self.removeConf )
def checkEncrypKey(self):
if apModeConfig.method.value == "0":
if self.checkWep(apModeConfig.wep_key0.value) is False:
self.session.open(MessageBox, _("Invalid WEP key\n\n"), type = MessageBox.TYPE_ERROR, timeout = 10 )
else:
return True
else:
if not len(apModeConfig.wpa_passphrase.value) in range(8,65):
self.session.open(MessageBox, _("Invalid WPA key\n\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
else:
return True
return False
def checkWep(self, key):
length = len(key)
if length == 0:
return False
elif apModeConfig.wepType.value == "64" and length == 10:
return True
elif apModeConfig.wepType.value == "128" and length == 26:
return True
else:
return False
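	# For reference: a 64-bit WEP key is 10 hex digits; a 128-bit key is 26 hex digits.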
def checkConfig(self):
# ssid Check
		if len(apModeConfig.ssid.value) == 0 or len(apModeConfig.ssid.value) > 32:
			self.session.open(MessageBox, _("Invalid SSID\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
			return False
		elif apModeConfig.channel.value not in range(1,14):
			self.session.open(MessageBox, _("Invalid channel\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
			return False
		elif apModeConfig.beacon.value < 15 or apModeConfig.beacon.value > 65535:
			self.session.open(MessageBox, _("Invalid beacon\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
			return False
		elif apModeConfig.rts_threshold.value < 0 or apModeConfig.rts_threshold.value > 2347:
			self.session.open(MessageBox, _("Invalid RTS Threshold\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
			return False
		elif apModeConfig.fragm_threshold.value < 256 or apModeConfig.fragm_threshold.value > 2346:
			self.session.open(MessageBox, _("Invalid Fragm Threshold\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
			return False
		elif apModeConfig.wpagrouprekey.value < 0 or apModeConfig.wpagrouprekey.value > 3600:
			self.session.open(MessageBox, _("Invalid wpagrouprekey\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
			return False
		return True
def networkRestart(self, nextFunc = None ):
self.networkRestart_stop( nextFunc = nextFunc )
def networkRestart_stop(self, nextFunc = None ):
printDebugMsg("networkRestart_stop")
self.msgPlugins(False)
self.commands = [] # stop current network
self.networkRestartConsole = Console()
self.commands.append("/etc/init.d/avahi-daemon stop")
for iface in iNetwork.getAdapterList():
if iface != 'eth0' or not iNetwork.onRemoteRootFS():
self.commands.append("ifdown " + iface)
self.commands.append("ip addr flush dev " + iface)
self.commands.append("/etc/init.d/hostapd stop")
self.commands.append("/etc/init.d/networking stop")
self.commands.append("killall -9 udhcpc")
self.commands.append("rm /var/run/udhcpc*")
self.networkRestartConsole.eBatch(self.commands, nextFunc, debug = True)
def makeConf(self,extra_args):
printDebugMsg("makeConf")
self.writeNetworkInterfaces()
result = self.writeHostapdConfig()
if result == -1:
self.configStartMsg.close(False)
return
self.setIpForward(1)
self.networkRestart_start()
def removeConf(self,extra_args):
printDebugMsg("removeConf")
if fileExists("/etc/hostapd.conf", 0):
os_system("mv /etc/hostapd.conf /etc/hostapd.conf.linuxap.back")
fp = file("/etc/network/interfaces", 'w')
fp.write("# automatically generated by AP Setup Plugin\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
# eth0 setup
fp.write("auto eth0\n")
if apModeConfig.usedhcp.value is True:
fp.write("iface eth0 inet dhcp\n")
else:
fp.write("iface eth0 inet static\n")
fp.write(" address %d.%d.%d.%d\n" % tuple(apModeConfig.address.value) )
fp.write(" netmask %d.%d.%d.%d\n" % tuple(apModeConfig.netmask.value) )
fp.write(" gateway %d.%d.%d.%d\n" % tuple(apModeConfig.gateway.value) )
fp.close()
self.setIpForward(0)
self.networkRestart_start()
def networkRestart_start(self):
printDebugMsg("networkRestart_start")
self.restartConsole = Console()
self.commands = []
self.commands.append("/etc/init.d/networking start")
self.commands.append("/etc/init.d/avahi-daemon start")
self.commands.append("/etc/init.d/hostapd start")
self.restartConsole.eBatch(self.commands, self.networkRestartFinished, debug=True)
def networkRestartFinished(self, data):
printDebugMsg("networkRestartFinished")
iNetwork.removeAdapterAttribute('br0',"ip")
iNetwork.removeAdapterAttribute('br0',"netmask")
iNetwork.removeAdapterAttribute('br0',"gateway")
iNetwork.getInterfaces(self.getInterfacesDataAvail)
def getInterfacesDataAvail(self, data):
if data is True and self.configStartMsg is not None:
self.configStartMsg.close(True)
def ConfigFinishedMsg(self, ret):
if ret is True:
			self.session.openWithCallback(self.ConfigFinishedMsgCallback, MessageBox, _("Configuration of your AP is finished"), type = MessageBox.TYPE_INFO, timeout = 5, default = False)
else:
self.session.openWithCallback(self.close ,MessageBox, _("Invalid model or Image."), MessageBox.TYPE_ERROR)
def ConfigFinishedMsgCallback(self,data):
self.close()
def msgPlugins(self,reason = False):
for p in plugins.getPlugins(PluginDescriptor.WHERE_NETWORKCONFIG_READ):
p(reason=reason)
def writeNetworkInterfaces(self):
global apModeConfig
fp = file("/etc/network/interfaces", 'w')
fp.write("# automatically generated by AP Setup Plugin\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
# eth0 setup
fp.write("auto eth0\n")
fp.write("iface eth0 inet manual\n")
fp.write(" up ip link set $IFACE up\n")
fp.write(" down ip link set $IFACE down\n\n")
# Wireless device setup
fp.write("auto %s\n" % apModeConfig.wirelessdevice.value)
fp.write("iface %s inet manual\n" % apModeConfig.wirelessdevice.value)
fp.write(" up ip link set $IFACE up\n")
fp.write(" down ip link set $IFACE down\n")
# branch setup
fp.write("auto br0\n")
if apModeConfig.usedhcp.value is True:
fp.write("iface br0 inet dhcp\n")
else:
fp.write("iface br0 inet static\n")
fp.write(" address %d.%d.%d.%d\n" % tuple(apModeConfig.address.value) )
fp.write(" netmask %d.%d.%d.%d\n" % tuple(apModeConfig.netmask.value) )
fp.write(" gateway %d.%d.%d.%d\n" % tuple(apModeConfig.gateway.value) )
fp.write(" pre-up brctl addbr br0\n")
fp.write(" pre-up brctl addif br0 eth0\n")
#		fp.write("	pre-up brctl addif br0 wlan0\n") # handled by hostapd
fp.write(" post-down brctl delif br0 eth0\n")
#		fp.write("	post-down brctl delif br0 wlan0\n") # handled by hostapd
fp.write(" post-down brctl delbr br0\n\n")
fp.write("\n")
fp.close()
def writeHostapdConfig(self): #c++
global apModeConfig
configDict = {}
for key in self.hostapdConfigList.keys():
configDict[key] = str(self.hostapdConfigList[key].value)
configDict["config.encrypt"] = str(int(apModeConfig.encrypt.value))
configDict["config.method"] = apModeConfig.method.value
ret = self.wirelessAP.writeHostapdConfig(configDict)
if(ret != 0):
return -1
return 0
def setIpForward(self, setValue = 0):
ipForwardFilePath = "/proc/sys/net/ipv4/ip_forward"
if not fileExists(ipForwardFilePath):
return -1
printDebugMsg("set %s to %d" % (ipForwardFilePath, setValue))
f = open(ipForwardFilePath, "w")
f.write("%d" % setValue)
f.close()
sysctlPath = "/etc/sysctl.conf"
sysctlLines = []
if fileExists(sysctlPath):
fp = file(sysctlPath, "r")
sysctlLines = fp.readlines()
fp.close()
sysctlList = {}
for line in sysctlLines:
line = line.strip()
(key,value) = line.split("=")
key=key.strip()
value=value.strip()
sysctlList[key] = value
sysctlList["net.ipv4.ip_forward"] = str(setValue)
fp = file(sysctlPath, "w")
for (key,value) in sysctlList.items():
fp.write("%s=%s\n"%(key,value))
fp.close()
return 0
def checkWirelessDevices(self):
global apModeConfig
self.wlanDeviceList = []
wlanIfaces =[]
for x in iNetwork.getInstalledAdapters():
if x.startswith('eth') or x.startswith('br') or x.startswith('mon'):
continue
wlanIfaces.append(x)
description=self.getAdapterDescription(x)
if description == "Unknown network adapter":
self.wlanDeviceList.append((x, x))
else:
self.wlanDeviceList.append(( x, description + " (%s)"%x ))
apModeConfig.wirelessdevice = ConfigSelection( choices = self.wlanDeviceList )
def getAdapterDescription(self, iface):
classdir = "/sys/class/net/" + iface + "/device/"
driverdir = "/sys/class/net/" + iface + "/device/driver/"
if os_path.exists(classdir):
files = listdir(classdir)
if 'driver' in files:
if os_path.realpath(driverdir).endswith('rtw_usb_drv'):
return _("Realtek")+ " " + _("WLAN adapter.")
elif os_path.realpath(driverdir).endswith('ath_pci'):
return _("Atheros")+ " " + _("WLAN adapter.")
elif os_path.realpath(driverdir).endswith('zd1211b'):
return _("Zydas")+ " " + _("WLAN adapter.")
elif os_path.realpath(driverdir).endswith('rt73'):
return _("Ralink")+ " " + _("WLAN adapter.")
elif os_path.realpath(driverdir).endswith('rt73usb'):
return _("Ralink")+ " " + _("WLAN adapter.")
else:
return str(os_path.basename(os_path.realpath(driverdir))) + " " + _("WLAN adapter")
else:
return _("Unknown network adapter")
else:
return _("Unknown network adapter")
def __onClose(self):
for x in self["config"].list:
x[1].cancel()
apModeConfig.wpa.value = "0"
apModeConfig.wep.value = False
def keyCancel(self):
self.close()
def main(session, **kwargs):
session.open(WirelessAccessPoint)
def Plugins(**kwargs):
return [PluginDescriptor(name=_("Wireless Access Point"), description="Using a Wireless module as access point.", where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = True, fnc=main)]
| gpl-2.0 | 6,908,707,807,822,501,000 | 44.213922 | 210 | 0.716608 | false |
Remper/learningbyreading | src/mappings.py | 2 | 1612 | import logging as log
import os
import re
# builds a dictionary of frame names indexed by wordnet synset id
offset2bn = dict()
bn2offset = dict()
offset2wn = dict()
wn2offset = dict()
wn2bn = dict()
bn2wn = dict()
wn30wn31 = dict()
wn31wn30 = dict()
bn2dbpedia = dict()
dbpedia2bn = dict()
# the mapping is in a tabular file, e.g.:
# s00069798n Scout-n#2-n 110582611-n
with open(os.path.join(os.path.dirname(__file__), '../resources/bn35-wn31.map')) as f:
for line in f:
bn_id, wn_id, wn_offset = line.rstrip().split(' ')
        if wn_offset.endswith("-s"): wn_offset = wn_offset.replace("-s", "-a") # To use only the tag "a" for adjectives
        if wn_id.endswith("-s"): wn_id = re.sub(r"(-s)(#\d+)(-s)", "-a\\2-a", wn_id) # To use only the tag "a" for adjectives
offset2bn[wn_offset[1:]] = bn_id
bn2offset[bn_id] = wn_offset[1:]
offset2wn[wn_offset[1:]] = wn_id
wn2offset[wn_id] = wn_offset[1:]
wn2bn[wn_id] = bn_id
bn2wn[bn_id] = wn_id
# Mapping different WN versions
# 00013662-a 00013681-a
with open(os.path.join(os.path.dirname(__file__), '../resources/wn30-31')) as f:
for line in f:
wn30, wn31 = line.rstrip().split(' ')
wn30wn31[wn30] = wn31
wn31wn30[wn31] = wn30
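# Example lookups these tables support (IDs taken from the sample rows in
# the comments above; note the leading digit is stripped from offsets):
#
#   offset2bn.get("10582611-n")   # -> "s00069798n"
#   wn30wn31.get("00013662-a")    # -> "00013681-a"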
# Mapping BabelNet-DBpedia
# s00000006n Dodecanol
for i in range(4):
filename = os.path.join(os.path.dirname(__file__), '../resources/bn-dbpedia{0}'.format(i+1))
with open(filename) as f:
for line in f:
bn_id, dbpedia_id = line.rstrip().split(' ')
dbpedia2bn[dbpedia_id] = bn_id
            bn2dbpedia[bn_id] = dbpedia_id
| gpl-2.0 | 7,844,695,370,471,201,000 | 33.319149 | 122 | 0.615385 | false |
hofschroeer/gnuradio | gr-filter/examples/resampler.py | 7 | 4489 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print("Resampling from %f to %f by %f " %(fs_in, fs_out, rerate))
# Creating our own taps
taps = filter.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = analog.sig_source_c(fs_in, analog.GR_SIN_WAVE, fc, 1)
#self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = filter.pfb.arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = filter.pfb.arb_resampler_ccf(rerate)
self.snk_in = blocks.vector_sink_c()
self.snk_0 = blocks.vector_sink_c()
self.snk_1 = blocks.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pyplot.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
            noverlap=nfftsize // 4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in / 1000.0)))
sp1.set_xlim([-fs_in / 2, fs_in / 2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
            noverlap=nfftsize // 4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
            noverlap=nfftsize // 4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out / 1000.0)))
sp2.set_xlim([-fs_out / 2, fs_out / 2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0 / fs_in
Ts_out = 1.0 / fs_out
t_in = numpy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = numpy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pyplot.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in / 1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out / 1000.0)))
r = float(fs_out) / float(fs_in)
    sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 | 8,309,209,222,756,665,000 | 32.75188 | 95 | 0.622633 | false |
fusionapp/entropy | entropy/ientropy.py | 1 | 3968 | """
@copyright: 2007-2014 Quotemaster cc. See LICENSE for details.
Interface definitions for Entropy.
"""
from zope.interface import Interface, Attribute
class IContentObject(Interface):
"""
Immutable content object.
"""
hash = Attribute("""The hash function used to calculate the content digest.""")
contentDigest = Attribute("""A digest of the object content.""")
contentType = Attribute("""The MIME type describing the content of this object.""")
created = Attribute("""Creation timestamp of this object.""")
metadata = Attribute("""Object metadata.""")
def getContent():
"""
Get the data contained in this object.
@rtype: C{str}
"""
class IContentStore(Interface):
"""
Interface for storing and retrieving immutable content objects.
"""
def storeObject(content, contentType, metadata={}, created=None):
"""
Store an object.
@param content: the data to store.
@type content: C{str}
@param contentType: the MIME type of the content.
@type contentType: C{unicode}
@param metadata: a dictionary of metadata entries.
@type metadata: C{dict} of C{unicode}:C{unicode}
@param created: the creation timestamp; defaults to the current time.
@type created: L{epsilon.extime.Time} or C{None}
@returns: the object identifier.
@rtype: C{Deferred<unicode>}
"""
def getObject(objectID):
"""
Retrieve an object.
        @param objectID: the object identifier.
        @type objectID: C{unicode}
@returns: the content object.
@rtype: C{Deferred<IContentObject>}
"""
def migrateTo(destination):
"""
Initiate a migration to another content store.
All objects present in this content store at the moment the migration
is initiated MUST be replicated to the destination store before the
migration is considered complete. Objects created after the migration
is initiated MUST NOT be replicated.
NOTE: This method is optional, as some storage backends may be unable
to support enumerating all objects which is usually necessary to
implement migration.
@type destination: L{IContentStore}
@param destination: The destination store.
@rtype: L{IMigration}
@return: The migration powerup tracking the requested migration.
@raise NotImplementedError: if this implementation does not support
migration.
"""
class ISiblingStore(IContentStore):
"""
Sibling content store.
"""
class IBackendStore(IContentStore):
"""
Backend content store.
"""
class IUploadScheduler(Interface):
"""
Manager of pending uploads.
"""
def scheduleUpload(objectId, backend):
"""
Notify the scheduler that an object needs to be uploaded to a backend.
"""
class IMigrationManager(Interface):
"""
Manager for migrations from one content store to another.
"""
def migrate(source, destination):
"""
Initiate a migration between two content stores. Some content stores
may not support migration, as some storage backends cannot support
enumerating all stored objects.
@type source: L{IContentStore}
@param source: The source content store; must support migration.
@type destination: L{IContentStore}
@param destination: The destination store; does not need any special
support for migration.
@rtype: L{IMigration}
@return: The migration powerup responsible for tracking the requested
migration.
"""
class IMigration(Interface):
"""
Powerup tracking a migration in progress.
"""
def run():
"""
Run this migration.
If the migration is already running, this is a noop.
"""
| mit | -8,097,366,782,661,055,000 | 25.810811 | 87 | 0.641129 | false |
ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/scripts/apps/gcalctool/script.py | 5 | 3292 | # Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides a custom script for gcalctool."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.scripts.toolkits.gtk as gtk
import orca.messages as messages
########################################################################
# #
# The GCalcTool script class. #
# #
########################################################################
class Script(gtk.Script):
def __init__(self, app):
"""Creates a new script for the given application. Callers
should use the getScript factory method instead of calling
this constructor directly.
Arguments:
- app: the application to create a script for.
"""
gtk.Script.__init__(self, app)
self._resultsDisplay = None
self._statusLine = None
def onWindowActivated(self, event):
"""Called whenever one of gcalctool's toplevel windows is activated.
Arguments:
- event: the window activated Event
"""
if self._resultsDisplay and self._statusLine:
gtk.Script.onWindowActivated(self, event)
return
obj = event.source
role = obj.getRole()
if role != pyatspi.ROLE_FRAME:
gtk.Script.onWindowActivated(self, event)
return
isEditbar = lambda x: x and x.getRole() == pyatspi.ROLE_EDITBAR
self._resultsDisplay = pyatspi.findDescendant(obj, isEditbar)
if not self._resultsDisplay:
self.presentMessage(messages.CALCULATOR_DISPLAY_NOT_FOUND)
isStatusLine = lambda x: x and x.getRole() == pyatspi.ROLE_TEXT \
and not x.getState().contains(pyatspi.STATE_EDITABLE)
self._statusLine = pyatspi.findDescendant(obj, isStatusLine)
gtk.Script.onWindowActivated(self, event)
def onTextInserted(self, event):
"""Called whenever text is inserted into gcalctool's text display.
Arguments:
- event: the text inserted Event
"""
if self.utilities.isSameObject(event.source, self._statusLine):
self.presentMessage(self.utilities.displayedText(self._statusLine))
return
gtk.Script.onTextInserted(self, event)
| gpl-3.0 | 7,844,648,670,494,701,000 | 34.021277 | 79 | 0.596902 | false |
vponomaryov/manila | manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py | 1 | 6337 | # Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_log import log
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_base
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
@ddt.ddt
class NetAppBaseClientTestCase(test.TestCase):
def setUp(self):
super(NetAppBaseClientTestCase, self).setUp()
# Mock loggers as themselves to allow logger arg validation
mock_logger = log.getLogger('mock_logger')
self.mock_object(client_base.LOG,
'error',
mock.Mock(side_effect=mock_logger.error))
self.mock_object(client_base.LOG,
'exception',
mock.Mock(side_effect=mock_logger.error))
self.client = client_base.NetAppBaseClient(**fake.CONNECTION_INFO)
self.client.connection = mock.MagicMock()
self.connection = self.client.connection
def test_get_ontapi_version(self):
version_response = netapp_api.NaElement(fake.ONTAPI_VERSION_RESPONSE)
self.connection.invoke_successfully.return_value = version_response
major, minor = self.client.get_ontapi_version(cached=False)
self.assertEqual('1', major)
self.assertEqual('19', minor)
def test_get_ontapi_version_cached(self):
self.connection.get_api_version.return_value = (1, 20)
major, minor = self.client.get_ontapi_version()
self.assertEqual(1, self.connection.get_api_version.call_count)
self.assertEqual(1, major)
self.assertEqual(20, minor)
def test_get_system_version(self):
version_response = netapp_api.NaElement(
fake.SYSTEM_GET_VERSION_RESPONSE)
self.connection.invoke_successfully.return_value = version_response
result = self.client.get_system_version()
self.assertEqual(fake.VERSION, result['version'])
self.assertEqual(('8', '2', '1'), result['version-tuple'])
def test_init_features(self):
self.client._init_features()
self.assertSetEqual(set(), self.client.features.defined_features)
@ddt.data('tag_name', '{http://www.netapp.com/filer/admin}tag_name')
def test_strip_xml_namespace(self, element):
result = self.client._strip_xml_namespace(element)
self.assertEqual('tag_name', result)
def test_send_request(self):
element = netapp_api.NaElement('fake-api')
self.client.send_request('fake-api')
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
def test_send_request_no_tunneling(self):
element = netapp_api.NaElement('fake-api')
self.client.send_request('fake-api', enable_tunneling=False)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertFalse(self.connection.invoke_successfully.call_args[0][1])
def test_send_request_with_args(self):
element = netapp_api.NaElement('fake-api')
api_args = {'arg1': 'data1', 'arg2': 'data2'}
element.translate_struct(api_args)
self.client.send_request('fake-api', api_args=api_args)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
def test_get_licenses(self):
api_response = netapp_api.NaElement(fake.LICENSE_V2_LIST_INFO_RESPONSE)
self.mock_object(
self.client, 'send_request', mock.Mock(return_value=api_response))
response = self.client.get_licenses()
self.assertSequenceEqual(fake.LICENSES, response)
def test_get_licenses_api_error(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=netapp_api.NaApiError))
self.assertRaises(netapp_api.NaApiError, self.client.get_licenses)
self.assertEqual(1, client_base.LOG.exception.call_count)
def test_send_ems_log_message(self):
self.assertRaises(NotImplementedError,
self.client.send_ems_log_message,
{})
@ddt.ddt
class FeaturesTestCase(test.TestCase):
def setUp(self):
super(FeaturesTestCase, self).setUp()
self.features = client_base.Features()
def test_init(self):
self.assertSetEqual(set(), self.features.defined_features)
def test_add_feature_default(self):
self.features.add_feature('FEATURE_1')
self.assertTrue(self.features.FEATURE_1)
self.assertIn('FEATURE_1', self.features.defined_features)
@ddt.data(True, False)
def test_add_feature(self, value):
self.features.add_feature('FEATURE_2', value)
self.assertEqual(value, self.features.FEATURE_2)
self.assertIn('FEATURE_2', self.features.defined_features)
@ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,))
def test_add_feature_type_error(self, value):
self.assertRaises(TypeError,
self.features.add_feature,
'FEATURE_3',
value)
self.assertNotIn('FEATURE_3', self.features.defined_features)
def test_get_attr_missing(self):
self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4')
| apache-2.0 | 7,923,674,527,359,359,000 | 34.205556 | 79 | 0.650939 | false |
mcardillo55/django | django/contrib/gis/geos/prototypes/topology.py | 338 | 2145 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
from ctypes import c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_string,
)
from django.contrib.gis.geos.prototypes.geom import geos_char_p
class Topology(GEOSFuncFactory):
"For GEOS unary topology functions."
argtypes = [GEOM_PTR]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
# Topology Routines
geos_boundary = Topology('GEOSBoundary')
geos_buffer = Topology('GEOSBuffer', argtypes=[GEOM_PTR, c_double, c_int])
geos_centroid = Topology('GEOSGetCentroid')
geos_convexhull = Topology('GEOSConvexHull')
geos_difference = Topology('GEOSDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_envelope = Topology('GEOSEnvelope')
geos_intersection = Topology('GEOSIntersection', argtypes=[GEOM_PTR, GEOM_PTR])
geos_linemerge = Topology('GEOSLineMerge')
geos_pointonsurface = Topology('GEOSPointOnSurface')
geos_preservesimplify = Topology('GEOSTopologyPreserveSimplify', argtypes=[GEOM_PTR, c_double])
geos_simplify = Topology('GEOSSimplify', argtypes=[GEOM_PTR, c_double])
geos_symdifference = Topology('GEOSSymDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_union = Topology('GEOSUnion', argtypes=[GEOM_PTR, GEOM_PTR])
geos_cascaded_union = GEOSFuncFactory('GEOSUnionCascaded', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFuncFactory(
'GEOSRelate', argtypes=[GEOM_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
# Linear referencing routines
geos_project = GEOSFuncFactory(
'GEOSProject', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate = Topology('GEOSInterpolate', argtypes=[GEOM_PTR, c_double])
geos_project_normalized = GEOSFuncFactory(
'GEOSProjectNormalized', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate_normalized = Topology('GEOSInterpolateNormalized', argtypes=[GEOM_PTR, c_double])
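# Illustrative call shapes (a sketch; assumes g1 and g2 are GEOM_PTR handles
# created via the GEOS construction routines elsewhere in this package):
#
#   hull = geos_convexhull(g1)        # GEOM_PTR of the convex hull
#   both = geos_intersection(g1, g2)  # GEOM_PTR of the intersection
#   rel = geos_relate(g1, g2)         # DE-9IM intersection-matrix string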
| bsd-3-clause | 584,213,763,458,463,400 | 40.25 | 102 | 0.767832 | false |
nburn42/tensorflow | tensorflow/python/training/tensorboard_logging_test.py | 132 | 4456 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.tensorboard_logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.training import tensorboard_logging
class EventLoggingTest(test.TestCase):
def setUp(self):
self._work_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self._sw = writer.FileWriter(self._work_dir)
tensorboard_logging.set_summary_writer(self._sw)
self.addCleanup(shutil.rmtree, self._work_dir)
# Stop the clock to avoid test flakiness.
now = time.time()
time._real_time = time.time
time.time = lambda: now
# Mock out logging calls so we can verify that the right number of messages
# get logged.
self.logged_message_count = 0
self._actual_log = logging.log
def mockLog(*args, **kwargs):
self.logged_message_count += 1
self._actual_log(*args, **kwargs)
logging.log = mockLog
def tearDown(self):
time.time = time._real_time
logging.log = self._actual_log
def assertLoggedMessagesAre(self, expected_messages):
self._sw.close()
event_paths = glob.glob(os.path.join(self._work_dir, "event*"))
    # If the test runs multiple times in the same directory we can have
# more than one matching event file. We only want to read the last one.
self.assertTrue(event_paths)
event_reader = summary_iterator.summary_iterator(event_paths[-1])
# Skip over the version event.
next(event_reader)
for level, message in expected_messages:
event = next(event_reader)
self.assertEqual(event.wall_time, time.time())
self.assertEqual(event.log_message.level, level)
self.assertEqual(event.log_message.message, message)
def testBasic(self):
tensorboard_logging.set_summary_writer(self._sw)
tensorboard_logging.error("oh no!")
tensorboard_logging.error("for%s", "mat")
self.assertLoggedMessagesAre([(event_pb2.LogMessage.ERROR, "oh no!"),
(event_pb2.LogMessage.ERROR, "format")])
self.assertEqual(2, self.logged_message_count)
def testVerbosity(self):
tensorboard_logging.set_summary_writer(self._sw)
tensorboard_logging.set_verbosity(tensorboard_logging.ERROR)
tensorboard_logging.warn("warn")
tensorboard_logging.error("error")
tensorboard_logging.set_verbosity(tensorboard_logging.DEBUG)
tensorboard_logging.debug("debug")
self.assertLoggedMessagesAre([(event_pb2.LogMessage.ERROR, "error"),
(event_pb2.LogMessage.DEBUGGING, "debug")])
    # All messages should be logged because tensorboard_logging verbosity doesn't
# affect logging verbosity.
self.assertEqual(3, self.logged_message_count)
def testBadVerbosity(self):
with self.assertRaises(ValueError):
tensorboard_logging.set_verbosity("failure")
with self.assertRaises(ValueError):
tensorboard_logging.log("bad", "dead")
def testNoSummaryWriter(self):
"""Test that logging without a SummaryWriter succeeds."""
tensorboard_logging.set_summary_writer(None)
tensorboard_logging.warn("this should work")
self.assertEqual(1, self.logged_message_count)
def testSummaryWriterFailsAfterClear(self):
tensorboard_logging._clear_summary_writer()
with self.assertRaises(RuntimeError):
tensorboard_logging.log(tensorboard_logging.ERROR, "failure")
if __name__ == "__main__":
test.main()
| apache-2.0 | 3,987,900,248,590,241,000 | 35.52459 | 80 | 0.707585 | false |
Bourneer/scrapy | scrapy/utils/console.py | 70 | 2754 | from functools import wraps
from collections import OrderedDict
def _embed_ipython_shell(namespace={}, banner=''):
"""Start an IPython Shell"""
try:
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.ipapp import load_default_config
except ImportError:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from IPython.frontend.terminal.ipapp import load_default_config
@wraps(_embed_ipython_shell)
def wrapper(namespace=namespace, banner=''):
config = load_default_config()
shell = InteractiveShellEmbed(
banner1=banner, user_ns=namespace, config=config)
shell()
return wrapper
def _embed_bpython_shell(namespace={}, banner=''):
"""Start a bpython shell"""
import bpython
@wraps(_embed_bpython_shell)
def wrapper(namespace=namespace, banner=''):
bpython.embed(locals_=namespace, banner=banner)
return wrapper
def _embed_standard_shell(namespace={}, banner=''):
"""Start a standard python shell"""
import code
try: # readline module is only available on unix systems
import readline
except ImportError:
pass
else:
import rlcompleter
readline.parse_and_bind("tab:complete")
@wraps(_embed_standard_shell)
def wrapper(namespace=namespace, banner=''):
code.interact(banner=banner, local=namespace)
return wrapper
DEFAULT_PYTHON_SHELLS = OrderedDict([
('ipython', _embed_ipython_shell),
('bpython', _embed_bpython_shell),
( 'python', _embed_standard_shell),
])
def get_shell_embed_func(shells=None, known_shells=None):
"""Return the first acceptable shell-embed function
from a given list of shell names.
"""
if shells is None: # list, preference order of shells
shells = DEFAULT_PYTHON_SHELLS.keys()
if known_shells is None: # available embeddable shells
known_shells = DEFAULT_PYTHON_SHELLS.copy()
for shell in shells:
if shell in known_shells:
try:
# function test: run all setup code (imports),
# but dont fall into the shell
return known_shells[shell]()
except ImportError:
continue
def start_python_console(namespace=None, banner='', shells=None):
"""Start Python console bound to the given namespace.
Readline support and tab completion will be used on Unix, if available.
"""
if namespace is None:
namespace = {}
try:
shell = get_shell_embed_func(shells)
if shell is not None:
shell(namespace=namespace, banner=banner)
except SystemExit: # raised when using exit() in python code.interact
pass
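# Minimal usage sketch (the namespace and banner below are illustrative,
# not part of Scrapy's API):
if __name__ == '__main__':
    start_python_console({'answer': 42}, banner='demo shell', shells=['ipython', 'bpython', 'python'])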
| bsd-3-clause | -8,722,593,703,790,764,000 | 33.860759 | 75 | 0.658315 | false |
ptisserand/ansible | hacking/tests/gen_distribution_version_testcase.py | 63 | 1905 | #!/usr/bin/env python
"""
This script generated test_cases for test_distribution_version.py.
To do so it outputs the relevant files from /etc/*release, the output of platform.dist() and the current ansible_facts regarding the distribution version.
This assumes a working ansible version in the path.
"""
import platform
import os.path
import subprocess
import json
import sys
filelist = [
'/etc/oracle-release',
'/etc/slackware-version',
'/etc/redhat-release',
'/etc/vmware-release',
'/etc/openwrt_release',
'/etc/system-release',
'/etc/alpine-release',
'/etc/release',
'/etc/arch-release',
'/etc/os-release',
'/etc/SuSE-release',
'/etc/gentoo-release',
'/etc/os-release',
'/etc/lsb-release',
'/etc/altlinux-release',
'/etc/os-release',
'/etc/coreos/update.conf',
'/usr/lib/os-release',
]
fcont = {}
for f in filelist:
if os.path.exists(f):
s = os.path.getsize(f)
if s > 0 and s < 10000:
with open(f) as fh:
fcont[f] = fh.read()
dist = platform.dist()
facts = ['distribution', 'distribution_version', 'distribution_release', 'distribution_major_version', 'os_family']
try:
ansible_out = subprocess.check_output(
['ansible', 'localhost', '-m', 'setup'])
except subprocess.CalledProcessError as e:
print("ERROR: ansible run failed, output was: \n")
print(e.output)
sys.exit(e.returncode)
parsed = json.loads(ansible_out[ansible_out.index('{'):])
ansible_facts = {}
for fact in facts:
try:
ansible_facts[fact] = parsed['ansible_facts']['ansible_' + fact]
    except KeyError:
ansible_facts[fact] = "N/A"
nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']
output = {
'name': nicename,
'input': fcont,
'platform.dist': dist,
'result': ansible_facts,
}
print(json.dumps(output, indent=4))
| gpl-3.0 | 8,547,507,749,244,324,000 | 23.74026 | 154 | 0.63832 | false |
mezz64/home-assistant | homeassistant/components/volkszaehler/sensor.py | 19 | 4129 | """Support for consuming values for the Volkszaehler API."""
from datetime import timedelta
import logging
from volkszaehler import Volkszaehler
from volkszaehler.exceptions import VolkszaehlerApiConnectionError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PORT,
ENERGY_WATT_HOUR,
POWER_WATT,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_UUID = "uuid"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "Volkszaehler"
DEFAULT_PORT = 80
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
SENSOR_TYPES = {
"average": ["Average", POWER_WATT, "mdi:power-off"],
"consumption": ["Consumption", ENERGY_WATT_HOUR, "mdi:power-plug"],
"max": ["Max", POWER_WATT, "mdi:arrow-up"],
"min": ["Min", POWER_WATT, "mdi:arrow-down"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_UUID): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["average"]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
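# Example configuration.yaml entry accepted by the schema above (all values
# are illustrative):
#
# sensor:
#   - platform: volkszaehler
#     uuid: 01234567-89ab-cdef-0123-456789abcdef
#     host: demo.volkszaehler.org
#     monitored_conditions:
#       - average
#       - consumption
#       - max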
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Volkszaehler sensors."""
host = config[CONF_HOST]
name = config[CONF_NAME]
port = config[CONF_PORT]
uuid = config[CONF_UUID]
conditions = config[CONF_MONITORED_CONDITIONS]
session = async_get_clientsession(hass)
vz_api = VolkszaehlerData(
Volkszaehler(hass.loop, session, uuid, host=host, port=port)
)
await vz_api.async_update()
if vz_api.api.data is None:
raise PlatformNotReady
dev = []
for condition in conditions:
dev.append(VolkszaehlerSensor(vz_api, name, condition))
async_add_entities(dev, True)
class VolkszaehlerSensor(Entity):
"""Implementation of a Volkszaehler sensor."""
def __init__(self, vz_api, name, sensor_type):
"""Initialize the Volkszaehler sensor."""
self.vz_api = vz_api
self._name = name
self.type = sensor_type
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(self._name, SENSOR_TYPES[self.type][0])
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return SENSOR_TYPES[self.type][1]
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self.vz_api.available
@property
def state(self):
"""Return the state of the resources."""
return self._state
async def async_update(self):
"""Get the latest data from REST API."""
await self.vz_api.async_update()
if self.vz_api.api.data is not None:
self._state = round(getattr(self.vz_api.api, self.type), 2)
class VolkszaehlerData:
"""The class for handling the data retrieval from the Volkszaehler API."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
self.available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from the Volkszaehler REST API."""
try:
await self.api.get_data()
self.available = True
except VolkszaehlerApiConnectionError:
_LOGGER.error("Unable to fetch data from the Volkszaehler API")
self.available = False
| apache-2.0 | -8,731,169,241,903,271,000 | 28.492857 | 86 | 0.658029 | false |
gvnn3/PCS | scripts/ptptimes.py | 1 | 3112 | #!/usr/bin/env python
# Copyright (c) 2012-2016, Neville-Neil Consulting
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Neville-Neil Consulting nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: George V. Neville-Neil
#
# Description: Script displays all the times found in various PTP packets.
from pcs.packets.ptp import *
from pcs.packets.ptp_common import Common
from pcs.packets.ipv4 import ipv4
from pcs.packets.udpv4 import udpv4
import pcs
import datetime
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file",
dest="file", default=None,
help="File to read from.")
parser.add_option("-n", "--natural",
action="store_true",
dest="natural", default=False,
help="human readable time.")
(options, args) = parser.parse_args()
file = pcs.PcapConnector(options.file)
packet = file.readpkt()
while(packet != None):
if packet.data == None:
continue
if packet.data.data == None:
continue
if packet.data.data.data != None:
if (options.natural == True):
ts = datetime.datetime.fromtimestamp(packet.data.data.timestamp)
ms = ts.microsecond / 1000
msecond = ts.strftime("%H:%M:%S")
msecond += (".%d") % ms
print msecond
else:
print packet.data.data.timestamp
print packet.data.data.data
if packet.data.data.data.data != None:
print packet.data.data.data.data
packet = file.readpkt()
if __name__ == "__main__":
main()
| bsd-3-clause | 1,206,872,159,495,267,800 | 35.611765 | 80 | 0.674165 | false |
alex-robbins/micropython | examples/hwapi/soft_pwm.py | 43 | 1367 | import utime
from hwconfig import LED
# Using sleep_ms() gives pretty poor PWM resolution and
# brightness control, but we use it in the attempt to
# make this demo portable to even more boards (e.g. to
# those which don't provide sleep_us(), or provide, but
# it's not precise, like would be on non realtime OSes).
# We otherwise use 20ms period, to make frequency not less
# than 50Hz to avoid visible flickering (you may still see it
# if you're unlucky).
def pwm_cycle(led, duty, cycles):
duty_off = 20 - duty
for i in range(cycles):
if duty:
led.on()
utime.sleep_ms(duty)
if duty_off:
led.off()
utime.sleep_ms(duty_off)
# At the duty setting of 1, an LED is still pretty bright, then
# at duty 0, it's off. This makes a rather unsmooth transition, and
# breaks fade effect. So, we avoid value of 0 and oscillate between
# 1 and 20. Actually, highest values like 19 and 20 are also
# barely distinguishable (like, both of them too bright and burn
# your eye). So, an improvement to the visible effect would be to use
# more steps (at least 10x), and then higher frequency, and use
# range which includes 1 but excludes values at the top.
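# A sketch of that improvement (an assumption: the port provides a usable
# utime.sleep_us(); here a 10 ms period, i.e. 100 Hz, with 200 duty steps):
def pwm_cycle_us(led, duty, cycles, steps=200, period_us=10000):
    on_us = period_us * duty // steps
    off_us = period_us - on_us
    for i in range(cycles):
        if on_us:
            led.on()
            utime.sleep_us(on_us)
        if off_us:
            led.off()
            utime.sleep_us(off_us)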
while True:
# Fade in
for i in range(1, 21):
pwm_cycle(LED, i, 2)
# Fade out
for i in range(20, 0, -1):
pwm_cycle(LED, i, 2)
| mit | -2,090,389,369,248,464,000 | 34.973684 | 67 | 0.673007 | false |
dentaku65/plugin.video.sod | servers/ultramegabit.py | 42 | 1290 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para ultramegabit
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[ultramegabit.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
return video_urls
# Find this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://ultramegabit.com/file/
patronvideos = '(ultramegabit.com/file/details/[^"]+)'
logger.info("[ultramegabit.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[ultramegabit]"
url ='http://'+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'ultramegabit' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
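# Illustrative use of find_videos (the page snippet below is hypothetical):
#
#   data = '<a href="http://ultramegabit.com/file/details/abc123">mirror</a>'
#   find_videos(data)
#   # -> [['[ultramegabit]', 'http://ultramegabit.com/file/details/abc123', 'ultramegabit']]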
| gpl-3.0 | 2,796,540,630,078,556,700 | 31.225 | 91 | 0.581846 | false |
Celtoys/pycgen | pycgen.py | 1 | 5990 |
#
# Copyright 2014 Celtoys Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
#
# Compiles and executes the Python code that the generator code has access to.
# Returns the global python environment that must be used between all code runs in a file.
#
def CreatePythonExecEnvironment():
prologue = """
import os
import inspect
# Empty string for output
g_EmitOutput = ""
def EmitStr(string):
globals()["g_EmitOutput"] += string
def EmitLn(line):
globals()["g_EmitOutput"] += line
globals()["g_EmitOutput"] += os.linesep
def EmitRepl(generic, repl):
# Remove leading/trailing newlines
generic = generic.lstrip(os.linesep)
generic = generic.rstrip(os.linesep)
# Split into old value and replacement values
r = repl.split(":")
old_val = r[0]
new_vals = r[1]
# Iterate all replacement values and emit them
vals = new_vals.split(",")
for v in vals:
EmitLn(generic.replace(old_val, v))
def EmitFmt(line):
# Get local variables from calling frame
# This is available in CPython but not guaranteed to be available in other Python implementations
calling_frame = inspect.currentframe().f_back
locals = calling_frame.f_locals
# Remove leading/trailing newlines
line = line.lstrip(os.linesep)
line = line.rstrip(os.linesep)
# Format with the unpacked calling frame local variables
EmitStr(line.format(**locals))
"""
# Compile the prologue and return the environment it creates
exec_globals = { }
prologue_compiled = compile(prologue, "<prologue>", "exec")
exec(prologue_compiled, exec_globals)
return exec_globals
#
# Opens a file and returns it as an array of lines
#
def OpenFile(filename):
try:
f = open(filename, "rb")
lines = f.readlines()
return lines
except:
return None
#
# Strips all leading whitespace on a line and treats it as the indent
#
def GetIndent(line):
lstrip = line.lstrip()
indent_size = len(line) - len(lstrip)
return line[0:indent_size]
#
# Executes python code at runtime within the given environment.
# Assumes that the code will be doing nothing but code emits so returns the
# emit output from the python environment passed in.
#
def ExecPythonLines(lines, filename, start_line, python_env):
if len(lines) == 0:
return None
# Use the indent from the starting line to prepare all the other lines
start_indent = GetIndent(lines[0])
indent_size = len(start_indent)
# Strip leading indentation from all lines
for i in range(0, len(lines)):
line = lines[i]
# Lines that don't share the same indent could be an error...
if not line.startswith(start_indent):
# ...unless they're empty lines
line = line.lstrip()
if len(line) == 0:
lines[i] = ""
continue
print(filename + "(" + str(start_line + i) + "): Bad leading whitespace indent")
return None
lines[i] = line[indent_size:]
# Run python code with shared global state for entire file
python_env["g_EmitOutput"] = ""
code = "".join(lines)
exec(code, python_env)
output = python_env["g_EmitOutput"].splitlines()
if len(output) == 0:
return None
# Prefix the output with indentation inferred from the generator
output = [ start_indent + line for line in output ]
output = os.linesep.join(output) + os.linesep
return output
#
# Quick and dirty, line-by-line parsing of an input file looking for Python generator code-blocks
# to execute. Returns the modified file as a list of lines.
#
def ParseInputFile(python_env, filename):
lines = OpenFile(filename)
if lines == None:
return None
new_lines = [ ]
in_emit_output = False
in_python_code = False
python_lines = [ ]
python_start_line = 0
python_indent = 0
for i in range(0, len(lines)):
line = lines[i]
line_lstrip = line.lstrip()
# Detect and skip start and end of emit output from previous runs
if line_lstrip.startswith("//$pycgen-begin"):
in_emit_output = True
continue
if in_emit_output and line_lstrip.startswith("//$pycgen-end"):
in_emit_output = False
continue
# Copy lines from old to new, ignoring emit output from previous runs
if not in_emit_output:
new_lines.append(line)
# Detect start of new python code block
if line_lstrip.startswith("/*$pycgen"):
in_python_code = True
python_lines = [ ]
python_start_line = i + 2 # +1 for skip over "pycgen", +1 for 1-based indexing
python_indent = GetIndent(line)
continue
# Detect end of python code block
if in_python_code and line_lstrip.startswith("*/"):
in_python_code = False
output = ExecPythonLines(python_lines, filename, python_start_line, python_env)
if output != None:
new_lines.append(python_indent + "//$pycgen-begin" + os.linesep)
new_lines += output
new_lines.append(python_indent + "//$pycgen-end" + os.linesep)
# Copy lines in python code blocks
if in_python_code:
python_lines.append(line)
return new_lines
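# Example input fragment (illustrative) that ParseInputFile acts on:
#     /*$pycgen
#     EmitLn("int x;")
#     */
# The generated code is inserted directly after the closing */, wrapped in
# //$pycgen-begin and //$pycgen-end marker lines so it can be found and
# replaced on the next run.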
# Parse command-line arguments
if len(sys.argv) != 3:
print("Use: pycgen.py <input_filename> <output_filename>")
sys.exit(1)
input_filename = os.path.abspath(sys.argv[1])
output_filename = sys.argv[2]
# Parse the input file and generate any required code
python_env = CreatePythonExecEnvironment()
output_lines = ParseInputFile(python_env, input_filename)
if output_lines == None:
print("ERROR: Couldn't open file " + input_filename)
sys.exit(1)
# Write the result
try:
output_file = open(output_filename, "wb")
output_file.writelines(output_lines)
except:
print("ERROR: Couldn't write to file " + output_filename)
sys.exit(1)
| apache-2.0 | 21,097,735,267,603,150 | 24.274262 | 98 | 0.711686 | false |
chromium2014/src | build/android/pylib/base/base_test_result_unittest.py | 134 | 2817 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for TestRunResults."""
import unittest
from pylib.base.base_test_result import BaseTestResult
from pylib.base.base_test_result import TestRunResults
from pylib.base.base_test_result import ResultType
class TestTestRunResults(unittest.TestCase):
def setUp(self):
self.p1 = BaseTestResult('p1', ResultType.PASS, log='pass1')
other_p1 = BaseTestResult('p1', ResultType.PASS)
self.p2 = BaseTestResult('p2', ResultType.PASS)
self.f1 = BaseTestResult('f1', ResultType.FAIL, log='failure1')
self.c1 = BaseTestResult('c1', ResultType.CRASH, log='crash1')
self.u1 = BaseTestResult('u1', ResultType.UNKNOWN)
self.tr = TestRunResults()
self.tr.AddResult(self.p1)
self.tr.AddResult(other_p1)
self.tr.AddResult(self.p2)
self.tr.AddResults(set([self.f1, self.c1, self.u1]))
def testGetAll(self):
self.assertFalse(
self.tr.GetAll().symmetric_difference(
[self.p1, self.p2, self.f1, self.c1, self.u1]))
def testGetPass(self):
self.assertFalse(self.tr.GetPass().symmetric_difference(
[self.p1, self.p2]))
def testGetNotPass(self):
self.assertFalse(self.tr.GetNotPass().symmetric_difference(
[self.f1, self.c1, self.u1]))
def testGetAddTestRunResults(self):
tr2 = TestRunResults()
other_p1 = BaseTestResult('p1', ResultType.PASS)
f2 = BaseTestResult('f2', ResultType.FAIL)
tr2.AddResult(other_p1)
tr2.AddResult(f2)
tr2.AddTestRunResults(self.tr)
self.assertFalse(
tr2.GetAll().symmetric_difference(
[self.p1, self.p2, self.f1, self.c1, self.u1, f2]))
def testGetLogs(self):
log_print = ('[FAIL] f1:\n'
'failure1\n'
'[CRASH] c1:\n'
'crash1')
self.assertEqual(self.tr.GetLogs(), log_print)
def testGetShortForm(self):
short_print = ('ALL: 5 PASS: 2 FAIL: 1 '
'CRASH: 1 TIMEOUT: 0 UNKNOWN: 1 ')
self.assertEqual(self.tr.GetShortForm(), short_print)
def testGetGtestForm(self):
gtest_print = ('[==========] 5 tests ran.\n'
'[ PASSED ] 2 tests.\n'
'[ FAILED ] 3 tests, listed below:\n'
'[ FAILED ] f1\n'
'[ FAILED ] c1 (CRASHED)\n'
'[ FAILED ] u1 (UNKNOWN)\n'
'\n'
'3 FAILED TESTS')
self.assertEqual(gtest_print, self.tr.GetGtestForm())
def testRunPassed(self):
self.assertFalse(self.tr.DidRunPass())
tr2 = TestRunResults()
self.assertTrue(tr2.DidRunPass())
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -6,242,539,859,490,549,000 | 33.353659 | 72 | 0.614838 | false |
skudriashev/incubator-airflow | tests/contrib/hooks/test_jdbc_hook.py | 12 | 1690 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.hooks.jdbc_hook import JdbcHook
from airflow import models
from airflow.utils import db
jdbc_conn_mock = Mock(
name="jdbc_conn"
)
class TestJdbcHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='jdbc_default', conn_type='jdbc',
host='jdbc://localhost/', port=443,
extra='{"extra__jdbc__drv_path": "/path1/test.jar,/path2/t.jar2", "extra__jdbc__drv_clsname": "com.driver.main"}'))
@patch("airflow.hooks.jdbc_hook.jaydebeapi.connect", autospec=True,
return_value=jdbc_conn_mock)
def test_jdbc_conn_connection(self, jdbc_mock):
jdbc_hook = JdbcHook()
jdbc_conn = jdbc_hook.get_conn()
self.assertTrue(jdbc_mock.called)
self.assertIsInstance(jdbc_conn, Mock)
self.assertEqual(jdbc_conn.name, jdbc_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,580,695,797,843,285,200 | 32.137255 | 139 | 0.669822 | false |
Lokke/eden | modules/feedparser.py | 22 | 155418 | """Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.3"
__license__ = """
Copyright (c) 2010-2013 Kurt McKee <[email protected]>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>",
"Bernd Schlapsi <https://github.com/brot>",]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
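# For example (illustrative), an embedding application might do:
#   import feedparser
#   feedparser.USER_AGENT = "MyApp/1.0 +http://example.com/myapp/"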
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
#try:
# # Python 3.1 introduces bytes.maketrans and simultaneously
# # deprecates string.maketrans; use bytes.maketrans if possible
# _maketrans = bytes.maketrans
#except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
#_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
_base64decode = base64.decodestring
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
#try:
# if bytes is str:
# # In Python 2.5 and below, bytes doesn't exist (NameError)
# # In Python 2.6 and above, bytes and str are the same type
# raise NameError
#except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
#else:
# # Python 3
# def _s2bytes(s):
# return bytes(s, 'utf8')
# def _l2bytes(l):
# return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import itertools
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content sanitizing
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
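    # Illustrative effect of the override above: with the stock sgmllib regex,
    # an attribute value containing an angle bracket, such as <a title="x > y">,
    # would end the tag at the first '>'; this version skips over quoted
    # attribute values so the real end bracket is found.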
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
#try:
# import iconv_codec
#except ImportError:
# pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
#try:
# import chardet
#except ImportError:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
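    # Illustrative examples of the aliasing above (not in the original source):
    # d['channel'] returns d['feed'], d['items'] returns d['entries'], and
    # a lookup of d['url'] is answered by d['href'] when present.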
def __getitem__(self, key):
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError, "object doesn't have key 'category'"
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError, "object has no attribute '%s'" % key
def __hash__(self):
return id(self)
_cp1252 = {
128: unichr(8364), # euro sign
130: unichr(8218), # single low-9 quotation mark
131: unichr( 402), # latin small letter f with hook
132: unichr(8222), # double low-9 quotation mark
133: unichr(8230), # horizontal ellipsis
134: unichr(8224), # dagger
135: unichr(8225), # double dagger
136: unichr( 710), # modifier letter circumflex accent
137: unichr(8240), # per mille sign
138: unichr( 352), # latin capital letter s with caron
139: unichr(8249), # single left-pointing angle quotation mark
140: unichr( 338), # latin capital ligature oe
142: unichr( 381), # latin capital letter z with caron
145: unichr(8216), # left single quotation mark
146: unichr(8217), # right single quotation mark
147: unichr(8220), # left double quotation mark
148: unichr(8221), # right double quotation mark
149: unichr(8226), # bullet
150: unichr(8211), # en dash
151: unichr(8212), # em dash
152: unichr( 732), # small tilde
153: unichr(8482), # trade mark sign
154: unichr( 353), # latin small letter s with caron
155: unichr(8250), # single right-pointing angle quotation mark
156: unichr( 339), # latin small ligature oe
158: unichr( 382), # latin small letter z with caron
159: unichr( 376), # latin capital letter y with diaeresis
}
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
if not isinstance(uri, unicode):
uri = uri.decode('utf-8', 'ignore')
try:
uri = urlparse.urljoin(base, uri)
except ValueError:
uri = u''
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
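# For example (illustrative):
#   _urljoin(u'http://example.com/a/', u'b/c') == u'http://example.com/a/b/c'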
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
# georss
self.ingeometry = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
# psc_chapters_flag prevents multiple psc_chapters from being
# captured in a single entry or item. The transition states are
# None -> True -> False. psc_chapter elements will only be
# captured while it is True.
self.psc_chapters_flag = None
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
v = v.replace('&', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
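        # Illustrative example (an assumption, not from the original source):
        # under the loose (sgmllib-based) parser the pair ('HREF', 'a&amp;b')
        # is normalized to ('href', u'a&b'); a strict XML parser has already
        # expanded the entity before this point.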
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
            # xml:lang could be explicitly set to '', so we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
            # Since there's no handler, or something has gone wrong, we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes, so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
                # We have an incomplete declaration (no closing '>' found yet).
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
if loweruri.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
# do not resolve guid elements with isPermalink="false"
if not element == 'id' or self.guidislink:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
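        # Illustrative round-trip (assumes the text was UTF-8 misread as Latin-1):
        #   u'caf\xc3\xa9'.encode('iso-8859-1').decode('utf-8') == u'caf\xe9'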
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
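    # Illustrative behaviour (not in the original source):
    #   lookslikehtml(u'<p>hello</p>') -> 1 (close tag on a whitelisted element)
    #   lookslikehtml(u'2 < 3 and 4 > 1') -> None (no close tag or entity)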
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
        # If we're here then this is an RSS feed.
        # If we don't have a version or have a version that starts with something
        # other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
author = author.replace(u'<>', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
self.psc_chapters_flag = None
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
# geospatial location, or "where", from georss.org
def _start_georssgeom(self, attrsD):
self.push('geometry', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context['where'].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _start_where(self, attrsD):
self.push('where', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
def _start_gml_point(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 1
self.push('geometry', 0)
def _start_gml_linestring(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrsD):
self._parse_srs_attrs(attrsD)
self.push('geometry', 0)
def _start_gml_exterior(self, attrsD):
self.push('geometry', 0)
def _start_gml_linearring(self, attrsD):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrsD):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _start_gml_poslist(self, attrsD):
self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# end geospatial
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', u'alternate')
if attrsD['rel'] == u'self':
attrsD.setdefault('type', u'application/atom+xml')
else:
attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
_start_media_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
_end_media_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
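        # Worked example of the index expression below:
        #   value == 'yes'   -> (True and 2) == 2    -> True
        #   value == 'clean' -> (False or True or 0) -> index 1 -> False
        #   anything else    -> index 0              -> None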
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
def _start_media_group(self, attrsD):
# don't do anything, but don't break the enclosed tags either
pass
def _start_media_credit(self, attrsD):
context = self._getContext()
context.setdefault('media_credit', [])
context['media_credit'].append(attrsD)
self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
        if credit is not None and credit.strip():
context = self._getContext()
context['media_credit'][-1]['content'] = credit
def _start_media_restriction(self, attrsD):
context = self._getContext()
context.setdefault('media_restriction', attrsD)
self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
        if restriction is not None and restriction.strip():
context = self._getContext()
context['media_restriction']['content'] = restriction
def _start_media_license(self, attrsD):
context = self._getContext()
context.setdefault('media_license', attrsD)
self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
        if license is not None and license.strip():
context = self._getContext()
context['media_license']['content'] = license
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
        if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
def _start_psc_chapters(self, attrsD):
if self.psc_chapters_flag is None:
# Transition from None -> True
self.psc_chapters_flag = True
attrsD['chapters'] = []
self._getContext()['psc_chapters'] = FeedParserDict(attrsD)
def _end_psc_chapters(self):
# Transition from True -> False
self.psc_chapters_flag = False
def _start_psc_chapter(self, attrsD):
if self.psc_chapters_flag:
start = self._getAttribute(attrsD, 'start')
attrsD['start_parsed'] = _parse_psc_chapter_start(start)
context = self._getContext()['psc_chapters']
context['chapters'].append(FeedParserDict(attrsD))
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
            if lowernamespace.find(u'backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
                raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
localname = str(localname).lower()
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
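    # For example, the re.sub() in feed() below turns '<br/>' into '<br />'
    # but '<span/>' into '<span></span>', since span requires an end tag.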
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
pass
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
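        # e.g. [('HREF', 'u'), ('Rel', 'SELF')] -> [('href', 'u'), ('rel', 'self')]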
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, unicode):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
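        # A cp1252 numeric reference such as '&#150;' is remapped below to the
        # equivalent Unicode reference ('&#x2013;', an en dash), assuming the
        # _cp1252 table defined earlier maps 150 to u'\u2013'.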
ref = ref.lower()
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
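        # Normalize numeric character references (decimal and both hex
        # spellings) for the five XML special characters to named entities;
        # then, for non-XML content, resolve the named entities to literals.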
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('video', 'poster')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
return _urljoin(base, rel or u'')
if not base:
return rel or u''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
uri = _urljoin(base, rel)
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
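# For example, with the default ACCEPTABLE_URI_SCHEMES (which includes 'http'
# but not 'javascript'):
#   _makeSafeAbsoluteURI(u'http://example.com/feed', u'/logo.png')
#       -> u'http://example.com/logo.png'
#   _makeSafeAbsoluteURI(u'javascript:alert(1)') -> u''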
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics'])
    mathml_attributes = set(['actiontype', 'align', 'close', 'columnalign',
      'columnlines', 'columnspacing', 'columnspan', 'depth', 'display',
      'displaystyle', 'encoding', 'equalcolumns', 'equalrows', 'fence',
      'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
      'mathbackground', 'mathcolor', 'mathvariant', 'maxsize', 'minsize',
      'open', 'other', 'rowalign', 'rowlines', 'rowspacing', 'rowspan',
      'rspace', 'scriptlevel', 'selection', 'separator', 'separators',
      'stretchy', 'width', 'xlink:href', 'xlink:show', 'xlink:type',
      'xmlns', 'xmlns:xlink'])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
        if tag not in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
        elif tag not in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
            if any(n.startswith('xlink:') for n, v in attrs):
                if ('xmlns:xlink', 'http://www.w3.org/1999/xlink') not in attrs:
                    attrs.append(('xmlns:xlink', 'http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
        if tag not in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
                    if keyword not in self.acceptable_css_keywords and \
                      not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
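    # e.g. sanitize_style('color: red; behavior: url(evil.htc)') -> 'color: red;'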
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
data = data.strip().replace('\r\n', '\n')
return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
    If request_headers is supplied, it is a dictionary of HTTP request headers
that will override the values generated by FeedParser.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, basestring) \
and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# Test for inline user:password credentials for HTTP basic auth
auth = None
if base64 and not url_file_stream_or_string.startswith('ftp:'):
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, unicode):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
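# A minimal usage sketch (hypothetical URL; all seven arguments are required):
#   f = _open_resource(u'http://example.com/feed.xml', None, None,
#                      USER_AGENT, None, [], {})
#   data = f.read()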
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = u''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
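# e.g. _convert_to_idn(u'http://b\xfccher.example/feed')
#      -> u'http://xn--bcher-kva.example/feed'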
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
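        # e.g. a struct_time for 2004-01-04 16:29:06 GMT is rendered as
        # 'Sun, 04 Jan 2004 16:29:06 GMT'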
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
def _parse_psc_chapter_start(start):
FORMAT = r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$'
m = re.compile(FORMAT).match(start)
if m is None:
return None
_, h, m, s, _, ms = m.groups()
h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0))
return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000)
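# e.g. _parse_psc_chapter_start(u'01:02:03.500')
#      -> datetime.timedelta(0, 3723, 500000), i.e. 1:02:03.5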
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
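# e.g. _parse_date_iso8601('2003-12-31T10:14:55Z')[:6] normally yields
# (2003, 12, 31, 10, 14, 55); the mktime()/localtime() round trip above
# preserves the wall-clock fields.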
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
    u'\u0399\u03bf\u03bb': u'Jul', # c9efeb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
    u'm\u00e1jus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
timezonenames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
if not datestr.strip():
return None
parts = datestr.lower().split('t')
if len(parts) == 1:
# This may be a date only, or may be an MSSQL-style date
parts = parts[0].split()
if len(parts) == 1:
# Treat this as a date only
parts.append('00:00:00z')
elif len(parts) > 2:
return None
date = parts[0].split('-', 2)
if not date or len(date[0]) != 4:
return None
# Ensure that `date` has 3 elements. Using '1' sets the default
# month to January and the default day to the 1st of the month.
date.extend(['1'] * (3 - len(date)))
try:
year, month, day = [int(i) for i in date]
except ValueError:
# `date` may have more than 3 elements or may contain
# non-integer strings.
return None
if parts[1].endswith('z'):
parts[1] = parts[1][:-1]
parts.append('z')
# Append the numeric timezone offset, if any, to parts.
# If this is an MSSQL-style date then parts[2] already contains
# the timezone information, so `append()` will not affect it.
# Add 1 to each value so that if `find()` returns -1 it will be
# treated as False.
loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
loc = loc - 1
parts.append(parts[1][loc:])
parts[1] = parts[1][:loc]
time = parts[1].split(':', 2)
# Ensure that time has 3 elements. Using '0' means that the
# minutes and seconds, if missing, will default to 0.
time.extend(['0'] * (3 - len(time)))
tzhour = 0
tzmin = 0
if parts[2][:1] in ('-', '+'):
try:
tzhour = int(parts[2][1:3])
tzmin = int(parts[2][4:])
except ValueError:
return None
if parts[2].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[2], 0)
try:
hour, minute, second = [int(float(i)) for i in time]
except ValueError:
return None
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
registerDateHandler(_parse_date_w3dtf)
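# e.g. _parse_date_w3dtf('2003-12-31T10:14:55-08:00')[:6]
#      == (2003, 12, 31, 18, 14, 55), with the offset folded into UTC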
def _parse_date_rfc822(date):
"""Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed if only a date is present.
"""
daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
parts = date.lower().split()
if len(parts) < 5:
# Assume that the time and timezone are missing
parts.extend(('00:00:00', '0000'))
# Remove the day name
if parts[0][:3] in daynames:
parts = parts[1:]
if len(parts) < 5:
# If there are still fewer than five parts, there's not enough
# information to interpret this
return None
try:
day = int(parts[0])
except ValueError:
# Check if the day and month are swapped
if months.get(parts[0][:3]):
try:
day = int(parts[1])
except ValueError:
return None
else:
parts[1] = parts[0]
else:
return None
month = months.get(parts[1][:3])
if not month:
return None
try:
year = int(parts[2])
except ValueError:
return None
    # Normalize two-digit years:
    # 90-99 are interpreted as 1990-1999;
    # 00-89 are interpreted as 2000-2089
if len(parts[2]) <= 2:
year += (1900, 2000)[year < 90]
timeparts = parts[3].split(':')
timeparts = timeparts + ([0] * (3 - len(timeparts)))
try:
(hour, minute, second) = map(int, timeparts)
except ValueError:
return None
tzhour = 0
tzmin = 0
# Strip 'Etc/' from the timezone
if parts[4].startswith('etc/'):
parts[4] = parts[4][4:]
# Normalize timezones that start with 'gmt':
# GMT-05:00 => -0500
# GMT => GMT
if parts[4].startswith('gmt'):
parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
# Handle timezones like '-0500', '+0500', and 'EST'
if parts[4] and parts[4][0] in ('-', '+'):
try:
tzhour = int(parts[4][1:3])
tzmin = int(parts[4][3:])
except ValueError:
return None
if parts[4].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[4], 0)
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
registerDateHandler(_parse_date_rfc822)
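# e.g. _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')[:6]
#      == (2004, 1, 1, 19, 48, 21)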
_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def _parse_date_asctime(dt):
"""Parse asctime-style dates"""
dayname, month, day, remainder = dt.split(None, 3)
# Convert month and day into zero-padded integers
month = '%02i ' % (_months.index(month.lower()) + 1)
day = '%02i ' % (int(day),)
dt = month + day + remainder
return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
registerDateHandler(_parse_date_asctime)
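# e.g. _parse_date_asctime('Sun Jan  4 16:29:06 2004')[:6]
#      == (2004, 1, 4, 16, 29, 6)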
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
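    # A rough illustration of the precedence rules above (not exhaustive):
    #   application/xml; charset=iso-8859-1  -> iso-8859-1
    #   application/xml, no charset          -> XML prefix encoding, else utf-8
    #   text/xml; charset=us-ascii           -> us-ascii (XML prefix ignored)
    #   text/xml, no charset                 -> us-ascii
    #   no Content-Type header at all        -> XML prefix encoding, else iso-8859-1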
bom_encoding = u''
xml_encoding = u''
rfc3023_encoding = u''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = u'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = u'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = u'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = u'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = u'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = u'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = u'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = u'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
u'u16', u'utf-16', u'utf16', u'utf_16',
u'u32', u'utf-32', u'utf32', u'utf_32',
u'iso-10646-ucs-2', u'iso-10646-ucs-4',
u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if not isinstance(http_encoding, unicode):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = (u'application/xml', u'application/xml-dtd',
u'application/xml-external-parsed-entity')
text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith(u'application/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith(u'text/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_content_type.startswith(u'text/'):
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or u'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or u'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == u'gb2312':
rfc3023_encoding = u'gb18030'
if xml_encoding.lower() == u'gb2312':
xml_encoding = u'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
chardet_encoding = None
tried_encodings = []
if chardet:
chardet_encoding = chardet.detect(data)['encoding']
if not chardet_encoding:
chardet_encoding = ''
if not isinstance(chardet_encoding, unicode):
chardet_encoding = unicode(chardet_encoding, 'ascii', 'ignore')
    # try: the RFC 3023 encoding, the declared XML encoding, the encoding
    # sniffed from the BOM, chardet's guess, then utf-8, windows-1252,
    # and iso-8859-2
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + u'\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = u''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
return data, rfc3023_encoding, error
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow numeric character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
    '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data,
    safe_entities)
    rss_version may be 'rss091n' or None
    stripped_data is the same XML document with a replaced DOCTYPE
    safe_entities maps safe entity names to their replacement text
    '''
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if _s2bytes('netscape') in doctype.lower():
version = u'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = _s2bytes('')
if len(doctype_results) == 1 and entity_results:
match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
safe_entities = filter(match_safe_entities, entity_results)
if safe_entities:
replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
+ _s2bytes('>\n<!ENTITY ').join(safe_entities) \
+ _s2bytes('>\n]>')
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
return version, data, safe_entities
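# A minimal sketch of replace_doctype() (byte-string input assumed, since it
# operates on raw data before decoding):
#   version, data, entities = replace_doctype(_s2bytes(
#       '<!DOCTYPE rss SYSTEM '
#       '"http://my.netscape.com/publish/formats/rss-0.91.dtd">'
#       '<rss version="0.91"></rss>'))
# Here version == u'rss091n' (the DOCTYPE mentions netscape), the DOCTYPE is
# stripped from the returned data, and entities is an empty dict.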
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
if geom_type == 'linestring':
return _parse_georss_line(value, swap, dims)
elif geom_type == 'polygon':
ring = _parse_georss_line(value, swap, dims)
return {'type': u'Polygon', 'coordinates': (ring['coordinates'],)}
else:
return None
def _gen_georss_coords(value, swap=True, dims=2):
# A generator of (lon, lat) pairs from a string of encoded GeoRSS
# coordinates. Converts to floats and swaps order.
latlons = itertools.imap(float, value.strip().replace(',', ' ').split())
nxt = latlons.next
while True:
t = [nxt(), nxt()][::swap and -1 or 1]
if dims == 3:
t.append(nxt())
yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'Point', u'coordinates': coords[0]}
except (IndexError, ValueError):
return None
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'LineString', u'coordinates': coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {u'type': u'Polygon', u'coordinates': (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
# A bounding box is a rectangular region, often used to define the extents
    # of a map or a rough area of interest. A box contains two space-separated
# latitude-longitude pairs, with each pair separated by whitespace. The
# first pair is the lower corner, the second is the upper corner.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'Box', u'coordinates': tuple(coords)}
except (IndexError, ValueError):
return None
# end geospatial parsers
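# Hedged usage sketches for the parsers above (inputs are lat/lon pairs,
# swapped into (lon, lat) tuples by default):
#   _parse_georss_point(u'45.256 -71.92')
#       -> {u'type': u'Point', u'coordinates': (-71.92, 45.256)}
#   _parse_georss_line(u'45.256 -110.45 46.46 -109.48')
#       -> {u'type': u'LineString',
#           u'coordinates': [(-110.45, 45.256), (-109.48, 46.46)]}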
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error), e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error, e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error, e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', u'')
if not isinstance(etag, unicode):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', u'')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, unicode):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = u''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
data, result['encoding'], error = convert_to_utf8(http_headers, data)
use_strict_parser = result['encoding'] and True or False
if error is not None:
result['bozo'] = 1
result['bozo_exception'] = error
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = http_headers.get('content-location', u'')
href = result.get('href', u'')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, unicode) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException, e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
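# A minimal usage sketch (the URL is a placeholder, not a real feed):
#   d = parse('http://example.org/feed.xml')
#   d['bozo']               # 1 if the feed was ill-formed or unreachable
#   d['feed'].get('title')  # feed-level metadata as a FeedParserDict
#   len(d['entries'])       # number of parsed entries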
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
| mit | -2,477,190,091,135,409,700 | 39.931788 | 208 | 0.579412 | false |
xzYue/odoo | addons/account/wizard/account_invoice_state.py | 340 | 2875 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_invoice_confirm(osv.osv_memory):
"""
    This wizard will confirm all the selected draft invoices
"""
_name = "account.invoice.confirm"
_description = "Confirm the selected invoices"
def invoice_confirm(self, cr, uid, ids, context=None):
if context is None:
context = {}
active_ids = context.get('active_ids', []) or []
proxy = self.pool['account.invoice']
for record in proxy.browse(cr, uid, active_ids, context=context):
if record.state not in ('draft', 'proforma', 'proforma2'):
raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be confirmed as they are not in 'Draft' or 'Pro-Forma' state."))
record.signal_workflow('invoice_open')
return {'type': 'ir.actions.act_window_close'}
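# Illustrative invocation (the wizard id and invoice ids are hypothetical):
#   self.pool['account.invoice.confirm'].invoice_confirm(
#       cr, uid, [wizard_id], context={'active_ids': invoice_ids})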
class account_invoice_cancel(osv.osv_memory):
"""
    This wizard will cancel all the selected invoices.
    If the option that allows cancelling entries is not enabled on the
    journal, it will give a warning message.
"""
_name = "account.invoice.cancel"
_description = "Cancel the Selected Invoices"
def invoice_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
proxy = self.pool['account.invoice']
active_ids = context.get('active_ids', []) or []
for record in proxy.browse(cr, uid, active_ids, context=context):
if record.state in ('cancel','paid'):
raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be cancelled as they are already in 'Cancelled' or 'Done' state."))
record.signal_workflow('invoice_cancel')
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,649,328,772,700,792,000 | 40.666667 | 149 | 0.621913 | false |
CompMusic/essentia | test/src/unittest/standard/test_unaryoperator.py | 10 | 3671 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestUnaryOperator(TestCase):
testInput = [1,2,3,4,3.4,-5.0008, 100034]
def testEmpty(self):
self.assertEqualVector(UnaryOperator()([]), [])
def testOne(self):
self.assertEqualVector(UnaryOperator(type="identity")([101]), [101])
def testAbs(self):
self.assertAlmostEqualVector(UnaryOperator(type="abs")(self.testInput),
[1,2,3,4,3.4,5.0008,100034])
def testLog10(self):
self.assertAlmostEqualVector(
UnaryOperator(type="log10")(self.testInput),
[0., 0.30103001, 0.4771212637, 0.60206002, 0.5314789414, -30., 5.0001478195])
def testLog(self):
self.assertAlmostEqualVector(
UnaryOperator(type="log")(self.testInput),
[0., 0.6931471825, 1.0986123085, 1.3862943649, 1.223775506, -69.0775527954, 11.5132656097])
def testLn(self):
self.assertAlmostEqualVector(UnaryOperator(type="ln")(self.testInput),
[0, 0.693147181, 1.098612289, 1.386294361, 1.223775432, -69.07755279, 11.513265407])
def testLin2Db(self):
self.assertAlmostEqualVector(
UnaryOperator(type="lin2db")(self.testInput),
[0., 3.01029992, 4.77121258, 6.02059984, 5.3147893, -90., 50.00147629])
def testDb2Lin(self):
        # remove the last element because it is too large and causes an
        # overflow
self.assertAlmostEqualVector(
UnaryOperator(type="db2lin")(self.testInput[:-1]),
[1.25892544, 1.58489323, 1.99526227, 2.51188636, 2.18776178, 0.3161695],
2e-7)
def testSine(self):
self.assertAlmostEqualVector(UnaryOperator(type="sin")(self.testInput),
[0.841470985, 0.909297427, 0.141120008, -0.756802495, -0.255541102, 0.958697038, -0.559079868], 1e-6)
def testCosine(self):
self.assertAlmostEqualVector(UnaryOperator(type="cos")(self.testInput),
[0.540302306, -0.416146837, -0.989992497, -0.653643621, -0.966798193, 0.284429234, 0.829113805], 1e-6)
def testSqrt(self):
# first take abs so we won't take sqrt of a negative (that test comes later)
absInput = UnaryOperator(type="abs")(self.testInput)
self.assertAlmostEqualVector(UnaryOperator(type="sqrt")(absInput),
[1, 1.414213562, 1.732050808, 2, 1.843908891, 2.236246856, 316.281520168])
def testSqrtNegative(self):
self.assertComputeFails(UnaryOperator(type="sqrt"),([0, -1, 1]))
def testSquare(self):
self.assertAlmostEqualVector(UnaryOperator(type="square")(self.testInput),
[1, 4, 9, 16, 11.56, 25.0080006, 10006801156])
def testInvalidParam(self):
self.assertConfigureFails(UnaryOperator(), {'type':'exp'})
suite = allTests(TestUnaryOperator)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | 276,211,603,184,710,430 | 38.473118 | 114 | 0.671479 | false |
murraymeehan/marsyas | scripts/createMarSystem.py | 3 | 2266 | #!/usr/bin/env python
'''
Script to generate skeleton files for a new MarSystem.
Usage:
createMarSystem.py NameOfNewMarSystem
This will create the files NameOfNewMarSystem.h and NameOfNewMarSystem.cpp
in the current directory.
'''
import os
import sys
def create_from_template(template_file, template_name, target_file, target_name):
'''
Create a MarSystem based on a given template.
Copy the lines from the given template_file to target_file and replace
    the occurrences of template_name with target_name.
Also remove the '//' style comments, but not the '/* */' and '///' ones.
'''
# If file already exists: quit.
if os.path.exists(target_file):
raise ValueError('file "%s" already exists' % target_file)
template = open(template_file, 'r')
target = open(target_file, 'w')
for line in template:
# Skip comment lines starting with '// '
# (other comments like '/* */' and '///' are kept.)
if line.strip() == '//' or line.strip()[:3] == '// ':
continue
# Replace the template_name with the target_name
line = line.replace(template_name, target_name)
# Replace the upper case versions too (for the #define stuff)
line = line.replace(template_name.upper(), target_name.upper())
target.write(line)
template.close()
target.close()
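# For example (paths are illustrative):
#   create_from_template('src/marsyas/MarSystemTemplateBasic.h',
#                        'MarSystemTemplateBasic',
#                        'MyMarSystem.h', 'MyMarSystem')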
if __name__ == '__main__':
# Get target name from command line.
if len(sys.argv) == 2:
target_name = sys.argv[1]
else:
print >>sys.stderr, 'usage: %s MarSystemName' % sys.argv[0]
sys.exit(1)
# Get the base directory of Marsyas
marsyas_base_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
print 'Marsyas base directory:', marsyas_base_dir
# The template file.
template_name = 'MarSystemTemplateBasic'
for ext in ['.h', '.cpp']:
# Construct the file names.
template_file = os.path.join(marsyas_base_dir, 'src', 'marsyas', template_name + ext)
target_file = target_name + ext
# Do the copy and replace stuff.
create_from_template(template_file, template_name, target_file, target_name)
print 'created "%s" from template "%s"' % (target_file, template_file)
| gpl-2.0 | 1,998,110,069,064,431,900 | 30.472222 | 93 | 0.633716 | false |
jcoady9/python-for-android | python3-alpha/python3-src/Lib/test/test_os.py | 47 | 46096 | # As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import errno
import unittest
import warnings
import sys
import signal
import subprocess
import time
import shutil
from test import support
import contextlib
import mmap
import uuid
# Detect whether we're on a Linux system that uses the (now outdated
# and unmaintained) linuxthreads threading library. There's an issue
# when combining linuxthreads with a failed execv call: see
# http://bugs.python.org/issue4970.
if (hasattr(os, "confstr_names") and
"CS_GNU_LIBPTHREAD_VERSION" in os.confstr_names):
libpthread = os.confstr("CS_GNU_LIBPTHREAD_VERSION")
USING_LINUXTHREADS= libpthread.startswith("linuxthreads")
else:
USING_LINUXTHREADS= False
# Tests creating TESTFN
class FileTests(unittest.TestCase):
def setUp(self):
if os.path.exists(support.TESTFN):
os.unlink(support.TESTFN)
tearDown = setUp
def test_access(self):
f = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(f)
self.assertTrue(os.access(support.TESTFN, os.W_OK))
def test_closerange(self):
first = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
# We must allocate two consecutive file descriptors, otherwise
# it will mess up other file descriptors (perhaps even the three
# standard ones).
second = os.dup(first)
try:
retries = 0
while second != first + 1:
os.close(first)
retries += 1
if retries > 10:
# XXX test skipped
self.skipTest("couldn't allocate two consecutive fds")
first, second = second, os.dup(second)
finally:
os.close(second)
# close a fd that is open, and one that isn't
os.closerange(first, first + 2)
self.assertRaises(OSError, os.write, first, b"a")
@support.cpython_only
def test_rename(self):
path = support.TESTFN
old = sys.getrefcount(path)
self.assertRaises(TypeError, os.rename, path, 0)
new = sys.getrefcount(path)
self.assertEqual(old, new)
def test_read(self):
with open(support.TESTFN, "w+b") as fobj:
fobj.write(b"spam")
fobj.flush()
fd = fobj.fileno()
os.lseek(fd, 0, 0)
s = os.read(fd, 4)
self.assertEqual(type(s), bytes)
self.assertEqual(s, b"spam")
def test_write(self):
# os.write() accepts bytes- and buffer-like objects but not strings
fd = os.open(support.TESTFN, os.O_CREAT | os.O_WRONLY)
self.assertRaises(TypeError, os.write, fd, "beans")
os.write(fd, b"bacon\n")
os.write(fd, bytearray(b"eggs\n"))
os.write(fd, memoryview(b"spam\n"))
os.close(fd)
with open(support.TESTFN, "rb") as fobj:
self.assertEqual(fobj.read().splitlines(),
[b"bacon", b"eggs", b"spam"])
def write_windows_console(self, *args):
retcode = subprocess.call(args,
# use a new console to not flood the test output
creationflags=subprocess.CREATE_NEW_CONSOLE,
# use a shell to hide the console window (SW_HIDE)
shell=True)
self.assertEqual(retcode, 0)
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the Windows console')
def test_write_windows_console(self):
# Issue #11395: the Windows console returns an error (12: not enough
# space error) on writing into stdout if stdout mode is binary and the
# length is greater than 66,000 bytes (or less, depending on heap
# usage).
code = "print('x' * 100000)"
self.write_windows_console(sys.executable, "-c", code)
self.write_windows_console(sys.executable, "-u", "-c", code)
def fdopen_helper(self, *args):
fd = os.open(support.TESTFN, os.O_RDONLY)
f = os.fdopen(fd, *args)
f.close()
def test_fdopen(self):
fd = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(fd)
self.fdopen_helper()
self.fdopen_helper('r')
self.fdopen_helper('r', 100)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
os.mkdir(support.TESTFN)
self.fname = os.path.join(support.TESTFN, "f1")
f = open(self.fname, 'wb')
f.write(b"ABC")
f.close()
def tearDown(self):
os.unlink(self.fname)
os.rmdir(support.TESTFN)
def check_stat_attributes(self, fname):
if not hasattr(os, "stat"):
return
import stat
result = os.stat(fname)
# Make sure direct access works
self.assertEqual(result[stat.ST_SIZE], 3)
self.assertEqual(result.st_size, 3)
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEqual(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assertIn(attr, members)
try:
result[200]
self.fail("No exception thrown")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception thrown")
except AttributeError:
pass
try:
result.st_rdev = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_stat_attributes(self):
self.check_stat_attributes(self.fname)
def test_stat_attributes_bytes(self):
try:
fname = self.fname.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
self.skipTest("cannot encode %a for the filesystem" % self.fname)
self.check_stat_attributes(fname)
def test_statvfs_attributes(self):
if not hasattr(os, "statvfs"):
return
try:
result = os.statvfs(self.fname)
except OSError as e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
return
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEqual(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception thrown")
except AttributeError:
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_utime_dir(self):
delta = 1000000
st = os.stat(support.TESTFN)
# round to int, because some systems may support sub-second
# time stamps in stat, but not in utime.
os.utime(support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
st2 = os.stat(support.TESTFN)
self.assertEqual(st2.st_mtime, int(st.st_mtime-delta))
# Restrict test to Win32, since there is no guarantee other
# systems support centiseconds
if sys.platform == 'win32':
def get_file_system(path):
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
buf = ctypes.create_unicode_buffer("", 100)
if kernel32.GetVolumeInformationW(root, None, 0, None, None, None, buf, len(buf)):
return buf.value
if get_file_system(support.TESTFN) == "NTFS":
def test_1565150(self):
t1 = 1159195039.25
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_large_time(self):
t1 = 5000000000 # some day in 2128
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_1686475(self):
# Verify that an open file can be stat'ed
try:
os.stat(r"c:\pagefile.sys")
except WindowsError as e:
if e.errno == 2: # file does not exist; cannot run test
return
self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def setUp(self):
self.__save = dict(os.environ)
if os.supports_bytes_environ:
self.__saveb = dict(os.environb)
for key, value in self._reference().items():
os.environ[key] = value
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
if os.supports_bytes_environ:
os.environb.clear()
os.environb.update(self.__saveb)
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
# Bug 1110478
def test_update2(self):
os.environ.clear()
if os.path.exists("/bin/sh"):
os.environ.update(HELLO="World")
with os.popen("/bin/sh -c 'echo $HELLO'") as popen:
value = popen.read().strip()
self.assertEqual(value, "World")
def test_os_popen_iter(self):
if os.path.exists("/bin/sh"):
with os.popen(
"/bin/sh -c 'echo \"line1\nline2\nline3\"'") as popen:
it = iter(popen)
self.assertEqual(next(it), "line1\n")
self.assertEqual(next(it), "line2\n")
self.assertEqual(next(it), "line3\n")
self.assertRaises(StopIteration, next, it)
# Verify environ keys and values from the OS are of the
# correct str type.
def test_keyvalue_types(self):
for key, val in os.environ.items():
self.assertEqual(type(key), str)
self.assertEqual(type(val), str)
def test_items(self):
for key, value in self._reference().items():
self.assertEqual(os.environ.get(key), value)
# Issue 7310
def test___repr__(self):
"""Check that the repr() of os.environ looks like environ({...})."""
env = os.environ
self.assertEqual(repr(env), 'environ({{{}}})'.format(', '.join(
'{!r}: {!r}'.format(key, value)
for key, value in env.items())))
def test_get_exec_path(self):
defpath_list = os.defpath.split(os.pathsep)
test_path = ['/monty', '/python', '', '/flying/circus']
test_env = {'PATH': os.pathsep.join(test_path)}
saved_environ = os.environ
try:
os.environ = dict(test_env)
# Test that defaulting to os.environ works.
self.assertSequenceEqual(test_path, os.get_exec_path())
self.assertSequenceEqual(test_path, os.get_exec_path(env=None))
finally:
os.environ = saved_environ
# No PATH environment variable
self.assertSequenceEqual(defpath_list, os.get_exec_path({}))
# Empty PATH environment variable
self.assertSequenceEqual(('',), os.get_exec_path({'PATH':''}))
# Supplied PATH environment variable
self.assertSequenceEqual(test_path, os.get_exec_path(test_env))
if os.supports_bytes_environ:
# env cannot contain 'PATH' and b'PATH' keys
try:
# ignore BytesWarning warning
with warnings.catch_warnings(record=True):
mixed_env = {'PATH': '1', b'PATH': b'2'}
except BytesWarning:
# mixed_env cannot be created with python -bb
pass
else:
self.assertRaises(ValueError, os.get_exec_path, mixed_env)
# bytes key and/or value
self.assertSequenceEqual(os.get_exec_path({b'PATH': b'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({b'PATH': 'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({'PATH': b'abc'}),
['abc'])
@unittest.skipUnless(os.supports_bytes_environ,
"os.environb required for this test.")
def test_environb(self):
# os.environ -> os.environb
value = 'euro\u20ac'
try:
value_bytes = value.encode(sys.getfilesystemencoding(),
'surrogateescape')
except UnicodeEncodeError:
msg = "U+20AC character is not encodable to %s" % (
sys.getfilesystemencoding(),)
self.skipTest(msg)
os.environ['unicode'] = value
self.assertEqual(os.environ['unicode'], value)
self.assertEqual(os.environb[b'unicode'], value_bytes)
# os.environb -> os.environ
value = b'\xff'
os.environb[b'bytes'] = value
self.assertEqual(os.environb[b'bytes'], value)
value_str = value.decode(sys.getfilesystemencoding(), 'surrogateescape')
self.assertEqual(os.environ['bytes'], value_str)
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
def test_traversal(self):
import os
from os.path import join
        # Build:
        #     TESTFN/
        #       TEST1/              a file kid and two directory kids
        #         tmp1
        #         SUB1/             a file kid and a directory kid
        #           tmp2
        #           SUB11/          no kids
        #         SUB2/             a file kid and a dirsymlink kid
        #           tmp3
        #           link/           a symlink to TESTFN.2
        #       TEST2/
        #         tmp4              a lone file
walk_path = join(support.TESTFN, "TEST1")
sub1_path = join(walk_path, "SUB1")
sub11_path = join(sub1_path, "SUB11")
sub2_path = join(walk_path, "SUB2")
tmp1_path = join(walk_path, "tmp1")
tmp2_path = join(sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
link_path = join(sub2_path, "link")
t2_path = join(support.TESTFN, "TEST2")
tmp4_path = join(support.TESTFN, "TEST2", "tmp4")
# Create stuff.
os.makedirs(sub11_path)
os.makedirs(sub2_path)
os.makedirs(t2_path)
for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
f = open(path, "w")
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
f.close()
if support.can_symlink():
os.symlink(os.path.abspath(t2_path), link_path)
sub2_tree = (sub2_path, ["link"], ["tmp3"])
else:
sub2_tree = (sub2_path, [], ["tmp3"])
# Walk top-down.
all = list(os.walk(walk_path))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
all[0][1].sort()
self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], sub2_tree)
# Prune the search.
all = []
for root, dirs, files in os.walk(walk_path):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
self.assertEqual(all[1], sub2_tree)
# Walk bottom-up.
all = list(os.walk(walk_path, topdown=False))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped], (sub11_path, [], []))
self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped], sub2_tree)
if support.can_symlink():
# Walk, following symlinks.
for root, dirs, files in os.walk(walk_path, followlinks=True):
if root == link_path:
self.assertEqual(dirs, [])
self.assertEqual(files, ["tmp4"])
break
else:
self.fail("Didn't follow symlink with followlinks=True")
def tearDown(self):
# Tear everything down. This is a decent use for bottom-up on
# Windows, which doesn't have a recursive delete command. The
# (not so) subtlety is that rmdir will fail unless the dir's
# kids are removed first, so bottom up is essential.
for root, dirs, files in os.walk(support.TESTFN, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
dirname = os.path.join(root, name)
if not os.path.islink(dirname):
os.rmdir(dirname)
else:
os.remove(dirname)
os.rmdir(support.TESTFN)
class MakedirTests(unittest.TestCase):
def setUp(self):
os.mkdir(support.TESTFN)
def test_makedir(self):
base = support.TESTFN
path = os.path.join(base, 'dir1', 'dir2', 'dir3')
os.makedirs(path) # Should work
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
os.makedirs(path)
# Try paths with a '.' in them
self.assertRaises(OSError, os.makedirs, os.curdir)
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
os.makedirs(path)
path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
'dir5', 'dir6')
os.makedirs(path)
def test_exist_ok_existing_directory(self):
path = os.path.join(support.TESTFN, 'dir1')
mode = 0o777
old_mask = os.umask(0o022)
os.makedirs(path, mode)
self.assertRaises(OSError, os.makedirs, path, mode)
self.assertRaises(OSError, os.makedirs, path, mode, exist_ok=False)
self.assertRaises(OSError, os.makedirs, path, 0o776, exist_ok=True)
os.makedirs(path, mode=mode, exist_ok=True)
os.umask(old_mask)
def test_exist_ok_existing_regular_file(self):
base = support.TESTFN
path = os.path.join(support.TESTFN, 'dir1')
f = open(path, 'w')
f.write('abc')
f.close()
self.assertRaises(OSError, os.makedirs, path)
self.assertRaises(OSError, os.makedirs, path, exist_ok=False)
self.assertRaises(OSError, os.makedirs, path, exist_ok=True)
os.remove(path)
def tearDown(self):
path = os.path.join(support.TESTFN, 'dir1', 'dir2', 'dir3',
'dir4', 'dir5', 'dir6')
# If the tests failed, the bottom-most directory ('../dir6')
# may not have been created, so we look for the outermost directory
# that exists.
while not os.path.exists(path) and path != support.TESTFN:
path = os.path.dirname(path)
os.removedirs(path)
class DevNullTests(unittest.TestCase):
def test_devnull(self):
with open(os.devnull, 'wb') as f:
f.write(b'hello')
f.close()
with open(os.devnull, 'rb') as f:
self.assertEqual(f.read(), b'')
class URandomTests(unittest.TestCase):
def test_urandom(self):
try:
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
except NotImplementedError:
pass
@contextlib.contextmanager
def _execvpe_mockup(defpath=None):
"""
Stubs out execv and execve functions when used as context manager.
Records exec calls. The mock execv and execve functions always raise an
exception as they would normally never return.
"""
# A list of tuples containing (function name, first arg, args)
# of calls to execv or execve that have been made.
calls = []
def mock_execv(name, *args):
calls.append(('execv', name, args))
raise RuntimeError("execv called")
def mock_execve(name, *args):
calls.append(('execve', name, args))
raise OSError(errno.ENOTDIR, "execve called")
try:
orig_execv = os.execv
orig_execve = os.execve
orig_defpath = os.defpath
os.execv = mock_execv
os.execve = mock_execve
if defpath is not None:
os.defpath = defpath
yield calls
finally:
os.execv = orig_execv
os.execve = orig_execve
os.defpath = orig_defpath
class ExecTests(unittest.TestCase):
@unittest.skipIf(USING_LINUXTHREADS,
"avoid triggering a linuxthreads bug: see issue #4970")
def test_execvpe_with_bad_program(self):
self.assertRaises(OSError, os.execvpe, 'no such app-',
['no such app-'], None)
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
@unittest.skipUnless(hasattr(os, '_execvpe'),
"No internal os._execvpe function to test.")
def _test_internal_execvpe(self, test_type):
program_path = os.sep + 'absolutepath'
if test_type is bytes:
program = b'executable'
fullpath = os.path.join(os.fsencode(program_path), program)
native_fullpath = fullpath
arguments = [b'progname', 'arg1', 'arg2']
else:
program = 'executable'
arguments = ['progname', 'arg1', 'arg2']
fullpath = os.path.join(program_path, program)
if os.name != "nt":
native_fullpath = os.fsencode(fullpath)
else:
native_fullpath = fullpath
env = {'spam': 'beans'}
# test os._execvpe() with an absolute path
with _execvpe_mockup() as calls:
self.assertRaises(RuntimeError,
os._execvpe, fullpath, arguments)
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0], ('execv', fullpath, (arguments,)))
# test os._execvpe() with a relative path:
# os.get_exec_path() returns defpath
with _execvpe_mockup(defpath=program_path) as calls:
self.assertRaises(OSError,
os._execvpe, program, arguments, env=env)
self.assertEqual(len(calls), 1)
self.assertSequenceEqual(calls[0],
('execve', native_fullpath, (arguments, env)))
# test os._execvpe() with a relative path:
# os.get_exec_path() reads the 'PATH' variable
with _execvpe_mockup() as calls:
env_path = env.copy()
if test_type is bytes:
env_path[b'PATH'] = program_path
else:
env_path['PATH'] = program_path
self.assertRaises(OSError,
os._execvpe, program, arguments, env=env_path)
self.assertEqual(len(calls), 1)
self.assertSequenceEqual(calls[0],
('execve', native_fullpath, (arguments, env_path)))
def test_internal_execvpe_str(self):
self._test_internal_execvpe(str)
if os.name != "nt":
self._test_internal_execvpe(bytes)
class Win32ErrorTests(unittest.TestCase):
def test_rename(self):
self.assertRaises(WindowsError, os.rename, support.TESTFN, support.TESTFN+".bak")
def test_remove(self):
self.assertRaises(WindowsError, os.remove, support.TESTFN)
def test_chdir(self):
self.assertRaises(WindowsError, os.chdir, support.TESTFN)
def test_mkdir(self):
f = open(support.TESTFN, "w")
try:
self.assertRaises(WindowsError, os.mkdir, support.TESTFN)
finally:
f.close()
os.unlink(support.TESTFN)
def test_utime(self):
self.assertRaises(WindowsError, os.utime, support.TESTFN, None)
def test_chmod(self):
self.assertRaises(WindowsError, os.chmod, support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
singles = ["fchdir", "dup", "fdopen", "fdatasync", "fstat",
"fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
#singles.append("close")
    #We omit close because it doesn't raise an exception on some platforms
def get_single(f):
def helper(self):
if hasattr(os, f):
self.check(getattr(os, f))
return helper
for f in singles:
locals()["test_"+f] = get_single(f)
def check(self, f, *args):
try:
f(support.make_bad_fd(), *args)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("%r didn't raise a OSError with a bad file descriptor"
% f)
def test_isatty(self):
if hasattr(os, "isatty"):
self.assertEqual(os.isatty(support.make_bad_fd()), False)
def test_closerange(self):
if hasattr(os, "closerange"):
fd = support.make_bad_fd()
# Make sure none of the descriptors we are about to close are
# currently valid (issue 6542).
for i in range(10):
try: os.fstat(fd+i)
except OSError:
pass
else:
break
if i < 2:
raise unittest.SkipTest(
"Unable to acquire a range of invalid file descriptors")
self.assertEqual(os.closerange(fd, fd + i-1), None)
def test_dup2(self):
if hasattr(os, "dup2"):
self.check(os.dup2, 20)
def test_fchmod(self):
if hasattr(os, "fchmod"):
self.check(os.fchmod, 0)
def test_fchown(self):
if hasattr(os, "fchown"):
self.check(os.fchown, -1, -1)
def test_fpathconf(self):
if hasattr(os, "fpathconf"):
self.check(os.fpathconf, "PC_NAME_MAX")
def test_ftruncate(self):
if hasattr(os, "ftruncate"):
self.check(os.ftruncate, 0)
def test_lseek(self):
if hasattr(os, "lseek"):
self.check(os.lseek, 0, 0)
def test_read(self):
if hasattr(os, "read"):
self.check(os.read, 1)
def test_tcsetpgrpt(self):
if hasattr(os, "tcsetpgrp"):
self.check(os.tcsetpgrp, 0)
def test_write(self):
if hasattr(os, "write"):
self.check(os.write, b" ")
class LinkTests(unittest.TestCase):
def setUp(self):
self.file1 = support.TESTFN
self.file2 = os.path.join(support.TESTFN + "2")
def tearDown(self):
for file in (self.file1, self.file2):
if os.path.exists(file):
os.unlink(file)
def _test_link(self, file1, file2):
with open(file1, "w") as f1:
f1.write("test")
os.link(file1, file2)
with open(file1, "r") as f1, open(file2, "r") as f2:
self.assertTrue(os.path.sameopenfile(f1.fileno(), f2.fileno()))
def test_link(self):
self._test_link(self.file1, self.file2)
def test_link_bytes(self):
self._test_link(bytes(self.file1, sys.getfilesystemencoding()),
bytes(self.file2, sys.getfilesystemencoding()))
def test_unicode_name(self):
try:
os.fsencode("\xf1")
except UnicodeError:
raise unittest.SkipTest("Unable to encode for this platform.")
self.file1 += "\xf1"
self.file2 = self.file1 + "2"
self._test_link(self.file1, self.file2)
if sys.platform != 'win32':
class Win32ErrorTests(unittest.TestCase):
pass
class PosixUidGidTests(unittest.TestCase):
if hasattr(os, 'setuid'):
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setuid, 0)
self.assertRaises(OverflowError, os.setuid, 1<<32)
if hasattr(os, 'setgid'):
def test_setgid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setgid, 0)
self.assertRaises(OverflowError, os.setgid, 1<<32)
if hasattr(os, 'seteuid'):
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.seteuid, 0)
self.assertRaises(OverflowError, os.seteuid, 1<<32)
if hasattr(os, 'setegid'):
def test_setegid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setegid, 0)
self.assertRaises(OverflowError, os.setegid, 1<<32)
if hasattr(os, 'setreuid'):
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setreuid, 0, 0)
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
def test_setreuid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
if hasattr(os, 'setregid'):
def test_setregid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setregid, 0, 0)
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
def test_setregid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
class Pep383Tests(unittest.TestCase):
def setUp(self):
if support.TESTFN_UNENCODABLE:
self.dir = support.TESTFN_UNENCODABLE
else:
self.dir = support.TESTFN
self.bdir = os.fsencode(self.dir)
bytesfn = []
def add_filename(fn):
try:
fn = os.fsencode(fn)
except UnicodeEncodeError:
return
bytesfn.append(fn)
add_filename(support.TESTFN_UNICODE)
if support.TESTFN_UNENCODABLE:
add_filename(support.TESTFN_UNENCODABLE)
if not bytesfn:
self.skipTest("couldn't create any non-ascii filename")
self.unicodefn = set()
os.mkdir(self.dir)
try:
for fn in bytesfn:
f = open(os.path.join(self.bdir, fn), "w")
f.close()
fn = os.fsdecode(fn)
if fn in self.unicodefn:
raise ValueError("duplicate filename")
self.unicodefn.add(fn)
except:
shutil.rmtree(self.dir)
raise
def tearDown(self):
shutil.rmtree(self.dir)
def test_listdir(self):
expected = self.unicodefn
found = set(os.listdir(self.dir))
self.assertEqual(found, expected)
def test_open(self):
for fn in self.unicodefn:
f = open(os.path.join(self.dir, fn), 'rb')
f.close()
def test_stat(self):
for fn in self.unicodefn:
os.stat(os.path.join(self.dir, fn))
else:
class PosixUidGidTests(unittest.TestCase):
pass
class Pep383Tests(unittest.TestCase):
pass
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
def _kill(self, sig):
# Start sys.executable as a subprocess and communicate from the
# subprocess to the parent that the interpreter is ready. When it
# becomes ready, send *sig* via os.kill to the subprocess and check
# that the return code is equal to *sig*.
import ctypes
from ctypes import wintypes
import msvcrt
# Since we can't access the contents of the process' stdout until the
# process has exited, use PeekNamedPipe to see what's inside stdout
# without waiting. This is done so we can tell that the interpreter
# is started and running at a point where it could handle a signal.
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.restype = wintypes.BOOL
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
ctypes.POINTER(ctypes.c_char), # stdout buf
wintypes.DWORD, # Buffer size
ctypes.POINTER(wintypes.DWORD), # bytes read
ctypes.POINTER(wintypes.DWORD), # bytes avail
ctypes.POINTER(wintypes.DWORD)) # bytes left
msg = "running"
proc = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('{}');"
"sys.stdout.flush();"
"input()".format(msg)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
self.addCleanup(proc.stdout.close)
self.addCleanup(proc.stderr.close)
self.addCleanup(proc.stdin.close)
count, max = 0, 100
while count < max and proc.poll() is None:
# Create a string buffer to store the result of stdout from the pipe
buf = ctypes.create_string_buffer(len(msg))
# Obtain the text currently in proc.stdout
# Bytes read/avail/left are left as NULL and unused
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
buf, ctypes.sizeof(buf), None, None, None)
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
if buf.value:
self.assertEqual(msg, buf.value.decode())
break
time.sleep(0.1)
count += 1
else:
self.fail("Did not receive communication from the subprocess")
os.kill(proc.pid, sig)
self.assertEqual(proc.wait(), sig)
def test_kill_sigterm(self):
# SIGTERM doesn't mean anything special, but make sure it works
self._kill(signal.SIGTERM)
def test_kill_int(self):
# os.kill on Windows can take an int which gets set as the exit code
self._kill(100)
def _kill_with_event(self, event, name):
tagname = "test_os_%s" % uuid.uuid1()
m = mmap.mmap(-1, 1, tagname)
m[0] = 0
# Run a script which has console control handling enabled.
proc = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
"win_console_handler.py"), tagname],
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
# Let the interpreter startup before we send signals. See #3137.
count, max = 0, 100
while count < max and proc.poll() is None:
if m[0] == 1:
break
time.sleep(0.1)
count += 1
else:
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("Subprocess didn't finish initialization")
os.kill(proc.pid, event)
# proc.send_signal(event) could also be done here.
# Allow time for the signal to be passed and the process to exit.
time.sleep(0.5)
if not proc.poll():
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("subprocess did not stop on {}".format(name))
@unittest.skip("subprocesses aren't inheriting CTRL+C property")
def test_CTRL_C_EVENT(self):
from ctypes import wintypes
import ctypes
# Make a NULL value by creating a pointer with no argument.
NULL = ctypes.POINTER(ctypes.c_int)()
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
wintypes.BOOL)
SetConsoleCtrlHandler.restype = wintypes.BOOL
# Calling this with NULL and FALSE causes the calling process to
# handle CTRL+C, rather than ignore it. This property is inherited
# by subprocesses.
SetConsoleCtrlHandler(NULL, 0)
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
def test_CTRL_BREAK_EVENT(self):
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
@support.skip_unless_symlink
class Win32SymlinkTests(unittest.TestCase):
filelink = 'filelinktest'
filelink_target = os.path.abspath(__file__)
dirlink = 'dirlinktest'
dirlink_target = os.path.dirname(filelink_target)
missing_link = 'missing link'
def setUp(self):
assert os.path.exists(self.dirlink_target)
assert os.path.exists(self.filelink_target)
assert not os.path.exists(self.dirlink)
assert not os.path.exists(self.filelink)
assert not os.path.exists(self.missing_link)
def tearDown(self):
if os.path.exists(self.filelink):
os.remove(self.filelink)
if os.path.exists(self.dirlink):
os.rmdir(self.dirlink)
if os.path.lexists(self.missing_link):
os.remove(self.missing_link)
def test_directory_link(self):
os.symlink(self.dirlink_target, self.dirlink)
self.assertTrue(os.path.exists(self.dirlink))
self.assertTrue(os.path.isdir(self.dirlink))
self.assertTrue(os.path.islink(self.dirlink))
self.check_stat(self.dirlink, self.dirlink_target)
def test_file_link(self):
os.symlink(self.filelink_target, self.filelink)
self.assertTrue(os.path.exists(self.filelink))
self.assertTrue(os.path.isfile(self.filelink))
self.assertTrue(os.path.islink(self.filelink))
self.check_stat(self.filelink, self.filelink_target)
def _create_missing_dir_link(self):
'Create a "directory" link to a non-existent target'
linkname = self.missing_link
if os.path.lexists(linkname):
os.remove(linkname)
target = r'c:\\target does not exist.29r3c740'
assert not os.path.exists(target)
target_is_dir = True
os.symlink(target, linkname, target_is_dir)
def test_remove_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# For compatibility with Unix, os.remove will check the
# directory status and call RemoveDirectory if the symlink
# was created with target_is_dir==True.
os.remove(self.missing_link)
@unittest.skip("currently fails; consider for improvement")
def test_isdir_on_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# consider having isdir return true for directory links
self.assertTrue(os.path.isdir(self.missing_link))
@unittest.skip("currently fails; consider for improvement")
def test_rmdir_on_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# consider allowing rmdir to remove directory links
os.rmdir(self.missing_link)
def check_stat(self, link, target):
self.assertEqual(os.stat(link), os.stat(target))
self.assertNotEqual(os.lstat(link), os.stat(link))
bytes_link = os.fsencode(link)
self.assertEqual(os.stat(bytes_link), os.stat(target))
self.assertNotEqual(os.lstat(bytes_link), os.stat(bytes_link))
def test_12084(self):
level1 = os.path.abspath(support.TESTFN)
level2 = os.path.join(level1, "level2")
level3 = os.path.join(level2, "level3")
try:
os.mkdir(level1)
os.mkdir(level2)
os.mkdir(level3)
file1 = os.path.abspath(os.path.join(level1, "file1"))
with open(file1, "w") as f:
f.write("file1")
orig_dir = os.getcwd()
try:
os.chdir(level2)
link = os.path.join(level2, "link")
os.symlink(os.path.relpath(file1), "link")
self.assertIn("link", os.listdir(os.getcwd()))
# Check os.stat calls from the same dir as the link
self.assertEqual(os.stat(file1), os.stat("link"))
# Check os.stat calls from a dir below the link
os.chdir(level1)
self.assertEqual(os.stat(file1),
os.stat(os.path.relpath(link)))
# Check os.stat calls from a dir above the link
os.chdir(level3)
self.assertEqual(os.stat(file1),
os.stat(os.path.relpath(link)))
finally:
os.chdir(orig_dir)
except OSError as err:
self.fail(err)
finally:
os.remove(file1)
shutil.rmtree(level1)
class FSEncodingTests(unittest.TestCase):
def test_nop(self):
self.assertEqual(os.fsencode(b'abc\xff'), b'abc\xff')
self.assertEqual(os.fsdecode('abc\u0141'), 'abc\u0141')
def test_identity(self):
# assert fsdecode(fsencode(x)) == x
for fn in ('unicode\u0141', 'latin\xe9', 'ascii'):
try:
bytesfn = os.fsencode(fn)
except UnicodeEncodeError:
continue
self.assertEqual(os.fsdecode(bytesfn), fn)
class PidTests(unittest.TestCase):
@unittest.skipUnless(hasattr(os, 'getppid'), "test needs os.getppid")
def test_getppid(self):
p = subprocess.Popen([sys.executable, '-c',
'import os; print(os.getppid())'],
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
# We are the parent of our subprocess
self.assertEqual(int(stdout), os.getpid())
# The introduction of this TestCase caused at least two different errors on
# *nix buildbots. Temporarily skip this to let the buildbots move along.
@unittest.skip("Skip due to platform/environment differences on *NIX buildbots")
@unittest.skipUnless(hasattr(os, 'getlogin'), "test needs os.getlogin")
class LoginTests(unittest.TestCase):
def test_getlogin(self):
user_name = os.getlogin()
self.assertNotEqual(len(user_name), 0)
def test_main():
support.run_unittest(
FileTests,
StatAttributeTests,
EnvironTests,
WalkTests,
MakedirTests,
DevNullTests,
URandomTests,
ExecTests,
Win32ErrorTests,
TestInvalidFD,
PosixUidGidTests,
Pep383Tests,
Win32KillTests,
Win32SymlinkTests,
FSEncodingTests,
PidTests,
LoginTests,
LinkTests,
)
if __name__ == "__main__":
test_main()
| apache-2.0 | -479,835,944,459,586,940 | 35.995185 | 94 | 0.5648 | false |
h00dy/Diamond | src/collectors/redisstat/redisstat.py | 22 | 10479 | # coding=utf-8
"""
Collects data from one or more Redis Servers
#### Dependencies
* redis
#### Notes
The collector is named redisstat because of an import issue: the Python
client library is called redis, so a collector module named redis would
shadow it. The odd name doesn't affect how the collector is used.
Example config file RedisCollector.conf
```
enabled=True
host=redis.example.com
port=16379
auth=PASSWORD
```
or for multi-instance mode:
```
enabled=True
instances = nick1@host1:port1, nick2@host2:port2/PASSWORD, ...
```
For connecting via unix sockets, provide the path prefixed with ``unix:``
instead of the host, e.g.
```
enabled=True
host=unix:/var/run/redis/redis.sock
```
or
```
enabled = True
instances = nick3@unix:/var/run/redis.sock:/PASSWORD
```
In that case, for disambiguation there must be a colon ``:`` before the slash
``/`` followed by the password.
Note: when using the host/port config mode, the port number is used in
the metric key. When using the multi-instance mode, the nick will be used.
If not specified the port will be used. In case of unix sockets, the base name
without file extension (i.e. in the aforementioned examples ``redis``)
is the default metric key.
"""
import diamond.collector
import time
import os
try:
import redis
except ImportError:
redis = None
SOCKET_PREFIX = 'unix:'
SOCKET_PREFIX_LEN = len(SOCKET_PREFIX)
class RedisCollector(diamond.collector.Collector):
_DATABASE_COUNT = 16
_DEFAULT_DB = 0
_DEFAULT_HOST = 'localhost'
_DEFAULT_PORT = 6379
_DEFAULT_SOCK_TIMEOUT = 5
_KEYS = {'clients.blocked': 'blocked_clients',
'clients.connected': 'connected_clients',
'clients.longest_output_list': 'client_longest_output_list',
'cpu.parent.sys': 'used_cpu_sys',
'cpu.children.sys': 'used_cpu_sys_children',
'cpu.parent.user': 'used_cpu_user',
'cpu.children.user': 'used_cpu_user_children',
'hash_max_zipmap.entries': 'hash_max_zipmap_entries',
'hash_max_zipmap.value': 'hash_max_zipmap_value',
'keys.evicted': 'evicted_keys',
'keys.expired': 'expired_keys',
'keyspace.hits': 'keyspace_hits',
'keyspace.misses': 'keyspace_misses',
'last_save.changes_since': 'changes_since_last_save',
'last_save.time': 'last_save_time',
'memory.internal_view': 'used_memory',
'memory.external_view': 'used_memory_rss',
'memory.fragmentation_ratio': 'mem_fragmentation_ratio',
'process.commands_processed': 'total_commands_processed',
'process.connections_received': 'total_connections_received',
'process.uptime': 'uptime_in_seconds',
'pubsub.channels': 'pubsub_channels',
'pubsub.patterns': 'pubsub_patterns',
'slaves.connected': 'connected_slaves',
'slaves.last_io': 'master_last_io_seconds_ago'}
_RENAMED_KEYS = {'last_save.changes_since': 'rdb_changes_since_last_save',
'last_save.time': 'rdb_last_save_time'}
def process_config(self):
super(RedisCollector, self).process_config()
instance_list = self.config['instances']
        # configobj makes a str out of a single-element list; convert it back
if isinstance(instance_list, basestring):
instance_list = [instance_list]
# process original single redis instance
if len(instance_list) == 0:
host = self.config['host']
port = int(self.config['port'])
auth = self.config['auth']
if auth is not None:
instance_list.append('%s:%d/%s' % (host, port, auth))
else:
instance_list.append('%s:%d' % (host, port))
self.instances = {}
for instance in instance_list:
if '@' in instance:
(nickname, hostport) = instance.split('@', 1)
else:
nickname = None
hostport = instance
if hostport.startswith(SOCKET_PREFIX):
unix_socket, __, port_auth = hostport[
SOCKET_PREFIX_LEN:].partition(':')
auth = port_auth.partition('/')[2] or None
if nickname is None:
nickname = os.path.splitext(
os.path.basename(unix_socket))[0]
self.instances[nickname] = (self._DEFAULT_HOST,
self._DEFAULT_PORT,
unix_socket,
auth)
else:
if '/' in hostport:
parts = hostport.split('/')
hostport = parts[0]
auth = parts[1]
else:
auth = None
if ':' in hostport:
if hostport[0] == ':':
host = self._DEFAULT_HOST
port = int(hostport[1:])
else:
parts = hostport.split(':')
host = parts[0]
port = int(parts[1])
else:
host = hostport
port = self._DEFAULT_PORT
if nickname is None:
nickname = str(port)
self.instances[nickname] = (host, port, None, auth)
self.log.debug("Configured instances: %s" % self.instances.items())
def get_default_config_help(self):
config_help = super(RedisCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname to collect from',
'port': 'Port number to collect from',
'timeout': 'Socket timeout',
'db': '',
'auth': 'Password?',
'databases': 'how many database instances to collect',
'instances': "Redis addresses, comma separated, syntax:"
+ " nick1@host:port, nick2@:port or nick3@host"
})
return config_help
def get_default_config(self):
"""
Return default config
:rtype: dict
"""
config = super(RedisCollector, self).get_default_config()
config.update({
'host': self._DEFAULT_HOST,
'port': self._DEFAULT_PORT,
'timeout': self._DEFAULT_SOCK_TIMEOUT,
'db': self._DEFAULT_DB,
'auth': None,
'databases': self._DATABASE_COUNT,
'path': 'redis',
'instances': [],
})
return config
def _client(self, host, port, unix_socket, auth):
"""Return a redis client for the configuration.
:param str host: redis host
:param int port: redis port
:rtype: redis.Redis
"""
db = int(self.config['db'])
timeout = int(self.config['timeout'])
try:
cli = redis.Redis(host=host, port=port,
db=db, socket_timeout=timeout, password=auth,
unix_socket_path=unix_socket)
cli.ping()
return cli
except Exception, ex:
self.log.error("RedisCollector: failed to connect to %s:%i. %s.",
unix_socket or host, port, ex)
def _precision(self, value):
"""Return the precision of the number
:param str value: The value to find the precision of
:rtype: int
"""
value = str(value)
decimal = value.rfind('.')
if decimal == -1:
return 0
return len(value) - decimal - 1
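    # Hedged examples (illustrative comments, not in the original module):
    #   self._precision(10)     -> 0
    #   self._precision('3.14') -> 2
    # so collect_instance() can publish each value with its decimal precision.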
def _publish_key(self, nick, key):
"""Return the full key for the partial key.
:param str nick: Nickname for Redis instance
:param str key: The key name
:rtype: str
"""
return '%s.%s' % (nick, key)
def _get_info(self, host, port, unix_socket, auth):
"""Return info dict from specified Redis instance
:param str host: redis host
:param int port: redis port
:rtype: dict
"""
client = self._client(host, port, unix_socket, auth)
if client is None:
return None
info = client.info()
del client
return info
def collect_instance(self, nick, host, port, unix_socket, auth):
"""Collect metrics from a single Redis instance
:param str nick: nickname of redis instance
:param str host: redis host
:param int port: redis port
:param str unix_socket: unix socket, if applicable
:param str auth: authentication password
"""
# Connect to redis and get the info
info = self._get_info(host, port, unix_socket, auth)
if info is None:
return
# The structure should include the port for multiple instances per
# server
data = dict()
# Iterate over the top level keys
for key in self._KEYS:
if self._KEYS[key] in info:
data[key] = info[self._KEYS[key]]
# Iterate over renamed keys for 2.6 support
for key in self._RENAMED_KEYS:
if self._RENAMED_KEYS[key] in info:
data[key] = info[self._RENAMED_KEYS[key]]
        # Look for database-specific stats
for dbnum in range(0, int(self.config.get('databases',
self._DATABASE_COUNT))):
db = 'db%i' % dbnum
if db in info:
for key in info[db]:
data['%s.%s' % (db, key)] = info[db][key]
# Time since last save
for key in ['last_save_time', 'rdb_last_save_time']:
if key in info:
data['last_save.time_since'] = int(time.time()) - info[key]
# Publish the data to graphite
for key in data:
self.publish(self._publish_key(nick, key),
data[key],
precision=self._precision(data[key]),
metric_type='GAUGE')
def collect(self):
"""Collect the stats from the redis instance and publish them.
"""
if redis is None:
self.log.error('Unable to import module redis')
return {}
for nick in self.instances.keys():
(host, port, unix_socket, auth) = self.instances[nick]
self.collect_instance(nick, host, int(port), unix_socket, auth)
| mit | 6,292,786,305,851,513,000 | 30.948171 | 79 | 0.550052 | false |
JoachimVandersmissen/CodingSolutions | python/PythonForEveryone/chapter3/22.py | 1 | 1199 | month = int(input("Please enter your birthday month"))
day = int(input("Please enter your birthday day"))
if month == 1:
if day < 20:
print("Capricorn")
else:
print("Aquarius")
if month == 2:
if day < 20:
print("Aquarius")
else:
print("Pisces")
if month == 3:
if day < 21:
print("Pisces")
else:
print("Aries")
if month == 4:
if day < 21:
print("Aries")
else:
print("Taurus")
if month == 5:
if day < 21:
print("Taurus")
else:
print("Gemini")
if month == 6:
if day < 22:
print("Gemini")
else:
print("Cancer")
if month == 7:
if day < 23:
print("Cancer")
else:
print("Leo")
if month == 8:
if day < 24:
print("Leo")
else:
print("Virgo")
if month == 9:
if day < 23:
print("Virgo")
else:
print("Libra")
if month == 10:
if day < 24:
print("Libra")
else:
print("Scorpio")
if month == 11:
if day < 23:
print("Scorpio")
else:
print("Sagittarius")
if month == 12:
if day < 23:
print("Sagittarius")
else:
print("Capricorn")
| apache-2.0 | -7,925,780,290,893,789,000 | 18.33871 | 54 | 0.486239 | false |
slohse/ansible | lib/ansible/modules/network/vyos/vyos_facts.py | 37 | 8854 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_facts
version_added: "2.2"
author: "Nathaniel Case (@qalthos)"
short_description: Collect facts from remote devices running VyOS
description:
- Collects a base set of device facts from a remote device that
is running VyOS. This module prepends all of the
base network fact keys with U(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: vyos
notes:
- Tested against VYOS 1.1.7
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, default, config, and neighbors. Can specify a list of
values to include a larger subset. Values can also be used
        with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: "!config"
"""
EXAMPLES = """
- name: collect all facts from the device
vyos_facts:
gather_subset: all
- name: collect only the config and default facts
vyos_facts:
gather_subset: config
- name: collect everything except the config
vyos_facts:
gather_subset: "!config"
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_commits:
description: The set of available configuration revisions
returned: when present
type: list
ansible_net_hostname:
description: The configured system hostname
returned: always
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.vyos.vyos import run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
class FactsBase(object):
COMMANDS = frozenset()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, list(self.COMMANDS))
class Default(FactsBase):
COMMANDS = [
'show version',
'show host name',
]
def populate(self):
super(Default, self).populate()
data = self.responses[0]
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['hostname'] = self.responses[1]
def parse_version(self, data):
match = re.search(r'Version:\s*(.*)', data)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'HW model:\s*(\S+)', data)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'HW S/N:\s+(\S+)', data)
if match:
return match.group(1)
class Config(FactsBase):
COMMANDS = [
'show configuration commands',
'show system commit',
]
def populate(self):
super(Config, self).populate()
self.facts['config'] = self.responses
commits = self.responses[1]
entries = list()
entry = None
for line in commits.split('\n'):
match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line)
if match:
if entry:
entries.append(entry)
entry = dict(revision=match.group(1),
datetime=match.group(2),
by=str(match.group(3)).strip(),
via=str(match.group(4)).strip(),
comment=None)
else:
entry['comment'] = line.strip()
self.facts['commits'] = entries
class Neighbors(FactsBase):
COMMANDS = [
'show lldp neighbors',
'show lldp neighbors detail',
]
def populate(self):
super(Neighbors, self).populate()
all_neighbors = self.responses[0]
if 'LLDP not configured' not in all_neighbors:
neighbors = self.parse(
self.responses[1]
)
self.facts['neighbors'] = self.parse_neighbors(neighbors)
def parse(self, data):
parsed = list()
values = None
for line in data.split('\n'):
if not line:
continue
elif line[0] == ' ':
values += '\n%s' % line
elif line.startswith('Interface'):
if values:
parsed.append(values)
values = line
if values:
parsed.append(values)
return parsed
def parse_neighbors(self, data):
facts = dict()
for item in data:
interface = self.parse_interface(item)
host = self.parse_host(item)
port = self.parse_port(item)
if interface not in facts:
facts[interface] = list()
facts[interface].append(dict(host=host, port=port))
return facts
def parse_interface(self, data):
match = re.search(r'^Interface:\s+(\S+),', data)
return match.group(1)
def parse_host(self, data):
match = re.search(r'SysName:\s+(.+)$', data, re.M)
if match:
return match.group(1)
def parse_port(self, data):
match = re.search(r'PortDescr:\s+(.+)$', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
neighbors=Neighbors,
config=Config
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
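# Hedged walk-through (illustrative comment, not part of the original
# module) of how gather_subset values resolve in main() below:
#   gather_subset: ['all']     -> collects default, neighbors and config
#   gather_subset: ['!config'] -> collects default and neighbors only
#   gather_subset: ['config']  -> collects config plus the always-on default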
def main():
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
argument_spec.update(vyos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Subset must be one of [%s], got %s' %
(', '.join(VALID_SUBSETS), subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,373,468,952,665,928,000 | 26.66875 | 72 | 0.60899 | false |
cespare/pastedown | vendor/pygments/pygments/lexers/_luabuiltins.py | 26 | 6863 | # -*- coding: utf-8 -*-
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'],
'coroutine': ['coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'],
'debug': ['debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'],
'io': ['io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'],
'math': ['math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'],
'modules': ['module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'],
'os': ['os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'],
'string': ['string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'],
'table': ['table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort']}
if __name__ == '__main__':
import re
import urllib
import pprint
    # You can't generally find out what module a function belongs to if you
    # have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urllib.urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().iteritems():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
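    # Hedged examples (illustrative comments, not in the original script):
    #   get_function_module('coroutine.wrap') -> 'coroutine' (via callback)
    #   get_function_module('foo.bar')        -> 'foo' (dotted-name fallback)
    #   get_function_module('print')          -> 'basic'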
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
version = get_newest_version()
print '> Downloading function index for Lua %s' % version
functions = get_lua_functions(version)
print '> %d functions found:' % len(functions)
modules = {}
for full_function_name in functions:
print '>> %s' % full_function_name
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
| mit | 4,584,915,070,635,155,000 | 26.562249 | 75 | 0.471805 | false |
adsorensen/girder | tests/cases/path_utilities_test.py | 3 | 2095 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import unittest
from girder.utility import path
strings = [
('abcd', 'abcd'),
('/', '\/'),
('\\', '\\\\'),
('/\\', '\/\\\\'),
('\\//\\', '\\\\\/\/\\\\'),
('a\\\\b//c\\d', 'a\\\\\\\\b\/\/c\\\\d')
]
paths = [
('abcd', ['abcd']),
('/abcd', ['', 'abcd']),
('/ab/cd/ef/gh', ['', 'ab', 'cd', 'ef', 'gh']),
('/ab/cd//', ['', 'ab', 'cd', '', '']),
('ab\\/cd', ['ab/cd']),
('ab\/c/d', ['ab/c', 'd']),
('ab\//cd', ['ab/', 'cd']),
('ab/\/cd', ['ab', '/cd']),
('ab\\\\/cd', ['ab\\', 'cd']),
('ab\\\\/\\\\cd', ['ab\\', '\\cd']),
('ab\\\\\\/\\\\cd', ['ab\\/\\cd']),
('/\\\\abcd\\\\/', ['', '\\abcd\\', '']),
('/\\\\\\\\/\\//\\\\', ['', '\\\\', '/', '\\'])
]
class TestPathUtilities(unittest.TestCase):
"""Tests the girder.utility.path module."""
def testEncodeStrings(self):
for raw, encoded in strings:
self.assertEqual(path.encode(raw), encoded)
def testDecodeStrings(self):
for raw, encoded in strings:
self.assertEqual(path.decode(encoded), raw)
def testSplitPath(self):
for pth, tokens in paths:
self.assertEqual(path.split(pth), tokens)
def testJoinTokens(self):
for pth, tokens in paths:
self.assertEqual(path.join(tokens), pth)
| apache-2.0 | -4,752,472,402,200,224,000 | 30.742424 | 79 | 0.481146 | false |
Dreamsolution/django-auth-policy | testsite/views.py | 1 | 2666 | import logging
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.template.response import TemplateResponse
from django.utils.http import is_safe_url
from django.shortcuts import resolve_url
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login
from django.contrib.sites.shortcuts import get_current_site
from django_auth_policy.decorators import login_not_required
from django_auth_policy.forms import StrictAuthenticationForm
logger = logging.getLogger(__name__)
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=StrictAuthenticationForm,
current_app=None, extra_context=None):
"""
Displays the login form and handles the login action.
This view is the same as the Django 1.6 login view but uses the
StrictAuthenticationForm
"""
redirect_to = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name, ''))
if request.method == "POST":
form = authentication_form(request, data=request.POST)
if form.is_valid():
# Ensure the user-originating redirection url is safe.
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
# Okay, security check complete. Log the user in.
auth_login(request, form.get_user())
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request)
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
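# A minimal URLconf sketch for wiring up this view (added for illustration;
# the URL patterns and names below are assumptions, not part of the project):
#
#   from django.conf.urls import url
#   from testsite import views
#
#   urlpatterns = [
#       url(r'^login/$', views.login, name='login'),
#       url(r'^private/$', views.login_required_view),
#   ]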
@login_required
def login_required_view(request):
""" View used in tests
"""
return HttpResponse('ok')
@login_not_required
def login_not_required_view(request):
""" View used in tests
"""
return HttpResponse('login not required!')
def another_view(request):
""" View used in tests
"""
return HttpResponse('another view')
| bsd-3-clause | -536,018,489,641,723,800 | 30.364706 | 76 | 0.692423 | false |
vlegoff/tsunami | src/test/secondaires/crafting/test_talent.py | 1 | 2655 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier définissant les unittest de talents de guilde."""
import unittest
from test.primaires.joueur.static.joueur import ManipulationJoueur
class TestTalent(ManipulationJoueur, unittest.TestCase):
"""Unittest des talents de guilde."""
def test_creation(self):
"""Test l'ajout de talents à des guildes vides."""
joueur = self.creer_joueur("simple", "Fiche")
        # Create a guild with two talents
guilde = importeur.crafting.creer_guilde("forgerons")
guilde.ajouter_rang("apprenti")
guilde.ajouter_talent("forge", "talent de la forge", False)
guilde.ajouter_talent("ciseau", "art du ciseau", False)
guilde.ouvrir()
avant = joueur.points_apprentissage_max
        # Joining the guild should grant 100 extra learning points
guilde.rejoindre(joueur)
apres = joueur.points_apprentissage_max
self.assertEqual(avant + 100, apres)
        # Clean up after the test
importeur.crafting.supprimer_guilde("forgerons")
self.supprimer_joueur(joueur)
| bsd-3-clause | -2,057,537,649,934,792,200 | 43.183333 | 79 | 0.740098 | false |
FelixZYY/gyp | pylib/gyp/common.py | 34 | 19170 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
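# Hedged usage sketch (added for illustration, not part of gyp itself); the
# function name below is hypothetical:
#
# @memoize
# def _Fib(n):
#   return n if n < 2 else _Fib(n - 1) + _Fib(n - 2)
#
# Results are cached per argument tuple, so repeated calls are O(1) lookups;
# as noted above, the arguments must be hashable.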
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
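# Illustrative expected encodings (added comments, not part of gyp):
#   EncodePOSIXShellArgument('hello')       -> hello
#   EncodePOSIXShellArgument('hello there') -> "hello there"
#   EncodePOSIXShellArgument('num=$N')      -> "num=$N" ($ left expandable)
#   EncodePOSIXShellList(['a b', 'c'])      -> "a b" c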
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer:
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
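# Minimal usage sketch (added for illustration, not part of gyp); the
# filename is hypothetical:
#
# out = WriteOnDiff('build.ninja')
# out.write('rule cc\n')
# out.close()  # on close, the temp file replaces build.ninja only on diff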
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
| bsd-3-clause | -6,816,253,646,193,558,000 | 31.602041 | 83 | 0.663485 | false |
deka108/meas_deka | assessment/utilities/answer_formatter.py | 2 | 3442 | """
# Name: check_answer_api/utilities/answer_formatter.py
# Description:
# Created by: Martinus Alexander
# Date Created: Nov 30, 2016
# Last Modified: Dec 21, 2016
# Modified by: Martinus Alexander
"""
import re
'''
Split a sequence of characters into a list of terms.
Used to improve the trigonometric proving assessment.
This supports the first additional feature: find whether the user wrote the
important steps.
Approach: split the expression into a list of terms, then compare each term.
'''
def split_term(expression):
# End of expression
if len(expression) == 0:
return []
# Find the number of bracket pairs needed
    # bracket_left holds the number of bracket pairs still to be consumed
if re.match(r"^\\frac", expression):
bracket_left = 2
elif re.match(r"^\\(sin|cos|tan|cot|sec|csc|log)\^", expression):
bracket_left = 2
elif re.match(r"^\\(sin|cos|tan|cot|sec|csc|log)", expression):
bracket_left = 1
else:
bracket_left = 0
    # Find the index of the first opening bracket, or 0 when no bracket
    # pairs are expected; this is the starting index to explore
if bracket_left > 0:
start_position = expression.index("{")
else:
start_position = 0
end_position = start_position
counter = 1
    # Find the location of the closing bracket of every expected pair. Keep
    # scanning until all pairs are consumed, so that a + or - inside a later
    # pair of braces is not mistaken for a term boundary.
    while bracket_left > 0:
        end_position = end_position + 1
        # Increment or decrement according to the current character
        if expression[end_position] == '{':
            counter = counter + 1
        elif expression[end_position] == '}':
            counter = counter - 1
            # One pair of brackets has been found
            if counter == 0:
                bracket_left = bracket_left - 1
    # Include the rest of the string up to the next delimiter (+ or -)
while end_position < len(expression) - 1:
if ((expression[end_position + 1] == '+') or
(expression[end_position + 1] == '-')):
break
else:
end_position = end_position + 1
result = expression[0:end_position + 1]
    # Remove the unnecessary positive sign (+) at the beginning
if result.startswith('+'):
result = result[1:]
# Wrap into list
result_list = [result]
return (result_list + split_term(expression[end_position + 1:]))
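# Hedged examples of the intended behaviour (added comments, assuming raw
# LaTeX strings like those produced by the assessment front end):
#   split_term("a+b-c")                 -> ["a", "b", "-c"]
#   split_term(r"\frac{a}{b}+\sin{x}")  -> [r"\frac{a}{b}", r"\sin{x}"]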
'''
Split a sequence of characters into a list of answers.
Delimiter = "|"
'''
def split_answer(expression):
return expression.split("|")
"""
Split an expression into two parts, LHS and RHS.
Handles plane geometry relations (\bot, \parallel) as well.
"""
def split_LHS_RHS(expression):
    # Remove spaces
    expression = expression.replace(" ", "")
    # Use a raw string so that \b is not interpreted as a backspace escape
    split_expr = re.split(r"=|\\bot|\\parallel", expression)
if len(split_expr) != 2:
# Error
return None, None
# Find delimiter
if "\bot" in expression:
delimiter = "\bot"
elif "\parallel" in expression:
delimiter = "\parallel"
else:
delimiter = "="
LHS = split_expr[0]
RHS = split_expr[1]
return LHS, RHS, delimiter
def split_plane_geometry_term(expression):
LHS, RHS, delimiter = split_LHS_RHS(expression)
if LHS is None and RHS is None:
# Error when splitting RHS and RHS
return None
LHS_split = split_term(LHS)
RHS_split = split_term(RHS)
result_dict = {}
result_dict['LHS'] = LHS_split
result_dict['RHS'] = RHS_split
result_dict['delimiter'] = delimiter
return result_dict
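# Hedged example (added comment, assuming LaTeX-style input):
#   split_plane_geometry_term(r"AB \bot CD+EF")
#   -> {'LHS': ['AB'], 'RHS': ['CD', 'EF'], 'delimiter': r'\bot'}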
| apache-2.0 | -8,629,615,851,293,824,000 | 28.930435 | 75 | 0.622022 | false |
xodus7/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/batch_normalization_test.py | 25 | 9994 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchNorm Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.batch_normalization import BatchNormalization
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class BatchNormTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
def _reduction_axes(self, input_shape, event_dims):
if isinstance(event_dims, int):
event_dims = [event_dims]
ndims = len(input_shape)
# Convert event_dims to non-negative indexing.
event_dims = list(event_dims)
for idx, x in enumerate(event_dims):
if x < 0:
event_dims[idx] = ndims + x
return tuple(i for i in range(ndims) if i not in event_dims)
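    # e.g. _reduction_axes(input_shape=(5, 2, 4), event_dims=[-1]) -> (0, 1):
    # the event dimension (axis 2) is kept, the batch axes are reduced over.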
def testForwardInverse(self):
"""Tests forward and backward passes with different event shapes.
      input_shape: Shape tuple for the input tensor.
      event_dims: List of dimension indices that will be normalized.
training: Boolean of whether bijector runs in training or inference mode.
"""
params = [
((5*2, 4), [-1], False),
((5, 2, 4), [-1], False),
((5, 2, 4), [1, 2], False),
((5, 2, 4), [0, 1], False),
((5*2, 4), [-1], True),
((5, 2, 4), [-1], True),
((5, 2, 4), [1, 2], True),
((5, 2, 4), [0, 1], True)
]
for input_shape, event_dims, training in params:
x_ = np.arange(5 * 4 * 2).astype(np.float32).reshape(input_shape)
with self.cached_session() as sess:
x = constant_op.constant(x_)
# When training, memorize the exact mean of the last
# minibatch that it normalized (instead of moving average assignment).
layer = normalization.BatchNormalization(
axis=event_dims, momentum=0., epsilon=0.)
batch_norm = BatchNormalization(
batchnorm_layer=layer, training=training)
# Minibatch statistics are saved only after norm_x has been computed.
norm_x = batch_norm.inverse(x)
with ops.control_dependencies(batch_norm.batchnorm.updates):
moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
denorm_x = batch_norm.forward(array_ops.identity(norm_x))
fldj = batch_norm.forward_log_det_jacobian(
x, event_ndims=len(event_dims))
# Use identity to invalidate cache.
ildj = batch_norm.inverse_log_det_jacobian(
array_ops.identity(denorm_x), event_ndims=len(event_dims))
variables.global_variables_initializer().run()
# Update variables.
norm_x_ = sess.run(norm_x)
[
norm_x_,
moving_mean_,
moving_var_,
denorm_x_,
ildj_,
fldj_,
] = sess.run([
norm_x,
moving_mean,
moving_var,
denorm_x,
ildj,
fldj,
])
self.assertEqual("batch_normalization", batch_norm.name)
reduction_axes = self._reduction_axes(input_shape, event_dims)
keepdims = len(event_dims) > 1
expected_batch_mean = np.mean(
x_, axis=reduction_axes, keepdims=keepdims)
expected_batch_var = np.var(x_, axis=reduction_axes, keepdims=keepdims)
if training:
# When training=True, values become normalized across batch dim and
# original values are recovered after de-normalizing.
zeros = np.zeros_like(norm_x_)
self.assertAllClose(np.mean(zeros, axis=reduction_axes),
np.mean(norm_x_, axis=reduction_axes))
self.assertAllClose(expected_batch_mean, moving_mean_)
self.assertAllClose(expected_batch_var, moving_var_)
self.assertAllClose(x_, denorm_x_, atol=1e-5)
# Since moving statistics are set to batch statistics after
# normalization, ildj and -fldj should match.
self.assertAllClose(ildj_, -fldj_)
# ildj is computed with minibatch statistics.
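          # (The inverse maps x -> (x - mean) / sqrt(var + eps) with unit
          # scale, so ildj = sum(log(scale) - 0.5 * log(var + eps)), scale=1.)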
expected_ildj = np.sum(np.log(1.) - .5 * np.log(
expected_batch_var + batch_norm.batchnorm.epsilon))
self.assertAllClose(expected_ildj, ildj_)
else:
# When training=False, moving_mean, moving_var remain at their
# initialized values (0., 1.), resulting in no scale/shift (a small
# shift occurs if epsilon > 0.)
self.assertAllClose(x_, norm_x_)
self.assertAllClose(x_, denorm_x_, atol=1e-5)
# ildj is computed with saved statistics.
expected_ildj = np.sum(
np.log(1.) - .5 * np.log(1. + batch_norm.batchnorm.epsilon))
self.assertAllClose(expected_ildj, ildj_)
def testMaximumLikelihoodTraining(self):
# Test Maximum Likelihood training with default bijector.
with self.cached_session() as sess:
base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
batch_norm = BatchNormalization(training=True)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=base_dist,
bijector=batch_norm)
target_dist = distributions.MultivariateNormalDiag(loc=[1., 2.])
target_samples = target_dist.sample(100)
dist_samples = dist.sample(3000)
loss = -math_ops.reduce_mean(dist.log_prob(target_samples))
with ops.control_dependencies(batch_norm.batchnorm.updates):
train_op = adam.AdamOptimizer(1e-2).minimize(loss)
moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
variables.global_variables_initializer().run()
for _ in range(3000):
sess.run(train_op)
[
dist_samples_,
moving_mean_,
moving_var_
] = sess.run([
dist_samples,
moving_mean,
moving_var
])
self.assertAllClose([1., 2.], np.mean(dist_samples_, axis=0), atol=5e-2)
self.assertAllClose([1., 2.], moving_mean_, atol=5e-2)
self.assertAllClose([1., 1.], moving_var_, atol=5e-2)
def testLogProb(self):
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
dist = transformed_distribution_lib.TransformedDistribution(
distribution=base_dist,
bijector=batch_norm,
validate_args=True)
samples = dist.sample(int(1e5))
# No volume distortion since training=False, bijector is initialized
# to the identity transformation.
base_log_prob = base_dist.log_prob(samples)
dist_log_prob = dist.log_prob(samples)
variables.global_variables_initializer().run()
base_log_prob_, dist_log_prob_ = sess.run([base_log_prob, dist_log_prob])
self.assertAllClose(base_log_prob_, dist_log_prob_)
def testMutuallyConsistent(self):
# BatchNorm bijector is only mutually consistent when training=False.
dims = 4
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=batch_norm,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=2.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
# BatchNorm bijector is only mutually consistent when training=False.
dims = 4
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = Invert(
BatchNormalization(batchnorm_layer=layer, training=False))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=batch_norm,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=2.,
center=0.,
rtol=0.02)
if __name__ == "__main__":
test.main()
| apache-2.0 | 1,465,846,117,281,848,000 | 41.168776 | 104 | 0.644287 | false |
rallylee/gem5 | tests/configs/realview-switcheroo-atomic.py | 64 | 2427 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
import switcheroo
root = LinuxArmFSSwitcheroo(
mem_class=SimpleMemory,
cpu_classes=(AtomicSimpleCPU, AtomicSimpleCPU)
).create_root()
# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
| bsd-3-clause | -851,378,677,790,431,500 | 48.530612 | 72 | 0.792748 | false |
Juraci/tempest | tempest/tests/test_list_tests.py | 34 | 1824 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import six
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_testr_list_tests_no_errors(self):
# Remove unit test discover path from env to test tempest tests
test_env = os.environ.copy()
test_env.pop('OS_TEST_PATH')
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE,
env=test_env)
ids, err = p.communicate()
self.assertEqual(0, p.returncode,
"test discovery failed, one or more files cause an "
"error on import %s" % ids)
ids = six.text_type(ids).split('\n')
for test_id in ids:
            if re.match(r'(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
parts = test_id.partition('tempest')
fail_id = parts[1] + parts[2]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
| apache-2.0 | 7,190,439,728,409,217,000 | 39.533333 | 78 | 0.614035 | false |
vasyarv/edx-platform | lms/djangoapps/shoppingcart/processors/CyberSource.py | 142 | 19828 | """
Implementation the CyberSource credit card processor.
IMPORTANT: CyberSource will deprecate this version of the API ("Hosted Order Page") in September 2014.
We are keeping this implementation in the code-base for now, but we should
eventually replace this module with the newer implementation (in `CyberSource2.py`)
To enable this implementation, add the following to Django settings:
CC_PROCESSOR_NAME = "CyberSource"
CC_PROCESSOR = {
"CyberSource": {
"SHARED_SECRET": "<shared secret>",
"MERCHANT_ID": "<merchant ID>",
"SERIAL_NUMBER": "<serial number>",
"PURCHASE_ENDPOINT": "<purchase endpoint>"
}
}
"""
import time
import hmac
import binascii
import re
import json
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from hashlib import sha1
from textwrap import dedent
from django.conf import settings
from django.utils.translation import ugettext as _
from edxmako.shortcuts import render_to_string
from shoppingcart.models import Order
from shoppingcart.processors.exceptions import *
from shoppingcart.processors.helpers import get_processor_config
from microsite_configuration import microsite
def process_postpay_callback(params, **kwargs):
"""
    The top-level entry point for this module.
This function is handed the callback request after the customer has entered the CC info and clicked "buy"
on the external Hosted Order Page.
It is expected to verify the callback and determine if the payment was successful.
It returns {'success':bool, 'order':Order, 'error_html':str}
If successful this function must have the side effect of marking the order purchased and calling the
purchased_callbacks of the cart items.
If unsuccessful this function should not have those side effects but should try to figure out why and
return a helpful-enough error message in error_html.
"""
try:
verify_signatures(params)
result = payment_accepted(params)
if result['accepted']:
# SUCCESS CASE first, rest are some sort of oddity
record_purchase(params, result['order'])
return {'success': True,
'order': result['order'],
'error_html': ''}
else:
return {'success': False,
'order': result['order'],
'error_html': get_processor_decline_html(params)}
except CCProcessorException as error:
return {'success': False,
'order': None, # due to exception we may not have the order
'error_html': get_processor_exception_html(error)}
def processor_hash(value):
"""
Performs the base64(HMAC_SHA1(key, value)) used by CyberSource Hosted Order Page
"""
shared_secret = get_processor_config().get('SHARED_SECRET', '')
hash_obj = hmac.new(shared_secret.encode('utf-8'), value.encode('utf-8'), sha1)
return binascii.b2a_base64(hash_obj.digest())[:-1] # last character is a '\n', which we don't want
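# Illustrative only (fake secret): with SHARED_SECRET = "secret",
# processor_hash("amount=10.00") returns the 28-character base64 encoding
# of the 20-byte HMAC-SHA1 digest, minus the trailing newline.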
def sign(params, signed_fields_key='orderPage_signedFields', full_sig_key='orderPage_signaturePublic'):
"""
params needs to be an ordered dict, b/c cybersource documentation states that order is important.
Reverse engineered from PHP version provided by cybersource
"""
merchant_id = get_processor_config().get('MERCHANT_ID', '')
order_page_version = get_processor_config().get('ORDERPAGE_VERSION', '7')
serial_number = get_processor_config().get('SERIAL_NUMBER', '')
params['merchantID'] = merchant_id
params['orderPage_timestamp'] = int(time.time() * 1000)
params['orderPage_version'] = order_page_version
params['orderPage_serialNumber'] = serial_number
fields = u",".join(params.keys())
values = u",".join([u"{0}={1}".format(i, params[i]) for i in params.keys()])
fields_sig = processor_hash(fields)
values += u",signedFieldsPublicSignature=" + fields_sig
params[full_sig_key] = processor_hash(values)
params[signed_fields_key] = fields
return params
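# Minimal illustrative call (hypothetical values; real requests carry the
# full field set built by get_purchase_params() below):
#   params = OrderedDict([('amount', '10.00'), ('currency', 'usd')])
#   signed = sign(params)
#   # 'signed' now also contains merchantID, orderPage_timestamp,
#   # orderPage_version, orderPage_serialNumber, orderPage_signedFields
#   # and orderPage_signaturePublic.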
def verify_signatures(params, signed_fields_key='signedFields', full_sig_key='signedDataPublicSignature'):
"""
Verify the signatures accompanying the POST back from Cybersource Hosted Order Page
returns silently if verified
raises CCProcessorSignatureException if not verified
"""
signed_fields = params.get(signed_fields_key, '').split(',')
data = u",".join([u"{0}={1}".format(k, params.get(k, '')) for k in signed_fields])
signed_fields_sig = processor_hash(params.get(signed_fields_key, ''))
data += u",signedFieldsPublicSignature=" + signed_fields_sig
returned_sig = params.get(full_sig_key, '')
if processor_hash(data) != returned_sig:
raise CCProcessorSignatureException()
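# Mirrors sign() above: the callback echoes the signed field names and an
# overall signature; we rebuild the "name=value" data string (plus the hash
# of the field list), hash it, and raise on any mismatch with the
# processor-supplied signature.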
def render_purchase_form_html(cart, **kwargs):
"""
Renders the HTML of the hidden POST form that must be used to initiate a purchase with CyberSource
"""
return render_to_string('shoppingcart/cybersource_form.html', {
'action': get_purchase_endpoint(),
'params': get_signed_purchase_params(cart),
})
def get_signed_purchase_params(cart, **kwargs):
return sign(get_purchase_params(cart))
def get_purchase_params(cart):
total_cost = cart.total_cost
amount = "{0:0.2f}".format(total_cost)
cart_items = cart.orderitem_set.all()
params = OrderedDict()
params['amount'] = amount
params['currency'] = cart.currency
params['orderPage_transactionType'] = 'sale'
params['orderNumber'] = "{0:d}".format(cart.id)
return params
def get_purchase_endpoint():
return get_processor_config().get('PURCHASE_ENDPOINT', '')
def payment_accepted(params):
"""
Check that cybersource has accepted the payment
params: a dictionary of POST parameters returned by CyberSource in their post-payment callback
returns: true if the payment was correctly accepted, for the right amount
false if the payment was not accepted
raises: CCProcessorDataException if the returned message did not provide required parameters
CCProcessorWrongAmountException if the amount charged is different than the order amount
"""
#make sure required keys are present and convert their values to the right type
valid_params = {}
for key, key_type in [('orderNumber', int),
('orderCurrency', str),
('decision', str)]:
if key not in params:
raise CCProcessorDataException(
_("The payment processor did not return a required parameter: {0}").format(key)
)
try:
valid_params[key] = key_type(params[key])
except ValueError:
raise CCProcessorDataException(
_("The payment processor returned a badly-typed value {0} for param {1}.").format(params[key], key)
)
try:
order = Order.objects.get(id=valid_params['orderNumber'])
except Order.DoesNotExist:
raise CCProcessorDataException(_("The payment processor accepted an order whose number is not in our system."))
if valid_params['decision'] == 'ACCEPT':
try:
# Moved reading of charged_amount here from the valid_params loop above because
# only 'ACCEPT' messages have a 'ccAuthReply_amount' parameter
charged_amt = Decimal(params['ccAuthReply_amount'])
except InvalidOperation:
raise CCProcessorDataException(
_("The payment processor returned a badly-typed value {0} for param {1}.").format(
params['ccAuthReply_amount'], 'ccAuthReply_amount'
)
)
if charged_amt == order.total_cost and valid_params['orderCurrency'] == order.currency:
return {'accepted': True,
'amt_charged': charged_amt,
'currency': valid_params['orderCurrency'],
'order': order}
else:
raise CCProcessorWrongAmountException(
_("The amount charged by the processor {0} {1} is different than the total cost of the order {2} {3}.")
.format(
charged_amt,
valid_params['orderCurrency'],
order.total_cost,
order.currency
)
)
else:
return {'accepted': False,
'amt_charged': 0,
'currency': 'usd',
'order': order}
def record_purchase(params, order):
"""
Record the purchase and run purchased_callbacks
"""
ccnum_str = params.get('card_accountNumber', '')
    m = re.search(r"\d", ccnum_str)
if m:
ccnum = ccnum_str[m.start():]
else:
ccnum = "####"
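    # e.g. a masked 'card_accountNumber' like "############1111" yields
    # ccnum == "1111"; with no digits at all we record "####".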
order.purchase(
first=params.get('billTo_firstName', ''),
last=params.get('billTo_lastName', ''),
street1=params.get('billTo_street1', ''),
street2=params.get('billTo_street2', ''),
city=params.get('billTo_city', ''),
state=params.get('billTo_state', ''),
country=params.get('billTo_country', ''),
postalcode=params.get('billTo_postalCode', ''),
ccnum=ccnum,
cardtype=CARDTYPE_MAP[params.get('card_cardType', 'UNKNOWN')],
processor_reply_dump=json.dumps(params)
)
def get_processor_decline_html(params):
"""Have to parse through the error codes to return a helpful message"""
# see if we have an override in the microsites
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
msg = _(
"Sorry! Our payment processor did not accept your payment. "
"The decision they returned was {decision_text}, "
"and the reason was {reason_text}. "
"You were not charged. "
"Please try a different form of payment. "
"Contact us with payment-related questions at {email}."
)
formatted = msg.format(
decision_text='<span class="decision">{}</span>'.format(params['decision']),
reason_text='<span class="reason">{code}:{msg}</span>'.format(
code=params['reasonCode'], msg=REASONCODE_MAP[params['reasonCode']],
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
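# Illustrative output (hypothetical callback carrying decision "DECLINE"
# and reasonCode "204"): a single <p class="error_msg"> element whose
# reason span reads "204:Insufficient funds in the account. ...".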
def get_processor_exception_html(exception):
"""Return error HTML associated with exception"""
# see if we have an override in the microsites
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
if isinstance(exception, CCProcessorDataException):
msg = _(
"Sorry! Our payment processor sent us back a payment confirmation "
"that had inconsistent data!"
"We apologize that we cannot verify whether the charge went through "
"and take further action on your order."
"The specific error message is: {error_message}. "
"Your credit card may possibly have been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
elif isinstance(exception, CCProcessorWrongAmountException):
msg = _(
"Sorry! Due to an error your purchase was charged for "
"a different amount than the order total! "
"The specific error message is: {error_message}. "
"Your credit card has probably been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
elif isinstance(exception, CCProcessorSignatureException):
msg = _(
"Sorry! Our payment processor sent us back a corrupted message "
"regarding your charge, so we are unable to validate that "
"the message actually came from the payment processor. "
"The specific error message is: {error_message}. "
"We apologize that we cannot verify whether the charge went through "
"and take further action on your order. "
"Your credit card may possibly have been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
# fallthrough case, which basically never happens
return '<p class="error_msg">EXCEPTION!</p>'
CARDTYPE_MAP = defaultdict(lambda: "UNKNOWN")
CARDTYPE_MAP.update(
{
'001': 'Visa',
'002': 'MasterCard',
'003': 'American Express',
'004': 'Discover',
'005': 'Diners Club',
'006': 'Carte Blanche',
'007': 'JCB',
'014': 'EnRoute',
'021': 'JAL',
'024': 'Maestro',
'031': 'Delta',
'033': 'Visa Electron',
'034': 'Dankort',
'035': 'Laser',
'036': 'Carte Bleue',
'037': 'Carta Si',
'042': 'Maestro',
'043': 'GE Money UK card'
}
)
REASONCODE_MAP = defaultdict(lambda: "UNKNOWN REASON")
REASONCODE_MAP.update(
{
'100': _('Successful transaction.'),
'101': _('The request is missing one or more required fields.'),
'102': _('One or more fields in the request contains invalid data.'),
'104': dedent(_(
"""
The merchantReferenceCode sent with this authorization request matches the
merchantReferenceCode of another authorization request that you sent in the last 15 minutes.
Possible fix: retry the payment after 15 minutes.
""")),
'150': _('Error: General system failure. Possible fix: retry the payment after a few minutes.'),
'151': dedent(_(
"""
Error: The request was received but there was a server timeout.
This error does not include timeouts between the client and the server.
Possible fix: retry the payment after some time.
""")),
'152': dedent(_(
"""
Error: The request was received, but a service did not finish running in time
Possible fix: retry the payment after some time.
""")),
'201': _('The issuing bank has questions about the request. Possible fix: retry with another form of payment'),
'202': dedent(_(
"""
Expired card. You might also receive this if the expiration date you
provided does not match the date the issuing bank has on file.
Possible fix: retry with another form of payment
""")),
'203': dedent(_(
"""
General decline of the card. No other information provided by the issuing bank.
Possible fix: retry with another form of payment
""")),
'204': _('Insufficient funds in the account. Possible fix: retry with another form of payment'),
# 205 was Stolen or lost card. Might as well not show this message to the person using such a card.
'205': _('Unknown reason'),
'207': _('Issuing bank unavailable. Possible fix: retry again after a few minutes'),
'208': dedent(_(
"""
Inactive card or card not authorized for card-not-present transactions.
Possible fix: retry with another form of payment
""")),
'210': _('The card has reached the credit limit. Possible fix: retry with another form of payment'),
'211': _('Invalid card verification number. Possible fix: retry with another form of payment'),
# 221 was The customer matched an entry on the processor's negative file.
# Might as well not show this message to the person using such a card.
'221': _('Unknown reason'),
'231': _('Invalid account number. Possible fix: retry with another form of payment'),
'232': dedent(_(
"""
The card type is not accepted by the payment processor.
Possible fix: retry with another form of payment
""")),
'233': _('General decline by the processor. Possible fix: retry with another form of payment'),
'234': _(
"There is a problem with our CyberSource merchant configuration. Please let us know at {0}"
).format(settings.PAYMENT_SUPPORT_EMAIL),
# reason code 235 only applies if we are processing a capture through the API. so we should never see it
'235': _('The requested amount exceeds the originally authorized amount.'),
'236': _('Processor Failure. Possible fix: retry the payment'),
# reason code 238 only applies if we are processing a capture through the API. so we should never see it
'238': _('The authorization has already been captured'),
# reason code 239 only applies if we are processing a capture or credit through the API,
# so we should never see it
'239': _('The requested transaction amount must match the previous transaction amount.'),
'240': dedent(_(
"""
The card type sent is invalid or does not correlate with the credit card number.
Possible fix: retry with the same card or another form of payment
""")),
# reason code 241 only applies when we are processing a capture or credit through the API,
# so we should never see it
'241': _('The request ID is invalid.'),
# reason code 242 occurs if there was not a previously successful authorization request or
# if the previously successful authorization has already been used by another capture request.
# This reason code only applies when we are processing a capture through the API
# so we should never see it
'242': dedent(_(
"""
You requested a capture through the API, but there is no corresponding, unused authorization record.
""")),
# we should never see 243
'243': _('The transaction has already been settled or reversed.'),
# reason code 246 applies only if we are processing a void through the API. so we should never see it
'246': dedent(_(
"""
The capture or credit is not voidable because the capture or credit information has already been
submitted to your processor. Or, you requested a void for a type of transaction that cannot be voided.
""")),
# reason code 247 applies only if we are processing a void through the API. so we should never see it
'247': _('You requested a credit for a capture that was previously voided'),
'250': dedent(_(
"""
Error: The request was received, but there was a timeout at the payment processor.
Possible fix: retry the payment.
""")),
'520': dedent(_(
"""
The authorization request was approved by the issuing bank but declined by CyberSource.'
Possible fix: retry with a different form of payment.
""")),
}
)
| agpl-3.0 | -6,651,539,142,418,140,000 | 42.482456 | 119 | 0.623008 | false |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/python/lib/plat-unixware7/STROPTS.py | 106 | 6524 | # Generated by h2py from /usr/include/sys/stropts.h
# Included from sys/types.h
def quad_low(x): return x.val[0]
ADT_EMASKSIZE = 8
SHRT_MIN = -32768
SHRT_MAX = 32767
INT_MIN = (-2147483647-1)
INT_MAX = 2147483647
LONG_MIN = (-2147483647-1)
LONG_MAX = 2147483647
OFF32_MAX = LONG_MAX
ISTAT_ASSERTED = 0
ISTAT_ASSUMED = 1
ISTAT_NONE = 2
OFF_MAX = OFF32_MAX
CLOCK_MAX = LONG_MAX
P_MYID = (-1)
P_MYHOSTID = (-1)
# Included from sys/select.h
FD_SETSIZE = 4096
NBBY = 8
NULL = 0
# Included from sys/conf.h
D_NEW = 0x00
D_OLD = 0x01
D_DMA = 0x02
D_BLKOFF = 0x400
D_LFS = 0x8000
D_STR = 0x0800
D_MOD = 0x1000
D_PSEUDO = 0x2000
D_RANDOM = 0x4000
D_HOT = 0x10000
D_SEEKNEG = 0x04
D_TAPE = 0x08
D_NOBRKUP = 0x10
D_INITPUB = 0x20
D_NOSPECMACDATA = 0x40
D_RDWEQ = 0x80
SECMASK = (D_INITPUB|D_NOSPECMACDATA|D_RDWEQ)
DAF_REQDMA = 0x1
DAF_PHYSREQ = 0x2
DAF_PRE8 = 0x4
DAF_STATIC = 0x8
DAF_STR = 0x10
D_MP = 0x100
D_UPF = 0x200
ROOTFS_NAMESZ = 7
FMNAMESZ = 8
MCD_VERSION = 1
DI_BCBP = 0
DI_MEDIA = 1
# Included from sys/secsys.h
ES_MACOPENLID = 1
ES_MACSYSLID = 2
ES_MACROOTLID = 3
ES_PRVINFO = 4
ES_PRVSETCNT = 5
ES_PRVSETS = 6
ES_MACADTLID = 7
ES_PRVID = 8
ES_TPGETMAJOR = 9
SA_EXEC = 0o01
SA_WRITE = 0o02
SA_READ = 0o04
SA_SUBSIZE = 0o10
# Included from sys/stropts_f.h
X_STR = (ord('S')<<8)
X_I_BASE = (X_STR|0o200)
X_I_NREAD = (X_STR|0o201)
X_I_PUSH = (X_STR|0o202)
X_I_POP = (X_STR|0o203)
X_I_LOOK = (X_STR|0o204)
X_I_FLUSH = (X_STR|0o205)
X_I_SRDOPT = (X_STR|0o206)
X_I_GRDOPT = (X_STR|0o207)
X_I_STR = (X_STR|0o210)
X_I_SETSIG = (X_STR|0o211)
X_I_GETSIG = (X_STR|0o212)
X_I_FIND = (X_STR|0o213)
X_I_LINK = (X_STR|0o214)
X_I_UNLINK = (X_STR|0o215)
X_I_PEEK = (X_STR|0o217)
X_I_FDINSERT = (X_STR|0o220)
X_I_SENDFD = (X_STR|0o221)
X_I_RECVFD = (X_STR|0o222)
# Included from unistd.h
# Included from sys/unistd.h
R_OK = 0o04
W_OK = 0o02
X_OK = 0o01
F_OK = 000
EFF_ONLY_OK = 0o10
EX_OK = 0o20
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
_SC_ARG_MAX = 1
_SC_CHILD_MAX = 2
_SC_CLK_TCK = 3
_SC_NGROUPS_MAX = 4
_SC_OPEN_MAX = 5
_SC_JOB_CONTROL = 6
_SC_SAVED_IDS = 7
_SC_VERSION = 8
_SC_PASS_MAX = 9
_SC_LOGNAME_MAX = 10
_SC_PAGESIZE = 11
_SC_PAGE_SIZE = _SC_PAGESIZE
_SC_XOPEN_VERSION = 12
_SC_NACLS_MAX = 13
_SC_NPROCESSORS_CONF = 14
_SC_NPROCESSORS_ONLN = 15
_SC_NPROCESSES = 39
_SC_TOTAL_MEMORY = 40
_SC_USEABLE_MEMORY = 41
_SC_GENERAL_MEMORY = 42
_SC_DEDICATED_MEMORY = 43
_SC_NCGS_CONF = 44
_SC_NCGS_ONLN = 45
_SC_MAX_CPUS_PER_CG = 46
_SC_CG_SIMPLE_IMPL = 47
_SC_CACHE_LINE = 48
_SC_SYSTEM_ID = 49
_SC_THREADS = 51
_SC_THREAD_ATTR_STACKADDR = 52
_SC_THREAD_ATTR_STACKSIZE = 53
_SC_THREAD_DESTRUCTOR_ITERATIONS = 54
_SC_THREAD_KEYS_MAX = 55
_SC_THREAD_PRIORITY_SCHEDULING = 56
_SC_THREAD_PRIO_INHERIT = 57
_SC_THREAD_PRIO_PROTECT = 58
_SC_THREAD_STACK_MIN = 59
_SC_THREAD_PROCESS_SHARED = 60
_SC_THREAD_SAFE_FUNCTIONS = 61
_SC_THREAD_THREADS_MAX = 62
_SC_KERNEL_VM = 63
_SC_TZNAME_MAX = 320
_SC_STREAM_MAX = 321
_SC_XOPEN_CRYPT = 323
_SC_XOPEN_ENH_I18N = 324
_SC_XOPEN_SHM = 325
_SC_XOPEN_XCU_VERSION = 327
_SC_AES_OS_VERSION = 330
_SC_ATEXIT_MAX = 331
_SC_2_C_BIND = 350
_SC_2_C_DEV = 351
_SC_2_C_VERSION = 352
_SC_2_CHAR_TERM = 353
_SC_2_FORT_DEV = 354
_SC_2_FORT_RUN = 355
_SC_2_LOCALEDEF = 356
_SC_2_SW_DEV = 357
_SC_2_UPE = 358
_SC_2_VERSION = 359
_SC_BC_BASE_MAX = 370
_SC_BC_DIM_MAX = 371
_SC_BC_SCALE_MAX = 372
_SC_BC_STRING_MAX = 373
_SC_COLL_WEIGHTS_MAX = 380
_SC_EXPR_NEST_MAX = 381
_SC_LINE_MAX = 382
_SC_RE_DUP_MAX = 383
_SC_IOV_MAX = 390
_SC_NPROC_CONF = 391
_SC_NPROC_ONLN = 392
_SC_XOPEN_UNIX = 400
_SC_SEMAPHORES = 440
_CS_PATH = 1
__O_CS_HOSTNAME = 2
_CS_RELEASE = 3
_CS_VERSION = 4
__O_CS_MACHINE = 5
__O_CS_ARCHITECTURE = 6
_CS_HW_SERIAL = 7
__O_CS_HW_PROVIDER = 8
_CS_SRPC_DOMAIN = 9
_CS_INITTAB_NAME = 10
__O_CS_SYSNAME = 11
_CS_LFS_CFLAGS = 20
_CS_LFS_LDFLAGS = 21
_CS_LFS_LIBS = 22
_CS_LFS_LINTFLAGS = 23
_CS_LFS64_CFLAGS = 24
_CS_LFS64_LDFLAGS = 25
_CS_LFS64_LIBS = 26
_CS_LFS64_LINTFLAGS = 27
_CS_ARCHITECTURE = 100
_CS_BUSTYPES = 101
_CS_HOSTNAME = 102
_CS_HW_PROVIDER = 103
_CS_KERNEL_STAMP = 104
_CS_MACHINE = 105
_CS_OS_BASE = 106
_CS_OS_PROVIDER = 107
_CS_SYSNAME = 108
_CS_USER_LIMIT = 109
_PC_LINK_MAX = 1
_PC_MAX_CANON = 2
_PC_MAX_INPUT = 3
_PC_NAME_MAX = 4
_PC_PATH_MAX = 5
_PC_PIPE_BUF = 6
_PC_NO_TRUNC = 7
_PC_VDISABLE = 8
_PC_CHOWN_RESTRICTED = 9
_PC_FILESIZEBITS = 10
_POSIX_VERSION = 199009
_XOPEN_VERSION = 4
GF_PATH = "/etc/group"
PF_PATH = "/etc/passwd"
F_ULOCK = 0
F_LOCK = 1
F_TLOCK = 2
F_TEST = 3
_POSIX_JOB_CONTROL = 1
_POSIX_SAVED_IDS = 1
_POSIX_VDISABLE = 0
NULL = 0
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
_XOPEN_UNIX = 1
_XOPEN_ENH_I18N = 1
_XOPEN_XPG4 = 1
_POSIX2_C_VERSION = 199209
_POSIX2_VERSION = 199209
_XOPEN_XCU_VERSION = 4
_POSIX_SEMAPHORES = 1
_POSIX_THREADS = 1
_POSIX_THREAD_ATTR_STACKADDR = 1
_POSIX_THREAD_ATTR_STACKSIZE = 1
_POSIX_THREAD_PRIORITY_SCHEDULING = 1
_POSIX_THREAD_PROCESS_SHARED = 1
_POSIX_THREAD_SAFE_FUNCTIONS = 1
_POSIX2_C_BIND = 1
_POSIX2_CHAR_TERM = 1
_POSIX2_FORT_RUN = 1
_POSIX2_LOCALEDEF = 1
_POSIX2_UPE = 1
_LFS_ASYNCHRONOUS_IO = 1
_LFS_LARGEFILE = 1
_LFS64_ASYNCHRONOUS_IO = 1
_LFS64_LARGEFILE = 1
_LFS64_STDIO = 1
FMNAMESZ = 8
SNDZERO = 0x001
SNDPIPE = 0x002
RNORM = 0x000
RMSGD = 0x001
RMSGN = 0x002
RMODEMASK = 0x003
RPROTDAT = 0x004
RPROTDIS = 0x008
RPROTNORM = 0x010
RPROTMASK = 0x01c
FLUSHR = 0x01
FLUSHW = 0x02
FLUSHRW = 0x03
FLUSHBAND = 0x04
S_INPUT = 0x0001
S_HIPRI = 0x0002
S_OUTPUT = 0x0004
S_MSG = 0x0008
S_ERROR = 0x0010
S_HANGUP = 0x0020
S_RDNORM = 0x0040
S_WRNORM = S_OUTPUT
S_RDBAND = 0x0080
S_WRBAND = 0x0100
S_BANDURG = 0x0200
RS_HIPRI = 0x01
MSG_HIPRI = 0x01
MSG_ANY = 0x02
MSG_BAND = 0x04
MSG_DISCARD = 0x08
MSG_PEEKIOCTL = 0x10
MORECTL = 1
MOREDATA = 2
MUXID_ALL = (-1)
ANYMARK = 0x01
LASTMARK = 0x02
STR = (ord('S')<<8)
I_NREAD = (STR|0o1)
I_PUSH = (STR|0o2)
I_POP = (STR|0o3)
I_LOOK = (STR|0o4)
I_FLUSH = (STR|0o5)
I_SRDOPT = (STR|0o6)
I_GRDOPT = (STR|0o7)
I_STR = (STR|0o10)
I_SETSIG = (STR|0o11)
I_GETSIG = (STR|0o12)
I_FIND = (STR|0o13)
I_LINK = (STR|0o14)
I_UNLINK = (STR|0o15)
I_PEEK = (STR|0o17)
I_FDINSERT = (STR|0o20)
I_SENDFD = (STR|0o21)
I_RECVFD = (STR|0o22)
I_E_RECVFD = (STR|0o16)
I_RECVFD = (STR|0o16)
I_RECVFD = (STR|0o22)
I_SWROPT = (STR|0o23)
I_GWROPT = (STR|0o24)
I_LIST = (STR|0o25)
I_PLINK = (STR|0o26)
I_PUNLINK = (STR|0o27)
I_FLUSHBAND = (STR|0o34)
I_CKBAND = (STR|0o35)
I_GETBAND = (STR|0o36)
I_ATMARK = (STR|0o37)
I_SETCLTIME = (STR|0o40)
I_GETCLTIME = (STR|0o41)
I_CANPUT = (STR|0o42)
I_S_RECVFD = (STR|0o43)
I_STATS = (STR|0o44)
I_BIGPIPE = (STR|0o45)
I_GETTP = (STR|0o46)
INFTIM = -1
| gpl-3.0 | -4,002,799,897,796,372,000 | 18.890244 | 51 | 0.669528 | false |
varunkamra/kuma | vendor/packages/translate/tools/pocount.py | 24 | 12909 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2003-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Count strings and words for supported localization files.
These include: XLIFF, TMX, Gettext PO and MO, Qt .ts and .qm, Wordfast TM, etc
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/pocount.html
for examples and usage instructions.
"""
from __future__ import print_function
import logging
import os
import sys
from argparse import ArgumentParser
from translate.storage import factory, statsdb
logger = logging.getLogger(__name__)
# define style constants
style_full, style_csv, style_short_strings, style_short_words = range(4)
# default output style
default_style = style_full
def calcstats_old(filename):
"""This is the previous implementation of calcstats() and is left for
    comparison and debugging purposes."""
# ignore totally blank or header units
try:
store = factory.getobject(filename)
except ValueError as e:
logger.warning(e)
return {}
units = filter(lambda unit: unit.istranslatable(), store.units)
translated = translatedmessages(units)
fuzzy = fuzzymessages(units)
review = filter(lambda unit: unit.isreview(), units)
untranslated = untranslatedmessages(units)
wordcounts = dict(map(lambda unit: (unit, statsdb.wordsinunit(unit)), units))
sourcewords = lambda elementlist: sum(map(lambda unit: wordcounts[unit][0], elementlist))
targetwords = lambda elementlist: sum(map(lambda unit: wordcounts[unit][1], elementlist))
stats = {}
# units
stats["translated"] = len(translated)
stats["fuzzy"] = len(fuzzy)
stats["untranslated"] = len(untranslated)
stats["review"] = len(review)
stats["total"] = stats["translated"] + \
stats["fuzzy"] + \
stats["untranslated"]
# words
stats["translatedsourcewords"] = sourcewords(translated)
stats["translatedtargetwords"] = targetwords(translated)
stats["fuzzysourcewords"] = sourcewords(fuzzy)
stats["untranslatedsourcewords"] = sourcewords(untranslated)
stats["reviewsourcewords"] = sourcewords(review)
stats["totalsourcewords"] = stats["translatedsourcewords"] + \
stats["fuzzysourcewords"] + \
stats["untranslatedsourcewords"]
return stats
def calcstats(filename):
statscache = statsdb.StatsCache()
return statscache.filetotals(filename, extended=True)
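# The returned dict uses the keys consumed by summarize() below
# ("translated", "fuzzy", "untranslated", the corresponding word counts,
# "total", ...), plus an optional "extended" sub-dict of per-state counts.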
def summarize(title, stats, style=style_full, indent=8, incomplete_only=False):
"""Print summary for a .po file in specified format.
:param title: name of .po file
    :param stats: dict with translation statistics for the file specified
:param indent: indentation of the 2nd column (length of longest filename)
:param incomplete_only: omit fully translated files
:type incomplete_only: Boolean
:rtype: Boolean
:return: 1 if counting incomplete files (incomplete_only=True) and the
file is completely translated, 0 otherwise
"""
    def percent(numerator, denominator):
        if denominator == 0:
            return 0
        else:
            return numerator * 100 / denominator
if incomplete_only and (stats["total"] == stats["translated"]):
return 1
if (style == style_csv):
print("%s, " % title, end=' ')
print("%d, %d, %d," % (stats["translated"],
stats["translatedsourcewords"],
stats["translatedtargetwords"]), end=' ')
print("%d, %d," % (stats["fuzzy"], stats["fuzzysourcewords"]), end=' ')
print("%d, %d," % (stats["untranslated"],
stats["untranslatedsourcewords"]), end=' ')
print("%d, %d" % (stats["total"], stats["totalsourcewords"]), end=' ')
if stats["review"] > 0:
            print(", %d, %d" % (stats["review"], stats["reviewsourcewords"]), end=' ')
print()
elif (style == style_short_strings):
spaces = " " * (indent - len(title))
print("%s%s strings: total: %d\t| %dt\t%df\t%du\t| %d%%t\t%d%%f\t%d%%u" % (
title, spaces,
stats["total"], stats["translated"], stats["fuzzy"], stats["untranslated"],
percent(stats["translated"], stats["total"]),
percent(stats["fuzzy"], stats["total"]),
percent(stats["untranslated"], stats["total"])))
elif (style == style_short_words):
spaces = " " * (indent - len(title))
print("%s%s source words: total: %d\t| %dt\t%df\t%du\t| %d%%t\t%d%%f\t%d%%u" % (
title, spaces,
stats["totalsourcewords"], stats["translatedsourcewords"], stats["fuzzysourcewords"], stats["untranslatedsourcewords"],
percent(stats["translatedsourcewords"], stats["totalsourcewords"]),
percent(stats["fuzzysourcewords"], stats["totalsourcewords"]),
percent(stats["untranslatedsourcewords"], stats["totalsourcewords"])))
else: # style == style_full
print(title)
print("type strings words (source) words (translation)")
print("translated: %5d (%3d%%) %10d (%3d%%) %15d" % (
stats["translated"],
percent(stats["translated"], stats["total"]),
stats["translatedsourcewords"],
percent(stats["translatedsourcewords"], stats["totalsourcewords"]),
stats["translatedtargetwords"]))
print("fuzzy: %5d (%3d%%) %10d (%3d%%) n/a" % (
stats["fuzzy"],
percent(stats["fuzzy"], stats["total"]),
stats["fuzzysourcewords"],
percent(stats["fuzzysourcewords"], stats["totalsourcewords"])))
print("untranslated: %5d (%3d%%) %10d (%3d%%) n/a" % (
stats["untranslated"],
percent(stats["untranslated"], stats["total"]),
stats["untranslatedsourcewords"],
percent(stats["untranslatedsourcewords"], stats["totalsourcewords"])))
print("Total: %5d %17d %22d" % (
stats["total"],
stats["totalsourcewords"],
stats["translatedtargetwords"]))
if "extended" in stats:
print("")
for state, e_stats in stats["extended"].iteritems():
print("%s: %5d (%3d%%) %10d (%3d%%) %15d" % (
state, e_stats["units"], percent(e_stats["units"], stats["total"]),
e_stats["sourcewords"], percent(e_stats["sourcewords"], stats["totalsourcewords"]),
e_stats["targetwords"]))
if stats["review"] > 0:
print("review: %5d %17d n/a" % (
stats["review"], stats["reviewsourcewords"]))
print()
return 0
def fuzzymessages(units):
return filter(lambda unit: unit.isfuzzy() and unit.target, units)
def translatedmessages(units):
return filter(lambda unit: unit.istranslated(), units)
def untranslatedmessages(units):
return filter(lambda unit: not (unit.istranslated() or unit.isfuzzy()) and unit.source, units)
class summarizer:
def __init__(self, filenames, style=default_style, incomplete_only=False):
self.totals = {}
self.filecount = 0
self.longestfilename = 0
self.style = style
self.incomplete_only = incomplete_only
self.complete_count = 0
if (self.style == style_csv):
print("""Filename, Translated Messages, Translated Source Words, Translated
Target Words, Fuzzy Messages, Fuzzy Source Words, Untranslated Messages,
Untranslated Source Words, Total Message, Total Source Words,
Review Messages, Review Source Words""")
if (self.style == style_short_strings or self.style == style_short_words):
for filename in filenames: # find longest filename
if (len(filename) > self.longestfilename):
self.longestfilename = len(filename)
for filename in filenames:
if not os.path.exists(filename):
logger.error("cannot process %s: does not exist", filename)
continue
elif os.path.isdir(filename):
self.handledir(filename)
else:
self.handlefile(filename)
if self.filecount > 1 and (self.style == style_full):
if self.incomplete_only:
summarize("TOTAL (incomplete only):", self.totals,
incomplete_only=True)
print("File count (incomplete): %5d" % (self.filecount - self.complete_count))
else:
summarize("TOTAL:", self.totals, incomplete_only=False)
print("File count: %5d" % (self.filecount))
print()
def updatetotals(self, stats):
"""Update self.totals with the statistics in stats."""
for key in stats.keys():
if key == "extended":
# FIXME: calculate extended totals
continue
if not key in self.totals:
self.totals[key] = 0
self.totals[key] += stats[key]
def handlefile(self, filename):
try:
stats = calcstats(filename)
self.updatetotals(stats)
self.complete_count += summarize(filename, stats, self.style,
self.longestfilename,
self.incomplete_only)
self.filecount += 1
except Exception: # This happens if we have a broken file.
logger.error(sys.exc_info()[1])
def handlefiles(self, dirname, filenames):
for filename in filenames:
pathname = os.path.join(dirname, filename)
if os.path.isdir(pathname):
self.handledir(pathname)
else:
self.handlefile(pathname)
def handledir(self, dirname):
path, name = os.path.split(dirname)
if name in ["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"]:
return
entries = os.listdir(dirname)
self.handlefiles(dirname, entries)
def main():
parser = ArgumentParser()
parser.add_argument("--incomplete", action="store_true", default=False,
dest="incomplete_only",
help="skip 100%% translated files.")
if sys.version_info[:2] <= (2, 6):
# Python 2.6 using argparse from PyPI cannot define a mutually
# exclusive group as a child of a group, but it works if it is a child
# of the parser. We lose the group title but the functionality works.
# See https://code.google.com/p/argparse/issues/detail?id=90
megroup = parser.add_mutually_exclusive_group()
else:
output_group = parser.add_argument_group("Output format")
megroup = output_group.add_mutually_exclusive_group()
megroup.add_argument("--full", action="store_const", const=style_full,
dest="style", default=style_full,
help="(default) statistics in full, verbose format")
megroup.add_argument("--csv", action="store_const", const=style_csv,
dest="style",
help="statistics in CSV format")
megroup.add_argument("--short", action="store_const", const=style_short_strings,
dest="style",
help="same as --short-strings")
megroup.add_argument("--short-strings", action="store_const",
const=style_short_strings, dest="style",
help="statistics of strings in short format - one line per file")
megroup.add_argument("--short-words", action="store_const",
const=style_short_words, dest="style",
help="statistics of words in short format - one line per file")
parser.add_argument("files", nargs="+")
args = parser.parse_args()
logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
summarizer(args.files, args.style, args.incomplete_only)
if __name__ == '__main__':
main()
| mpl-2.0 | -594,759,850,433,515,600 | 41.186275 | 133 | 0.594934 | false |
kingvuplus/BH-SH4 | mytest.py | 2 | 16609 | import sys, os
if os.path.isfile("/usr/lib/enigma2/python/enigma.zip"):
sys.path.append("/usr/lib/enigma2/python/enigma.zip")
from Tools.Profile import profile, profile_final
profile("PYTHON_START")
import Tools.RedirectOutput
import enigma
import eConsoleImpl
import eBaseImpl
enigma.eTimer = eBaseImpl.eTimer
enigma.eSocketNotifier = eBaseImpl.eSocketNotifier
enigma.eConsoleAppContainer = eConsoleImpl.eConsoleAppContainer
from traceback import print_exc
profile("SimpleSummary")
from Screens import InfoBar
from Screens.SimpleSummary import SimpleSummary
from sys import stdout, exc_info
profile("Bouquets")
from Components.config import config, configfile, ConfigText, ConfigYesNo, ConfigInteger, NoSave
config.misc.load_unlinked_userbouquets = ConfigYesNo(default=True)
def setLoadUnlinkedUserbouquets(configElement):
enigma.eDVBDB.getInstance().setLoadUnlinkedUserbouquets(configElement.value)
config.misc.load_unlinked_userbouquets.addNotifier(setLoadUnlinkedUserbouquets)
enigma.eDVBDB.getInstance().reloadBouquets()
profile("ParentalControl")
import Components.ParentalControl
Components.ParentalControl.InitParentalControl()
profile("LOAD:Navigation")
from Navigation import Navigation
profile("LOAD:skin")
from skin import readSkin
profile("LOAD:Tools")
from Tools.Directories import InitFallbackFiles, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_SKIN
InitFallbackFiles()
profile("config.misc")
config.misc.radiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "radio.mvi"))
config.misc.blackradiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "black.mvi"))
config.misc.useTransponderTime = ConfigYesNo(default=True)
config.misc.startCounter = ConfigInteger(default=0) # number of e2 starts...
config.misc.standbyCounter = NoSave(ConfigInteger(default=0)) # number of standby
config.misc.DeepStandby = NoSave(ConfigYesNo(default=False)) # detect deepstandby
config.misc.RestartUI = ConfigYesNo(default=False) # detect user interface restart
config.misc.epgcache_filename = ConfigText(default = "/hdd/epg.dat")
def setEPGCachePath(configElement):
enigma.eEPGCache.getInstance().setCacheFile(configElement.value)
#demo code for use of standby enter leave callbacks
#def leaveStandby():
# print "!!!!!!!!!!!!!!!!!leave standby"
#def standbyCountChanged(configElement):
# print "!!!!!!!!!!!!!!!!!enter standby num", configElement.value
# from Screens.Standby import inStandby
# inStandby.onClose.append(leaveStandby)
#config.misc.standbyCounter.addNotifier(standbyCountChanged, initial_call = False)
####################################################
def useTransponderTimeChanged(configElement):
enigma.eDVBLocalTimeHandler.getInstance().setUseDVBTime(configElement.value)
config.misc.useTransponderTime.addNotifier(useTransponderTimeChanged)
profile("Twisted")
try:
import twisted.python.runtime
import e2reactor
e2reactor.install()
from twisted.internet import reactor
def runReactor():
reactor.run(installSignalHandlers=False)
except ImportError:
print "twisted not available"
def runReactor():
enigma.runMainloop()
profile("LOAD:Plugin")
# initialize autorun plugins and plugin menu entries
from Components.PluginComponent import plugins
profile("LOAD:Wizard")
from Screens.Wizard import wizardManager
from Screens.StartWizard import *
import Screens.Rc
from Tools.BoundFunction import boundFunction
from Plugins.Plugin import PluginDescriptor
profile("misc")
had = dict()
def dump(dir, p = ""):
if isinstance(dir, dict):
for (entry, val) in dir.items():
dump(val, p + "(dict)/" + entry)
if hasattr(dir, "__dict__"):
for name, value in dir.__dict__.items():
if not had.has_key(str(value)):
had[str(value)] = 1
dump(value, p + "/" + str(name))
else:
print p + "/" + str(name) + ":" + str(dir.__class__) + "(cycle)"
else:
print p + ":" + str(dir)
# + ":" + str(dir.__class__)
# display
profile("LOAD:ScreenGlobals")
from Screens.Globals import Globals
from Screens.SessionGlobals import SessionGlobals
from Screens.Screen import Screen
profile("Screen")
Screen.global_screen = Globals()
# Session.open:
# * push current active dialog ('current_dialog') onto stack
# * call execEnd for this dialog
# * clear in_exec flag
# * hide screen
# * instantiate new dialog into 'current_dialog'
# * create screens, components
# * read, apply skin
# * create GUI for screen
# * call execBegin for new dialog
# * set in_exec
# * show gui screen
# * call components' / screen's onExecBegin
# ... screen is active, until it calls 'close'...
# Session.close:
# * assert in_exec
# * save return value
# * start deferred close handler ('onClose')
# * execEnd
# * clear in_exec
# * hide screen
# .. a moment later:
# Session.doClose:
# * destroy screen
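# Illustrative flow (hypothetical screen class):
#   def done(answer):
#       print "dialog returned", answer
#   session.openWithCallback(done, SomeScreen)
# Inside SomeScreen, self.close(True) resumes the caller; the deferred
# close handler then invokes 'done' with (True,).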
class Session:
def __init__(self, desktop = None, summary_desktop = None, navigation = None):
self.desktop = desktop
self.summary_desktop = summary_desktop
self.nav = navigation
self.delay_timer = enigma.eTimer()
self.delay_timer.callback.append(self.processDelay)
self.current_dialog = None
self.dialog_stack = [ ]
self.summary_stack = [ ]
self.summary = None
self.in_exec = False
self.screen = SessionGlobals(self)
for p in plugins.getPlugins(PluginDescriptor.WHERE_SESSIONSTART):
try:
p(reason=0, session=self)
except:
print "Plugin raised exception at WHERE_SESSIONSTART"
import traceback
traceback.print_exc()
def processDelay(self):
callback = self.current_dialog.callback
retval = self.current_dialog.returnValue
if self.current_dialog.isTmp:
self.current_dialog.doClose()
# dump(self.current_dialog)
del self.current_dialog
else:
del self.current_dialog.callback
self.popCurrent()
if callback is not None:
callback(*retval)
def execBegin(self, first=True, do_show = True):
assert not self.in_exec
self.in_exec = True
c = self.current_dialog
# when this is an execbegin after a execend of a "higher" dialog,
# popSummary already did the right thing.
if first:
self.instantiateSummaryDialog(c)
c.saveKeyboardMode()
c.execBegin()
# when execBegin opened a new dialog, don't bother showing the old one.
if c == self.current_dialog and do_show:
c.show()
def execEnd(self, last=True):
assert self.in_exec
self.in_exec = False
self.current_dialog.execEnd()
self.current_dialog.restoreKeyboardMode()
self.current_dialog.hide()
if last:
self.current_dialog.removeSummary(self.summary)
self.popSummary()
def instantiateDialog(self, screen, *arguments, **kwargs):
return self.doInstantiateDialog(screen, arguments, kwargs, self.desktop)
def deleteDialog(self, screen):
screen.hide()
screen.doClose()
def instantiateSummaryDialog(self, screen, **kwargs):
self.pushSummary()
summary = screen.createSummary() or SimpleSummary
arguments = (screen,)
self.summary = self.doInstantiateDialog(summary, arguments, kwargs, self.summary_desktop)
self.summary.show()
screen.addSummary(self.summary)
def doInstantiateDialog(self, screen, arguments, kwargs, desktop):
# create dialog
dlg = screen(self, *arguments, **kwargs)
if dlg is None:
return
# read skin data
readSkin(dlg, None, dlg.skinName, desktop)
# create GUI view of this dialog
dlg.setDesktop(desktop)
dlg.applySkin()
return dlg
def pushCurrent(self):
if self.current_dialog is not None:
self.dialog_stack.append((self.current_dialog, self.current_dialog.shown))
self.execEnd(last=False)
def popCurrent(self):
if self.dialog_stack:
(self.current_dialog, do_show) = self.dialog_stack.pop()
self.execBegin(first=False, do_show=do_show)
else:
self.current_dialog = None
def execDialog(self, dialog):
self.pushCurrent()
self.current_dialog = dialog
self.current_dialog.isTmp = False
self.current_dialog.callback = None # would cause re-entrancy problems.
self.execBegin()
def openWithCallback(self, callback, screen, *arguments, **kwargs):
dlg = self.open(screen, *arguments, **kwargs)
dlg.callback = callback
return dlg
def open(self, screen, *arguments, **kwargs):
if self.dialog_stack and not self.in_exec:
raise RuntimeError("modal open are allowed only from a screen which is modal!")
# ...unless it's the very first screen.
self.pushCurrent()
dlg = self.current_dialog = self.instantiateDialog(screen, *arguments, **kwargs)
dlg.isTmp = True
dlg.callback = None
self.execBegin()
return dlg
def close(self, screen, *retval):
if not self.in_exec:
print "close after exec!"
return
# be sure that the close is for the right dialog!
# if it's not, you probably closed after another dialog
# was opened. this can happen if you open a dialog
# onExecBegin, and forget to do this only once.
# after close of the top dialog, the underlying will
# gain focus again (for a short time), thus triggering
# the onExec, which opens the dialog again, closing the loop.
assert screen == self.current_dialog
self.current_dialog.returnValue = retval
self.delay_timer.start(0, 1)
self.execEnd()
def pushSummary(self):
if self.summary is not None:
self.summary.hide()
self.summary_stack.append(self.summary)
self.summary = None
def popSummary(self):
if self.summary is not None:
self.summary.doClose()
self.summary = self.summary_stack.pop()
if self.summary is not None:
self.summary.show()
profile("Standby,PowerKey")
import Screens.Standby
from Screens.Menu import MainMenu, mdom
from GlobalActions import globalActionMap
class PowerKey:
""" PowerKey stuff - handles the powerkey press and powerkey release actions"""
def __init__(self, session):
self.session = session
globalActionMap.actions["power_down"]=self.powerdown
globalActionMap.actions["power_up"]=self.powerup
globalActionMap.actions["power_long"]=self.powerlong
globalActionMap.actions["deepstandby"]=self.shutdown # frontpanel long power button press
globalActionMap.actions["discrete_off"]=self.standby
self.standbyblocked = 1
def MenuClosed(self, *val):
self.session.infobar = None
def shutdown(self):
print "PowerOff - Now!"
if not Screens.Standby.inTryQuitMainloop and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND:
self.session.open(Screens.Standby.TryQuitMainloop, 1)
def powerlong(self):
if Screens.Standby.inTryQuitMainloop or (self.session.current_dialog and not self.session.current_dialog.ALLOW_SUSPEND):
return
self.doAction(action = config.usage.on_long_powerpress.value)
def doAction(self, action):
self.standbyblocked = 1
if action == "shutdown":
self.shutdown()
elif action == "show_menu":
print "Show shutdown Menu"
root = mdom.getroot()
for x in root.findall("menu"):
y = x.find("id")
if y is not None:
id = y.get("val")
if id and id == "shutdown":
self.session.infobar = self
menu_screen = self.session.openWithCallback(self.MenuClosed, MainMenu, x)
menu_screen.setTitle(_("Standby / restart"))
return
elif action == "standby":
self.standby()
def powerdown(self):
self.standbyblocked = 0
def powerup(self):
if self.standbyblocked == 0:
self.doAction(action = config.usage.on_short_powerpress.value)
def standby(self):
if not Screens.Standby.inStandby and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND and self.session.in_exec:
self.session.open(Screens.Standby.Standby)
profile("Scart")
from Screens.Scart import Scart
class AutoScartControl:
def __init__(self, session):
self.force = False
self.current_vcr_sb = enigma.eAVSwitch.getInstance().getVCRSlowBlanking()
if self.current_vcr_sb and config.av.vcrswitch.value:
self.scartDialog = session.instantiateDialog(Scart, True)
else:
self.scartDialog = session.instantiateDialog(Scart, False)
config.av.vcrswitch.addNotifier(self.recheckVCRSb)
enigma.eAVSwitch.getInstance().vcr_sb_notifier.get().append(self.VCRSbChanged)
def recheckVCRSb(self, configElement):
self.VCRSbChanged(self.current_vcr_sb)
def VCRSbChanged(self, value):
#print "vcr sb changed to", value
self.current_vcr_sb = value
if config.av.vcrswitch.value or value > 2:
if value:
self.scartDialog.showMessageBox()
else:
self.scartDialog.switchToTV()
profile("Load:CI")
from enigma import eDVBCIInterfaces
from Screens.Ci import CiHandler
profile("Load:VolumeControl")
from Components.VolumeControl import VolumeControl
def runScreenTest():
config.misc.startCounter.value += 1
config.misc.startCounter.save()
profile("readPluginList")
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
profile("Init:Session")
nav = Navigation()
session = Session(desktop = enigma.getDesktop(0), summary_desktop = enigma.getDesktop(1), navigation = nav)
CiHandler.setSession(session)
screensToRun = [ p.__call__ for p in plugins.getPlugins(PluginDescriptor.WHERE_WIZARD) ]
profile("wizards")
screensToRun += wizardManager.getWizards()
screensToRun.append((100, InfoBar.InfoBar))
screensToRun.sort()
enigma.ePythonConfigQuery.setQueryFunc(configfile.getResolvedKey)
# eDVBCIInterfaces.getInstance().setDescrambleRules(0 # Slot Number
# ,( ["1:0:1:24:4:85:C00000:0:0:0:"], #service_list
# ["PREMIERE"], #provider_list,
# [] #caid_list
# ));
def runNextScreen(session, screensToRun, *result):
if result:
enigma.quitMainloop(*result)
return
		if not screensToRun:
			return
		screen = screensToRun[0][1]
		args = screensToRun[0][2:]
		session.openWithCallback(boundFunction(runNextScreen, session, screensToRun[1:]), screen, *args)
config.misc.epgcache_filename.addNotifier(setEPGCachePath)
runNextScreen(session, screensToRun)
profile("Init:VolumeControl")
vol = VolumeControl(session)
profile("Init:PowerKey")
power = PowerKey(session)
# we need session.scart to access it from within menu.xml
session.scart = AutoScartControl(session)
profile("Init:Trashcan")
import Tools.Trashcan
Tools.Trashcan.init(session)
profile("RunReactor")
profile_final()
runReactor()
profile("wakeup")
from time import time, strftime, localtime
from Tools.StbHardware import setFPWakeuptime, getFPWakeuptime, setRTCtime
#get currentTime
nowTime = time()
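	# collect wakeup candidates as (timestamp, type) tuples: 0 = next recording, 1 = next zap, 2 = plugin wakeup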
wakeupList = [
x for x in ((session.nav.RecordTimer.getNextRecordingTime(), 0),
(session.nav.RecordTimer.getNextZapTime(isWakeup=True), 1),
(plugins.getNextWakeupTime(), 2))
if x[0] != -1
]
wakeupList.sort()
if wakeupList:
startTime = wakeupList[0]
if (startTime[0] - nowTime) < 270: # no time to switch box back on
wptime = nowTime + 30 # so switch back on in 30 seconds
else:
wptime = startTime[0] - 240
if not config.misc.useTransponderTime.value:
print "dvb time sync disabled... so set RTC now to current linux time!", strftime("%Y/%m/%d %H:%M", localtime(nowTime))
setRTCtime(nowTime)
print "set wakeup time to", strftime("%Y/%m/%d %H:%M", localtime(wptime))
setFPWakeuptime(wptime)
profile("stopService")
session.nav.stopService()
profile("nav shutdown")
session.nav.shutdown()
profile("configfile.save")
configfile.save()
from Screens import InfoBarGenerics
InfoBarGenerics.saveResumePoints()
return 0
profile("Init:skin")
import skin
skin.loadSkinData(enigma.getDesktop(0))
profile("InputDevice")
import Components.InputDevice
Components.InputDevice.InitInputDevices()
import Components.InputHotplug
profile("SetupDevices")
import Components.SetupDevices
Components.SetupDevices.InitSetupDevices()
profile("AVSwitch")
import Components.AVSwitch
Components.AVSwitch.InitAVSwitch()
profile("RecordingConfig")
import Components.RecordingConfig
Components.RecordingConfig.InitRecordingConfig()
profile("UsageConfig")
import Components.UsageConfig
Components.UsageConfig.InitUsageConfig()
profile("keymapparser")
import keymapparser
keymapparser.readKeymap(config.usage.keymap.value)
profile("Network")
import Components.Network
Components.Network.InitNetwork()
profile("LCD")
import Components.Lcd
Components.Lcd.InitLcd()
profile("RFMod")
import Components.RFmod
Components.RFmod.InitRFmod()
profile("Init:CI")
import Screens.Ci
Screens.Ci.InitCiConfig()
profile("RcModel")
import Components.RcModel
#from enigma import dump_malloc_stats
#t = eTimer()
#t.callback.append(dump_malloc_stats)
#t.start(1000)
# first, setup a screen
try:
runScreenTest()
plugins.shutdown()
Components.ParentalControl.parentalControl.save()
except:
print 'EXCEPTION IN PYTHON STARTUP CODE:'
print '-'*60
print_exc(file=stdout)
enigma.quitMainloop(5)
print '-'*60
| gpl-2.0 | 6,123,206,204,391,660,000 | 27.636207 | 138 | 0.744054 | false |
tdtrask/ansible | test/units/modules/cloud/amazon/test_lambda.py | 57 | 11980 | #
# (c) 2017 Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
import copy
import pytest
from ansible.compat.tests.mock import MagicMock, Mock, patch
from ansible.module_utils import basic
from units.modules.utils import set_module_args
boto3 = pytest.importorskip("boto3")
# lambda is a keyword so we have to hack this.
_temp = __import__("ansible.modules.cloud.amazon.lambda")
lda = getattr(_temp.modules.cloud.amazon, "lambda")
base_lambda_config = {
'FunctionName': 'lambda_name',
'Role': 'arn:aws:iam::987654321012:role/lambda_basic_execution',
'Handler': 'lambda_python.my_handler',
'Description': 'this that the other',
'Timeout': 3,
'MemorySize': 128,
'Runtime': 'python2.7',
'CodeSha256': 'AqMZ+xptM7aC9VXu+5jyp1sqO+Nj4WFMNzQxtPMP2n8=',
}
one_change_lambda_config = copy.copy(base_lambda_config)
one_change_lambda_config['Timeout'] = 4
two_change_lambda_config = copy.copy(one_change_lambda_config)
two_change_lambda_config['Role'] = 'arn:aws:iam::987654321012:role/lambda_advanced_execution'
code_change_lambda_config = copy.copy(base_lambda_config)
code_change_lambda_config['CodeSha256'] = 'P+Zy8U4T4RiiHWElhL10VBKj9jw4rSJ5bm/TiW+4Rts='
base_module_args = {
"region": "us-west-1",
"name": "lambda_name",
"state": "present",
"zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
"runtime": 'python2.7',
"role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
"memory_size": 128,
"timeout": 3,
"handler": 'lambda_python.my_handler'
}
module_args_with_environment = dict(base_module_args, environment_variables={
"variable_name": "variable_value"
})
def make_mock_no_connection_connection(config):
"""return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
lambda_client_double = MagicMock()
lambda_client_double.get_function.configure_mock(
return_value=False
)
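    # a falsey get_function result simulates a lambda function that does not exist yet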
lambda_client_double.update_function_configuration.configure_mock(
return_value={
'Version': 1
}
)
fake_boto3_conn = Mock(return_value=lambda_client_double)
return (fake_boto3_conn, lambda_client_double)
def make_mock_connection(config):
"""return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
lambda_client_double = MagicMock()
lambda_client_double.get_function.configure_mock(
return_value={
'Configuration': config
}
)
lambda_client_double.update_function_configuration.configure_mock(
return_value={
'Version': 1
}
)
fake_boto3_conn = Mock(return_value=lambda_client_double)
return (fake_boto3_conn, lambda_client_double)
class AnsibleFailJson(Exception):
pass
def fail_json_double(*args, **kwargs):
"""works like fail_json but returns module results inside exception instead of stdout"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
# TODO: def test_handle_different_types_in_config_params():
def test_create_lambda_if_not_exist():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_no_connection_connection(code_change_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
"unexpectedly updated lambda configuration when should have only created"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"update lambda function code when function should have been created only"
assert(len(lambda_client_double.create_function.mock_calls) > 0), \
"failed to call create_function "
(create_args, create_kwargs) = lambda_client_double.create_function.call_args
assert (len(create_kwargs) > 0), "expected create called with keyword args, none found"
try:
# For now I assume that we should NOT send an empty environment. It might
# be okay / better to explicitly send an empty environment. However `None'
# is not acceptable - mikedlr
create_kwargs["Environment"]
raise(Exception("Environment sent to boto when none expected"))
except KeyError:
pass # We are happy, no environment is fine
def test_update_lambda_if_code_changed():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_connection(code_change_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
"unexpectedly updatede lambda configuration when only code changed"
assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
"lambda function update called multiple times when only one time should be needed"
assert(len(lambda_client_double.update_function_code.mock_calls) > 1), \
"failed to update lambda function when code changed"
# 3 because after uploading we call into the return from mock to try to find what function version
# was returned so the MagicMock actually sees two calls for one update.
assert(len(lambda_client_double.update_function_code.mock_calls) < 3), \
"lambda function code update called multiple times when only one time should be needed"
def test_update_lambda_if_config_changed():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_connection(two_change_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
"failed to update lambda function when configuration changed"
assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
"lambda function update called multiple times when only one time should be needed"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
def test_update_lambda_if_only_one_config_item_changed():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_connection(one_change_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
"failed to update lambda function when configuration changed"
assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
"lambda function update called multiple times when only one time should be needed"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
def test_update_lambda_if_added_environment_variable():
set_module_args(module_args_with_environment)
(boto3_conn_double, lambda_client_double) = make_mock_connection(base_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
"failed to update lambda function when configuration changed"
assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
"lambda function update called multiple times when only one time should be needed"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
(update_args, update_kwargs) = lambda_client_double.update_function_configuration.call_args
assert (len(update_kwargs) > 0), "expected update configuration called with keyword args, none found"
assert update_kwargs['Environment']['Variables'] == module_args_with_environment['environment_variables']
def test_dont_update_lambda_if_nothing_changed():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_connection(base_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
"updated lambda function when no configuration changed"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
def test_warn_region_not_specified():
set_module_args({
"name": "lambda_name",
"state": "present",
# Module is called without a region causing error
# "region": "us-east-1",
"zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
"runtime": 'python2.7',
"role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
"handler": 'lambda_python.my_handler'})
get_aws_connection_info_double = Mock(return_value=(None, None, None))
with patch.object(lda, 'get_aws_connection_info', get_aws_connection_info_double):
with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
try:
lda.main()
except AnsibleFailJson as e:
result = e.args[0]
assert("region must be specified" in result['msg'])
| gpl-3.0 | 8,253,106,449,008,656,000 | 41.183099 | 109 | 0.696494 | false |
qwertyezi/Test | text/freeline_core/gradle_inc_build.py | 5 | 43598 | # -*- coding:utf8 -*-
from __future__ import print_function
import os
import shutil
import android_tools
from build_commands import CompileCommand, IncAaptCommand, IncJavacCommand, IncDexCommand
from builder import IncrementalBuilder, Builder
from gradle_tools import get_project_info, GradleDirectoryFinder, GradleSyncClient, GradleSyncTask, \
GradleCleanCacheTask, GradleMergeDexTask, get_sync_native_file_path, fix_package_name, DataBindingProcessor, \
DatabindingDirectoryLookUp
from task import find_root_tasks, find_last_tasks, Task
from utils import get_file_content, write_file_content, is_windows_system, cexec, load_json_cache, get_md5, \
write_json_cache
from tracing import Tracing
from exceptions import FreelineException
class GradleIncBuilder(IncrementalBuilder):
def __init__(self, changed_files, config, task_engine, project_info=None):
IncrementalBuilder.__init__(self, changed_files, config, task_engine, builder_name="gradle_inc_builder")
self._project_info = project_info
self._tasks_dictionary = {}
self._module_dependencies = {}
self._all_modules = []
self._is_art = False
self._module_dir_map = {}
self._has_res_changed = self.__is_any_modules_have_res_changed()
self._changed_modules = self.__changed_modules()
self._original_changed_files = dict(changed_files)
def check_build_environment(self):
if not self._project_info:
self._project_info = get_project_info(self._config)
self._all_modules = self._project_info.keys()
for item in self._project_info.values():
self._module_dir_map[item['name']] = item['relative_dir']
for key, value in self._project_info.iteritems():
self._module_dependencies[key] = [item for item in value['local_module_dep']]
self._is_art = android_tools.get_device_sdk_version_by_adb(Builder.get_adb(self._config)) > 20
        # merge resource files modified in any module into the main module's resources
self.__merge_res_files()
self.__merge_native_files()
def generate_sorted_build_tasks(self):
"""
        sort build tasks according to the modules' dependencies
:return: None
"""
for module in self._all_modules:
task = android_tools.AndroidIncrementalBuildTask(module, self.__setup_inc_command(module))
self._tasks_dictionary[module] = task
for module in self._all_modules:
task = self._tasks_dictionary[module]
for dep in self._module_dependencies[module]:
task.add_parent_task(self._tasks_dictionary[dep])
def __setup_inc_command(self, module):
return GradleCompileCommand(module, self.__setup_invoker(module))
def __setup_invoker(self, module):
return GradleIncBuildInvoker(module, self._project_info[module]['path'], self._config,
self._changed_files['projects'][module], self._project_info[module], self._is_art,
all_module_info=self._project_info, module_dir_map=self._module_dir_map,
is_any_modules_have_res_changed=self._has_res_changed,
changed_modules=self._changed_modules)
def __merge_res_files(self):
main_res = self._changed_files['projects'][self._config['main_project_name']]
for module, file_dict in self._changed_files['projects'].iteritems():
if module == self._config['main_project_name']:
continue
for key, files in file_dict.iteritems():
if key == 'res' or key == 'assets':
main_res[key].extend(files)
self._changed_files['projects'][self._config['main_project_name']] = main_res
def __merge_native_files(self):
so_files = []
for module, file_dict in self._changed_files['projects'].iteritems():
for key, files in file_dict.iteritems():
if key == 'so':
for m in range(len(files)):
self.debug('append {} to native queue'.format(files[m]))
so_files.append(files[m])
if len(so_files) > 0:
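            # bundle the changed .so files into a single zip for the native-library sync step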
from zipfile import ZipFile
with ZipFile(get_sync_native_file_path(self._config['build_cache_dir']), "w") as nativeZip:
for m in range(len(so_files)):
nativeZip.write(so_files[m])
def __is_any_modules_have_res_changed(self):
for key, value in self._changed_files['projects'].iteritems():
if len(value['res']) > 0:
                self.debug('module {} has changed resources'.format(key))
return True
return False
def __changed_modules(self):
modules = []
for module, file_dict in self._changed_files['projects'].iteritems():
            if len(file_dict['src']) > 0 or len(file_dict['res']) > 0 or len(file_dict['assets']) > 0:
modules.append(module)
return modules
def incremental_build(self):
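        # build the task graph: aapt runs first, then the per-module compile chains,
        # then dex merge -> sync -> cache clean -> stat update; the device-connect
        # task runs in parallel and joins the graph at the sync step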
merge_dex_task = GradleMergeDexTask(self._config['build_cache_dir'], self._all_modules, self._project_info)
aapt_task = GradleAaptTask(self.__setup_invoker(self._config['main_project_name']),
self._original_changed_files, self._changed_files)
task_list = self._tasks_dictionary.values()
last_tasks = find_last_tasks(task_list)
for rtask in find_root_tasks(task_list):
aapt_task.add_child_task(rtask)
clean_cache_task = GradleCleanCacheTask(self._config['build_cache_dir'], self._project_info)
sync_client = GradleSyncClient(self._is_art, self._config, self._project_info, self._all_modules)
connect_task = android_tools.ConnectDeviceTask(sync_client)
sync_task = GradleSyncTask(sync_client, self._config['build_cache_dir'])
update_stat_task = android_tools.UpdateStatTask(self._config, self._changed_files['projects'])
map(lambda task: task.add_child_task(merge_dex_task), last_tasks)
connect_task.add_child_task(sync_task)
merge_dex_task.add_child_task(sync_task)
sync_task.add_child_task(clean_cache_task)
clean_cache_task.add_child_task(update_stat_task)
# self._task_engine.add_root_task(find_root_tasks(task_list))
self._task_engine.add_root_task(aapt_task)
self._task_engine.add_root_task(connect_task)
self._task_engine.start()
class GradleAaptTask(Task):
def __init__(self, invoker, original_changed_files, changed_files):
Task.__init__(self, 'gradle_aapt_task')
self._invoker = invoker
self._original_changed_files = original_changed_files
self._changed_files_ref = changed_files
def execute(self):
should_run_res_task = self._invoker.check_res_task()
if not should_run_res_task:
self.debug('no need to execute')
return
self.debug('start to execute aapt command...')
self._invoker.fill_dependant_jars()
self._invoker.check_ids_change()
with Tracing("generate_id_keeper_files"):
self._invoker.generate_r_file()
# self._invoker.backup_res_files()
with Tracing("incremental_databinding_process"):
self._invoker.process_databinding(self._original_changed_files, self._changed_files_ref)
with Tracing("run_incremental_aapt_task"):
self._invoker.run_aapt_task()
with Tracing("check_other_modules_resources"):
self._invoker.check_other_modules_resources()
self._invoker.recover_original_file_path()
class GradleCompileCommand(CompileCommand):
def __init__(self, module, invoker):
self._module = module
CompileCommand.__init__(self, 'gradle_{}_compile_command'.format(module), invoker)
def _setup(self):
# self.add_command(GradleIncAaptCommand(self._module, self._invoker))
self.add_command(GradleIncJavacCommand(self._module, self._invoker))
self.add_command(GradleIncDexCommand(self._module, self._invoker))
def execute(self):
map(lambda command: command.execute(), self.command_list)
class GradleIncAaptCommand(IncAaptCommand):
def __init__(self, module_name, invoker):
IncAaptCommand.__init__(self, module_name, invoker)
def execute(self):
should_run_res_task = self._invoker.check_res_task()
if not should_run_res_task:
self.debug('no need to execute')
return
self.debug('start to execute aapt command...')
self._invoker.fill_dependant_jars()
self._invoker.check_ids_change()
self._invoker.generate_r_file()
# self._invoker.backup_res_files()
self._invoker.run_aapt_task()
class GradleIncJavacCommand(IncJavacCommand):
def __init__(self, module_name, invoker):
IncJavacCommand.__init__(self, module_name, invoker)
def execute(self):
self._invoker.check_r_md5() # check if R.java has changed
# self._invoker.check_other_modules_resources()
should_run_javac_task = self._invoker.check_javac_task()
if not should_run_javac_task:
self.debug('no need to execute')
return
self.debug('start to execute javac command...')
self._invoker.append_r_file()
self._invoker.fill_classpaths()
self._invoker.fill_extra_javac_args()
self._invoker.clean_dex_cache()
self._invoker.run_apt_only()
self._invoker.run_javac_task()
self._invoker.run_retrolambda()
class GradleIncDexCommand(IncDexCommand):
def __init__(self, module_name, invoker):
IncDexCommand.__init__(self, module_name, invoker)
def execute(self):
should_run_dex_task = self._invoker.check_dex_task()
if not should_run_dex_task:
self.debug('no need to execute')
return
self.debug('start to execute dex command...')
self._invoker.run_dex_task()
class GradleIncBuildInvoker(android_tools.AndroidIncBuildInvoker):
def __init__(self, module_name, path, config, changed_files, module_info, is_art, all_module_info=None,
module_dir_map=None, is_any_modules_have_res_changed=False, changed_modules=None):
android_tools.AndroidIncBuildInvoker.__init__(self, module_name, path, config, changed_files, module_info,
is_art=is_art)
self._all_module_info = all_module_info
self._module_dir_map = module_dir_map
self._is_any_modules_have_res_changed = is_any_modules_have_res_changed
self._changed_modules = changed_modules
self._merged_res_paths = []
self._merged_res_paths.append(self._finder.get_backup_res_dir())
self._replace_mapper = {}
self._is_retrolambda_enabled = 'retrolambda' in self._config and self._name in self._config['retrolambda'] \
and self._config['retrolambda'][self._name]['enabled']
self._is_databinding_enabled = 'databinding_modules' in self._config and self._name in self._config[
'databinding_modules']
self._is_dagger_enabled = 'apt_libraries' in self._config and self._config['apt_libraries']['dagger']
self._apt_output_dir = None
for mname in self._all_module_info.keys():
if mname in self._config['project_source_sets']:
self._merged_res_paths.extend(self._config['project_source_sets'][mname]['main_res_directory'])
self._merged_res_paths.extend(self._config['project_source_sets'][mname]['main_assets_directory'])
def before_execute(self):
self._finder = GradleDirectoryFinder(self._name, self._module_path, self._cache_dir,
package_name=self._module_info['packagename'], config=self._config)
def check_res_task(self):
if self._name != self._config['main_project_name']:
self.debug('skip {} aapt task'.format(self._name))
return False
return android_tools.AndroidIncBuildInvoker.check_res_task(self)
def fill_dependant_jars(self):
self._res_dependencies = self._module_info['dep_jar_path']
def process_databinding(self, original_changed_files, changed_files_ref):
if 'databinding' in self._config:
            if len(self._config['databinding_modules']) == 0:
self.debug('no modules for processing databinding')
return
databinding_config = self._config['databinding']
DatabindingDirectoryLookUp.load_path_map(self._config['build_cache_dir'])
            processor = DataBindingProcessor(self._config)
for module_config in databinding_config:
module_name = module_config['name']
if module_name in original_changed_files['projects']:
resources_files = original_changed_files['projects'][module_config['name']]['res']
if len(resources_files) == 0:
                        self.debug('module {} has no changed resource files'.format(module_name))
continue
changed_files_map = {}
res_dirs = self._config['project_source_sets'][module_name]['main_res_directory']
                    # TODO: handle changed resource files that do not match any known res directory
for path in resources_files:
for rdir in res_dirs:
if path.startswith(rdir):
if rdir in changed_files_map:
changed_files_map[rdir].append(path)
else:
changed_files_map[rdir] = [path]
break
for rdir in changed_files_map.keys():
output_res_dir = DatabindingDirectoryLookUp.find_target_res_path(rdir)
output_java_dir = DatabindingDirectoryLookUp.find_target_java_path(rdir)
output_layoutinfo_dir = DatabindingDirectoryLookUp.get_merged_layoutinfo_dir(self._cache_dir)
if output_res_dir and output_java_dir and output_layoutinfo_dir:
changed_files_list = changed_files_map[rdir]
                            processor.process_module_databinding(module_config, rdir, output_res_dir,
                                                                 output_layoutinfo_dir, output_java_dir,
                                                                 self._config['sdk_directory'],
                                                                 changed_files=changed_files_list)
# replace file path
for path in changed_files_list:
new_path = path.replace(rdir, output_res_dir)
self._merged_res_paths.append(output_res_dir) # append new path prefix
self.debug('replace {} with output path: {}'.format(path, new_path))
self._replace_mapper[new_path] = path
self._changed_files['res'].remove(path)
self._changed_files['res'].append(new_path)
# mark java compiler
if os.path.exists(output_layoutinfo_dir):
has_layoutinfo = False
for name in os.listdir(output_layoutinfo_dir):
if name.endswith('.xml'):
has_layoutinfo = True
break
if has_layoutinfo:
info_file = os.path.join(output_java_dir, 'android', 'databinding', 'layouts',
'DataBindingInfo.java')
if os.path.exists(info_file):
append_files = [info_file]
                                    append_files.extend(processor.extract_related_java_files(module_name,
                                                                                             output_layoutinfo_dir))
if 'apt' not in changed_files_ref['projects'][module_name]:
changed_files_ref['projects'][module_name]['apt'] = []
for fpath in append_files:
self.debug('add {} to {} module'.format(fpath, module_name))
changed_files_ref['projects'][module_name]['apt'].append(fpath)
if not android_tools.is_src_changed(self._config['build_cache_dir']):
android_tools.mark_src_changed(self._config['build_cache_dir'])
def _get_aapt_args(self):
aapt_args = [self._aapt, 'package', '-f', '-I',
os.path.join(self._config['compile_sdk_directory'], 'android.jar'),
'-M', fix_package_name(self._config, self._finder.get_dst_manifest_path())]
for rdir in self._config['project_source_sets'][self._name]['main_res_directory']:
if os.path.exists(rdir):
aapt_args.append('-S')
aapt_args.append(DatabindingDirectoryLookUp.find_target_res_path(rdir))
for rdir in self._module_info['local_dep_res_path']:
if os.path.exists(rdir):
aapt_args.append('-S')
aapt_args.append(DatabindingDirectoryLookUp.find_target_res_path(rdir))
for resdir in self._module_info['dep_res_path']:
if os.path.exists(resdir):
aapt_args.append('-S')
aapt_args.append(resdir)
if 'extra_dep_res_paths' in self._config and self._config['extra_dep_res_paths'] is not None:
arr = self._config['extra_dep_res_paths']
for path in arr:
path = path.strip()
if os.path.isdir(path):
aapt_args.append('-S')
aapt_args.append(path)
aapt_args.append('-S')
aapt_args.append(self._finder.get_backup_res_dir())
freeline_assets_dir = os.path.join(self._config['build_cache_dir'], 'freeline-assets')
aapt_args.append('-A')
aapt_args.append(freeline_assets_dir)
for adir in self._config['project_source_sets'][self._name]['main_assets_directory']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
for adir in self._module_info['local_dep_assets_path']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
for adir in self._module_info['dep_assets_path']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
gen_path = self._finder.get_backup_dir()
aapt_args.append('--custom-package')
aapt_args.append(self._config['package'])
aapt_args.append('-m')
aapt_args.append('-J')
aapt_args.append(gen_path)
aapt_args.append('--auto-add-overlay')
aapt_args.append('-P')
aapt_args.append(self._finder.get_public_xml_path())
final_changed_list = self._parse_changed_list()
if is_windows_system():
final_changed_list = [fpath.replace('\\', '/') for fpath in final_changed_list]
final_changed_list_chain = ':'.join(final_changed_list)
aapt_args.append('-F')
aapt_args.append(self._finder.get_dst_res_pack_path(self._name))
aapt_args.append('--debug-mode')
aapt_args.append('--no-version-vectors')
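        # incremental resource packaging (--buildIncrement) is only supported on ART devices (sdk version > 20)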
if len(final_changed_list_chain) > 0 and self._is_art:
aapt_args.append('--buildIncrement')
aapt_args.append(final_changed_list_chain)
aapt_args.append('--resoucres-md5-cache-path')
aapt_args.append(os.path.join(self._cache_dir, "arsc_cache.dat"))
aapt_args.append('--ignore-assets')
aapt_args.append('public_id.xml:public.xml:*.bak:.*')
if 'ignore_resource_ids' in self._config and len(self._config['ignore_resource_ids']) > 0 and not is_windows_system():
aapt_args.append('--ignore-ids')
aapt_args.append(':'.join(self._config['ignore_resource_ids']))
return aapt_args, final_changed_list
def recover_original_file_path(self):
copylist = list(self._changed_files['res'])
for fpath in copylist:
if fpath in self._replace_mapper:
self._changed_files['res'].remove(fpath)
self._changed_files['res'].append(self._replace_mapper[fpath])
def check_other_modules_resources(self):
if self._name == self._config['main_project_name'] and self._all_module_info is not None:
changed_modules = self._changed_modules
if len(changed_modules) > 0:
self.__modify_main_r()
for module in changed_modules:
fpath = self.__modify_other_modules_r(self._all_module_info[module]['packagename'])
self.debug('modify {}'.format(fpath))
def __modify_main_r(self):
main_r_fpath = os.path.join(self._finder.get_backup_dir(),
self._module_info['packagename'].replace('.', os.sep), 'R.java')
self.debug('modify {}'.format(main_r_fpath))
buf = GradleIncBuildInvoker.remove_final_tag(get_file_content(main_r_fpath))
buf = android_tools.fix_unicode_parse_error(buf, main_r_fpath)
write_file_content(main_r_fpath, buf)
target_main_r_dir = os.path.join(self.__get_freeline_backup_r_dir(),
self._module_info['packagename'].replace('.', os.sep))
if not os.path.exists(target_main_r_dir):
os.makedirs(target_main_r_dir)
target_main_r_path = os.path.join(target_main_r_dir, 'R.java')
self.debug('copy {} to {}'.format(main_r_fpath, target_main_r_path))
shutil.copy(main_r_fpath, target_main_r_path)
def append_r_file(self):
if self._name != self._config['main_project_name']:
backupdir = self.__get_freeline_backup_r_dir()
main_r_path = os.path.join(backupdir, self._config['package'].replace('.', os.sep), 'R.java')
            # if main_r_path exists, resources were modified, so R.java must be added to the classpath
if os.path.exists(main_r_path):
pns = [self._config['package'], self._module_info['packagename']]
for m in self._module_info['local_module_dep']:
pns.append(self._all_module_info[m]['packagename'])
for pn in pns:
rpath = os.path.join(backupdir, pn.replace('.', os.sep), 'R.java')
if os.path.exists(rpath) and rpath not in self._changed_files['src']:
self._changed_files['src'].append(rpath)
self.debug('add R.java to changed list: ' + rpath)
elif pn == self._module_info['packagename']:
fpath = self.__modify_other_modules_r(pn)
self.debug('modify {}'.format(fpath))
if fpath and os.path.exists(fpath):
self._changed_files['src'].append(fpath)
self.debug('add R.java to changed list: ' + fpath)
else:
if is_windows_system():
main_r_path = os.path.join(self._finder.get_backup_dir(),
self._module_info['packagename'].replace('.', os.sep), 'R.java')
if os.path.exists(main_r_path):
content = android_tools.fix_unicode_parse_error(get_file_content(main_r_path), main_r_path)
write_file_content(main_r_path, content)
def fill_classpaths(self):
# classpaths:
# 1. patch classes
# 2. dependent modules' patch classes
# 3. android.jar
# 4. third party jars
# 5. generated classes in build directory
patch_classes_cache_dir = self._finder.get_patch_classes_cache_dir()
self._classpaths.append(patch_classes_cache_dir)
self._classpaths.append(self._finder.get_dst_classes_dir())
for module in self._module_info['local_module_dep']:
finder = GradleDirectoryFinder(module, self._module_dir_map[module], self._cache_dir)
self._classpaths.append(finder.get_patch_classes_cache_dir())
# add main module classes dir to classpath to generate databinding files
main_module_name = self._config['main_project_name']
if self._name != main_module_name and self._is_databinding_enabled:
finder = GradleDirectoryFinder(main_module_name, self._module_dir_map[main_module_name], self._cache_dir,
config=self._config)
self._classpaths.append(finder.get_dst_classes_dir())
self._classpaths.append(os.path.join(self._config['compile_sdk_directory'], 'android.jar'))
self._classpaths.extend(self._module_info['dep_jar_path'])
# remove existing same-name class in build directory
srcdirs = self._config['project_source_sets'][self._name]['main_src_directory']
for dirpath, dirnames, files in os.walk(patch_classes_cache_dir):
for fn in files:
if self._is_r_file_changed and self._module_info['packagename'] + '.R.' in fn:
android_tools.delete_class(dirpath, fn.replace('.class', ''))
if fn.endswith('.class') and '$' not in fn and 'R.' not in fn and 'Manifest.' not in fn:
cp = os.path.join(dirpath, fn)
java_src = cp.replace('.class', '.java').split('classes' + os.path.sep)[1]
                    existence = False
for src_dir in srcdirs:
if os.path.exists(os.path.join(src_dir, java_src)):
existence = True
break
if not existence:
android_tools.delete_class(dirpath, fn.replace('.class', ''))
def fill_extra_javac_args(self):
if 'apt' in self._config and self._name in self._config['apt'] and self._config['apt'][self._name]['enabled']:
apt_config = self._config['apt'][self._name]
self._apt_output_dir = apt_config['aptOutput']
apt_args = ['-s', apt_config['aptOutput']]
if apt_config['processor']:
apt_args.append('-processor')
apt_args.append(apt_config['processor'])
if not apt_config['disableDiscovery']:
apt_args.append('-processorpath')
apt_args.append(apt_config['processorPath'])
apt_args.extend(apt_config['aptArgs'])
self._extra_javac_args.extend(apt_args)
elif self._is_databinding_enabled:
if self._name == self._config['main_project_name']:
apt_output = os.path.join(self._config['build_directory'], 'generated', 'source', 'apt',
self._config['product_flavor'], 'debug')
else:
apt_output = os.path.join(self._config['build_directory'], 'generated', 'source', 'apt', 'release')
self._apt_output_dir = apt_output
if not os.path.exists(apt_output):
os.makedirs(apt_output)
if self._config['databinding_compiler_jar'] != '':
self.debug('add compiler jar to classpath: {}'.format(self._config['databinding_compiler_jar']))
self._module_info['dep_jar_path'].append(self._config['databinding_compiler_jar'])
apt_args = ['-s', apt_output, '-processorpath', os.pathsep.join(self._module_info['dep_jar_path'])]
self._extra_javac_args.extend(apt_args)
def run_apt_only(self):
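        # run the annotation-processing pass on its own so freshly generated sources
        # can be diffed against the apt cache and added to the javac change list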
if self._is_databinding_enabled and self._should_run_databinding_apt():
apt_args = self._generate_java_compile_args(extra_javac_args_enabled=True)
self.debug('apt exec: ' + ' '.join(apt_args))
output, err, code = cexec(apt_args, callback=None)
if code != 0:
raise FreelineException('apt compile failed.', '{}\n{}'.format(output, err))
if self._apt_output_dir and os.path.exists(self._apt_output_dir):
apt_cache_path = os.path.join(self._config['build_cache_dir'], 'apt_files_stat_cache.json')
if os.path.exists(apt_cache_path):
apt_cache = load_json_cache(apt_cache_path)
for dirpath, dirnames, files in os.walk(self._apt_output_dir):
for fn in files:
fpath = os.path.join(dirpath, fn)
if apt_cache and self._name in apt_cache:
if fpath in apt_cache[self._name]:
new_md5 = get_md5(fpath)
if new_md5 != apt_cache[self._name][fpath]['md5']:
self.debug('detect new md5 value, add apt file to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
else:
self.debug('find new apt file, add to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
else:
self.debug('apt cache not found, add to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
def run_javac_task(self):
if self._is_only_r_changed() and not self._is_other_modules_has_src_changed:
self._is_need_javac = False
android_tools.clean_src_changed_flag(self._cache_dir)
self.debug('apt process do not generate new files, ignore javac task.')
return
extra_javac_args_enabled = not (self._is_databinding_enabled and self._should_run_databinding_apt())
javacargs = self._generate_java_compile_args(extra_javac_args_enabled=extra_javac_args_enabled)
self.debug('javac exec: ' + ' '.join(javacargs))
output, err, code = cexec(javacargs, callback=None)
if code != 0:
raise FreelineException('incremental javac compile failed.', '{}\n{}'.format(output, err))
else:
if self._is_r_file_changed:
old_r_file = self._finder.get_dst_r_path(config=self._config)
new_r_file = android_tools.DirectoryFinder.get_r_file_path(self._finder.get_backup_dir())
if old_r_file and new_r_file:
shutil.copyfile(new_r_file, old_r_file)
self.debug('copy {} to {}'.format(new_r_file, old_r_file))
def _should_run_databinding_apt(self):
if 'apt' in self._changed_files:
for fpath in self._changed_files['apt']:
if fpath.endswith('DataBindingInfo.java'):
return True
return False
def _generate_java_compile_args(self, extra_javac_args_enabled=False):
javacargs = [self._javac]
arguments = ['-encoding', 'UTF-8', '-g']
if not self._is_retrolambda_enabled:
arguments.extend(['-target', '1.7', '-source', '1.7'])
arguments.append('-cp')
arguments.append(os.pathsep.join(self._classpaths))
for fpath in self._changed_files['src']:
arguments.append(fpath)
if extra_javac_args_enabled:
if 'apt' in self._changed_files:
for fpath in self._changed_files['apt']:
arguments.append(fpath)
filter_tags = []
if self._is_databinding_enabled:
filter_tags.extend(['BindingAdapter', 'BindingConversion', 'Bindable'])
if self._is_dagger_enabled:
filter_tags.extend(['DaggerComponent', 'DaggerModule'])
files = self._get_apt_related_files(filter_tags=filter_tags)
for fpath in files:
if fpath and os.path.exists(fpath) and fpath not in self._changed_files['src']:
if 'apt' in self._changed_files and fpath in self._changed_files['apt']:
continue
self.debug('add apt related file: {}'.format(fpath))
arguments.append(fpath)
arguments.extend(self._extra_javac_args)
arguments.append('-d')
arguments.append(self._finder.get_patch_classes_cache_dir())
# ref: https://support.microsoft.com/en-us/kb/830473
if is_windows_system():
arguments_length = sum(map(len, arguments))
if arguments_length > 8000:
argument_file_path = os.path.join(self._finder.get_module_cache_dir(), 'javac_args_file')
self.debug('arguments length: {} > 8000, save args to {}'.format(arguments_length, argument_file_path))
if os.path.exists(argument_file_path):
os.remove(argument_file_path)
arguments_content = ' '.join(arguments)
self.debug('javac arguments: ' + arguments_content)
write_file_content(argument_file_path, arguments_content)
arguments = ['@{}'.format(argument_file_path)]
javacargs.extend(arguments)
return javacargs
def _get_apt_related_files(self, filter_tags=None):
path = self._get_apt_related_files_cache_path()
if os.path.exists(path):
return load_json_cache(path)
else:
info_path = os.path.join(self._cache_dir, 'freeline_annotation_info.json')
if os.path.exists(info_path):
info_cache = load_json_cache(info_path)
related_files = []
for anno, files in info_cache.iteritems():
if filter_tags and anno not in filter_tags:
self.debug('ignore annotation: {}'.format(anno))
continue
for info in files:
if info['module'] == self._name or info['module'] in self._module_info['local_module_dep']:
if 'java_path' in info and info['java_path']:
related_files.append(info['java_path'])
write_json_cache(self._get_apt_related_files_cache_path(), related_files)
return related_files
return []
def _append_new_related_files(self):
related_files = self._get_apt_related_files()
def append_files(file_list):
for fpath in file_list:
if fpath and fpath not in related_files:
self.debug('add new related file: {}'.format(fpath))
related_files.append(fpath)
append_files(self._changed_files['src'])
append_files(self._changed_files['apt'])
write_json_cache(self._get_apt_related_files_cache_path(), related_files)
def _get_apt_related_files_cache_path(self):
return os.path.join(self._cache_dir, 'apt_related_files_cache.json')
def run_retrolambda(self):
if self._is_need_javac and self._is_retrolambda_enabled:
lambda_config = self._config['retrolambda'][self._name]
target_dir = self._finder.get_patch_classes_cache_dir()
jar_args = [Builder.get_java(self._config),
'-Dretrolambda.inputDir={}'.format(target_dir),
'-Dretrolambda.outputDir={}'.format(target_dir)]
if lambda_config['supportIncludeFiles']:
files_stat_path = os.path.join(self._cache_dir, self._name, 'lambda_files_stat.json')
include_files = []
if os.path.exists(files_stat_path):
files_stat = load_json_cache(files_stat_path)
else:
files_stat = {}
for dirpath, dirnames, files in os.walk(target_dir):
for fn in files:
fpath = os.path.join(dirpath, fn)
if fpath not in files_stat:
include_files.append(fpath)
self.debug('incremental build new lambda file: {}'.format(fpath))
else:
if os.path.getmtime(fpath) > files_stat[fpath]['mtime']:
include_files.append(fpath)
self.debug('incremental build lambda file: {}'.format(fpath))
include_files_param = os.pathsep.join(include_files)
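                # 3496 appears to be a conservative command-line length limit; pass the list via a file when it would be exceeded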
if len(include_files_param) > 3496:
include_files_path = os.path.join(self._cache_dir, self._name, 'retrolambda_inc.list')
self.__save_parms_to_file(include_files_path, include_files)
jar_args.append('-Dretrolambda.includedFile={}'.format(include_files_path))
else:
jar_args.append('-Dretrolambda.includedFiles={}'.format(include_files_param))
lambda_classpaths = [target_dir, lambda_config['rtJar']]
lambda_classpaths.extend(self._classpaths)
param = os.pathsep.join(lambda_classpaths)
if lambda_config['supportIncludeFiles'] and len(param) > 3496:
classpath_file = os.path.join(self._cache_dir, self._name, 'retrolambda_classpaths.path')
self.__save_parms_to_file(classpath_file, lambda_classpaths)
jar_args.append('-Dretrolambda.classpathFile={}'.format(classpath_file))
else:
jar_args.append('-Dretrolambda.classpath={}'.format(param))
jar_args.append('-cp')
jar_args.append(lambda_config['targetJar'])
jar_args.append(lambda_config['mainClass'])
self.debug('retrolambda exec: ' + ' '.join(jar_args))
output, err, code = cexec(jar_args, callback=None)
if code != 0:
raise FreelineException('retrolambda compile failed.', '{}\n{}'.format(output, err))
if lambda_config['supportIncludeFiles']:
for fpath in include_files:
if fpath not in files_stat:
files_stat[fpath] = {}
files_stat[fpath]['mtime'] = os.path.getmtime(fpath)
write_json_cache(files_stat_path, files_stat)
self.debug('save lambda files stat to {}'.format(files_stat_path))
def __save_parms_to_file(self, path, params):
if os.path.exists(path):
os.remove(path)
content = ''
for param in params:
content += param + '\n'
write_file_content(path, content)
self.debug('save retrolambda params to {}'.format(path))
def _get_res_incremental_dst_path(self, fpath):
if 'assets' + os.sep in fpath:
return os.path.join(self._finder.get_base_gen_dir(), 'assets', 'debug', fpath.split('assets' + os.sep)[1])
elif 'res' + os.sep in fpath:
return os.path.join(self._finder.get_res_dir(), fpath.split('res' + os.sep)[1])
def _parse_changed_list(self):
changed_list = []
for rfile in self._changed_files['res']:
if rfile not in changed_list:
changed_list.append(self._get_res_relative_path(rfile))
for afile in self._changed_files['assets']:
if afile not in changed_list:
changed_list.append(self._get_res_relative_path(afile))
return changed_list
def _get_res_relative_path(self, res):
if res.startswith('res') or res.startswith('AndroidManifest.xml'):
return res
def path_fix(path):
return path if path.endswith(os.sep) else path + os.sep
for respath in self._merged_res_paths:
respath = path_fix(respath)
if res.startswith(respath):
index = respath.strip(os.sep).rfind(os.sep)
if index >= 0:
res_dir_name = respath[index + 1:].strip(os.sep)
relative_path = os.path.join(res_dir_name, res.replace(respath, ''))
self.debug("find relative path: {}".format(relative_path))
return relative_path
self.debug('relative path not found: {}'.format(res))
return None
def __get_freeline_backup_r_dir(self):
dirpath = os.path.join(self._cache_dir, 'freeline-backup-r')
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return dirpath
def __modify_other_modules_r(self, package_name, finder=None):
if not finder:
finder = self._finder
r_path = android_tools.find_r_file(finder.get_dst_r_dir(), package_name=package_name)
if r_path and os.path.exists(r_path):
target_dir = os.path.join(self.__get_freeline_backup_r_dir(), package_name.replace('.', os.sep))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_path = os.path.join(target_dir, 'R.java')
if not os.path.exists(target_path):
self.debug('copy {} to {}'.format(r_path, target_path))
shutil.copy(r_path, target_path)
content = get_file_content(target_path)
content = GradleIncBuildInvoker.remove_final_tag(content)
content = GradleIncBuildInvoker.extend_main_r(content, self._config['package'])
content = android_tools.fix_unicode_parse_error(content, target_path)
write_file_content(target_path, content)
return target_path
def __find_res_in_which_module(self, res_path):
for module in self._all_module_info.keys():
# rdir = android_tools.get_res_dir(module)
res_dirs = self._config['project_source_sets'][module]['main_res_directory']
for rdir in res_dirs:
if rdir is not None:
if res_path.startswith(rdir) or rdir in res_path:
return module
return None
@staticmethod
def remove_final_tag(content):
content = content.replace('public final class', 'public class').replace('public static final class',
'public static class')
return content
@staticmethod
def extend_main_r(content, main_package_name):
import re
result = re.findall(r'''public static class (.*) \{''', content)
for tag in result:
content = content.replace('class ' + tag + ' {',
'class ' + tag + ' extends ' + main_package_name + '.R.' + tag + ' {')
return content
| apache-2.0 | -2,873,127,968,788,922,400 | 47.658482 | 126 | 0.563466 | false |
davidcox/glumpy | demos/demo-heightmap-2.py | 1 | 3540 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2010 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy, glumpy
import OpenGL.GL as gl
class Mesh(object):
def __init__(self, n=64):
        self.indices = numpy.zeros((n-1,n-1,4), dtype=numpy.uint16)
self.vertices = numpy.zeros((n,n,3), dtype=numpy.float32)
self.texcoords= numpy.zeros((n,n,2), dtype=numpy.float32)
for xi in range(n):
for yi in range(n):
x,y,z = xi/float(n-1), yi/float(n-1), 0
self.vertices[xi,yi] = x-0.5,y-0.5,z
self.texcoords[xi,yi] = x,y
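        # one quad (four vertex indices) per grid cell so the surface can be drawn with GL_QUADS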
for yi in range(n-1):
for xi in range(n-1):
i = yi*n + xi
self.indices[xi,yi] = i,i+1,i+n+1,i+n
def draw(self):
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY);
gl.glVertexPointerf(self.vertices)
gl.glTexCoordPointerf(self.texcoords)
gl.glDrawElementsus(gl.GL_QUADS, self.indices)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY);
if __name__ == '__main__':
window = glumpy.Window(width=800,height=600)
trackball = glumpy.Trackball(60,30,0.75)
mesh = Mesh(64)
# def func3(x,y):
# return (1-x/2+x**5+y**3)*numpy.exp(-x**2-y**2)
# dx, dy = .01, .01
# x = numpy.arange(-3.0, 3.0, dx, dtype=numpy.float32)
# y = numpy.arange(-3.0, 3.0, dy, dtype=numpy.float32)
# Z = func3(*numpy.meshgrid(x, y))
    n = 64
X = numpy.empty((n,n), dtype=numpy.float32)
X.flat = numpy.arange(n)*2*numpy.pi/n*2
Y = numpy.empty((n,n), dtype=numpy.float32)
Y.flat = numpy.arange(n)*2*numpy.pi/n*2
Y = numpy.transpose(Y)
Z = numpy.sin(X) + numpy.cos(Y)
I = glumpy.Image(Z, interpolation='bilinear', cmap=glumpy.colormap.Hot,
gridsize= (31.0,31.0,10.0), elevation=0.25)
def draw_background():
viewport = gl.glGetIntegerv(gl.GL_VIEWPORT)
gl.glDisable (gl.GL_LIGHTING)
gl.glDisable (gl.GL_DEPTH_TEST)
gl.glPolygonMode (gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glBegin(gl.GL_QUADS)
#gl.glColor(0.75,0.75,1.0)
gl.glColor(1.0,1.0,0.75)
gl.glVertex(0,0,-1)
gl.glVertex(viewport[2],0,-1)
gl.glColor(1.0,1.0,1.0)
gl.glVertex(viewport[2],viewport[3],0)
gl.glVertex(0,viewport[3],0)
gl.glEnd()
@window.event
def on_draw():
gl.glClearColor(1,1,1,1)
window.clear()
draw_background()
trackball.push()
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glTranslatef(0,0,-0.125)
gl.glColor4f(1,1,1,1)
I.shader.bind(I.texture,I._lut)
mesh.draw()
I.shader.unbind()
trackball.pop()
@window.event
def on_mouse_drag(x, y, dx, dy, button):
trackball.drag_to(x,y,dx,dy)
@window.event
def on_mouse_scroll(x, y, dx, dy):
trackball.zoom_to(x,y,dx,dy)
@window.timer(60.0)
def update(dt):
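        # advance the sine/cosine phases and rebuild the height field each frame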
global X,Y
X += numpy.pi/150.
Y += numpy.pi/200.
Z[...] = numpy.sin(X) + numpy.cos(Y)
I.update()
window.draw()
window.mainloop()
| bsd-3-clause | 6,586,859,430,881,178,000 | 31.477064 | 78 | 0.544633 | false |
zhuwenping/python-for-android | python-modules/twisted/twisted/words/test/test_jabberxmppstringprep.py | 57 | 4450 | # Copyright (c) 2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.protocols.jabber.xmpp_stringprep import nodeprep, resourceprep, nameprep, crippled
class XMPPStringPrepTest(unittest.TestCase):
"""
The nodeprep stringprep profile is similar to the resourceprep profile,
but does an extra mapping of characters (table B.2) and disallows
more characters (table C.1.1 and eight extra punctuation characters).
Due to this similarity, the resourceprep tests are more extensive, and
the nodeprep tests only address the mappings additional restrictions.
The nameprep profile is nearly identical to the nameprep implementation in
L{encodings.idna}, but that implementation assumes the C{UseSTD4ASCIIRules}
flag to be false. This implementation assumes it to be true, and restricts
the allowed set of characters. The tests here only check for the
differences.
"""
def testResourcePrep(self):
self.assertEquals(resourceprep.prepare(u'resource'), u'resource')
self.assertNotEquals(resourceprep.prepare(u'Resource'), u'resource')
self.assertEquals(resourceprep.prepare(u' '), u' ')
if crippled:
return
self.assertEquals(resourceprep.prepare(u'Henry \u2163'), u'Henry IV')
self.assertEquals(resourceprep.prepare(u'foo\xad\u034f\u1806\u180b'
u'bar\u200b\u2060'
u'baz\ufe00\ufe08\ufe0f\ufeff'),
u'foobarbaz')
self.assertEquals(resourceprep.prepare(u'\u00a0'), u' ')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u1680')
self.assertEquals(resourceprep.prepare(u'\u2000'), u' ')
self.assertEquals(resourceprep.prepare(u'\u200b'), u'')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u0010\u007f')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u0085')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u180e')
self.assertEquals(resourceprep.prepare(u'\ufeff'), u'')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\uf123')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000f1234')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0010f234')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0008fffe')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0010ffff')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\udf42')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\ufffd')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u2ff5')
self.assertEquals(resourceprep.prepare(u'\u0341'), u'\u0301')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u200e')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u202a')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0001')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0042')
self.assertRaises(UnicodeError, resourceprep.prepare, u'foo\u05bebar')
self.assertRaises(UnicodeError, resourceprep.prepare, u'foo\ufd50bar')
#self.assertEquals(resourceprep.prepare(u'foo\ufb38bar'),
# u'foo\u064ebar')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u06271')
self.assertEquals(resourceprep.prepare(u'\u06271\u0628'),
u'\u06271\u0628')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0002')
def testNodePrep(self):
self.assertEquals(nodeprep.prepare(u'user'), u'user')
self.assertEquals(nodeprep.prepare(u'User'), u'user')
self.assertRaises(UnicodeError, nodeprep.prepare, u'us&er')
def testNamePrep(self):
self.assertEquals(nameprep.prepare(u'example.com'), u'example.com')
self.assertEquals(nameprep.prepare(u'Example.com'), u'example.com')
self.assertRaises(UnicodeError, nameprep.prepare, u'[email protected]')
self.assertRaises(UnicodeError, nameprep.prepare, u'-example.com')
self.assertRaises(UnicodeError, nameprep.prepare, u'example-.com')
if crippled:
return
self.assertEquals(nameprep.prepare(u'stra\u00dfe.example.com'),
u'strasse.example.com')
| apache-2.0 | -5,012,356,464,337,888,000 | 51.97619 | 101 | 0.686742 | false |
sumanau7/Ele_CC_Sumanau | lib/IPython/extensions/storemagic.py | 9 | 8183 | # -*- coding: utf-8 -*-
"""
%store magic for lightweight persistence.
Stores variables, aliases and macros in IPython's database.
To automatically restore stored variables at startup, add this to your
:file:`ipython_config.py` file::
c.StoreMagics.autorestore = True
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2012, The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import inspect, os, sys, textwrap
# Our own
from IPython.core.error import UsageError
from IPython.core.magic import Magics, magics_class, line_magic
from traitlets import Bool
from IPython.utils.py3compat import string_types
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
def restore_aliases(ip):
staliases = ip.db.get('stored_aliases', {})
for k,v in staliases.items():
#print "restore alias",k,v # dbg
#self.alias_table[k] = v
ip.alias_manager.define_alias(k,v)
def refresh_variables(ip):
db = ip.db
for key in db.keys('autorestore/*'):
# strip autorestore
justkey = os.path.basename(key)
try:
obj = db[key]
except KeyError:
print("Unable to restore variable '%s', ignoring (use %%store -d to forget!)" % justkey)
print("The error was:", sys.exc_info()[0])
else:
#print "restored",justkey,"=",obj #dbg
ip.user_ns[justkey] = obj
def restore_dhist(ip):
ip.user_ns['_dh'] = ip.db.get('dhist',[])
def restore_data(ip):
refresh_variables(ip)
restore_aliases(ip)
restore_dhist(ip)
@magics_class
class StoreMagics(Magics):
"""Lightweight persistence for python variables.
Provides the %store magic."""
autorestore = Bool(False, config=True, help=
"""If True, any %store-d variables will be automatically restored
when IPython starts.
"""
)
def __init__(self, shell):
super(StoreMagics, self).__init__(shell=shell)
self.shell.configurables.append(self)
if self.autorestore:
restore_data(self.shell)
@line_magic
def store(self, parameter_s=''):
"""Lightweight persistence for python variables.
Example::
In [1]: l = ['hello',10,'world']
In [2]: %store l
In [3]: exit
(IPython session is closed and started again...)
ville@badger:~$ ipython
In [1]: l
NameError: name 'l' is not defined
In [2]: %store -r
In [3]: l
Out[3]: ['hello', 10, 'world']
Usage:
* ``%store`` - Show list of all variables and their current
values
* ``%store spam`` - Store the *current* value of the variable spam
to disk
* ``%store -d spam`` - Remove the variable and its value from storage
* ``%store -z`` - Remove all variables from storage
* ``%store -r`` - Refresh all variables from store (overwrite
current vals)
* ``%store -r spam bar`` - Refresh specified variables from store
(delete current val)
* ``%store foo >a.txt`` - Store value of foo to new file a.txt
* ``%store foo >>a.txt`` - Append value of foo to file a.txt
It should be noted that if you change the value of a variable, you
need to %store it again if you want to persist the new value.
Note also that the variables will need to be pickleable; most basic
python types can be safely %store'd.
Also aliases can be %store'd across sessions.
"""
opts,argsl = self.parse_options(parameter_s,'drz',mode='string')
args = argsl.split(None,1)
ip = self.shell
db = ip.db
# delete
if 'd' in opts:
try:
todel = args[0]
except IndexError:
raise UsageError('You must provide the variable to forget')
else:
try:
del db['autorestore/' + todel]
except:
raise UsageError("Can't delete variable '%s'" % todel)
# reset
elif 'z' in opts:
for k in db.keys('autorestore/*'):
del db[k]
elif 'r' in opts:
if args:
for arg in args:
try:
obj = db['autorestore/' + arg]
except KeyError:
print("no stored variable %s" % arg)
else:
ip.user_ns[arg] = obj
else:
restore_data(ip)
# run without arguments -> list variables & values
elif not args:
vars = db.keys('autorestore/*')
vars.sort()
if vars:
size = max(map(len, vars))
else:
size = 0
print('Stored variables and their in-db values:')
fmt = '%-'+str(size)+'s -> %s'
get = db.get
for var in vars:
justkey = os.path.basename(var)
# print 30 first characters from every var
print(fmt % (justkey, repr(get(var, '<unavailable>'))[:50]))
# default action - store the variable
else:
# %store foo >file.txt or >>file.txt
if len(args) > 1 and args[1].startswith('>'):
fnam = os.path.expanduser(args[1].lstrip('>').lstrip())
if args[1].startswith('>>'):
fil = open(fnam, 'a')
else:
fil = open(fnam, 'w')
obj = ip.ev(args[0])
print("Writing '%s' (%s) to file '%s'." % (args[0],
obj.__class__.__name__, fnam))
if not isinstance (obj, string_types):
from pprint import pprint
pprint(obj, fil)
else:
fil.write(obj)
if not obj.endswith('\n'):
fil.write('\n')
fil.close()
return
# %store foo
try:
obj = ip.user_ns[args[0]]
except KeyError:
# it might be an alias
name = args[0]
try:
cmd = ip.alias_manager.retrieve_alias(name)
except ValueError:
raise UsageError("Unknown variable '%s'" % name)
staliases = db.get('stored_aliases',{})
staliases[name] = cmd
db['stored_aliases'] = staliases
print("Alias stored: %s (%s)" % (name, cmd))
return
else:
modname = getattr(inspect.getmodule(obj), '__name__', '')
if modname == '__main__':
print(textwrap.dedent("""\
                Warning: %s is %s
Proper storage of interactively declared classes (or instances
of those classes) is not possible! Only instances
of classes in real modules on file system can be %%store'd.
""" % (args[0], obj) ))
return
#pickled = pickle.dumps(obj)
db[ 'autorestore/' + args[0] ] = obj
print("Stored '%s' (%s)" % (args[0], obj.__class__.__name__))
def load_ipython_extension(ip):
"""Load the extension in IPython."""
ip.register_magics(StoreMagics)
| apache-2.0 | 5,808,235,975,672,443,000 | 32.954357 | 100 | 0.476598 | false |
h2oai/h2o | py/testdir_release/c3/test_c3_exec_copy.py | 9 | 4111 | import unittest, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_glm, h2o_common, h2o_exec as h2e
import h2o_print
DO_GLM = True
LOG_MACHINE_STATS = False
# fails during exec env push: the second import has to delete the first import's key
DO_DOUBLE_IMPORT = False
print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"
class releaseTest(h2o_common.ReleaseCommon, unittest.TestCase):
def sub_c3_nongz_fvec_long(self, csvFilenameList):
# a kludge
h2o.setup_benchmark_log()
bucket = 'home-0xdiag-datasets'
importFolderPath = 'manyfiles-nflx'
print "Using nongz'ed files in", importFolderPath
if LOG_MACHINE_STATS:
benchmarkLogging = ['cpu', 'disk', 'network']
else:
benchmarkLogging = []
pollTimeoutSecs = 120
retryDelaySecs = 10
for trial, (csvFilepattern, csvFilename, totalBytes, timeoutSecs) in enumerate(csvFilenameList):
csvPathname = importFolderPath + "/" + csvFilepattern
if DO_DOUBLE_IMPORT:
(importResult, importPattern) = h2i.import_only(bucket=bucket, path=csvPathname, schema='local')
importFullList = importResult['files']
importFailList = importResult['fails']
print "\n Problem if this is not empty: importFailList:", h2o.dump_json(importFailList)
# this accumulates performance stats into a benchmark log over multiple runs
# good for tracking whether we're getting slower or faster
h2o.cloudPerfH2O.change_logfile(csvFilename)
h2o.cloudPerfH2O.message("")
h2o.cloudPerfH2O.message("Parse " + csvFilename + " Start--------------------------------")
start = time.time()
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local',
hex_key="A.hex", timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse #", trial, "completed in", "%6.2f" % elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "Parse result['destination_key']:", parseResult['destination_key']
h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=False)
fileMBS = (totalBytes/1e6)/elapsed
msg = '{!s} jvms, {!s}GB heap, {:s} {:s} {:6.2f} MB/sec for {:.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, csvFilepattern, csvFilename, fileMBS, elapsed)
print msg
h2o.cloudPerfH2O.message(msg)
h2o_cmd.checkKeyDistribution()
# are the unparsed keys slowing down exec?
h2i.delete_keys_at_all_nodes(pattern="manyfile")
execExpr = 'B.hex=A.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
execExpr = 'C.hex=B.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
execExpr = 'D.hex=C.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
#***********************************************************************
    # these will be tracked individually by jenkins, which is nice
#***********************************************************************
def test_c3_exec_copy(self):
avgMichalSize = 237270000
csvFilenameList= [
("*[1][0-4][0-9].dat", "file_50_A.dat", 50 * avgMichalSize, 1800),
]
self.sub_c3_nongz_fvec_long(csvFilenameList)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | 5,290,331,159,494,751,000 | 43.204301 | 116 | 0.56434 | false |
marcuskelly/recover | Lib/site-packages/alembic/testing/plugin/bootstrap.py | 43 | 1646 | """
Bootstrapper for nose/pytest plugins.
The entire rationale for this system is to get the modules in plugin/
imported without importing all of the supporting library, so that we can
set up things for testing before coverage starts.
The rationale for all of plugin/ being *in* the supporting library in the
first place is so that the testing and plugin suite is available to other
libraries, mainly external SQLAlchemy and Alembic dialects, to make use
of the same test environment and standard suites available to
SQLAlchemy/Alembic themselves without the need to ship/install a separate
package outside of SQLAlchemy.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0.
"""
import os
import sys
bootstrap_file = locals()['bootstrap_file']
to_bootstrap = locals()['to_bootstrap']
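# Hedged sketch of the calling convention (the real caller is the test
# harness; the path variable below is illustrative, not part of this module):
#
#   ns = {"bootstrap_file": path_to_this_file, "to_bootstrap": "pytest"}
#   exec(compile(open(path_to_this_file).read(), "bootstrap.py", "exec"), ns)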
def load_file_as_module(name):
path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name)
if sys.version_info >= (3, 3):
from importlib import machinery
mod = machinery.SourceFileLoader(name, path).load_module()
else:
import imp
mod = imp.load_source(name, path)
return mod
if to_bootstrap == "pytest":
sys.modules["alembic_plugin_base"] = load_file_as_module("plugin_base")
sys.modules["alembic_pytestplugin"] = load_file_as_module("pytestplugin")
elif to_bootstrap == "nose":
sys.modules["alembic_plugin_base"] = load_file_as_module("plugin_base")
sys.modules["alembic_noseplugin"] = load_file_as_module("noseplugin")
else:
raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa
| bsd-2-clause | 5,062,535,874,411,646,000 | 36.409091 | 77 | 0.735723 | false |
tgsd96/gargnotes | venv/lib/python2.7/site-packages/django/contrib/admin/utils.py | 62 | 16223 | from __future__ import unicode_literals
import datetime
import decimal
from django.contrib.auth import get_permission_codename
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils import six
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse, NoReverseMatch
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if hasattr(field, 'get_path_info') and any(path.m2m for path in field.get_path_info()):
return True
return False
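# Illustrative behaviour (hypothetical models): a lookup path that traverses a
# many-to-many relation, e.g. 'authors__name' on a Book changelist, returns
# True because the join can duplicate rows; a plain column lookup such as
# 'title' returns False.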
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
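# Illustrative transformations:
#   prepare_lookup_value('id__in', '1,2,3')       -> ['1', '2', '3']
#   prepare_lookup_value('date__isnull', 'false') -> False
#   prepare_lookup_value('date__isnull', '1')     -> True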
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
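# Illustrative call: flatten(['title', ('author', 'pub_date'), 'slug'])
# returns ['title', 'author', 'pub_date', 'slug']; only one level of nesting
# is flattened, by design.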
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{0}: <a href="{1}">{2}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
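    # Illustrative shape (hypothetical instances): collecting a blog that has
    # two posts yields [blog, [post1, post2]], the nested-list form expected
    # by the ``unordered_list`` template filter.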
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
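# Illustrative use (hypothetical queryset): model_ngettext(Article.objects.all())
# returns the verbose_name for a one-row queryset and verbose_name_plural
# otherwise.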
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
object's attribute, as well as a genuine fields. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = model._meta.get_field_by_name(name)[0]
try:
label = field.verbose_name
except AttributeError:
# field is likely a RelatedObject
label = field.opts.verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
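# Illustrative resolution (hypothetical model): label_for_field('title', Book)
# returns the field's verbose_name, while an unnamed lambda falls back to the
# '--' placeholder as coded above.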
def help_text_for_field(name, model):
help_text = ""
try:
field_data = model._meta.get_field_by_name(name)
except models.FieldDoesNotExist:
pass
else:
field = field_data[0]
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field, model, direct, m2m = parent._meta.get_field_by_name(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
if direct:
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field_by_name(piece)[0])
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a ``limit_choices_to`` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
get_limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'get_limit_choices_to', None))
if not get_limit_choices_to:
return models.Q() # empty Q
limit_choices_to = get_limit_choices_to()
if isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
| mit | -4,132,736,029,291,805,000 | 32.312115 | 94 | 0.591198 | false |
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/flask/signals.py | 123 | 2209 | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False
try:
from blinker import Namespace
signals_available = True
except ImportError:
class Namespace(object):
def signal(self, name, doc=None):
return _FakeSignal(name, doc)
class _FakeSignal(object):
"""If blinker is unavailable, create a fake class with the same
interface that allows sending of signals but will fail with an
error on anything else. Instead of doing anything on send, it
will just ignore the arguments and do nothing instead.
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
def _fail(self, *args, **kwargs):
raise RuntimeError('signalling support is unavailable '
'because the blinker library is '
'not installed.')
send = lambda *a, **kw: None
connect = disconnect = has_receivers_for = receivers_for = \
temporarily_connected_to = connected_to = _fail
del _fail
# The namespace for code signals. If you are not Flask code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()
# Core signals. For usage examples grep the source code or consult
# the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
before_render_template = _signals.signal('before-render-template')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
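# Hedged usage sketch (requires blinker; the handler and app names below are
# illustrative, not defined in this module):
#
#   def log_template(sender, template, context, **extra):
#       sender.logger.debug('Rendered %s', template.name)
#
#   template_rendered.connect(log_template, app)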
| mit | 7,958,880,112,585,410,000 | 38.446429 | 71 | 0.665912 | false |
CoolCloud/aliyun-cli | aliyuncli/advance/userConfigHandler.py | 11 | 1850 | __author__ = 'zhaoyang.szy'
import os,sys
import response
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import aliyunExtensionCliHandler
class ConfigCmd:
showConfig = 'showConfig'
importConfig = 'importConfig'
exportConfig = 'exportConfig'
name = '--filename'
class ConfigHandler:
def __init__(self):
self.extensionCliHandler = aliyunExtensionCliHandler.aliyunExtensionCliHandler()
def getConfigHandlerCmd(self):
return [ConfigCmd.showConfig,ConfigCmd.importConfig,ConfigCmd.exportConfig]
def getConfigHandlerOptions(self):
return [ConfigCmd.name]
def showConfig(self):
_credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials)
_configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure)
config = dict()
configContent = dict()
		credentialsContent = dict()
if os.path.exists(_configurePath):
for line in open(_configurePath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
configContent[list[0]] = list[1]
else:
pass
config['configure'] = configContent
if os.path.exists(_credentialsPath):
for line in open(_credentialsPath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
credentialsContent[list[0]] = list[1]
else:
pass
config ['credentials'] = credentialsContent
response.display_response("showConfigure",config,'table')
	def importConfig(self):
		pass
	def exportConfig(self):
		pass
if __name__ == "__main__":
handler = ConfigHandler()
handler.showConfig()
| apache-2.0 | 114,488,224,226,711,200 | 32.636364 | 122 | 0.648649 | false |
ksachs/invenio | modules/bibformat/lib/elements/bfe_url.py | 39 | 1484 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints full-text URLs
"""
__revision__ = "$Id$"
def format_element(bfo, style, separator='; '):
"""
This is the default format for formatting full-text URLs.
@param separator: the separator between urls.
@param style: CSS class of the link
"""
urls_u = bfo.fields("8564_u")
    if style != "":
        style = 'class="' + style + '" '
    urls = ['<a ' + style + 'href="' + url + '">' + url + '</a>'
            for url in urls_u]
return separator.join(urls)
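# Illustrative output for a record with two 8564_u values and style='note'
# (hypothetical URLs):
#   <a class="note" href="http://a.example">http://a.example</a>;
#   <a class="note" href="http://b.example">http://b.example</a>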
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 | -105,602,046,535,711,300 | 32.727273 | 75 | 0.657008 | false |
bright-sparks/chromium-spacewalk | build/win/reorder-imports.py | 103 | 1807 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os
import shutil
import subprocess
import sys
def reorder_imports(input_dir, output_dir, architecture):
"""Run swapimports.exe on the initial chrome.exe, and write to the output
directory. Also copy over any related files that might be needed
(pdbs, manifests etc.).
"""
input_image = os.path.join(input_dir, 'chrome.exe')
output_image = os.path.join(output_dir, 'chrome.exe')
swap_exe = os.path.join(
__file__,
'..\\..\\..\\third_party\\syzygy\\binaries\\exe\\swapimport.exe')
args = [swap_exe, '--input-image=%s' % input_image,
'--output-image=%s' % output_image, '--overwrite', '--no-logo']
  if architecture == 'x64':
    args.append('--x64')
  args.append('chrome_elf.dll')
subprocess.call(args)
for fname in glob.iglob(os.path.join(input_dir, 'chrome.exe.*')):
shutil.copy(fname, os.path.join(output_dir, os.path.basename(fname)))
return 0
def main(argv):
usage = 'reorder_imports.py -i <input_dir> -o <output_dir> -a <target_arch>'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-i', '--input', help='reorder chrome.exe in DIR',
metavar='DIR')
parser.add_option('-o', '--output', help='write new chrome.exe to DIR',
metavar='DIR')
parser.add_option('-a', '--arch', help='architecture of build (optional)',
default='ia32')
opts, args = parser.parse_args()
if not opts.input or not opts.output:
parser.error('Please provide and input and output directory')
return reorder_imports(opts.input, opts.output, opts.arch)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -5,671,701,774,612,172,000 | 30.701754 | 78 | 0.672939 | false |
ChronoMonochrome/android_external_chromium_org | native_client_sdk/src/build_tools/verify_ppapi.py | 62 | 5922 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script for PPAPI's PRESUBMIT.py to detect if additions or removals of
PPAPI interfaces have been propagated to the Native Client libraries (.dsc
files).
For example, if a user adds "ppapi/c/foo.h", we check that the interface has
been added to "native_client_sdk/src/libraries/ppapi/library.dsc".
"""
import optparse
import os
import sys
from build_paths import PPAPI_DIR, SRC_DIR, SDK_LIBRARY_DIR
import parse_dsc
class VerifyException(Exception):
def __init__(self, lib_path, expected, unexpected):
self.expected = expected
self.unexpected = unexpected
msg = 'In %s:\n' % lib_path
if expected:
msg += ' these files are missing and should be added:\n'
for filename in sorted(expected):
msg += ' %s\n' % filename
if unexpected:
msg += ' these files no longer exist and should be removed:\n'
for filename in sorted(unexpected):
msg += ' %s\n' % filename
Exception.__init__(self, msg)
def PartitionFiles(filenames):
c_filenames = set()
cpp_filenames = set()
private_filenames = set()
for filename in filenames:
if os.path.splitext(filename)[1] not in ('.cc', '.h'):
continue
parts = filename.split(os.sep)
if 'private' in filename:
if 'flash' in filename:
continue
private_filenames.add(filename)
elif parts[0:2] == ['ppapi', 'c']:
      if len(parts) > 2 and parts[2] in ('documentation', 'trusted'):
continue
c_filenames.add(filename)
elif (parts[0:2] == ['ppapi', 'cpp'] or
parts[0:2] == ['ppapi', 'utility']):
      if len(parts) > 2 and parts[2] in ('documentation', 'trusted'):
continue
cpp_filenames.add(filename)
else:
continue
return {
'ppapi': c_filenames,
'ppapi_cpp': cpp_filenames,
'ppapi_cpp_private': private_filenames
}
def GetDirectoryList(directory_path, relative_to):
result = []
for root, _, files in os.walk(directory_path):
rel_root = os.path.relpath(root, relative_to)
if rel_root == '.':
rel_root = ''
for base_name in files:
result.append(os.path.join(rel_root, base_name))
return result
def GetDscSourcesAndHeaders(dsc):
result = []
for headers_info in dsc.get('HEADERS', []):
result.extend(headers_info['FILES'])
for targets_info in dsc.get('TARGETS', []):
result.extend(targets_info['SOURCES'])
return result
def GetChangedAndRemovedFilenames(modified_filenames, directory_list):
changed = set()
removed = set()
directory_list_set = set(directory_list)
for filename in modified_filenames:
if filename in directory_list_set:
# We can't know if a file was added (that would require knowing the
# previous state of the working directory). Instead, we assume that a
# changed file may have been added, and check it accordingly.
changed.add(filename)
else:
removed.add(filename)
return changed, removed
def GetDscFilenameFromLibraryName(lib_name):
return os.path.join(SDK_LIBRARY_DIR, lib_name, 'library.dsc')
def Verify(dsc_filename, dsc_sources_and_headers, changed_filenames,
removed_filenames):
expected_filenames = set()
unexpected_filenames = set()
for filename in changed_filenames:
basename = os.path.basename(filename)
if basename not in dsc_sources_and_headers:
expected_filenames.add(filename)
for filename in removed_filenames:
basename = os.path.basename(filename)
if basename in dsc_sources_and_headers:
unexpected_filenames.add(filename)
if expected_filenames or unexpected_filenames:
raise VerifyException(dsc_filename, expected_filenames,
unexpected_filenames)
def VerifyOrPrintError(dsc_filename, dsc_sources_and_headers, changed_filenames,
removed_filenames, is_private=False):
try:
Verify(dsc_filename, dsc_sources_and_headers, changed_filenames,
removed_filenames)
except VerifyException as e:
should_fail = True
if is_private and e.expected:
# For ppapi_cpp_private, we don't fail if there are expected filenames...
# we may not want to include them. We still want to fail if there are
# unexpected filenames, though.
sys.stderr.write('>>> WARNING: private interface files changed. '
'Should they be added to the Native Client SDK? <<<\n')
if not e.unexpected:
should_fail = False
sys.stderr.write(str(e) + '\n')
if should_fail:
return False
return True
def main(args):
usage = '%prog <file>...'
description = __doc__
parser = optparse.OptionParser(usage=usage, description=description)
args = parser.parse_args(args)[1]
if not args:
parser.error('Expected a PPAPI header or source file.')
retval = 0
lib_files = PartitionFiles(args)
directory_list = GetDirectoryList(PPAPI_DIR, relative_to=SRC_DIR)
for lib_name, filenames in lib_files.iteritems():
if not filenames:
continue
changed_filenames, removed_filenames = \
GetChangedAndRemovedFilenames(filenames, directory_list)
dsc_filename = GetDscFilenameFromLibraryName(lib_name)
dsc = parse_dsc.LoadProject(dsc_filename)
dsc_sources_and_headers = GetDscSourcesAndHeaders(dsc)
# Use the relative path to the .dsc to make the error messages shorter.
rel_dsc_filename = os.path.relpath(dsc_filename, SRC_DIR)
is_private = lib_name == 'ppapi_cpp_private'
if not VerifyOrPrintError(rel_dsc_filename, dsc_sources_and_headers,
changed_filenames, removed_filenames,
is_private=is_private):
retval = 1
return retval
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 8,122,821,297,286,448,000 | 30.83871 | 80 | 0.668693 | false |
jtimberman/omnibus | source/libxml2-2.7.7/python/generator.py | 24 | 47525 | #!/usr/bin/python -u
#
# generate python wrappers from the XML API description
#
functions = {}
enums = {} # { enumType: { enumConstant: enumValue } }
import os
import sys
import string
if __name__ == "__main__":
# launched as a script
srcPref = os.path.dirname(sys.argv[0])
else:
# imported
srcPref = os.path.dirname(__file__)
#######################################################################
#
# This part is purely the API acquisition phase from the
# XML API description
#
#######################################################################
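#
# Illustrative shape of the input consumed below (a hedged guess at the
# relevant subset of libxml2-api.xml; the attribute names match what
# docParser reads):
#
#   <function name='xmlParseFile' file='parser'>
#     <info>parse an XML file and build a tree</info>
#     <return type='xmlDocPtr' info='the resulting document tree'/>
#     <arg name='filename' type='const char *' info='the filename'/>
#   </function>
#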
import os
import xml.sax
debug = 0
def getparser():
    # Attach the parser to an unmarshalling object; return both objects.
target = docParser()
parser = xml.sax.make_parser()
parser.setContentHandler(target)
return parser, target
class docParser(xml.sax.handler.ContentHandler):
def __init__(self):
self._methodname = None
self._data = []
self.in_function = 0
self.startElement = self.start
self.endElement = self.end
self.characters = self.data
def close(self):
if debug:
print "close"
def getmethodname(self):
return self._methodname
def data(self, text):
if debug:
print "data %s" % text
self._data.append(text)
def start(self, tag, attrs):
if debug:
print "start %s, %s" % (tag, attrs)
if tag == 'function':
self._data = []
self.in_function = 1
self.function = None
self.function_cond = None
self.function_args = []
self.function_descr = None
self.function_return = None
self.function_file = None
if attrs.has_key('name'):
self.function = attrs['name']
if attrs.has_key('file'):
self.function_file = attrs['file']
elif tag == 'cond':
self._data = []
elif tag == 'info':
self._data = []
elif tag == 'arg':
if self.in_function == 1:
self.function_arg_name = None
self.function_arg_type = None
self.function_arg_info = None
if attrs.has_key('name'):
self.function_arg_name = attrs['name']
if attrs.has_key('type'):
self.function_arg_type = attrs['type']
if attrs.has_key('info'):
self.function_arg_info = attrs['info']
elif tag == 'return':
if self.in_function == 1:
self.function_return_type = None
self.function_return_info = None
self.function_return_field = None
if attrs.has_key('type'):
self.function_return_type = attrs['type']
if attrs.has_key('info'):
self.function_return_info = attrs['info']
if attrs.has_key('field'):
self.function_return_field = attrs['field']
elif tag == 'enum':
enum(attrs['type'],attrs['name'],attrs['value'])
def end(self, tag):
if debug:
print "end %s" % tag
if tag == 'function':
if self.function != None:
function(self.function, self.function_descr,
self.function_return, self.function_args,
self.function_file, self.function_cond)
self.in_function = 0
elif tag == 'arg':
if self.in_function == 1:
self.function_args.append([self.function_arg_name,
self.function_arg_type,
self.function_arg_info])
elif tag == 'return':
if self.in_function == 1:
self.function_return = [self.function_return_type,
self.function_return_info,
self.function_return_field]
elif tag == 'info':
str = ''
for c in self._data:
str = str + c
if self.in_function == 1:
self.function_descr = str
elif tag == 'cond':
str = ''
for c in self._data:
str = str + c
if self.in_function == 1:
self.function_cond = str
def function(name, desc, ret, args, file, cond):
functions[name] = (desc, ret, args, file, cond)
def enum(type, name, value):
if not enums.has_key(type):
enums[type] = {}
enums[type][name] = value
#######################################################################
#
# Some filtering rules to drop functions/types which should not
# be exposed as-is on the Python interface
#
#######################################################################
skipped_modules = {
'xmlmemory': None,
'DOCBparser': None,
'SAX': None,
'hash': None,
'list': None,
'threads': None,
# 'xpointer': None,
}
skipped_types = {
'int *': "usually a return type",
'xmlSAXHandlerPtr': "not the proper interface for SAX",
'htmlSAXHandlerPtr': "not the proper interface for SAX",
'xmlRMutexPtr': "thread specific, skipped",
'xmlMutexPtr': "thread specific, skipped",
'xmlGlobalStatePtr': "thread specific, skipped",
'xmlListPtr': "internal representation not suitable for python",
'xmlBufferPtr': "internal representation not suitable for python",
'FILE *': None,
}
#######################################################################
#
# Table of remapping to/from the python type or class to the C
# counterpart.
#
#######################################################################
py_types = {
'void': (None, None, None, None),
'int': ('i', None, "int", "int"),
'long': ('l', None, "long", "long"),
'double': ('d', None, "double", "double"),
'unsigned int': ('i', None, "int", "int"),
'xmlChar': ('c', None, "int", "int"),
'unsigned char *': ('z', None, "charPtr", "char *"),
'char *': ('z', None, "charPtr", "char *"),
'const char *': ('z', None, "charPtrConst", "const char *"),
'xmlChar *': ('z', None, "xmlCharPtr", "xmlChar *"),
'const xmlChar *': ('z', None, "xmlCharPtrConst", "const xmlChar *"),
'xmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlDtdPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlDtdPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlDtd *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlDtd *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlAttrPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlAttrPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlAttr *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlAttr *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlEntityPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlEntityPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlEntity *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlEntity *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlElementPtr': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'const xmlElementPtr': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'xmlElement *': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'const xmlElement *': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'xmlAttributePtr': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'const xmlAttributePtr': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'xmlAttribute *': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'const xmlAttribute *': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'xmlNsPtr': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'const xmlNsPtr': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'xmlNs *': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'const xmlNs *': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'xmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const xmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'xmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const xmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const htmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const htmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const htmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'htmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const htmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlXPathContextPtr': ('O', "xmlXPathContext", "xmlXPathContextPtr", "xmlXPathContextPtr"),
'xmlXPathContext *': ('O', "xpathContext", "xmlXPathContextPtr", "xmlXPathContextPtr"),
'xmlXPathParserContextPtr': ('O', "xmlXPathParserContext", "xmlXPathParserContextPtr", "xmlXPathParserContextPtr"),
'xmlParserCtxtPtr': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'xmlParserCtxt *': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'htmlParserCtxtPtr': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'htmlParserCtxt *': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'xmlValidCtxtPtr': ('O', "ValidCtxt", "xmlValidCtxtPtr", "xmlValidCtxtPtr"),
'xmlCatalogPtr': ('O', "catalog", "xmlCatalogPtr", "xmlCatalogPtr"),
'FILE *': ('O', "File", "FILEPtr", "FILE *"),
'xmlURIPtr': ('O', "URI", "xmlURIPtr", "xmlURIPtr"),
'xmlErrorPtr': ('O', "Error", "xmlErrorPtr", "xmlErrorPtr"),
'xmlOutputBufferPtr': ('O', "outputBuffer", "xmlOutputBufferPtr", "xmlOutputBufferPtr"),
'xmlParserInputBufferPtr': ('O', "inputBuffer", "xmlParserInputBufferPtr", "xmlParserInputBufferPtr"),
'xmlRegexpPtr': ('O', "xmlReg", "xmlRegexpPtr", "xmlRegexpPtr"),
'xmlTextReaderLocatorPtr': ('O', "xmlTextReaderLocator", "xmlTextReaderLocatorPtr", "xmlTextReaderLocatorPtr"),
'xmlTextReaderPtr': ('O', "xmlTextReader", "xmlTextReaderPtr", "xmlTextReaderPtr"),
'xmlRelaxNGPtr': ('O', "relaxNgSchema", "xmlRelaxNGPtr", "xmlRelaxNGPtr"),
'xmlRelaxNGParserCtxtPtr': ('O', "relaxNgParserCtxt", "xmlRelaxNGParserCtxtPtr", "xmlRelaxNGParserCtxtPtr"),
'xmlRelaxNGValidCtxtPtr': ('O', "relaxNgValidCtxt", "xmlRelaxNGValidCtxtPtr", "xmlRelaxNGValidCtxtPtr"),
'xmlSchemaPtr': ('O', "Schema", "xmlSchemaPtr", "xmlSchemaPtr"),
'xmlSchemaParserCtxtPtr': ('O', "SchemaParserCtxt", "xmlSchemaParserCtxtPtr", "xmlSchemaParserCtxtPtr"),
'xmlSchemaValidCtxtPtr': ('O', "SchemaValidCtxt", "xmlSchemaValidCtxtPtr", "xmlSchemaValidCtxtPtr"),
}
py_return_types = {
'xmlXPathObjectPtr': ('O', "foo", "xmlXPathObjectPtr", "xmlXPathObjectPtr"),
}
unknown_types = {}
foreign_encoding_args = (
'htmlCreateMemoryParserCtxt',
'htmlCtxtReadMemory',
'htmlParseChunk',
'htmlReadMemory',
'xmlCreateMemoryParserCtxt',
'xmlCtxtReadMemory',
'xmlCtxtResetPush',
'xmlParseChunk',
'xmlParseMemory',
'xmlReadMemory',
'xmlRecoverMemory',
)
#######################################################################
#
# This part writes the C <-> Python stubs libxml2-py.[ch] and
# the table libxml2-export.c to add when registering the Python module
#
#######################################################################
# Class methods which are written by hand in libxml.c but the Python-level
# code is still automatically generated (so they are not in skip_function()).
skip_impl = (
'xmlSaveFileTo',
'xmlSaveFormatFileTo',
)
def skip_function(name):
if name[0:12] == "xmlXPathWrap":
return 1
if name == "xmlFreeParserCtxt":
return 1
if name == "xmlCleanupParser":
return 1
if name == "xmlFreeTextReader":
return 1
# if name[0:11] == "xmlXPathNew":
# return 1
# the next function is defined in libxml.c
if name == "xmlRelaxNGFreeValidCtxt":
return 1
if name == "xmlFreeValidCtxt":
return 1
if name == "xmlSchemaFreeValidCtxt":
return 1
#
# Those are skipped because the Const version is used of the bindings
# instead.
#
if name == "xmlTextReaderBaseUri":
return 1
if name == "xmlTextReaderLocalName":
return 1
if name == "xmlTextReaderName":
return 1
if name == "xmlTextReaderNamespaceUri":
return 1
if name == "xmlTextReaderPrefix":
return 1
if name == "xmlTextReaderXmlLang":
return 1
if name == "xmlTextReaderValue":
return 1
if name == "xmlOutputBufferClose": # handled by by the superclass
return 1
if name == "xmlOutputBufferFlush": # handled by by the superclass
return 1
if name == "xmlErrMemory":
return 1
if name == "xmlValidBuildContentModel":
return 1
if name == "xmlValidateElementDecl":
return 1
if name == "xmlValidateAttributeDecl":
return 1
return 0
def print_function_wrapper(name, output, export, include):
global py_types
global unknown_types
global functions
global skipped_modules
try:
(desc, ret, args, file, cond) = functions[name]
except:
print "failed to get function %s infos"
return
if skipped_modules.has_key(file):
return 0
if skip_function(name) == 1:
return 0
if name in skip_impl:
# Don't delete the function entry in the caller.
return 1
c_call = ""
format=""
format_args=""
c_args=""
c_return=""
c_convert=""
num_bufs=0
for arg in args:
        # Strip a leading "const " qualifier; the bindings treat const and
        # non-const arguments alike
if arg[1][0:6] == "const ":
arg[1] = arg[1][6:]
c_args = c_args + " %s %s;\n" % (arg[1], arg[0])
if py_types.has_key(arg[1]):
(f, t, n, c) = py_types[arg[1]]
if (f == 'z') and (name in foreign_encoding_args) and (num_bufs == 0):
f = 't#'
if f != None:
format = format + f
if t != None:
format_args = format_args + ", &pyobj_%s" % (arg[0])
c_args = c_args + " PyObject *pyobj_%s;\n" % (arg[0])
c_convert = c_convert + \
" %s = (%s) Py%s_Get(pyobj_%s);\n" % (arg[0],
arg[1], t, arg[0])
else:
format_args = format_args + ", &%s" % (arg[0])
if f == 't#':
format_args = format_args + ", &py_buffsize%d" % num_bufs
c_args = c_args + " int py_buffsize%d;\n" % num_bufs
num_bufs = num_bufs + 1
if c_call != "":
c_call = c_call + ", "
c_call = c_call + "%s" % (arg[0])
else:
if skipped_types.has_key(arg[1]):
return 0
if unknown_types.has_key(arg[1]):
lst = unknown_types[arg[1]]
lst.append(name)
else:
unknown_types[arg[1]] = [name]
return -1
if format != "":
format = format + ":%s" % (name)
if ret[0] == 'void':
if file == "python_accessor":
if args[1][1] == "char *" or args[1][1] == "xmlChar *":
c_call = "\n if (%s->%s != NULL) xmlFree(%s->%s);\n" % (
args[0][0], args[1][0], args[0][0], args[1][0])
c_call = c_call + " %s->%s = (%s)xmlStrdup((const xmlChar *)%s);\n" % (args[0][0],
args[1][0], args[1][1], args[1][0])
else:
c_call = "\n %s->%s = %s;\n" % (args[0][0], args[1][0],
args[1][0])
else:
c_call = "\n %s(%s);\n" % (name, c_call)
ret_convert = " Py_INCREF(Py_None);\n return(Py_None);\n"
elif py_types.has_key(ret[0]):
(f, t, n, c) = py_types[ret[0]]
c_return = " %s c_retval;\n" % (ret[0])
if file == "python_accessor" and ret[2] != None:
c_call = "\n c_retval = %s->%s;\n" % (args[0][0], ret[2])
else:
c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
ret_convert = " py_retval = libxml_%sWrap((%s) c_retval);\n" % (n,c)
ret_convert = ret_convert + " return(py_retval);\n"
elif py_return_types.has_key(ret[0]):
(f, t, n, c) = py_return_types[ret[0]]
c_return = " %s c_retval;\n" % (ret[0])
c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
ret_convert = " py_retval = libxml_%sWrap((%s) c_retval);\n" % (n,c)
ret_convert = ret_convert + " return(py_retval);\n"
else:
if skipped_types.has_key(ret[0]):
return 0
if unknown_types.has_key(ret[0]):
lst = unknown_types[ret[0]]
lst.append(name)
else:
unknown_types[ret[0]] = [name]
return -1
if cond != None and cond != "":
include.write("#if %s\n" % cond)
export.write("#if %s\n" % cond)
output.write("#if %s\n" % cond)
include.write("PyObject * ")
include.write("libxml_%s(PyObject *self, PyObject *args);\n" % (name))
export.write(" { (char *)\"%s\", libxml_%s, METH_VARARGS, NULL },\n" %
(name, name))
if file == "python":
# Those have been manually generated
if cond != None and cond != "":
include.write("#endif\n")
export.write("#endif\n")
output.write("#endif\n")
return 1
if file == "python_accessor" and ret[0] != "void" and ret[2] is None:
# Those have been manually generated
if cond != None and cond != "":
include.write("#endif\n")
export.write("#endif\n")
output.write("#endif\n")
return 1
output.write("PyObject *\n")
output.write("libxml_%s(PyObject *self ATTRIBUTE_UNUSED," % (name))
output.write(" PyObject *args")
if format == "":
output.write(" ATTRIBUTE_UNUSED")
output.write(") {\n")
if ret[0] != 'void':
output.write(" PyObject *py_retval;\n")
if c_return != "":
output.write(c_return)
if c_args != "":
output.write(c_args)
if format != "":
output.write("\n if (!PyArg_ParseTuple(args, (char *)\"%s\"%s))\n" %
(format, format_args))
output.write(" return(NULL);\n")
if c_convert != "":
output.write(c_convert)
output.write(c_call)
output.write(ret_convert)
output.write("}\n\n")
if cond != None and cond != "":
include.write("#endif /* %s */\n" % cond)
export.write("#endif /* %s */\n" % cond)
output.write("#endif /* %s */\n" % cond)
return 1
def buildStubs():
global py_types
global py_return_types
global unknown_types
try:
f = open(os.path.join(srcPref,"libxml2-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError, msg:
try:
f = open(os.path.join(srcPref,"..","doc","libxml2-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError, msg:
            print "libxml2-api.xml:", msg
sys.exit(1)
n = len(functions.keys())
print "Found %d functions in libxml2-api.xml" % (n)
py_types['pythonObject'] = ('O', "pythonObject", "pythonObject", "pythonObject")
try:
f = open(os.path.join(srcPref,"libxml2-python-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError, msg:
        print "libxml2-python-api.xml:", msg
print "Found %d functions in libxml2-python-api.xml" % (
len(functions.keys()) - n)
nb_wrap = 0
failed = 0
skipped = 0
include = open("libxml2-py.h", "w")
include.write("/* Generated */\n\n")
export = open("libxml2-export.c", "w")
export.write("/* Generated */\n\n")
wrapper = open("libxml2-py.c", "w")
wrapper.write("/* Generated */\n\n")
wrapper.write("#include <Python.h>\n")
wrapper.write("#include <libxml/xmlversion.h>\n")
wrapper.write("#include <libxml/tree.h>\n")
wrapper.write("#include <libxml/xmlschemastypes.h>\n")
wrapper.write("#include \"libxml_wrap.h\"\n")
wrapper.write("#include \"libxml2-py.h\"\n\n")
for function in functions.keys():
ret = print_function_wrapper(function, wrapper, export, include)
if ret < 0:
failed = failed + 1
del functions[function]
if ret == 0:
skipped = skipped + 1
del functions[function]
if ret == 1:
nb_wrap = nb_wrap + 1
include.close()
export.close()
wrapper.close()
print "Generated %d wrapper functions, %d failed, %d skipped\n" % (nb_wrap,
failed, skipped)
print "Missing type converters: "
for type in unknown_types.keys():
print "%s:%d " % (type, len(unknown_types[type])),
print
#######################################################################
#
# This part writes part of the Python front-end classes based on
# mapping rules between types and classes and also based on function
# renaming to get consistent function names at the Python level
#
#######################################################################
#
# The type automatically remapped to generated classes
#
classes_type = {
"xmlNodePtr": ("._o", "xmlNode(_obj=%s)", "xmlNode"),
"xmlNode *": ("._o", "xmlNode(_obj=%s)", "xmlNode"),
"xmlDocPtr": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"xmlDocPtr *": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"htmlDocPtr": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"htmlxmlDocPtr *": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"xmlAttrPtr": ("._o", "xmlAttr(_obj=%s)", "xmlAttr"),
"xmlAttr *": ("._o", "xmlAttr(_obj=%s)", "xmlAttr"),
"xmlNsPtr": ("._o", "xmlNs(_obj=%s)", "xmlNs"),
"xmlNs *": ("._o", "xmlNs(_obj=%s)", "xmlNs"),
"xmlDtdPtr": ("._o", "xmlDtd(_obj=%s)", "xmlDtd"),
"xmlDtd *": ("._o", "xmlDtd(_obj=%s)", "xmlDtd"),
"xmlEntityPtr": ("._o", "xmlEntity(_obj=%s)", "xmlEntity"),
"xmlEntity *": ("._o", "xmlEntity(_obj=%s)", "xmlEntity"),
"xmlElementPtr": ("._o", "xmlElement(_obj=%s)", "xmlElement"),
"xmlElement *": ("._o", "xmlElement(_obj=%s)", "xmlElement"),
"xmlAttributePtr": ("._o", "xmlAttribute(_obj=%s)", "xmlAttribute"),
"xmlAttribute *": ("._o", "xmlAttribute(_obj=%s)", "xmlAttribute"),
"xmlXPathContextPtr": ("._o", "xpathContext(_obj=%s)", "xpathContext"),
"xmlXPathContext *": ("._o", "xpathContext(_obj=%s)", "xpathContext"),
"xmlXPathParserContext *": ("._o", "xpathParserContext(_obj=%s)", "xpathParserContext"),
"xmlXPathParserContextPtr": ("._o", "xpathParserContext(_obj=%s)", "xpathParserContext"),
"xmlParserCtxtPtr": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"xmlParserCtxt *": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"htmlParserCtxtPtr": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"htmlParserCtxt *": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"xmlValidCtxtPtr": ("._o", "ValidCtxt(_obj=%s)", "ValidCtxt"),
"xmlCatalogPtr": ("._o", "catalog(_obj=%s)", "catalog"),
"xmlURIPtr": ("._o", "URI(_obj=%s)", "URI"),
"xmlErrorPtr": ("._o", "Error(_obj=%s)", "Error"),
"xmlOutputBufferPtr": ("._o", "outputBuffer(_obj=%s)", "outputBuffer"),
"xmlParserInputBufferPtr": ("._o", "inputBuffer(_obj=%s)", "inputBuffer"),
"xmlRegexpPtr": ("._o", "xmlReg(_obj=%s)", "xmlReg"),
"xmlTextReaderLocatorPtr": ("._o", "xmlTextReaderLocator(_obj=%s)", "xmlTextReaderLocator"),
"xmlTextReaderPtr": ("._o", "xmlTextReader(_obj=%s)", "xmlTextReader"),
'xmlRelaxNGPtr': ('._o', "relaxNgSchema(_obj=%s)", "relaxNgSchema"),
'xmlRelaxNGParserCtxtPtr': ('._o', "relaxNgParserCtxt(_obj=%s)", "relaxNgParserCtxt"),
'xmlRelaxNGValidCtxtPtr': ('._o', "relaxNgValidCtxt(_obj=%s)", "relaxNgValidCtxt"),
'xmlSchemaPtr': ("._o", "Schema(_obj=%s)", "Schema"),
'xmlSchemaParserCtxtPtr': ("._o", "SchemaParserCtxt(_obj=%s)", "SchemaParserCtxt"),
'xmlSchemaValidCtxtPtr': ("._o", "SchemaValidCtxt(_obj=%s)", "SchemaValidCtxt"),
}
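#
# Illustrative note (not part of the original module; example assumed):
# given the mapping above, a wrapper for a C function returning xmlDocPtr
# is emitted roughly as
#     ret = libxml2mod.xmlParseFile(filename)
#     if ret is None:raise parserError('xmlParseFile() failed')
#     return xmlDoc(_obj=ret)
# i.e. the "%s" template is filled with the raw return value, while an
# xmlDoc argument is unwrapped through its "._o" accessor before the call
# into libxml2mod.
#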
converter_type = {
"xmlXPathObjectPtr": "xpathObjectRet(%s)",
}
primary_classes = ["xmlNode", "xmlDoc"]
classes_ancestor = {
"xmlNode" : "xmlCore",
"xmlDtd" : "xmlNode",
"xmlDoc" : "xmlNode",
"xmlAttr" : "xmlNode",
"xmlNs" : "xmlNode",
"xmlEntity" : "xmlNode",
"xmlElement" : "xmlNode",
"xmlAttribute" : "xmlNode",
"outputBuffer": "ioWriteWrapper",
"inputBuffer": "ioReadWrapper",
"parserCtxt": "parserCtxtCore",
"xmlTextReader": "xmlTextReaderCore",
"ValidCtxt": "ValidCtxtCore",
"SchemaValidCtxt": "SchemaValidCtxtCore",
"relaxNgValidCtxt": "relaxNgValidCtxtCore",
}
classes_destructors = {
"parserCtxt": "xmlFreeParserCtxt",
"catalog": "xmlFreeCatalog",
"URI": "xmlFreeURI",
# "outputBuffer": "xmlOutputBufferClose",
"inputBuffer": "xmlFreeParserInputBuffer",
"xmlReg": "xmlRegFreeRegexp",
"xmlTextReader": "xmlFreeTextReader",
"relaxNgSchema": "xmlRelaxNGFree",
"relaxNgParserCtxt": "xmlRelaxNGFreeParserCtxt",
"relaxNgValidCtxt": "xmlRelaxNGFreeValidCtxt",
"Schema": "xmlSchemaFree",
"SchemaParserCtxt": "xmlSchemaFreeParserCtxt",
"SchemaValidCtxt": "xmlSchemaFreeValidCtxt",
"ValidCtxt": "xmlFreeValidCtxt",
}
functions_noexcept = {
"xmlHasProp": 1,
"xmlHasNsProp": 1,
"xmlDocSetRootElement": 1,
"xmlNodeGetNs": 1,
"xmlNodeGetNsDefs": 1,
"xmlNextElementSibling": 1,
"xmlPreviousElementSibling": 1,
"xmlFirstElementChild": 1,
"xmlLastElementChild": 1,
}
reference_keepers = {
"xmlTextReader": [('inputBuffer', 'input')],
"relaxNgValidCtxt": [('relaxNgSchema', 'schema')],
"SchemaValidCtxt": [('Schema', 'schema')],
}
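#
# Illustrative note (not part of the original module): the entry for
# "xmlTextReader" above means that a generated inputBuffer method
# returning an xmlTextReader also emits
#     __tmp.input = self
# so the Python buffer object stays alive for as long as the reader holds
# it (see the __tmp handling in buildWrappers below).
#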
function_classes = {}
function_classes["None"] = []
def nameFixup(name, classe, type, file):
listname = classe + "List"
ll = len(listname)
l = len(classe)
    if name[0:ll] == listname:
func = name[l:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:12] == "xmlParserGet" and file == "python_accessor":
func = name[12:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:12] == "xmlParserSet" and file == "python_accessor":
func = name[12:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:10] == "xmlNodeGet" and file == "python_accessor":
func = name[10:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:9] == "xmlURIGet" and file == "python_accessor":
func = name[9:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:9] == "xmlURISet" and file == "python_accessor":
func = name[6:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:11] == "xmlErrorGet" and file == "python_accessor":
func = name[11:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:17] == "xmlXPathParserGet" and file == "python_accessor":
func = name[17:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:11] == "xmlXPathGet" and file == "python_accessor":
func = name[11:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:11] == "xmlXPathSet" and file == "python_accessor":
func = name[8:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:15] == "xmlOutputBuffer" and file != "python":
func = name[15:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:20] == "xmlParserInputBuffer" and file != "python":
func = name[20:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:9] == "xmlRegexp" and file == "xmlregexp":
func = "regexp" + name[9:]
elif name[0:6] == "xmlReg" and file == "xmlregexp":
func = "regexp" + name[6:]
elif name[0:20] == "xmlTextReaderLocator" and file == "xmlreader":
func = name[20:]
elif name[0:18] == "xmlTextReaderConst" and file == "xmlreader":
func = name[18:]
elif name[0:13] == "xmlTextReader" and file == "xmlreader":
func = name[13:]
elif name[0:12] == "xmlReaderNew" and file == "xmlreader":
func = name[9:]
elif name[0:11] == "xmlACatalog":
func = name[11:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:l] == classe:
func = name[l:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:7] == "libxml_":
func = name[7:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:6] == "xmlGet":
func = name[6:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:3] == "xml":
func = name[3:]
func = string.lower(func[0:1]) + func[1:]
else:
func = name
if func[0:5] == "xPath":
func = "xpath" + func[5:]
elif func[0:4] == "xPtr":
func = "xpointer" + func[4:]
elif func[0:8] == "xInclude":
func = "xinclude" + func[8:]
elif func[0:2] == "iD":
func = "ID" + func[2:]
elif func[0:3] == "uRI":
func = "URI" + func[3:]
elif func[0:4] == "uTF8":
func = "UTF8" + func[4:]
elif func[0:3] == 'sAX':
func = "SAX" + func[3:]
return func
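#
# Illustrative examples for nameFixup (assumed inputs, not in the module):
#   nameFixup("xmlDocGetRootElement", "xmlDoc", "xmlDocPtr", "tree")
#       -> "getRootElement"   (class prefix stripped, first letter lowered)
#   nameFixup("xmlURISetScheme", "URI", "xmlURIPtr", "python_accessor")
#       -> "setScheme"        (the "Set" is kept so accessors read naturally)
#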
def functionCompare(info1, info2):
(index1, func1, name1, ret1, args1, file1) = info1
(index2, func2, name2, ret2, args2, file2) = info2
if file1 == file2:
if func1 < func2:
return -1
if func1 > func2:
return 1
if file1 == "python_accessor":
return -1
if file2 == "python_accessor":
return 1
if file1 < file2:
return -1
if file1 > file2:
return 1
return 0
def writeDoc(name, args, indent, output):
if functions[name][0] is None or functions[name][0] == "":
return
val = functions[name][0]
val = string.replace(val, "NULL", "None")
output.write(indent)
output.write('"""')
while len(val) > 60:
if val[0] == " ":
val = val[1:]
continue
        chunk = val[0:60]
        i = string.rfind(chunk, " ")
        if i < 0:
            i = 60
        chunk = val[0:i]
        val = val[i:]
        output.write(chunk)
output.write('\n ')
output.write(indent)
output.write(val)
output.write(' """\n')
def buildWrappers():
global ctypes
global py_types
global py_return_types
global unknown_types
global functions
global function_classes
global classes_type
global classes_list
global converter_type
global primary_classes
    global classes_ancestor
global classes_destructors
global functions_noexcept
for type in classes_type.keys():
function_classes[classes_type[type][2]] = []
#
# Build the list of C types to look for ordered to start
# with primary classes
#
ctypes = []
classes_list = []
ctypes_processed = {}
classes_processed = {}
for classe in primary_classes:
classes_list.append(classe)
classes_processed[classe] = ()
for type in classes_type.keys():
tinfo = classes_type[type]
if tinfo[2] == classe:
ctypes.append(type)
ctypes_processed[type] = ()
for type in classes_type.keys():
if ctypes_processed.has_key(type):
continue
tinfo = classes_type[type]
if not classes_processed.has_key(tinfo[2]):
classes_list.append(tinfo[2])
classes_processed[tinfo[2]] = ()
ctypes.append(type)
ctypes_processed[type] = ()
for name in functions.keys():
found = 0
(desc, ret, args, file, cond) = functions[name]
for type in ctypes:
classe = classes_type[type][2]
if name[0:3] == "xml" and len(args) >= 1 and args[0][1] == type:
found = 1
func = nameFixup(name, classe, type, file)
info = (0, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:3] == "xml" and len(args) >= 2 and args[1][1] == type \
and file != "python_accessor":
found = 1
func = nameFixup(name, classe, type, file)
info = (1, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:4] == "html" and len(args) >= 1 and args[0][1] == type:
found = 1
func = nameFixup(name, classe, type, file)
info = (0, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:4] == "html" and len(args) >= 2 and args[1][1] == type \
and file != "python_accessor":
found = 1
func = nameFixup(name, classe, type, file)
info = (1, func, name, ret, args, file)
function_classes[classe].append(info)
if found == 1:
continue
if name[0:8] == "xmlXPath":
continue
if name[0:6] == "xmlStr":
continue
if name[0:10] == "xmlCharStr":
continue
func = nameFixup(name, "None", file, file)
info = (0, func, name, ret, args, file)
function_classes['None'].append(info)
classes = open("libxml2class.py", "w")
txt = open("libxml2class.txt", "w")
txt.write(" Generated Classes for libxml2-python\n\n")
txt.write("#\n# Global functions of the module\n#\n\n")
if function_classes.has_key("None"):
flist = function_classes["None"]
flist.sort(functionCompare)
oldfile = ""
for info in flist:
(index, func, name, ret, args, file) = info
if file != oldfile:
classes.write("#\n# Functions from module %s\n#\n\n" % file)
txt.write("\n# functions from module %s\n" % file)
oldfile = file
classes.write("def %s(" % func)
txt.write("%s()\n" % func)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
classes.write("%s" % arg[0])
n = n + 1
classes.write("):\n")
writeDoc(name, args, ' ', classes)
for arg in args:
if classes_type.has_key(arg[1]):
classes.write(" if %s is None: %s__o = None\n" %
(arg[0], arg[0]))
classes.write(" else: %s__o = %s%s\n" %
(arg[0], arg[0], classes_type[arg[1]][0]))
if ret[0] != "void":
classes.write(" ret = ")
else:
classes.write(" ")
classes.write("libxml2mod.%s(" % name)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
classes.write("%s" % arg[0])
if classes_type.has_key(arg[1]):
classes.write("__o")
n = n + 1
classes.write(")\n")
if ret[0] != "void":
if classes_type.has_key(ret[0]):
#
# Raise an exception
#
if functions_noexcept.has_key(name):
classes.write(" if ret is None:return None\n")
elif string.find(name, "URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif string.find(name, "XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif string.find(name, "Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
classes.write(" return ")
classes.write(classes_type[ret[0]][1] % ("ret"))
classes.write("\n")
else:
classes.write(" return ret\n")
classes.write("\n")
txt.write("\n\n#\n# Set of classes of the module\n#\n\n")
for classname in classes_list:
if classname == "None":
pass
else:
if classes_ancestor.has_key(classname):
txt.write("\n\nClass %s(%s)\n" % (classname,
classes_ancestor[classname]))
classes.write("class %s(%s):\n" % (classname,
classes_ancestor[classname]))
classes.write(" def __init__(self, _obj=None):\n")
if classes_ancestor[classname] == "xmlCore" or \
classes_ancestor[classname] == "xmlNode":
classes.write(" if type(_obj).__name__ != ")
classes.write("'PyCObject':\n")
classes.write(" raise TypeError, ")
classes.write("'%s needs a PyCObject argument'\n" % \
classname)
if reference_keepers.has_key(classname):
rlist = reference_keepers[classname]
for ref in rlist:
classes.write(" self.%s = None\n" % ref[1])
classes.write(" self._o = _obj\n")
classes.write(" %s.__init__(self, _obj=_obj)\n\n" % (
classes_ancestor[classname]))
if classes_ancestor[classname] == "xmlCore" or \
classes_ancestor[classname] == "xmlNode":
classes.write(" def __repr__(self):\n")
format = "<%s (%%s) object at 0x%%x>" % (classname)
classes.write(" return \"%s\" %% (self.name, long(pos_id (self)))\n\n" % (
format))
else:
txt.write("Class %s()\n" % (classname))
classes.write("class %s:\n" % (classname))
classes.write(" def __init__(self, _obj=None):\n")
if reference_keepers.has_key(classname):
                    rlist = reference_keepers[classname]
                    for ref in rlist:
classes.write(" self.%s = None\n" % ref[1])
classes.write(" if _obj != None:self._o = _obj;return\n")
classes.write(" self._o = None\n\n")
destruct=None
if classes_destructors.has_key(classname):
classes.write(" def __del__(self):\n")
classes.write(" if self._o != None:\n")
classes.write(" libxml2mod.%s(self._o)\n" %
classes_destructors[classname])
classes.write(" self._o = None\n\n")
destruct=classes_destructors[classname]
flist = function_classes[classname]
flist.sort(functionCompare)
oldfile = ""
for info in flist:
(index, func, name, ret, args, file) = info
#
                # Do not expose the class destructor as a method,
                # to avoid a double free
#
if name == destruct:
continue
if file != oldfile:
if file == "python_accessor":
classes.write(" # accessors for %s\n" % (classname))
txt.write(" # accessors\n")
else:
classes.write(" #\n")
classes.write(" # %s functions from module %s\n" % (
classname, file))
txt.write("\n # functions from module %s\n" % file)
classes.write(" #\n\n")
oldfile = file
classes.write(" def %s(self" % func)
txt.write(" %s()\n" % func)
n = 0
for arg in args:
if n != index:
classes.write(", %s" % arg[0])
n = n + 1
classes.write("):\n")
writeDoc(name, args, ' ', classes)
n = 0
for arg in args:
if classes_type.has_key(arg[1]):
if n != index:
classes.write(" if %s is None: %s__o = None\n" %
(arg[0], arg[0]))
classes.write(" else: %s__o = %s%s\n" %
(arg[0], arg[0], classes_type[arg[1]][0]))
n = n + 1
if ret[0] != "void":
classes.write(" ret = ")
else:
classes.write(" ")
classes.write("libxml2mod.%s(" % name)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
if n != index:
classes.write("%s" % arg[0])
if classes_type.has_key(arg[1]):
classes.write("__o")
else:
classes.write("self")
if classes_type.has_key(arg[1]):
classes.write(classes_type[arg[1]][0])
n = n + 1
classes.write(")\n")
if ret[0] != "void":
if classes_type.has_key(ret[0]):
#
# Raise an exception
#
if functions_noexcept.has_key(name):
classes.write(
" if ret is None:return None\n")
elif string.find(name, "URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif string.find(name, "XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif string.find(name, "Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
#
# generate the returned class wrapper for the object
#
classes.write(" __tmp = ")
classes.write(classes_type[ret[0]][1] % ("ret"))
classes.write("\n")
#
                        # Sometimes one needs to keep a reference to the
                        # source class in the returned class object.
                        # See reference_keepers for the list.
#
tclass = classes_type[ret[0]][2]
if reference_keepers.has_key(tclass):
                            rlist = reference_keepers[tclass]
                            for pref in rlist:
if pref[0] == classname:
classes.write(" __tmp.%s = self\n" %
pref[1])
#
# return the class
#
classes.write(" return __tmp\n")
elif converter_type.has_key(ret[0]):
#
# Raise an exception
#
if functions_noexcept.has_key(name):
classes.write(
" if ret is None:return None")
elif string.find(name, "URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif string.find(name, "XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif string.find(name, "Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
classes.write(" return ")
classes.write(converter_type[ret[0]] % ("ret"))
classes.write("\n")
else:
classes.write(" return ret\n")
classes.write("\n")
#
# Generate enum constants
#
for type,enum in enums.items():
classes.write("# %s\n" % type)
items = enum.items()
items.sort(lambda i1,i2: cmp(long(i1[1]),long(i2[1])))
for name,value in items:
classes.write("%s = %s\n" % (name,value))
classes.write("\n")
txt.close()
classes.close()
buildStubs()
buildWrappers()
| apache-2.0 | -3,586,902,407,920,603,000 | 39.037911 | 120 | 0.489742 | false |
alexthered/kienhoc-platform | common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py | 107 | 6601 | import logging
import dogstats_wrapper as dog_stats_api
from .grading_service_module import GradingService
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
class PeerGradingService(GradingService):
"""
Interface with the grading controller for peer grading
"""
METRIC_NAME = 'edxapp.open_ended_grading.peer_grading_service'
def __init__(self, config, render_template):
config['render_template'] = render_template
super(PeerGradingService, self).__init__(config)
self.url = config['url'] + config['peer_grading']
self.login_url = self.url + '/login/'
self.get_next_submission_url = self.url + '/get_next_submission/'
self.save_grade_url = self.url + '/save_grade/'
self.is_student_calibrated_url = self.url + '/is_student_calibrated/'
self.show_calibration_essay_url = self.url + '/show_calibration_essay/'
self.save_calibration_essay_url = self.url + '/save_calibration_essay/'
self.get_problem_list_url = self.url + '/get_problem_list/'
self.get_notifications_url = self.url + '/get_notifications/'
self.get_data_for_location_url = self.url + '/get_data_for_location/'
def get_data_for_location(self, problem_location, student_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'location': problem_location, 'student_id': student_id}
result = self.get(self.get_data_for_location_url, params)
self._record_result('get_data_for_location', result)
for key in result.keys():
if key in ('success', 'error', 'version'):
continue
dog_stats_api.histogram(
self._metric_name('get_data_for_location.{}'.format(key)),
result[key],
)
return result
def get_next_submission(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
result = self._render_rubric(self.get(
self.get_next_submission_url,
{
'location': problem_location,
'grader_id': grader_id
}
))
self._record_result('get_next_submission', result)
return result
def save_grade(self, **kwargs):
data = kwargs
data.update({'rubric_scores_complete': True})
result = self.post(self.save_grade_url, data)
self._record_result('save_grade', result)
return result
def is_student_calibrated(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'problem_id': problem_location, 'student_id': grader_id}
result = self.get(self.is_student_calibrated_url, params)
self._record_result(
'is_student_calibrated',
result,
tags=['calibrated:{}'.format(result.get('calibrated'))]
)
return result
def show_calibration_essay(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'problem_id': problem_location, 'student_id': grader_id}
result = self._render_rubric(self.get(self.show_calibration_essay_url, params))
self._record_result('show_calibration_essay', result)
return result
def save_calibration_essay(self, **kwargs):
data = kwargs
data.update({'rubric_scores_complete': True})
result = self.post(self.save_calibration_essay_url, data)
        self._record_result('save_calibration_essay', result)
return result
def get_problem_list(self, course_id, grader_id):
params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
result = self.get(self.get_problem_list_url, params)
if 'problem_list' in result:
for problem in result['problem_list']:
problem['location'] = course_id.make_usage_key_from_deprecated_string(problem['location'])
self._record_result('get_problem_list', result)
dog_stats_api.histogram(
self._metric_name('get_problem_list.result.length'),
len(result.get('problem_list', [])),
)
return result
def get_notifications(self, course_id, grader_id):
params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
result = self.get(self.get_notifications_url, params)
self._record_result(
'get_notifications',
result,
tags=['needs_to_peer_grade:{}'.format(result.get('student_needs_to_peer_grade'))]
)
return result
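# Illustrative usage sketch (assumed values; the 'url' and 'peer_grading'
# config keys are inferred from __init__ above, remaining GradingService
# settings omitted):
#     service = PeerGradingService(
#         {'url': 'http://ora.example.com',
#          'peer_grading': '/peer_grading', ...},
#         render_template)
#     service.get_notifications(course_id, 'student-1')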
class MockPeerGradingService(object):
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
"""
def get_next_submission(self, problem_location, grader_id):
return {
'success': True,
'submission_id': 1,
'submission_key': "",
'student_response': 'Sample student response.',
'prompt': 'Sample submission prompt.',
'rubric': 'Placeholder text for the full rubric.',
'max_score': 4
}
def save_grade(self, **kwargs):
return {'success': True}
def is_student_calibrated(self, problem_location, grader_id):
return {'success': True, 'calibrated': True}
def show_calibration_essay(self, problem_location, grader_id):
return {'success': True,
'submission_id': 1,
'submission_key': '',
'student_response': 'Sample student response.',
'prompt': 'Sample submission prompt.',
'rubric': 'Placeholder text for the full rubric.',
'max_score': 4}
def save_calibration_essay(self, **kwargs):
return {'success': True, 'actual_score': 2}
def get_problem_list(self, course_id, grader_id):
return {'success': True,
'problem_list': [
]}
def get_data_for_location(self, problem_location, student_id):
return {
"version": 1,
"count_graded": 3,
"count_required": 3,
"success": True,
"student_sub_count": 1,
'submissions_available': 0,
}
| agpl-3.0 | 5,814,696,213,533,288,000 | 38.291667 | 106 | 0.602636 | false |
kishikawakatsumi/Mozc-for-iOS | src/third_party/gyp/pylib/gyp/generator/android.py | 3 | 45146 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
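#
# For orientation (an illustrative sketch, not verbatim generator output):
# the top-level GypAndroid.mk produced by GenerateOutput below looks
# roughly like
#     LOCAL_PATH := $(call my-dir)
#     GYP_CONFIGURATION ?= Default
#     include $(LOCAL_PATH)/foo/foo.target.mk
# and every included per-target .mk starts with include $(CLEAR_VARS) and
# then sets LOCAL_MODULE and friends (see AndroidMkWriter.Write).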
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
generator_default_variables = {
'OS': 'android',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(RULE_SOURCES)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
# Boolean to declare that this target does not want its name mangled.
'android_unmangled_name',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
header = """\
# This file is generated by gyp; do not edit.
"""
android_standard_include_paths = set([
# JNI_H_INCLUDE in build/core/binary.mk
'dalvik/libnativehelper/include/nativehelper',
# from SRC_HEADERS in build/core/config.mk
'system/core/include',
'hardware/libhardware/include',
'hardware/libhardware_legacy/include',
'hardware/ril/include',
'dalvik/libnativehelper/include',
'frameworks/native/include',
'frameworks/native/opengl/include',
'frameworks/base/include',
'frameworks/base/opengl/include',
'frameworks/base/native/include',
'external/skia/include',
# TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
'bionic/libc/arch-arm/include',
'bionic/libc/include',
'bionic/libstdc++/include',
'bionic/libc/kernel/common',
'bionic/libc/kernel/arch-arm',
'bionic/libm/include',
'bionic/libm/include/arm',
'bionic/libthread_db/include',
])
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
'static_library': 'STATIC_LIBRARIES',
'shared_library': 'SHARED_LIBRARIES',
'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, android_top_dir):
self.android_top_dir = android_top_dir
def Write(self, qualified_target, relative_target, base_path, output_filename,
spec, configs, part_of_all, write_alias_target):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
relative_target: qualified target name relative to the root
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for
this target
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.relative_target = relative_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
self.WriteLn('LOCAL_MODULE_TAGS := optional')
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
else:
self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
'$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
# Grab output directories; needed for Actions and Rules.
if self.toolset == 'host':
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
else:
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
write_alias_target)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# Android's envsetup.sh adds a number of directories to the path including
# the built host binary directory. This causes actions/rules invoked by
# gyp to sometimes use these instead of system versions, e.g. bison.
# The built host binaries may not be suitable, and can cause errors.
# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
# set by envsetup.
self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
% main_output)
# Don't allow spaces in input/output filenames, but make an exception for
# filenames which start with '$(' since it's okay for there to be spaces
# inside of make function/macro invocations.
for input in inputs:
if not input.startswith('$(') and ' ' in input:
raise gyp.common.GypError(
'Action input filename "%s" in target %s contains a space' %
(input, self.target))
for output in outputs:
if not output.startswith('$(') and ' ' in output:
raise gyp.common.GypError(
'Action output filename "%s" in target %s contains a space' %
(output, self.target))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# See explanation in WriteActions.
self.WriteLn('%s: export PATH := '
'$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (output, main_output))
self.WriteLn()
self.WriteLn()
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
    extra_outputs: a list that will be filled in with any outputs of these
                   copies (used to make other pieces dependent on them)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.relative_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
for configname, config in sorted(configs.iteritems()):
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags', []) + config.get('cflags_c', []))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
prefix='-D', quoter=make.EscapeCppDefine)
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
'$(MY_DEFS_$(GYP_CONFIGURATION))')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host
# or target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
# Android uses separate flags for assembly file invocations, but gyp expects
# the same CFLAGS to be applied:
self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')
def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
    # directory as a header search path, since GCC searches for headers in
    # the directory that contains the source file by default.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
else:
suffix = '_gyp'
if self.path:
middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
else:
middle = make.StringToMakefileVariable(self.target)
return ''.join([prefix, middle, suffix])
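  # Illustrative (assumed inputs): a shared_library target "foo" defined
  # in path "base/bar" and built with the target toolset maps to the
  # module name "lib_base_bar_foo_gyp"; built for the host it instead gets
  # the "_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp" suffix.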
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
if self.type == 'executable':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = ('$(call intermediates-dir-for,%s,%s,true,,'
'$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
self.android_module))
else:
path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
% (self.android_class, self.android_module))
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
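  # Illustrative (assumed inputs): a host executable "foo" resolves to
  # '$(gyp_shared_intermediate_dir)/foo', while a target shared library
  # module "lib_base_bar_foo_gyp" resolves to
  # '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/lib_base_bar_foo_gyp.so'.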
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory;
filter out include paths that are already brought in by the Android build
system.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
# Filter out the Android standard search path.
if path not in android_standard_include_paths:
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
"""Compute the Android module names from libraries, ie spec.get('libraries')
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules)
"""
static_lib_modules = []
dynamic_lib_modules = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
# "-lstlport" -> libstlport
if lib.startswith('-l'):
if lib.endswith('_static'):
static_lib_modules.append('lib' + lib[2:])
else:
dynamic_lib_modules.append('lib' + lib[2:])
return (static_lib_modules, dynamic_lib_modules)
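  # Illustrative (assumed input): for
  #   libraries = ['-lstlport_static', '-llog', 'foo/libbar.a',
  #                'baz/libqux.so', '-lm']
  # this returns (['libstlport_static', 'libbar'], ['liblog', 'libqux']);
  # '-lm' is dropped because the Android build system links it by default.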
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
for configname, config in sorted(configs.iteritems()):
ldflags = list(config.get('ldflags', []))
self.WriteLn('')
self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))')
# Libraries (i.e. -lfoo)
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
libraries)
# Link dependencies (i.e. libfoo.a, libfoo.so)
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
self.WriteLn('')
self.WriteList(static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
self.WriteLn('')
self.WriteList(dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
write_alias_target):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for this
target
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all and write_alias_target:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
if self.target != self.android_module and write_alias_target:
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
# Executables are for build and test purposes only, so they're installed
# to a directory that doesn't get included in the system image.
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
if self.toolset == 'target':
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
else:
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
self.WriteLn()
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')
def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return os.path.normpath(path)
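  # Illustrative (assumed inputs): with template 'gen/%(INPUT_ROOT)s.cc',
  # expansion 'foo' and dirname 'a/b' this returns 'gen/foo.cc'; a
  # template containing neither placeholder is returned unchanged.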
def PerformBuild(data, configurations, params):
# The android backend only supports the default configuration.
options = params['options']
makefile = os.path.abspath(os.path.join(options.toplevel_dir,
'GypAndroid.mk'))
env = dict(os.environ)
env['ONE_SHOT_MAKEFILE'] = makefile
arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
print 'Building: %s' % arguments
subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
limit_to_target_all = generator_flags.get('limit_to_target_all', False)
write_alias_targets = generator_flags.get('write_alias_targets', True)
android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'GypAndroid' + options.suffix + '.mk'
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(header)
# We set LOCAL_PATH just once, here, to the top of the project tree. This
# allows all the other paths we use to be relative to the Android.mk file,
# as the Android build system expects.
root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
android_modules = {}
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
relative_build_file = gyp.common.RelativePath(build_file,
options.toplevel_dir)
build_files.add(relative_build_file)
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use an absolute
      # path so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
part_of_all = (qualified_target in needed_targets and
not int(spec.get('suppress_wildcard', False)))
if limit_to_target_all and not part_of_all:
continue
relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
toolset)
writer = AndroidMkWriter(android_top_dir)
android_module = writer.Write(qualified_target, relative_target, base_path,
output_file, spec, configs,
part_of_all=part_of_all,
write_alias_target=write_alias_targets)
if android_module in android_modules:
print ('ERROR: Android module names must be unique. The following '
'targets both generate Android module name %s.\n %s\n %s' %
(android_module, android_modules[android_module],
qualified_target))
return
android_modules[android_module] = qualified_target
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
root_makefile.write('GYP_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_MULTILIB ?=\n')
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
root_makefile.write('\n')
if write_alias_targets:
root_makefile.write(ALL_MODULES_FOOTER)
root_makefile.close()
| apache-2.0 | 1,361,851,607,193,693,200 | 40.116576 | 80 | 0.641452 | false |
bev-a-tron/pledgeservice | lib/stripe/error.py | 17 | 1339 | # Exceptions
class StripeError(Exception):
def __init__(self, message=None, http_body=None, http_status=None,
json_body=None):
super(StripeError, self).__init__(message)
if http_body and hasattr(http_body, 'decode'):
try:
http_body = http_body.decode('utf-8')
            except Exception:
http_body = ('<Could not decode body as utf-8. '
'Please report to [email protected]>')
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
class APIError(StripeError):
pass
class APIConnectionError(StripeError):
pass
class CardError(StripeError):
def __init__(self, message, param, code, http_body=None,
http_status=None, json_body=None):
super(CardError, self).__init__(message,
http_body, http_status, json_body)
self.param = param
self.code = code
class InvalidRequestError(StripeError):
def __init__(self, message, param, http_body=None,
http_status=None, json_body=None):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body)
self.param = param
class AuthenticationError(StripeError):
pass
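# Illustrative usage sketch (the charge call below is hypothetical; only the
# exception classes above are real): callers typically catch the most specific
# error first and inspect the structured attributes set in the constructors.
#
#   try:
#       charge = stripe.Charge.create(amount=1000, currency="usd", card=token)
#   except CardError as e:
#       handle_declined(e.param, e.code, e.http_status)
#   except InvalidRequestError as e:
#       handle_bad_params(e.param)
#   except APIConnectionError:
#       retry_later()  # network-level failure; the request may be retried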
| agpl-3.0 | -7,578,329,266,541,436,000 | 26.326531 | 74 | 0.57655 | false |
d40223223/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/optparse.py | 728 | 60616 | """A powerful, extensible, and easy-to-use option parser.
By Greg Ward <[email protected]>
Originally distributed as Optik.
For support, use the [email protected] mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext, ngettext
except ImportError:
def gettext(message):
return message
def ngettext(singular, plural, n):
if n == 1:
return singular
return plural
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
        by default. Set to a false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError("subclasses must implement")
def format_heading(self, heading):
raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
return _parse_num(val, int)
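# Worked examples (illustrative, not in the original module): _parse_num picks
# the radix from the literal's prefix, so each of these returns 26:
#   _parse_int("26")       # decimal
#   _parse_int("0x1a")     # hexadecimal
#   _parse_int("0b11010")  # binary ("0b" is stripped before conversion)
#   _parse_int("032")      # octal (leading zero)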
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_int, _("integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
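# Illustrative example (not in the original module): for an option declared
# with type="choice" and choices=["json", "xml"],
#   check_choice(option, "--format", "xml") returns "xml", while
#   check_choice(option, "--format", "csv") raises OptionValueError:
#   "option --format: invalid choice: 'csv' (choose from 'json', 'xml')"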
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = [opt for opt in opts if opt]
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = sorted(attrs.keys())
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of builtins is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import builtins
if ( isinstance(self.type, type) or
(hasattr(self.type, "__name__") and
getattr(builtins, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
not isinstance(self.callback_args, tuple)):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
not isinstance(self.callback_kwargs, dict)):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
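# Illustrative usage sketch (assumed option strings, not from the original
# module): Options are usually built via make_option()/add_option() rather
# than instantiated directly, e.g.
#   make_option("-f", "--file", dest="filename", metavar="FILE")
#   make_option("-v", action="count", dest="verbosity", default=0)
#   make_option("-I", action="append", dest="include_dirs")
# Keyword arguments must come from Option.ATTRS; anything else raises
# OptionError in _set_attrs().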
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __eq__(self, other):
if isinstance(other, Values):
return self.__dict__ == other.__dict__
elif isinstance(other, dict):
return self.__dict__ == other
else:
return NotImplemented
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError("invalid update mode: %r" % mode)
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
exec(open(filename).read(), vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
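# Illustrative example (not in the original module): Values is a simple
# attribute bag seeded from a defaults dict.
#   v = Values({"verbose": True})
#   v.verbose                        # -> True
#   v.ensure_value("count", 0)       # creates v.count = 0 and returns it
#   v == {"verbose": True, "count": 0}   # dict comparison works -> True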
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
        has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError("invalid conflict_resolution value %r" % handler)
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if isinstance(args[0], str):
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError("not an Option instance: %r" % option)
else:
raise TypeError("invalid arguments")
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
            if option.help is not SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if isinstance(args[0], str):
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError("not an OptionGroup instance: %r" % group)
if group.parser is not self:
raise ValueError("invalid OptionGroup (wrong parser)")
else:
raise TypeError("invalid arguments")
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
        (values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
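    # Illustrative trace (not in the original module): with -a a flag and -f
    # taking one value, the cluster "-affoo.txt" is consumed as opt "-a",
    # then opt "-f" whose remaining characters "foo.txt" are pushed back
    # onto rargs as its value; the scan stops at the first value-taking
    # option, so any later characters always belong to its argument.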
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print(self.get_version(), file=file)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
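# Illustrative example (not in the original module): with "--file" and
# "--filter" registered in wordmap,
#   _match_abbrev("--file", wordmap)  -> "--file"   (exact match wins)
#   _match_abbrev("--filt", wordmap)  -> "--filter" (unique prefix)
#   _match_abbrev("--fi", wordmap)    raises AmbiguousOptionError
#   _match_abbrev("--foo", wordmap)   raises BadOptionError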
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
| gpl-3.0 | 7,972,951,639,689,818,000 | 34.973887 | 79 | 0.566369 | false |
Saturn/livestreamer | src/livestreamer/stream/akamaihd.py | 37 | 7880 | import base64
import io
import hashlib
import hmac
import random
from .stream import Stream
from .wrappers import StreamIOThreadWrapper, StreamIOIterWrapper
from ..buffers import Buffer
from ..compat import str, bytes, urlparse
from ..exceptions import StreamError
from ..utils import swfdecompress
from ..packages.flashmedia import FLV, FLVError
from ..packages.flashmedia.tag import ScriptData
class TokenGenerator(object):
def __init__(self, stream):
self.stream = stream
def generate(self):
raise NotImplementedError
class Auth3TokenGenerator(TokenGenerator):
def generate(self):
if not self.stream.swf:
raise StreamError("A SWF URL is required to create session token")
res = self.stream.session.http.get(self.stream.swf,
exception=StreamError)
data = swfdecompress(res.content)
md5 = hashlib.md5()
md5.update(data)
data = bytes(self.stream.sessionid, "ascii") + md5.digest()
sig = hmac.new(b"foo", data, hashlib.sha1)
b64 = base64.encodestring(sig.digest())
token = str(b64, "ascii").replace("\n", "")
return token
def cache_bust_string(length):
rval = ""
for i in range(length):
rval += chr(65 + int(round(random.random() * 25)))
return rval
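# Clarifying note (not in the original module): cache_bust_string(n) returns n
# pseudo-random uppercase ASCII letters (e.g. "KQZMA" for n=5); it is used
# below to make every request URL unique so intermediate caches are bypassed.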
class AkamaiHDStreamIO(io.IOBase):
Version = "2.5.8"
FlashVersion = "LNX 11,1,102,63"
StreamURLFormat = "{host}/{streamname}"
ControlURLFormat = "{host}/control/{streamname}"
ControlData = b":)"
TokenGenerators = {
"c11e59dea648d56e864fc07a19f717b9": Auth3TokenGenerator
}
StatusComplete = 3
StatusError = 4
Errors = {
1: "Stream not found",
2: "Track not found",
3: "Seek out of bounds",
4: "Authentication failed",
5: "DVR disabled",
6: "Invalid bitrate test"
}
def __init__(self, session, url, swf=None, seek=None):
parsed = urlparse(url)
self.session = session
self.logger = self.session.logger.new_module("stream.akamaihd")
self.host = ("{scheme}://{netloc}").format(scheme=parsed.scheme, netloc=parsed.netloc)
self.streamname = parsed.path[1:]
self.swf = swf
self.seek = seek
def open(self):
self.guid = cache_bust_string(12)
self.islive = None
self.sessionid = None
self.flv = None
self.buffer = Buffer()
self.completed_handshake = False
url = self.StreamURLFormat.format(host=self.host, streamname=self.streamname)
params = self._create_params(seek=self.seek)
self.logger.debug("Opening host={host} streamname={streamname}",
host=self.host, streamname=self.streamname)
try:
res = self.session.http.get(url, stream=True, params=params)
self.fd = StreamIOIterWrapper(res.iter_content(8192))
except Exception as err:
raise StreamError(str(err))
self.handshake(self.fd)
return self
def handshake(self, fd):
try:
self.flv = FLV(fd)
except FLVError as err:
raise StreamError(str(err))
self.buffer.write(self.flv.header.serialize())
self.logger.debug("Attempting to handshake")
for i, tag in enumerate(self.flv):
if i == 10:
                raise StreamError("No OnEdge metadata in FLV after 10 tags, probably not an AkamaiHD stream")
self.process_tag(tag, exception=StreamError)
if self.completed_handshake:
self.logger.debug("Handshake successful")
break
def process_tag(self, tag, exception=IOError):
if isinstance(tag.data, ScriptData) and tag.data.name == "onEdge":
self._on_edge(tag.data.value, exception=exception)
self.buffer.write(tag.serialize())
def send_token(self, token):
headers = { "x-Akamai-Streaming-SessionToken": token }
self.logger.debug("Sending new session token")
self.send_control("sendingNewToken", headers=headers,
swf=self.swf)
def send_control(self, cmd, headers=None, **params):
if not headers:
headers = {}
url = self.ControlURLFormat.format(host=self.host,
streamname=self.streamname)
headers["x-Akamai-Streaming-SessionID"] = self.sessionid
params = self._create_params(cmd=cmd, **params)
return self.session.http.post(url,
headers=headers,
params=params,
data=self.ControlData,
exception=StreamError)
def read(self, size=-1):
if not (self.flv and self.fd):
return b""
if self.buffer.length:
return self.buffer.read(size)
else:
return self.fd.read(size)
def _create_params(self, **extra):
params = dict(v=self.Version, fp=self.FlashVersion,
r=cache_bust_string(5), g=self.guid)
params.update(extra)
return params
def _generate_session_token(self, data64):
swfdata = base64.decodestring(bytes(data64, "ascii"))
md5 = hashlib.md5()
md5.update(swfdata)
hash = md5.hexdigest()
if hash in self.TokenGenerators:
generator = self.TokenGenerators[hash](self)
return generator.generate()
else:
raise StreamError(("No token generator available for hash '{0}'").format(hash))
def _on_edge(self, data, exception=IOError):
def updateattr(attr, key):
if key in data:
setattr(self, attr, data[key])
self.logger.debug("onEdge data")
for key, val in data.items():
if isinstance(val, str):
val = val[:50]
self.logger.debug(" {key}={val}",
key=key, val=val)
updateattr("islive", "isLive")
updateattr("sessionid", "session")
updateattr("status", "status")
updateattr("streamname", "streamName")
if self.status == self.StatusComplete:
self.flv = None
elif self.status == self.StatusError:
errornum = data["errorNumber"]
if errornum in self.Errors:
msg = self.Errors[errornum]
else:
msg = "Unknown error"
raise exception("onEdge error: " + msg)
if not self.completed_handshake:
if "data64" in data:
sessiontoken = self._generate_session_token(data["data64"])
else:
sessiontoken = None
self.send_token(sessiontoken)
self.completed_handshake = True
class AkamaiHDStream(Stream):
"""
Implements the AkamaiHD Adaptive Streaming protocol
*Attributes:*
- :attr:`url` URL to the stream
- :attr:`swf` URL to a SWF used by the handshake protocol
- :attr:`seek` Position to seek to when opening the stream
"""
__shortname__ = "akamaihd"
def __init__(self, session, url, swf=None, seek=None):
Stream.__init__(self, session)
self.seek = seek
self.swf = swf
self.url = url
def __repr__(self):
return ("<AkamaiHDStream({0!r}, "
"swf={1!r})>".format(self.url, self.swf))
def __json__(self):
return dict(type=AkamaiHDStream.shortname(),
url=self.url, swf=self.swf)
def open(self):
stream = AkamaiHDStreamIO(self.session, self.url,
self.swf, self.seek)
return StreamIOThreadWrapper(self.session, stream.open())
__all__ = ["AkamaiHDStream"]
| bsd-2-clause | 6,845,928,164,781,481,000 | 28.513109 | 108 | 0.57703 | false |
mgymrek/lobstr-code | scripts/lobSTR_capillary_comparator.py | 1 | 6675 | #!/usr/bin/env python
"""
Compare capillary vs. lobSTR calls
This script is part of lobSTR_validation_suite.sh and
is not mean to be called directly.
"""
import argparse
import numpy as np
import pandas as pd
import sys
from scipy.stats import pearsonr
def ConvertSample(x):
"""
    Convert HGDP sample numbers to the standard format
HGDPXXXXX
"""
num = x.split("_")[1]
zeros = 5-len(num)
return "HGDP"+"0"*zeros + num
def LoadCapillaryFromStru(capfile, convfile):
"""
Input:
capfile: filename for .stru file
convfile: filename giving illumina sample id->HGDP id
Output: data frame with capillary calls
Construct data frame with:
marker
sample
allele1.cap
allele2.cap
Use sample names converted to Illumina format
Ignore alleles that are -9,-9
"""
# Load conversions
conv = pd.read_csv(convfile, sep="\t")
converter = dict(zip(conv.hgdp, conv.sample))
# Load genotypes
markers = []
samples = []
allele1s = []
allele2s = []
f = open(capfile, "r")
marker_names = f.readline().strip().split()
line = f.readline()
while line != "":
items = line.strip().split()
ident = "HGDP_%s"%items[0]
pop_code = items[1]
pop_name = items[2]
geo = items[3]
geo2 = items[4]
alleles1 = items[5:]
line = f.readline() # get second allele for the individual
items = line.strip().split()
ident2 = "HGDP_%s"%items[0]
if ident != ident2:
sys.stderr.write("ERROR parsing .stru file for individual %s\n"%items[0])
sys.exit(1)
alleles2 = items[5:]
sample = converter.get(ConvertSample(ident), "NA")
if sample != "NA":
for i in range(len(alleles1)):
if str(alleles1[i]) != "-9" and str(alleles2[i]) != "-9":
markers.append(marker_names[i])
samples.append(sample)
allele1s.append(int(alleles1[i]))
allele2s.append(int(alleles2[i]))
line = f.readline()
return pd.DataFrame({"marker": markers, "sample": samples, \
"allele1.cap": allele1s, "allele2.cap": allele2s})
def GetAllele(x, allele_num):
if allele_num == 1:
al = x["allele1.cap"]
else: al = x["allele2.cap"]
raw_allele = ((al-x["effective_product_size"])/x["period"])*x["period"]
corr_allele = raw_allele - x["correction"]
return corr_allele
def GetDosage(a1, a2):
if str(a1) == "." or str(a2) == ".": return "NA"
else: return (float(a1)+float(a2))*0.5
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--lobSTR", help="Tab file with lobSTR calls created by lobSTR_vcf_to_tab.py", type=str, required=True)
parser.add_argument("--cap", help=".stru file with capillary calls", type=str, required=True)
parser.add_argument("--corrections", help="Tab file with Marshfield marker corrections", type=str, required=True)
parser.add_argument("--sample-conversions", help="Tab file with conversion between sample ids", type=str, required=True)
parser.add_argument("--output-stats", help="Output sample, call, and locus level stats to files with this prefix", type=str, required=False)
args = parser.parse_args()
LOBFILE = args.lobSTR
CAPFILE = args.cap
CORRFILE = args.corrections
CONVFILE = args.sample_conversions
# Load lobSTR calls and corrections
lob = pd.read_csv(LOBFILE, sep="\t")
corr = pd.read_csv(CORRFILE, sep="\t")
# Load capillary calls to data frame
cap = LoadCapillaryFromStru(CAPFILE, CONVFILE)
    # Merge datasets
res = pd.merge(lob, corr, on=["chrom", "start"])
res = pd.merge(res, cap, on=["marker", "sample"])
res["allele1.cap.corr"] = res.apply(lambda x: GetAllele(x, 1), 1)
res["allele2.cap.corr"] = res.apply(lambda x: GetAllele(x, 2), 1)
res["correct"] = res.apply(lambda x: str(x["allele1"])==str(x["allele1.cap.corr"]) and \
str(x["allele2"])==str(x["allele2.cap.corr"]), 1)
res["dosage_lob"] = res.apply(lambda x: GetDosage(x["allele1"], x["allele2"]), 1)
res["dosage_cap"] = res.apply(lambda x: GetDosage(x["allele1.cap.corr"], x["allele2.cap.corr"]), 1)
##### Stats #####
if args.output_stats:
# Call level stats
res.to_csv(args.output_stats+".calllevel.tab", index=False, sep="\t")
# Sample level stats
sample_level = res.groupby("sample", as_index=False).agg({"DP": np.mean,
"Q": np.mean,
"start": len,
"correct": np.mean})
sample_level.to_csv(args.output_stats+".samplelevel.tab", index=False, sep="\t")
# Locus level stats
res["length"] = res["end_x"]-res["start"]+1
locus_level = res.groupby(["chrom","start"], as_index=False).agg({"length": np.mean,
"DP": np.mean,
"Q": np.mean,
"GT": len,
"SB": np.mean,
"DISTENDS": np.mean,
"correct": np.mean})
locus_level.to_csv(args.output_stats+".locuslevel.tab", index=False, sep="\t")
##### Results #####
sys.stdout.write("########## Results ########\n")
# Get stats about calls
num_samples = len(set(res[res["allele1"].apply(str)!="."]["sample"]))
num_markers = len(set(res[res["allele1"].apply(str)!="."]["marker"]))
num_nocalls = res[res["allele1"].apply(str)=="."].shape[0]
sys.stdout.write("# Samples: %s\n"%num_samples)
sys.stdout.write("# Markers: %s\n"%num_markers)
sys.stdout.write("# No call rate: %s\n"%(num_nocalls*1.0/res.shape[0]))
sys.stdout.write("# Number of calls compared: %s\n"%res.shape[0])
# Accuracy
acc = np.mean(res[res["allele1"].apply(str)!="."]["correct"])
sys.stdout.write("# Accuracy: %s\n"%acc)
# R2
dl = map(float, list(res[res["allele1"].apply(str)!="."]["dosage_lob"]))
dc = map(float, list(res[res["allele1"].apply(str)!="."]["dosage_cap"]))
r2 = pearsonr(dl, dc)[0]**2
sys.stdout.write("# R2: %s\n"%r2)
| gpl-3.0 | -6,129,424,915,326,303,000 | 40.459627 | 144 | 0.541573 | false |
itsjeyd/edx-platform | common/djangoapps/track/views/tests/test_segmentio.py | 19 | 22011 | """Ensure we can parse events sent to us from the Segment webhook integration"""
from datetime import datetime
import json
from ddt import ddt, data, unpack
from mock import sentinel
from nose.plugins.attrib import attr
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_event_matches
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase
from track.views import segmentio
SECRET = 'anything'
ENDPOINT = '/segmentio/test/event'
USER_ID = 10
MOBILE_SHIM_PROCESSOR = [
{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'},
{'ENGINE': 'track.shim.PrefixedEventProcessor'},
]
def expect_failure_with_message(message):
"""Ensure the test raises an exception and does not emit an event"""
def test_decorator(func):
def test_decorated(self, *args, **kwargs):
self.assertRaisesRegexp(segmentio.EventValidationError, message, func, self, *args, **kwargs)
self.assert_no_events_emitted()
return test_decorated
return test_decorator
@attr(shard=3)
@ddt
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=SECRET,
TRACKING_IGNORE_URL_PATTERNS=[ENDPOINT],
TRACKING_SEGMENTIO_ALLOWED_TYPES=['track'],
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES=['.bi.'],
TRACKING_SEGMENTIO_SOURCE_MAP={'test-app': 'mobile'},
EVENT_TRACKING_PROCESSORS=MOBILE_SHIM_PROCESSOR,
)
class SegmentIOTrackingTestCase(EventTrackingTestCase):
"""Test processing of Segment events"""
def setUp(self):
super(SegmentIOTrackingTestCase, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.request_factory = RequestFactory()
def test_get_request(self):
request = self.request_factory.get(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 405)
self.assert_no_events_emitted()
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=None
)
def test_no_secret_config(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_no_secret_provided(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_secret_mismatch(self):
request = self.create_request(key='y')
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def create_request(self, key=None, **kwargs):
"""Create a fake request that emulates a request from the Segment servers to ours"""
if key is None:
key = SECRET
request = self.request_factory.post(ENDPOINT + "?key=" + key, **kwargs)
if 'data' in kwargs:
request.json = json.loads(kwargs['data'])
return request
@data('identify', 'Group', 'Alias', 'Page', 'identify', 'screen')
def test_segmentio_ignore_actions(self, action):
self.post_segmentio_event(action=action)
self.assert_no_events_emitted()
@data('edx.bi.some_name', 'EDX.BI.CAPITAL_NAME')
def test_segmentio_ignore_names(self, name):
self.post_segmentio_event(name=name)
self.assert_no_events_emitted()
def post_segmentio_event(self, **kwargs):
"""Post a fake Segment event to the view that processes it"""
request = self.create_request(
data=self.create_segmentio_event_json(**kwargs),
content_type='application/json'
)
segmentio.track_segmentio_event(request)
def create_segmentio_event(self, **kwargs):
"""Populate a fake Segment event with data of interest"""
action = kwargs.get('action', 'Track')
sample_event = {
"userId": kwargs.get('user_id', USER_ID),
"event": "Did something",
"properties": {
'name': kwargs.get('name', str(sentinel.name)),
'data': kwargs.get('data', {}),
'context': {
'course_id': kwargs.get('course_id') or '',
'app_name': 'edx.mobile.android',
}
},
"channel": 'server',
"context": {
"library": {
"name": kwargs.get('library_name', 'test-app'),
"version": "unknown"
},
"app": {
"version": "1.0.1",
},
'userAgent': str(sentinel.user_agent),
},
"receivedAt": "2014-08-27T16:33:39.100Z",
"timestamp": "2014-08-27T16:33:39.215Z",
"type": action.lower(),
"projectId": "u0j33yjkr8",
"messageId": "qy52hwp4",
"version": 2,
"integrations": {},
"options": {
"library": "unknown",
"providers": {}
},
"action": action
}
if 'context' in kwargs:
sample_event['properties']['context'].update(kwargs['context'])
return sample_event
def create_segmentio_event_json(self, **kwargs):
"""Return a json string containing a fake Segment event"""
return json.dumps(self.create_segmentio_event(**kwargs))
def test_segmentio_ignore_unknown_libraries(self):
self.post_segmentio_event(library_name='foo')
self.assert_no_events_emitted()
@expect_failure_with_message(segmentio.ERROR_USER_NOT_EXIST)
def test_no_user_for_user_id(self):
self.post_segmentio_event(user_id=40)
@expect_failure_with_message(segmentio.ERROR_INVALID_USER_ID)
def test_invalid_user_id(self):
self.post_segmentio_event(user_id='foobar')
@data('foo/bar/baz', 'course-v1:foo+bar+baz')
def test_success(self, course_id):
middleware = TrackMiddleware()
request = self.create_request(
data=self.create_segmentio_event_json(data={'foo': 'bar'}, course_id=course_id),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
# The middleware normally emits an event, make sure it doesn't in this case.
self.assert_no_events_emitted()
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': str(sentinel.name),
'name': str(sentinel.name),
'event': {'foo': 'bar'},
'agent': str(sentinel.user_agent),
'page': None,
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'application': {
'name': 'edx.mobile.android',
'version': '1.0.1',
},
'user_id': USER_ID,
'course_id': course_id,
'org_id': u'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
}
finally:
middleware.process_response(request, None)
assert_event_matches(expected_event, self.get_event())
def test_invalid_course_id(self):
request = self.create_request(
data=self.create_segmentio_event_json(course_id='invalid'),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
self.assert_events_emitted()
@expect_failure_with_message(segmentio.ERROR_MISSING_NAME)
def test_missing_name(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['name']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_DATA)
def test_missing_data(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['data']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_TIMESTAMP)
def test_missing_timestamp(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_RECEIVED_AT)
def test_missing_received_at(self):
sample_event_raw = self.create_event_without_fields('receivedAt')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
def create_event_without_fields(self, *fields):
"""Create a fake event and remove some fields from it"""
event = self.create_segmentio_event()
for field in fields:
if field in event:
del event[field]
return event
def test_string_user_id(self):
User.objects.create(pk=USER_ID, username=str(sentinel.username))
self.post_segmentio_event(user_id=str(USER_ID))
self.assert_events_emitted()
def test_hiding_failure(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
self.assert_no_events_emitted()
@data(
('edx.video.played', 'play_video'),
('edx.video.paused', 'pause_video'),
('edx.video.stopped', 'stop_video'),
('edx.video.loaded', 'load_video'),
('edx.video.position.changed', 'seek_video'),
('edx.video.transcript.shown', 'show_transcript'),
('edx.video.transcript.hidden', 'hide_transcript'),
)
@unpack
def test_video_event(self, name, event_type):
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
'current_time': 132.134456,
'module_id': 'i4x://foo/bar/baz/some_module',
'code': 'mobile'
}
if name == 'edx.video.loaded':
# We use the same expected payload for all of these types of events, but the load video event is the only
            # one that is not actually expected to contain a "current time" field. So we remove it from the input
            # payload here.
del input_payload['current_time']
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
}
}),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': event_type,
'name': name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
'event': {
'currentTime': 132.134456,
'id': 'i4x-foo-bar-baz-some_module',
'code': 'mobile'
}
}
if name == 'edx.video.loaded':
# We use the same expected payload for all of these types of events, but the load video event is the
# only one that is not actually expected to contain a "current time" field. So we remove it from the
# expected event here.
del expected_event['event']['currentTime']
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
@data(
# Verify positive slide case. Verify slide to onSlideSeek. Verify
# edx.video.seeked emitted from iOS v1.0.02 is changed to
# edx.video.position.changed.
(1, 1, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify negative slide case. Verify slide to onSlideSeek. Verify
# edx.video.seeked to edx.video.position.changed.
(-2, -2, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify +30 is changed to -30 which is incorrectly emitted in iOS
# v1.0.02. Verify skip to onSkipSeek
(30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify the correct case of -30 is also handled as well. Verify skip
# to onSkipSeek
(-30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to
# onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is
# changed to edx.video.position.changed.
(1, 1, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to
# onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is
# changed to edx.video.position.changed.
(-2, -2, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed and does
# not become negative.
(30, 30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed.
(-30, -30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02')
)
@unpack
def test_previous_builds(self,
requested_skip_interval,
expected_skip_interval,
seek_type_key,
seek_type,
expected_seek_type,
name,
expected_name,
platform,
version,
):
"""
Test backwards compatibility of previous app builds
iOS version 1.0.02: Incorrectly emits the skip back 30 seconds as +30
instead of -30.
Android version 1.0.02: Skip and slide were both being returned as a
skip. Skip or slide is determined by checking if the skip time is == -30
Additionally, for both of the above mentioned versions, edx.video.seeked
was sent instead of edx.video.position.changed
"""
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
seek_type_key: seek_type,
"requested_skip_interval": requested_skip_interval,
'module_id': 'i4x://foo/bar/baz/some_module',
}
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
}
},
),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': "seek_video",
'name': expected_name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
'event': {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
"type": expected_seek_type,
"requested_skip_interval": expected_skip_interval,
'id': 'i4x-foo-bar-baz-some_module',
}
}
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
| agpl-3.0 | 9,047,553,053,644,725,000 | 39.685767 | 149 | 0.541865 | false |
DickJC123/mxnet | example/profiler/profiler_matmul.py | 7 | 2449 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import argparse
import time
def parse_args():
parser = argparse.ArgumentParser(description='Set network parameters for benchmark test.')
parser.add_argument('--profile_filename', type=str, default='profile_matmul_20iter.json')
parser.add_argument('--iter_num', type=int, default=100)
parser.add_argument('--begin_profiling_iter', type=int, default=50)
parser.add_argument('--end_profiling_iter', type=int, default=70)
return parser.parse_args()
args = parse_args()
if __name__ == '__main__':
mx.profiler.set_config(profile_symbolic=True, filename=args.profile_filename)
    print('profile file saved to {0}'.format(args.profile_filename))
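    # Note: the resulting JSON trace can be inspected by loading it into a
    # Chromium-based browser via chrome://tracing.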
A = mx.sym.Variable('A')
B = mx.sym.Variable('B')
C = mx.symbol.dot(A, B)
executor = C.simple_bind(mx.gpu(0), 'write', A=(4096, 4096), B=(4096, 4096))
a = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
b = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
a.copyto(executor.arg_dict['A'])
b.copyto(executor.arg_dict['B'])
flag = False
print("execution begin")
for i in range(args.iter_num):
if i == args.begin_profiling_iter:
t0 = time.process_time()
mx.profiler.set_state('run')
if i == args.end_profiling_iter:
t1 = time.process_time()
mx.profiler.set_state('stop')
executor.forward()
c = executor.outputs[0]
c.wait_to_read()
print("execution end")
duration = t1 - t0
print('duration: {0}s'.format(duration))
print(' {0}ms/operator'.format(duration*1000/args.iter_num))
| apache-2.0 | 3,663,799,963,536,551,000 | 36.106061 | 94 | 0.675378 | false |
sve-odoo/odoo | addons/sale_mrp/__openerp__.py | 61 | 1935 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sales and MRP Management',
'version': '1.0',
'category': 'Hidden',
'description': """
This module allows installing the mrp and sale modules at the same time.
====================================================================================
It is basically used when we want to keep track of production orders generated
from sales orders. It adds the sales name and sales reference to the production order.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/SO_to_MO.jpeg'],
'depends': ['mrp', 'sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_mrp_view.xml',
],
'demo': [],
'test':[
'test/cancellation_propagated.yml',
'test/sale_mrp.yml',
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,097,095,248,138,474,000 | 37.7 | 84 | 0.567442 | false |
BaluDontu/docker-volume-vsphere | esx_service/vsan_policy.py | 1 | 9052 | #!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module for VSAN storage policy creation and configuration
import os
import logging
import shutil
import vmdk_utils
import vsan_info
import volume_kv as kv
import vsan_info
ERROR_NO_VSAN_DATASTORE = 'Error: VSAN datastore does not exist'
def create(name, content):
"""
Create a new storage policy and save it as dockvols/policies/name in
the VSAN datastore. If there are VSAN volumes currently using a policy with
the same name, creation will fail. Return a string on error and None on
success.
"""
datastore_path = vsan_info.get_vsan_dockvols_path()
if not datastore_path:
return ERROR_NO_VSAN_DATASTORE
policies_dir = make_policies_dir(datastore_path)
filename = os.path.join(policies_dir, name)
if os.path.isfile(filename):
return 'Error: Policy already exists'
if not validate_vsan_policy_string(content):
return 'Error: Invalid policy string'
return create_policy_file(filename, content)
def update(name, content):
"""
Update the content of an existing VSAN policy in the VSAN datastore.
Update the policy content in each VSAN object currently using the policy. If
a VSAN policy of the given name does not exist return an error string.
Return None on success.
"""
path = policy_path(name)
if not path:
return ERROR_NO_VSAN_DATASTORE
err = update_policy_file_content(path, content)
if err:
return err
return update_vsan_objects_with_policy(name, content)
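# Minimal usage sketch (illustrative only; the policy string below is a
# made-up VSAN expression, not a verified one):
#   err = update('my-policy', '(("hostFailuresToTolerate" i1))')
#   if err:
#       print(err)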
def update_policy_file_content(path, content):
"""
Update the VSAN policy file content. Return an error msg or None on success.
"""
try:
with open(path) as f:
existing_content = f.read()
except OSError as e:
if not os.path.isfile(path):
return 'Error: Policy {0} does not exist'.format(
os.path.basename(path))
else:
return 'Error opening existing policy file {0}: {1}'.format(path, e)
if existing_content.strip() == content.strip():
return 'Error: New policy is identical to old policy. Ignoring.'
# Create a temporary file so we don't corrupt an existing policy file
tmpfile = '{0}.tmp'.format(path)
err = create_policy_file(tmpfile, content)
if err:
return err
# Copy the original policy file to a backup file (.old)
# The backup will be maintained in case the policy content is invalid, when
# attempting to apply it to existing volumes.
# Do an atomic rename of the tmpfile to the real policy file name
try:
shutil.copy(path, backup_policy_filename(path))
os.rename(tmpfile, path)
except OSError:
print('Internal Error: Failed to update policy file contents: '
'{0}').format(path)
raise
return None
def update_vsan_objects_with_policy(name, content):
"""
Find all VSAN objects using the policy given by `name` and update the policy
contents in their objects. Returns an error string containing the list of
volumes that failed to update, or a msg if there were no volumes to update.
Returns None if all volumes were updated successfully.
Note: This function assumes datastore_path exists.
"""
update_count = 0
failed_updates = []
dockvols_path = vsan_info.get_vsan_dockvols_path()
print("This operation may take a while. Please be patient.")
for v in list_volumes_and_policies():
if v['policy'] == name:
volume_name = v['volume']
vmdk_path = os.path.join(dockvols_path, volume_name)
if vsan_info.set_policy(vmdk_path, content):
                update_count += 1
else:
failed_updates.append(volume_name)
if len(failed_updates) != 0:
if update_count == 0:
# All volumes failed to update, so reset the original policy
os.rename(policy_path(backup_policy_filename(name)),
policy_path(name))
else:
log_failed_updates(failed_updates, name)
        return ('Successfully updated: {0} volumes.\n'
                'Failed to update: {1} volumes'.format(update_count,
                                                       len(failed_updates)))
# Remove old policy file on success
os.remove(policy_path(backup_policy_filename(name)))
return None
def backup_policy_filename(name):
""" Generate a .old file from a policy name or path """
return '{0}.old'.format(name)
def log_failed_updates(volumes, policy_name):
"""
During policy update, some volumes may fail to have their VSAN policies
updated. We create a file containing these volumes for debugging purposes.
"""
filename = policy_path('{0}.failed_volume_updates'.format(policy_name))
try:
with open(filename, 'w') as f:
            f.write('\n'.join(volumes))
            f.write('\n')
except:
print("Failed to save volume names that failed to update to file."
"Please record them for future use.")
def make_policies_dir(datastore_path):
"""
Create the policies dir if it doesn't exist and return the path.
This function assumes that datastore_path is a VSAN datastore,
although it won't fail if it isn't.
"""
policies_dir = os.path.join(datastore_path, 'policies')
try:
os.mkdir(policies_dir)
except OSError:
pass
return policies_dir
def create_policy_file(filename, content):
"""
Create a storage policy file in filename. Returns an error string on
failure and None on success.
"""
try:
with open(filename, 'w') as f:
f.write(content)
f.write('\n')
except:
msg = 'Error: Failed to open {0} for writing'.format(filename)
logging.exception(msg)
if os.path.isfile(filename):
os.remove(filename)
return msg
return None
def delete(name):
"""
Remove a given policy. If the policy does not exist return an error string,
otherwise return None
"""
path = vsan_info.get_vsan_dockvols_path()
if not path:
return ERROR_NO_VSAN_DATASTORE
vmdk = policy_in_use(path, name)
if vmdk:
return 'Error: Cannot remove. Policy is in use by {0}'.format(vmdk)
try:
os.remove(policy_path(name))
except:
logging.exception("Failed to remove %s policy file", name)
return 'Error: {0} does not exist'.format(name)
return None
def get_policies():
""" Return a dict of all VSAN policy names to policy content. """
policies = {}
path = vsan_info.get_vsan_dockvols_path()
if not path:
return {}
path = make_policies_dir(path)
for name in os.listdir(path):
with open(os.path.join(path, name)) as f:
content = f.read()
policies[name] = content
return policies
def list_volumes_and_policies():
""" Return a list of vmdks and the policies in use"""
vmdks_and_policies = []
path = vsan_info.get_vsan_dockvols_path()
if not path:
return []
for vmdk in vmdk_utils.list_vmdks(path):
policy = kv_get_vsan_policy_name(os.path.join(path, vmdk))
vmdks_and_policies.append({'volume': vmdk, 'policy': policy})
return vmdks_and_policies
def policy_exists(name):
""" Check if the policy file exists """
return os.path.isfile(policy_path(name))
def policy_path(name):
"""
Return the path to a given policy file or None if VSAN datastore doesn't
exist
"""
path = vsan_info.get_vsan_dockvols_path()
if not path:
return None
return os.path.join(path, 'policies', name)
def kv_get_vsan_policy_name(path):
"""
Take a path for a vmdk and return a policy name if it exists or None if it
doesn't
"""
try:
return kv.getAll(path)[kv.VOL_OPTS][kv.VSAN_POLICY_NAME]
except:
return None
def policy_in_use(path, name):
"""
Check if a policy is in use by a VMDK and return the name of the first VMDK
using it if it is, None otherwise
"""
for vmdk in vmdk_utils.list_vmdks(path):
policy = kv_get_vsan_policy_name(os.path.join(path, vmdk))
if policy == name:
return vmdk
return None
def validate_vsan_policy_string(content):
"""
Stub for a function that validates the syntax of a vsan policy string
"""
return True
| apache-2.0 | -1,053,890,982,789,795,200 | 30.321799 | 80 | 0.642841 | false |
Ditmar/plugin.video.pelisalacarta | servers/watchfreeinhd.py | 44 | 1983 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for watchfreeinhd
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[watchfreeinhd.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
    # Download the page; the user is offered two buttons, "Descargar" (download) and "Ver" (watch)
data = scrapertools.cache_page(page_url)
    # Download it again as if the "Ver" (watch) button had been pressed
# http://srv.hdplay.org/storage/flv/xGylz8.flv?token=703acade4b51aa6b26ad264327c4a4cf
data = scrapertools.cache_page(page_url,post="agree=")
patron = '<div id="playerHolder">[^<]+'
patron += '<a href="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
video_urls.append( ["[watchfreeinhd]",matches[0] ] )
for video_url in video_urls:
logger.info("[watchfreeinhd.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Finds videos from this server in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://www.watchfreeinhd.com/r0GUbN
patronvideos = '(http://www.watchfreeinhd.com/[A-Za-z0-9]+)'
logger.info("[watchfreeinhd.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[watchfreeinhd]"
url = match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'watchfreeinhd' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
| gpl-3.0 | -6,358,708,320,980,787,000 | 33.736842 | 91 | 0.605051 | false |
adaitche/luigi | test/task_test.py | 13 | 14484 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import doctest
import pickle
import six
import warnings
from helpers import unittest, LuigiTestCase
from datetime import datetime, timedelta
import luigi
import luigi.task
import luigi.util
import collections
from luigi.task_register import load_task
class DummyTask(luigi.Task):
param = luigi.Parameter()
bool_param = luigi.BoolParameter()
int_param = luigi.IntParameter()
float_param = luigi.FloatParameter()
date_param = luigi.DateParameter()
datehour_param = luigi.DateHourParameter()
timedelta_param = luigi.TimeDeltaParameter()
insignificant_param = luigi.Parameter(significant=False)
DUMMY_TASK_OK_PARAMS = dict(
param='test',
bool_param=True,
int_param=666,
float_param=123.456,
date_param=datetime(2014, 9, 13).date(),
datehour_param=datetime(2014, 9, 13, 9),
timedelta_param=timedelta(44), # doesn't support seconds
insignificant_param='test')
class DefaultInsignificantParamTask(luigi.Task):
insignificant_param = luigi.Parameter(significant=False, default='value')
necessary_param = luigi.Parameter(significant=False)
class TaskTest(unittest.TestCase):
def test_tasks_doctest(self):
doctest.testmod(luigi.task)
def test_task_to_str_to_task(self):
original = DummyTask(**DUMMY_TASK_OK_PARAMS)
other = DummyTask.from_str_params(original.to_str_params())
self.assertEqual(original, other)
def test_task_from_str_insignificant(self):
params = {'necessary_param': 'needed'}
original = DefaultInsignificantParamTask(**params)
other = DefaultInsignificantParamTask.from_str_params(params)
self.assertEqual(original, other)
def test_task_missing_necessary_param(self):
with self.assertRaises(luigi.parameter.MissingParameterException):
DefaultInsignificantParamTask.from_str_params({})
def test_external_tasks_loadable(self):
task = load_task("luigi", "ExternalTask", {})
assert(isinstance(task, luigi.ExternalTask))
def test_getpaths(self):
class RequiredTask(luigi.Task):
def output(self):
return luigi.LocalTarget("/path/to/target/file")
t = RequiredTask()
reqs = {}
reqs["bare"] = t
reqs["dict"] = {"key": t}
reqs["OrderedDict"] = collections.OrderedDict([("key", t)])
reqs["list"] = [t]
reqs["tuple"] = (t,)
reqs["generator"] = (t for _ in range(10))
struct = luigi.task.getpaths(reqs)
self.assertIsInstance(struct, dict)
self.assertIsInstance(struct["bare"], luigi.Target)
self.assertIsInstance(struct["dict"], dict)
self.assertIsInstance(struct["OrderedDict"], collections.OrderedDict)
self.assertIsInstance(struct["list"], list)
self.assertIsInstance(struct["tuple"], tuple)
self.assertTrue(hasattr(struct["generator"], "__iter__"))
def test_flatten(self):
flatten = luigi.task.flatten
self.assertEqual(sorted(flatten({'a': 'foo', 'b': 'bar'})), ['bar', 'foo'])
self.assertEqual(sorted(flatten(['foo', ['bar', 'troll']])), ['bar', 'foo', 'troll'])
self.assertEqual(flatten('foo'), ['foo'])
self.assertEqual(flatten(42), [42])
self.assertEqual(flatten((len(i) for i in ["foo", "troll"])), [3, 5])
self.assertRaises(TypeError, flatten, (len(i) for i in ["foo", "troll", None]))
def test_externalized_task_picklable(self):
task = luigi.task.externalize(luigi.Task())
pickled_task = pickle.dumps(task)
self.assertEqual(task, pickle.loads(pickled_task))
def test_no_unpicklable_properties(self):
task = luigi.Task()
task.set_tracking_url = lambda tracking_url: tracking_url
task.set_status_message = lambda message: message
with task.no_unpicklable_properties():
pickle.dumps(task)
self.assertIsNotNone(task.set_tracking_url)
self.assertIsNotNone(task.set_status_message)
tracking_url = task.set_tracking_url('http://test.luigi.com/')
self.assertEqual(tracking_url, 'http://test.luigi.com/')
message = task.set_status_message('message')
self.assertEqual(message, 'message')
def test_no_warn_if_param_types_ok(self):
with warnings.catch_warnings(record=True) as w:
DummyTask(**DUMMY_TASK_OK_PARAMS)
self.assertEqual(len(w), 0, msg='No warning should be raised when correct parameter types are used')
if six.PY3: # assertWarnsRegex was introduced in Python 3.2
def test_warn_on_non_str_param(self):
params = dict(**DUMMY_TASK_OK_PARAMS)
params['param'] = 42
with self.assertWarnsRegex(UserWarning, 'Parameter "param" with value "42" is not of type string.'):
DummyTask(**params)
def test_warn_on_non_timedelta_param(self):
params = dict(**DUMMY_TASK_OK_PARAMS)
class MockTimedelta(object):
days = 1
seconds = 1
params['timedelta_param'] = MockTimedelta()
with self.assertWarnsRegex(UserWarning, 'Parameter "timedelta_param" with value ".*" is not of type timedelta.'):
DummyTask(**params)
class ExternalizeTaskTest(LuigiTestCase):
def test_externalize_taskclass(self):
class MyTask(luigi.Task):
def run(self):
pass
self.assertIsNotNone(MyTask.run) # Assert what we believe
task_object = luigi.task.externalize(MyTask)()
self.assertIsNone(task_object.run)
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_taskobject(self):
class MyTask(luigi.Task):
def run(self):
pass
task_object = luigi.task.externalize(MyTask())
self.assertIsNone(task_object.run)
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_taskclass_readable_name(self):
class MyTask(luigi.Task):
def run(self):
pass
task_class = luigi.task.externalize(MyTask)
self.assertIsNot(task_class, MyTask)
self.assertIn("MyTask", task_class.__name__)
def test_externalize_taskclass_instance_cache(self):
class MyTask(luigi.Task):
def run(self):
pass
task_class = luigi.task.externalize(MyTask)
self.assertIsNot(task_class, MyTask)
self.assertIs(MyTask(), MyTask()) # Assert it have enabled the instance caching
self.assertIsNot(task_class(), MyTask()) # Now, they should not be the same of course
def test_externalize_same_id(self):
class MyTask(luigi.Task):
def run(self):
pass
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask)()
task_ext_2 = luigi.task.externalize(MyTask())
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
def test_externalize_same_id_with_task_namespace(self):
# Dependent on the new behavior from spotify/luigi#1953
class MyTask(luigi.Task):
task_namespace = "something.domething"
def run(self):
pass
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask())
task_ext_2 = luigi.task.externalize(MyTask)()
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
self.assertEqual(str(task_normal), str(task_ext_1))
self.assertEqual(str(task_normal), str(task_ext_2))
def test_externalize_same_id_with_luigi_namespace(self):
# Dependent on the new behavior from spotify/luigi#1953
luigi.namespace('lets.externalize')
class MyTask(luigi.Task):
def run(self):
pass
luigi.namespace()
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask())
task_ext_2 = luigi.task.externalize(MyTask)()
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
self.assertEqual(str(task_normal), str(task_ext_1))
self.assertEqual(str(task_normal), str(task_ext_2))
def test_externalize_with_requires(self):
class MyTask(luigi.Task):
def run(self):
pass
@luigi.util.requires(luigi.task.externalize(MyTask))
class Requirer(luigi.Task):
def run(self):
pass
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_doesnt_affect_the_registry(self):
class MyTask(luigi.Task):
pass
reg_orig = luigi.task_register.Register._get_reg()
luigi.task.externalize(MyTask)
reg_afterwards = luigi.task_register.Register._get_reg()
self.assertEqual(reg_orig, reg_afterwards)
def test_can_uniquely_command_line_parse(self):
class MyTask(luigi.Task):
pass
# This first check is just an assumption rather than assertion
self.assertTrue(self.run_locally(['MyTask']))
luigi.task.externalize(MyTask)
# Now we check we don't encounter "ambiguous task" issues
self.assertTrue(self.run_locally(['MyTask']))
# We do this once again, is there previously was a bug like this.
luigi.task.externalize(MyTask)
self.assertTrue(self.run_locally(['MyTask']))
class TaskNamespaceTest(LuigiTestCase):
def setup_tasks(self):
class Foo(luigi.Task):
pass
class FooSubclass(Foo):
pass
return (Foo, FooSubclass, self.go_mynamespace())
def go_mynamespace(self):
luigi.namespace("mynamespace")
class Foo(luigi.Task):
p = luigi.IntParameter()
class Bar(Foo):
task_namespace = "othernamespace" # namespace override
class Baz(Bar): # inherits namespace for Bar
pass
luigi.namespace()
return collections.namedtuple('mynamespace', 'Foo Bar Baz')(Foo, Bar, Baz)
def test_vanilla(self):
(Foo, FooSubclass, namespace_test_helper) = self.setup_tasks()
self.assertEqual(Foo.task_family, "Foo")
self.assertEqual(str(Foo()), "Foo()")
self.assertEqual(FooSubclass.task_family, "FooSubclass")
self.assertEqual(str(FooSubclass()), "FooSubclass()")
def test_namespace(self):
(Foo, FooSubclass, namespace_test_helper) = self.setup_tasks()
self.assertEqual(namespace_test_helper.Foo.task_family, "mynamespace.Foo")
self.assertEqual(str(namespace_test_helper.Foo(1)), "mynamespace.Foo(p=1)")
self.assertEqual(namespace_test_helper.Bar.task_namespace, "othernamespace")
self.assertEqual(namespace_test_helper.Bar.task_family, "othernamespace.Bar")
self.assertEqual(str(namespace_test_helper.Bar(1)), "othernamespace.Bar(p=1)")
self.assertEqual(namespace_test_helper.Baz.task_namespace, "othernamespace")
self.assertEqual(namespace_test_helper.Baz.task_family, "othernamespace.Baz")
self.assertEqual(str(namespace_test_helper.Baz(1)), "othernamespace.Baz(p=1)")
def test_uses_latest_namespace(self):
luigi.namespace('a')
class _BaseTask(luigi.Task):
pass
luigi.namespace('b')
class _ChildTask(_BaseTask):
pass
luigi.namespace() # Reset everything
child_task = _ChildTask()
self.assertEqual(child_task.task_family, 'b._ChildTask')
self.assertEqual(str(child_task), 'b._ChildTask()')
def test_with_scope(self):
luigi.namespace('wohoo', scope='task_test')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='task_test')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'wohoo')
def test_with_scope_not_matching(self):
luigi.namespace('wohoo', scope='incorrect_namespace')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'bleh')
class AutoNamespaceTest(LuigiTestCase):
this_module = 'task_test'
def test_auto_namespace_global(self):
luigi.auto_namespace()
class MyTask(luigi.Task):
pass
luigi.namespace()
self.assertEqual(MyTask.get_task_namespace(), self.this_module)
def test_auto_namespace_scope(self):
luigi.auto_namespace(scope='task_test')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='task_test')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), self.this_module)
def test_auto_namespace_not_matching(self):
luigi.auto_namespace(scope='incorrect_namespace')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'bleh')
def test_auto_namespace_not_matching_2(self):
luigi.auto_namespace(scope='incorrect_namespace')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
self.assertEqual(MyTask.get_task_namespace(), '')
| apache-2.0 | -3,083,763,735,565,128,700 | 35.483627 | 125 | 0.640569 | false |
storm-computers/odoo | addons/hw_escpos/escpos/escpos.py | 48 | 31717 | # -*- coding: utf-8 -*-
import time
import copy
import io
import base64
import math
import md5
import re
import traceback
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
from PIL import Image
try:
import jcconv
except ImportError:
jcconv = None
try:
import qrcode
except ImportError:
qrcode = None
from constants import *
from exceptions import *
def utfstr(stuff):
""" converts stuff to string and does without failing if stuff is a utf8 string """
if isinstance(stuff,basestring):
return stuff
else:
return str(stuff)
class StyleStack:
"""
The stylestack is used by the xml receipt serializer to compute the active styles along the xml
    document. Styles are just xml attributes; there is no css mechanism. But the styles applied
    by the attributes are inherited by deeper nodes.
"""
def __init__(self):
self.stack = []
self.defaults = { # default style values
'align': 'left',
'underline': 'off',
'bold': 'off',
'size': 'normal',
'font' : 'a',
'width': 48,
'indent': 0,
'tabwidth': 2,
'bullet': ' - ',
'line-ratio':0.5,
'color': 'black',
'value-decimals': 2,
'value-symbol': '',
'value-symbol-position': 'after',
'value-autoint': 'off',
'value-decimals-separator': '.',
'value-thousands-separator': ',',
'value-width': 0,
}
        self.types = { # attribute types, default is string and can be omitted
'width': 'int',
'indent': 'int',
'tabwidth': 'int',
'line-ratio': 'float',
'value-decimals': 'int',
'value-width': 'int',
}
self.cmds = {
# translation from styles to escpos commands
            # some styles do not correspond to an escpos command and are
            # used by the serializer instead
'align': {
'left': TXT_ALIGN_LT,
'right': TXT_ALIGN_RT,
'center': TXT_ALIGN_CT,
'_order': 1,
},
'underline': {
'off': TXT_UNDERL_OFF,
'on': TXT_UNDERL_ON,
'double': TXT_UNDERL2_ON,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'bold': {
'off': TXT_BOLD_OFF,
'on': TXT_BOLD_ON,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'font': {
'a': TXT_FONT_A,
'b': TXT_FONT_B,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'size': {
'normal': TXT_NORMAL,
'double-height': TXT_2HEIGHT,
'double-width': TXT_2WIDTH,
'double': TXT_DOUBLE,
'_order': 1,
},
'color': {
'black': TXT_COLOR_BLACK,
'red': TXT_COLOR_RED,
'_order': 1,
},
}
self.push(self.defaults)
def get(self,style):
""" what's the value of a style at the current stack level"""
level = len(self.stack) -1
while level >= 0:
if style in self.stack[level]:
return self.stack[level][style]
else:
level = level - 1
return None
def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val)
def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style)
def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr])
def pop(self):
""" pop a style stack level """
if len(self.stack) > 1 :
self.stack = self.stack[:-1]
def to_escpos(self):
""" converts the current style to an escpos command string """
cmd = ''
ordered_cmds = self.cmds.keys()
ordered_cmds.sort(lambda x,y: cmp(self.cmds[x]['_order'], self.cmds[y]['_order']))
for style in ordered_cmds:
cmd += self.cmds[style][self.get(style)]
return cmd
class XmlSerializer:
"""
Converts the xml inline / block tree structure to a string,
keeping track of newlines and spacings.
    The string is output as soon as possible to the provided escpos driver.
"""
def __init__(self,escpos):
self.escpos = escpos
self.stack = ['block']
self.dirty = False
def start_inline(self,stylestack=None):
""" starts an inline entity with an optional style definition """
self.stack.append('inline')
if self.dirty:
self.escpos._raw(' ')
if stylestack:
self.style(stylestack)
def start_block(self,stylestack=None):
""" starts a block entity with an optional style definition """
if self.dirty:
self.escpos._raw('\n')
self.dirty = False
self.stack.append('block')
if stylestack:
self.style(stylestack)
def end_entity(self):
""" ends the entity definition. (but does not cancel the active style!) """
if self.stack[-1] == 'block' and self.dirty:
self.escpos._raw('\n')
self.dirty = False
if len(self.stack) > 1:
self.stack = self.stack[:-1]
def pre(self,text):
""" puts a string of text in the entity keeping the whitespace intact """
if text:
self.escpos.text(text)
self.dirty = True
def text(self,text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self.dirty = True
self.escpos.text(text)
def linebreak(self):
""" inserts a linebreak in the entity """
self.dirty = False
self.escpos._raw('\n')
def style(self,stylestack):
""" apply a style to the entity (only applies to content added after the definition) """
self.raw(stylestack.to_escpos())
def raw(self,raw):
""" puts raw text or escpos command in the entity without affecting the state of the serializer """
self.escpos._raw(raw)
class XmlLineSerializer:
"""
    This is used to convert an xml tree into a single line, with a left and a right part.
    The content is not output to escpos directly, and is intended to be fed back to the
XmlSerializer as the content of a block entity.
"""
def __init__(self, indent=0, tabwidth=2, width=48, ratio=0.5):
self.tabwidth = tabwidth
self.indent = indent
self.width = max(0, width - int(tabwidth*indent))
self.lwidth = int(self.width*ratio)
self.rwidth = max(0, self.width - self.lwidth)
self.clwidth = 0
self.crwidth = 0
self.lbuffer = ''
self.rbuffer = ''
self.left = True
def _txt(self,txt):
if self.left:
if self.clwidth < self.lwidth:
txt = txt[:max(0, self.lwidth - self.clwidth)]
self.lbuffer += txt
self.clwidth += len(txt)
else:
if self.crwidth < self.rwidth:
txt = txt[:max(0, self.rwidth - self.crwidth)]
self.rbuffer += txt
self.crwidth += len(txt)
def start_inline(self,stylestack=None):
if (self.left and self.clwidth) or (not self.left and self.crwidth):
self._txt(' ')
def start_block(self,stylestack=None):
self.start_inline(stylestack)
def end_entity(self):
pass
def pre(self,text):
if text:
self._txt(text)
def text(self,text):
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self._txt(text)
def linebreak(self):
pass
def style(self,stylestack):
pass
def raw(self,raw):
pass
def start_right(self):
self.left = False
def get_line(self):
return ' ' * self.indent * self.tabwidth + self.lbuffer + ' ' * (self.width - self.clwidth - self.crwidth) + self.rbuffer
class Escpos:
""" ESC/POS Printer object """
device = None
encoding = None
img_cache = {}
def _check_image_size(self, size):
""" Check and fix the size of the image to 32 bits """
if size % 32 == 0:
return (0, 0)
else:
image_border = 32 - (size % 32)
if (image_border % 2) == 0:
return (image_border / 2, image_border / 2)
else:
return (image_border / 2, (image_border / 2) + 1)
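        # Worked example (illustrative): size=100 -> 100 % 32 == 4, so a
        # 28 pixel border is needed, split evenly as (14, 14).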
def _print_image(self, line, size):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
self._raw(S_RASTER_N)
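        # Raster header bytes (assuming ESC/POS "GS v 0" raster mode): xL xH yL yH,
        # where xL is the number of bytes per row (size[0] counts pixels over the
        # whole image, so size[0]/size[1] is the row width) and yL is the row count.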
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
self._raw(buffer.decode('hex'))
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
self._raw(buffer.decode("hex"))
buffer = ""
cont = 0
def _raw_print_image(self, line, size, output=None ):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
raw = ""
def __raw(string):
if output:
output(string)
else:
self._raw(string)
raw += S_RASTER_N
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
raw += buffer.decode('hex')
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
raw += buffer.decode("hex")
buffer = ""
cont = 0
return raw
def _convert_image(self, im):
""" Parse image and prepare it to a printable format """
pixels = []
pix_line = ""
im_left = ""
im_right = ""
switch = 0
img_size = [ 0, 0 ]
if im.size[0] > 512:
print "WARNING: Image is wider than 512 and could be truncated at print time "
if im.size[1] > 255:
raise ImageSizeError()
im_border = self._check_image_size(im.size[0])
for i in range(im_border[0]):
im_left += "0"
for i in range(im_border[1]):
im_right += "0"
for y in range(im.size[1]):
img_size[1] += 1
pix_line += im_left
img_size[0] += im_border[0]
for x in range(im.size[0]):
img_size[0] += 1
RGB = im.getpixel((x, y))
im_color = (RGB[0] + RGB[1] + RGB[2])
im_pattern = "1X0"
pattern_len = len(im_pattern)
                switch = (switch - 1) * (-1)  # toggle 0/1 for the 'X' dither slot
for x in range(pattern_len):
if im_color <= (255 * 3 / pattern_len * (x+1)):
if im_pattern[x] == "X":
pix_line += "%d" % switch
else:
pix_line += im_pattern[x]
break
elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
pix_line += im_pattern[-1]
break
pix_line += im_right
img_size[0] += im_border[1]
return (pix_line, img_size)
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
        # Convert the RGB image into a printable format
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size)
def print_base64_image(self,img):
print 'print_b64_img'
id = md5.new(img).digest()
if id not in self.img_cache:
print 'not in cache'
img = img[img.find(',')+1:]
            f = io.BytesIO()
f.write(base64.decodestring(img))
f.seek(0)
img_rgba = Image.open(f)
img = Image.new('RGB', img_rgba.size, (255,255,255))
channels = img_rgba.split()
if len(channels) > 3:
# use alpha channel as mask
img.paste(img_rgba, mask=channels[3])
else:
img.paste(img_rgba)
print 'convert image'
pix_line, img_size = self._convert_image(img)
print 'print image'
buffer = self._raw_print_image(pix_line, img_size)
self.img_cache[id] = buffer
print 'raw image'
self._raw(self.img_cache[id])
def qr(self,text):
""" Print QR Code for the provided string """
qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
qr_code.add_data(text)
qr_code.make(fit=True)
qr_img = qr_code.make_image()
im = qr_img._img.convert("RGB")
        # Convert the RGB image into a printable format
self._convert_image(im)
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
""" Print Barcode """
# Align Bar Code()
self._raw(TXT_ALIGN_CT)
# Height
        if height >= 2 and height <= 6:
self._raw(BARCODE_HEIGHT)
else:
raise BarcodeSizeError()
# Width
        if width >= 1 and width <= 255:
self._raw(BARCODE_WIDTH)
else:
raise BarcodeSizeError()
# Font
if font.upper() == "B":
self._raw(BARCODE_FONT_B)
else: # DEFAULT FONT: A
self._raw(BARCODE_FONT_A)
# Position
if pos.upper() == "OFF":
self._raw(BARCODE_TXT_OFF)
elif pos.upper() == "BOTH":
self._raw(BARCODE_TXT_BTH)
elif pos.upper() == "ABOVE":
self._raw(BARCODE_TXT_ABV)
else: # DEFAULT POSITION: BELOW
self._raw(BARCODE_TXT_BLW)
# Type
if bc.upper() == "UPC-A":
self._raw(BARCODE_UPC_A)
elif bc.upper() == "UPC-E":
self._raw(BARCODE_UPC_E)
elif bc.upper() == "EAN13":
self._raw(BARCODE_EAN13)
elif bc.upper() == "EAN8":
self._raw(BARCODE_EAN8)
elif bc.upper() == "CODE39":
self._raw(BARCODE_CODE39)
elif bc.upper() == "ITF":
self._raw(BARCODE_ITF)
elif bc.upper() == "NW7":
self._raw(BARCODE_NW7)
else:
raise BarcodeTypeError()
# Print Code
if code:
self._raw(code)
else:
            raise BarcodeCodeError()
def receipt(self,xml):
"""
Prints an xml based receipt definition
"""
def strclean(string):
if not string:
string = ''
string = string.strip()
string = re.sub('\s+',' ',string)
return string
def format_value(value, decimals=3, width=0, decimals_separator='.', thousands_separator=',', autoint=False, symbol='', position='after'):
decimals = max(0,int(decimals))
width = max(0,int(width))
value = float(value)
if autoint and math.floor(value) == value:
decimals = 0
if width == 0:
width = ''
if thousands_separator:
formatstr = "{:"+str(width)+",."+str(decimals)+"f}"
else:
formatstr = "{:"+str(width)+"."+str(decimals)+"f}"
ret = formatstr.format(value)
ret = ret.replace(',','COMMA')
ret = ret.replace('.','DOT')
ret = ret.replace('COMMA',thousands_separator)
ret = ret.replace('DOT',decimals_separator)
if symbol:
if position == 'after':
ret = ret + symbol
else:
ret = symbol + ret
return ret
def print_elem(stylestack, serializer, elem, indent=0):
elem_styles = {
'h1': {'bold': 'on', 'size':'double'},
'h2': {'size':'double'},
'h3': {'bold': 'on', 'size':'double-height'},
'h4': {'size': 'double-height'},
'h5': {'bold': 'on'},
'em': {'font': 'b'},
'b': {'bold': 'on'},
}
stylestack.push()
if elem.tag in elem_styles:
stylestack.set(elem_styles[elem.tag])
stylestack.set(elem.attrib)
if elem.tag in ('p','div','section','article','receipt','header','footer','li','h1','h2','h3','h4','h5'):
serializer.start_block(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag in ('span','em','b','left','right'):
serializer.start_inline(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag == 'value':
serializer.start_inline(stylestack)
serializer.pre(format_value(
elem.text,
decimals=stylestack.get('value-decimals'),
width=stylestack.get('value-width'),
decimals_separator=stylestack.get('value-decimals-separator'),
thousands_separator=stylestack.get('value-thousands-separator'),
autoint=(stylestack.get('value-autoint') == 'on'),
symbol=stylestack.get('value-symbol'),
position=stylestack.get('value-symbol-position')
))
serializer.end_entity()
elif elem.tag == 'line':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
lineserializer = XmlLineSerializer(stylestack.get('indent')+indent,stylestack.get('tabwidth'),width,stylestack.get('line-ratio'))
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'left':
print_elem(stylestack,lineserializer,child,indent=indent)
elif child.tag == 'right':
lineserializer.start_right()
print_elem(stylestack,lineserializer,child,indent=indent)
serializer.pre(lineserializer.get_line())
serializer.end_entity()
elif elem.tag == 'ul':
serializer.start_block(stylestack)
bullet = stylestack.get('bullet')
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + bullet)
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'ol':
cwidth = len(str(len(elem))) + 2
i = 1
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + ' ' + (str(i)+')').ljust(cwidth))
i = i + 1
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'pre':
serializer.start_block(stylestack)
serializer.pre(elem.text)
serializer.end_entity()
elif elem.tag == 'hr':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
serializer.start_block(stylestack)
serializer.text('-'*width)
serializer.end_entity()
elif elem.tag == 'br':
serializer.linebreak()
elif elem.tag == 'img':
if 'src' in elem.attrib and 'data:' in elem.attrib['src']:
self.print_base64_image(elem.attrib['src'])
elif elem.tag == 'barcode' and 'encoding' in elem.attrib:
serializer.start_block(stylestack)
self.barcode(strclean(elem.text),elem.attrib['encoding'])
serializer.end_entity()
elif elem.tag == 'cut':
self.cut()
elif elem.tag == 'partialcut':
self.cut(mode='part')
elif elem.tag == 'cashdraw':
self.cashdraw(2)
self.cashdraw(5)
stylestack.pop()
try:
stylestack = StyleStack()
serializer = XmlSerializer(self)
root = ET.fromstring(xml.encode('utf-8'))
self._raw(stylestack.to_escpos())
print_elem(stylestack,serializer,root)
if 'open-cashdrawer' in root.attrib and root.attrib['open-cashdrawer'] == 'true':
self.cashdraw(2)
self.cashdraw(5)
if not 'cut' in root.attrib or root.attrib['cut'] == 'true' :
self.cut()
except Exception as e:
errmsg = str(e)+'\n'+'-'*48+'\n'+traceback.format_exc() + '-'*48+'\n'
self.text(errmsg)
self.cut()
raise e
def text(self,txt):
""" Print Utf8 encoded alpha-numeric text """
if not txt:
return
try:
txt = txt.decode('utf-8')
except:
try:
txt = txt.decode('utf-16')
except:
pass
self.extra_chars = 0
def encode_char(char):
"""
Encodes a single utf-8 character into a sequence of
esc-pos code page change instructions and character declarations
"""
char_utf8 = char.encode('utf-8')
encoded = ''
encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character
encodings = {
# TODO use ordering to prevent useless switches
                # TODO Support other encodings not natively supported by python (Thai, Kazakh, Kanji)
'cp437': TXT_ENC_PC437,
'cp850': TXT_ENC_PC850,
'cp852': TXT_ENC_PC852,
'cp857': TXT_ENC_PC857,
'cp858': TXT_ENC_PC858,
'cp860': TXT_ENC_PC860,
'cp863': TXT_ENC_PC863,
'cp865': TXT_ENC_PC865,
'cp866': TXT_ENC_PC866,
'cp862': TXT_ENC_PC862,
'cp720': TXT_ENC_PC720,
'cp936': TXT_ENC_PC936,
'iso8859_2': TXT_ENC_8859_2,
'iso8859_7': TXT_ENC_8859_7,
'iso8859_9': TXT_ENC_8859_9,
'cp1254' : TXT_ENC_WPC1254,
'cp1255' : TXT_ENC_WPC1255,
'cp1256' : TXT_ENC_WPC1256,
'cp1257' : TXT_ENC_WPC1257,
'cp1258' : TXT_ENC_WPC1258,
'katakana' : TXT_ENC_KATAKANA,
}
remaining = copy.copy(encodings)
if not encoding :
encoding = 'cp437'
            while True: # try each encoding until one succeeds
try:
if encoding == 'katakana': # Japanese characters
if jcconv:
# try to convert japanese text to a half-katakanas
kata = jcconv.kata2half(jcconv.hira2kata(char_utf8))
if kata != char_utf8:
self.extra_chars += len(kata.decode('utf-8')) - 1
# the conversion may result in multiple characters
return encode_str(kata.decode('utf-8'))
else:
kata = char_utf8
if kata in TXT_ENC_KATAKANA_MAP:
encoded = TXT_ENC_KATAKANA_MAP[kata]
break
else:
raise ValueError()
else:
encoded = char.encode(encoding)
break
except ValueError: #the encoding failed, select another one and retry
if encoding in remaining:
del remaining[encoding]
if len(remaining) >= 1:
encoding = remaining.items()[0][0]
else:
encoding = 'cp437'
encoded = '\xb1' # could not encode, output error character
                        break
if encoding != self.encoding:
# if the encoding changed, remember it and prefix the character with
# the esc-pos encoding change sequence
self.encoding = encoding
encoded = encodings[encoding] + encoded
return encoded
def encode_str(txt):
buffer = ''
for c in txt:
buffer += encode_char(c)
return buffer
txt = encode_str(txt)
# if the utf-8 -> codepage conversion inserted extra characters,
# remove double spaces to try to restore the original string length
# and prevent printing alignment issues
while self.extra_chars > 0:
dspace = txt.find(' ')
if dspace > 0:
txt = txt[:dspace] + txt[dspace+1:]
self.extra_chars -= 1
else:
break
self._raw(txt)
def set(self, align='left', font='a', type='normal', width=1, height=1):
""" Set text properties """
# Align
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
# Font
if font.upper() == "B":
self._raw(TXT_FONT_B)
else: # DEFAULT FONT: A
self._raw(TXT_FONT_A)
# Type
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
        elif type.upper() == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
# Width
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else: # DEFAULT SIZE: NORMAL
self._raw(TXT_NORMAL)
def cut(self, mode=''):
""" Cut paper """
# Fix the size between last line and cut
# TODO: handle this with a line feed
self._raw("\n\n\n\n\n\n")
if mode.upper() == "PART":
self._raw(PAPER_PART_CUT)
else: # DEFAULT MODE: FULL CUT
self._raw(PAPER_FULL_CUT)
def cashdraw(self, pin):
""" Send pulse to kick the cash drawer """
if pin == 2:
self._raw(CD_KICK_2)
elif pin == 5:
self._raw(CD_KICK_5)
else:
raise CashDrawerError()
def hw(self, hw):
""" Hardware operations """
if hw.upper() == "INIT":
self._raw(HW_INIT)
elif hw.upper() == "SELECT":
self._raw(HW_SELECT)
elif hw.upper() == "RESET":
self._raw(HW_RESET)
else: # DEFAULT: DOES NOTHING
pass
def control(self, ctl):
""" Feed control sequences """
if ctl.upper() == "LF":
self._raw(CTL_LF)
elif ctl.upper() == "FF":
self._raw(CTL_FF)
elif ctl.upper() == "CR":
self._raw(CTL_CR)
elif ctl.upper() == "HT":
self._raw(CTL_HT)
elif ctl.upper() == "VT":
self._raw(CTL_VT)
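# --- Illustrative sketch (not part of the original driver) ---
# Escpos implements only the formatting logic; concrete subclasses (USB,
# serial, network) supply _raw(). The dummy subclass below is hypothetical
# and simply captures the byte stream, which makes it easy to inspect what
# set()/text()/cut() would send to a printer. It assumes the ESC/POS
# constants (TXT_*, PAPER_*, ...) defined earlier in this module.
class _DemoPrinter(Escpos):
    def __init__(self):
        self.output = []
    def _raw(self, msg):
        self.output.append(msg)
def _demo_escpos():
    p = _DemoPrinter()
    # padding needed to round an image width up to a multiple of 32 pixels
    assert p._check_image_size(64) == (0, 0)
    assert p._check_image_size(100) == (14, 14)  # 28 extra columns, split evenly
    p.set(align='center', type='B', width=2, height=2)
    p.text(u'Hello')  # encoded via the code-page fallback in encode_char()
    p.cut()
    # receipt() consumes a small XML dialect, e.g.:
    # p.receipt(u"<receipt><h1>Demo Shop</h1>"
    #           u"<line><left>Total</left><right><value>12.5</value></right></line>"
    #           u"</receipt>")
    return ''.join(p.output)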
| agpl-3.0 | -7,961,076,660,371,493,000 | 33.512514 | 146 | 0.473468 | false |
uwevil/namebench | nb_third_party/dns/opcode.py | 248 | 2615 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Opcodes."""
import dns.exception
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5
_by_text = {
'QUERY' : QUERY,
'IQUERY' : IQUERY,
'STATUS' : STATUS,
'NOTIFY' : NOTIFY,
'UPDATE' : UPDATE
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
class UnknownOpcode(dns.exception.DNSException):
"""Raised if an opcode is unknown."""
pass
def from_text(text):
"""Convert text into an opcode.
@param text: the textual opcode
@type text: string
@raises UnknownOpcode: the opcode is unknown
@rtype: int
"""
if text.isdigit():
value = int(text)
if value >= 0 and value <= 15:
return value
value = _by_text.get(text.upper())
if value is None:
raise UnknownOpcode
return value
def from_flags(flags):
"""Extract an opcode from DNS message flags.
@param flags: int
@rtype: int
"""
return (flags & 0x7800) >> 11
def to_flags(value):
"""Convert an opcode to a value suitable for ORing into DNS message
flags.
@rtype: int
"""
return (value << 11) & 0x7800
def to_text(value):
"""Convert an opcode to text.
    @param value: the opcode
@type value: int
@raises UnknownOpcode: the opcode is unknown
@rtype: string
"""
text = _by_value.get(value)
if text is None:
text = str(value)
return text
def is_update(flags):
"""True if the opcode in flags is UPDATE.
@param flags: DNS flags
@type flags: int
@rtype: bool
"""
if (from_flags(flags) == UPDATE):
return True
return False
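# An illustrative sketch (not part of the original module): opcodes survive
# a round trip through the DNS flags field, and the text conversions are
# symmetric.
def _demo_opcode_roundtrip():
    assert from_flags(to_flags(QUERY)) == QUERY
    assert to_text(from_text('NOTIFY')) == 'NOTIFY'
    assert is_update(to_flags(UPDATE))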
| apache-2.0 | -595,610,952,276,217,000 | 24.144231 | 72 | 0.659656 | false |
EdDev/vdsm | lib/vdsm/network/errors.py | 1 | 2525 | #
# Copyright 2011-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import six
ERR_OK = 0
ERR_BAD_PARAMS = 21
ERR_BAD_ADDR = 22
ERR_BAD_NIC = 23
ERR_USED_NIC = 24
ERR_BAD_BONDING = 25
ERR_BAD_VLAN = 26
ERR_BAD_BRIDGE = 27
ERR_USED_BRIDGE = 28
ERR_FAILED_IFUP = 29
ERR_FAILED_IFDOWN = 30
ERR_USED_BOND = 31
ERR_LOST_CONNECTION = 10 # noConPeer
ERR_OVS_CONNECTION = 32
class ConfigNetworkError(Exception):
def __init__(self, errCode, message):
self.errCode = errCode
self.message = message
super(ConfigNetworkError, self).__init__(errCode, message)
class OvsDBConnectionError(ConfigNetworkError):
def __init__(self, *args):
message = _get_message(args)
super(OvsDBConnectionError, self).__init__(errCode=ERR_OVS_CONNECTION,
message=message)
@staticmethod
def is_ovs_db_conn_error(err_msg):
return 'database connection failed' in err_msg[0]
class RollbackIncomplete(Exception):
"""
This exception is raised in order to signal API.Global that a call to
setupNetworks has failed and there are leftovers that need to be cleaned
up.
Note that it is never raised by the default ifcfg configurator.
"""
pass
def _get_message(args):
"""
Due to multiprocessing limitation in the way it processes an exception
serialization and deserialization, a derived exception needs to accept
all super classes arguments as input, even if it ignores them.
Given the list of arguments and assuming the message is a string type,
this helper function fetches the message argument.
"""
for arg in args:
if isinstance(arg, six.string_types):
return arg
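# --- Illustrative sketch (not part of the original module) ---
# A hypothetical example of the behaviour _get_message enables: when an
# OvsDBConnectionError is rebuilt across a multiprocessing boundary as
# cls(*args), the subclass recovers its message from whatever positional
# arguments were serialized.
def _demo_errors():
    err = OvsDBConnectionError('database connection failed')
    assert err.errCode == ERR_OVS_CONNECTION
    assert err.message == 'database connection failed'
    # the detector applied to raw OVS error output
    assert OvsDBConnectionError.is_ovs_db_conn_error(
        ['database connection failed: timeout'])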
| gpl-2.0 | -3,600,168,580,581,480,400 | 30.5625 | 78 | 0.70297 | false |
seize-the-dave/XlsxWriter | xlsxwriter/test/comparison/test_rich_string07.py | 8 | 1570 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'rich_string07.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
italic = workbook.add_format({'italic': 1})
worksheet.write('A1', 'Foo', bold)
worksheet.write('A2', 'Bar', italic)
worksheet.write_rich_string('A3', 'a', bold, 'bc', 'defg')
worksheet.write_rich_string('B4', 'abc', italic, 'de', 'fg')
worksheet.write_rich_string('C5', 'a', bold, 'bc', 'defg')
worksheet.write_rich_string('D6', 'abc', italic, 'de', 'fg')
worksheet.write_rich_string('E7', 'a', bold, 'bcdef', 'g')
worksheet.write_rich_string('F8', italic, 'abcd', 'efg')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | -3,308,118,224,215,188,000 | 29.784314 | 79 | 0.577707 | false |
catchmrbharath/servo | tests/wpt/harness/wptrunner/update/update.py | 118 | 5053 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
from metadata import MetadataUpdateRunner
from sync import SyncFromUpstreamRunner
from tree import GitTree, HgTree, NoVCSTree
from .. import environment as env
from base import Step, StepRunner, exit_clean, exit_unclean
from state import State
def setup_paths(sync_path):
sys.path.insert(0, os.path.abspath(sync_path))
from tools import localpaths
class LoadConfig(Step):
"""Step for loading configuration from the ini file and kwargs."""
provides = ["sync", "paths", "metadata_path", "tests_path"]
def create(self, state):
state.sync = {"remote_url": state.kwargs["remote_url"],
"branch": state.kwargs["branch"],
"path": state.kwargs["sync_path"]}
state.paths = state.kwargs["test_paths"]
state.tests_path = state.paths["/"]["tests_path"]
state.metadata_path = state.paths["/"]["metadata_path"]
assert state.tests_path.startswith("/")
class LoadTrees(Step):
"""Step for creating a Tree for the local copy and a GitTree for the
upstream sync."""
provides = ["local_tree", "sync_tree"]
def create(self, state):
if os.path.exists(state.sync["path"]):
sync_tree = GitTree(root=state.sync["path"])
else:
sync_tree = None
if GitTree.is_type():
local_tree = GitTree()
elif HgTree.is_type():
local_tree = HgTree()
else:
local_tree = NoVCSTree()
state.update({"local_tree": local_tree,
"sync_tree": sync_tree})
class SyncFromUpstream(Step):
"""Step that synchronises a local copy of the code with upstream."""
def create(self, state):
if not state.kwargs["sync"]:
return
if not state.sync_tree:
os.mkdir(state.sync["path"])
state.sync_tree = GitTree(root=state.sync["path"])
kwargs = state.kwargs
with state.push(["sync", "paths", "metadata_path", "tests_path", "local_tree",
"sync_tree"]):
state.target_rev = kwargs["rev"]
state.no_patch = kwargs["no_patch"]
state.suite_name = kwargs["suite_name"]
runner = SyncFromUpstreamRunner(self.logger, state)
runner.run()
class UpdateMetadata(Step):
"""Update the expectation metadata from a set of run logs"""
def create(self, state):
if not state.kwargs["run_log"]:
return
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "paths", "serve_root"]):
state.run_log = kwargs["run_log"]
state.ignore_existing = kwargs["ignore_existing"]
state.no_patch = kwargs["no_patch"]
state.suite_name = kwargs["suite_name"]
state.product = kwargs["product"]
state.config = kwargs["config"]
runner = MetadataUpdateRunner(self.logger, state)
runner.run()
class UpdateRunner(StepRunner):
"""Runner for doing an overall update."""
steps = [LoadConfig,
LoadTrees,
SyncFromUpstream,
UpdateMetadata]
class WPTUpdate(object):
def __init__(self, logger, runner_cls=UpdateRunner, **kwargs):
"""Object that controls the running of a whole wptupdate.
:param runner_cls: Runner subclass holding the overall list of
steps to run.
:param kwargs: Command line arguments
"""
self.runner_cls = runner_cls
self.serve_root = kwargs["test_paths"]["/"]["tests_path"]
if not kwargs["sync"]:
setup_paths(self.serve_root)
else:
if os.path.exists(kwargs["sync_path"]):
# If the sync path doesn't exist we defer this until it does
setup_paths(kwargs["sync_path"])
self.state = State(logger)
self.kwargs = kwargs
self.logger = logger
def run(self, **kwargs):
if self.kwargs["abort"]:
self.abort()
return exit_clean
if not self.kwargs["continue"] and not self.state.is_empty():
self.logger.error("Found existing state. Run with --continue to resume or --abort to clear state")
return exit_unclean
if self.kwargs["continue"]:
if self.state.is_empty():
self.logger.error("No sync in progress?")
return exit_clean
self.kwargs = self.state.kwargs
else:
self.state.kwargs = self.kwargs
self.state.serve_root = self.serve_root
update_runner = self.runner_cls(self.logger, self.state)
rv = update_runner.run()
if rv in (exit_clean, None):
self.state.clear()
return rv
def abort(self):
self.state.clear()
| mpl-2.0 | 6,873,632,084,499,167,000 | 30.981013 | 110 | 0.585791 | false |
odooindia/odoo | openerp/tools/func.py | 49 | 3346 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010, 2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__all__ = ['synchronized', 'lazy_property']
from functools import wraps
from inspect import getsourcefile
class lazy_property(object):
""" Decorator for a lazy property of an object, i.e., an object attribute
that is determined by the result of a method call evaluated once. To
reevaluate the property, simply delete the attribute on the object, and
get it again.
"""
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
value = self.fget(obj)
setattr(obj, self.fget.__name__, value)
return value
@staticmethod
def reset_all(obj):
""" Reset all lazy properties on the instance `obj`. """
cls = type(obj)
obj_dict = vars(obj)
for name in obj_dict.keys():
if isinstance(getattr(cls, name, None), lazy_property):
obj_dict.pop(name)
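# --- Illustrative sketch (not part of the original module) ---
# A hypothetical class showing the lazy_property contract: the getter runs
# once, its result replaces the descriptor on the instance, and deleting
# the attribute (or lazy_property.reset_all) forces re-evaluation.
class _DemoReport(object):
    def __init__(self):
        self.computed = 0
    @lazy_property
    def total(self):
        self.computed += 1
        return 42
def _demo_lazy_property():
    r = _DemoReport()
    assert (r.total, r.total) == (42, 42)  # second access hits the cache
    assert r.computed == 1
    del r.total                            # invalidate
    assert r.total == 42 and r.computed == 2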
def synchronized(lock_attr='_lock'):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
lock = getattr(self, lock_attr)
try:
lock.acquire()
return func(self, *args, **kwargs)
finally:
lock.release()
return wrapper
return decorator
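# --- Illustrative sketch (not part of the original module) ---
# A hypothetical use of @synchronized(): every call to the decorated
# method is wrapped in an acquire/release of the lock stored under the
# configured attribute name (here the default '_lock').
import threading
class _DemoCounter(object):
    def __init__(self):
        self._lock = threading.Lock()
        self.value = 0
    @synchronized()
    def increment(self):
        self.value += 1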
def frame_codeinfo(fframe, back=0):
""" Return a (filename, line) pair for a previous frame .
@return (filename, lineno) where lineno is either int or string==''
"""
try:
if not fframe:
return "<unknown>", ''
for i in range(back):
fframe = fframe.f_back
try:
fname = getsourcefile(fframe)
except TypeError:
fname = '<builtin>'
lineno = fframe.f_lineno or ''
return fname, lineno
except Exception:
return "<unknown>", ''
def compose(a, b):
""" Composes the callables ``a`` and ``b``. ``compose(a, b)(*args)`` is
equivalent to ``a(b(*args))``.
Can be used as a decorator by partially applying ``a``::
@partial(compose, a)
def b():
...
"""
@wraps(b)
def wrapper(*args, **kwargs):
return a(b(*args, **kwargs))
return wrapper
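# --- Illustrative sketch (not part of the original module) ---
# compose(a, b)(*args) == a(b(*args)); partially applying `a` turns it
# into a decorator, as the docstring above suggests.
from functools import partial
@partial(compose, str.upper)
def _demo_greet(name):
    return 'hello %s' % name
# _demo_greet('world') -> 'HELLO WORLD'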
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -837,439,853,119,785,200 | 31.803922 | 79 | 0.570831 | false |
FlorentChamault/My_sickbeard | lib/requests/packages/oauthlib/oauth1/rfc5849/utils.py | 74 | 3015 | # -*- coding: utf-8 -*-
"""
oauthlib.utils
~~~~~~~~~~~~~~
This module contains utility methods used by various parts of the OAuth
spec.
"""
import string
import urllib2
from oauthlib.common import quote, unquote
UNICODE_ASCII_CHARACTER_SET = (string.ascii_letters.decode('ascii') +
string.digits.decode('ascii'))
def filter_params(target):
"""Decorator which filters params to remove non-oauth_* parameters
Assumes the decorated method takes a params dict or list of tuples as its
first argument.
"""
def wrapper(params, *args, **kwargs):
params = filter_oauth_params(params)
return target(params, *args, **kwargs)
wrapper.__doc__ = target.__doc__
return wrapper
def filter_oauth_params(params):
"""Removes all non oauth parameters from a dict or a list of params."""
is_oauth = lambda kv: kv[0].startswith(u"oauth_")
if isinstance(params, dict):
return filter(is_oauth, params.items())
else:
return filter(is_oauth, params)
def escape(u):
"""Escape a unicode string in an OAuth-compatible fashion.
Per `section 3.6`_ of the spec.
.. _`section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
"""
if not isinstance(u, unicode):
raise ValueError('Only unicode objects are escapable.')
# Letters, digits, and the characters '_.-' are already treated as safe
# by urllib.quote(). We need to add '~' to fully support rfc5849.
return quote(u, safe='~')
def unescape(u):
if not isinstance(u, unicode):
raise ValueError('Only unicode objects are unescapable.')
return unquote(u)
def urlencode(query):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
Operates using an OAuth-safe escape() method, in contrast to urllib.urlencode.
"""
# Convert dictionaries to list of tuples
if isinstance(query, dict):
query = query.items()
return u"&".join([u'='.join([escape(k), escape(v)]) for k, v in query])
def parse_keqv_list(l):
"""A unicode-safe version of urllib2.parse_keqv_list"""
encoded_list = [u.encode('utf-8') for u in l]
encoded_parsed = urllib2.parse_keqv_list(encoded_list)
return dict((k.decode('utf-8'),
v.decode('utf-8')) for k, v in encoded_parsed.items())
def parse_http_list(u):
"""A unicode-safe version of urllib2.parse_http_list"""
encoded_str = u.encode('utf-8')
encoded_list = urllib2.parse_http_list(encoded_str)
return [s.decode('utf-8') for s in encoded_list]
def parse_authorization_header(authorization_header):
"""Parse an OAuth authorization header into a list of 2-tuples"""
auth_scheme = u'OAuth '
if authorization_header.startswith(auth_scheme):
authorization_header = authorization_header.replace(auth_scheme, u'', 1)
items = parse_http_list(authorization_header)
try:
return parse_keqv_list(items).items()
except ValueError:
raise ValueError('Malformed authorization header')
| gpl-3.0 | -1,383,448,766,530,432,500 | 29.454545 | 85 | 0.66733 | false |
2014c2g8/c2g8 | wsgi/static/Brython2.1.0-20140419-113919/Lib/collections/abc.py | 739 | 16026 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types may not be distinct
# and they may have their own implementation specific types that
# are not included on this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
return NotImplemented
class Iterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if any("__iter__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Iterator(Iterable):
__slots__ = ()
@abstractmethod
def __next__(self):
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if (any("__next__" in B.__dict__ for B in C.__mro__) and
any("__iter__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Sized(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if any("__len__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Container(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if any("__contains__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Callable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if any("__call__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
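# --- Illustrative sketch (not part of the stdlib module) ---
# The standard pattern for using the Set ABC: provide __contains__,
# __iter__ and __len__, and every comparison and algebra operator defined
# above comes for free. _from_iterable only needs overriding when the
# constructor does not accept a plain iterable.
class _ListBasedSet(Set):
    """Alternate set implementation favoring space over speed, which also
    allows unhashable elements."""
    def __init__(self, iterable):
        self.elements = lst = []
        for value in iterable:
            if value not in lst:
                lst.append(value)
    def __iter__(self):
        return iter(self.elements)
    def __contains__(self, value):
        return value in self.elements
    def __len__(self):
        return len(self.elements)
# _ListBasedSet('abcdef') & _ListBasedSet('defghi') -> a set of d, e, f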
class MutableSet(Set):
__slots__ = ()
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
__slots__ = ()
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
return KeysView(self)
def items(self):
return ItemsView(self)
def values(self):
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
Mapping.register(mappingproxy)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
__slots__ = ()
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
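# --- Illustrative sketch (not part of the stdlib module) ---
# A minimal MutableMapping: once __getitem__/__setitem__/__delitem__,
# __iter__ and __len__ exist, the mixin methods above supply get, pop,
# setdefault, update, the view objects and equality.
class _LowerCaseDict(MutableMapping):
    """A hypothetical dict-like store that normalizes keys to lower case."""
    def __init__(self):
        self._data = {}
    def __getitem__(self, key):
        return self._data[key.lower()]
    def __setitem__(self, key, value):
        self._data[key.lower()] = value
    def __delitem__(self, key):
        del self._data[key.lower()]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)
# d = _LowerCaseDict(); d['Foo'] = 1; d.get('FOO') -> 1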
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
__slots__ = ()
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
__slots__ = ()
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
__slots__ = ()
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
raise IndexError
def append(self, value):
self.insert(len(self), value)
def clear(self):
try:
while True:
self.pop()
except IndexError:
pass
def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
for v in values:
self.append(v)
def pop(self, index=-1):
v = self[index]
del self[index]
return v
def remove(self, value):
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
| gpl-2.0 | -4,785,695,333,880,706,000 | 23.281818 | 77 | 0.551916 | false |
simartin/servo | components/script/dom/bindings/codegen/parser/WebIDL.py | 4 | 303096 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" A WebIDL parser. """
from ply import lex, yacc
import re
import os
import traceback
import math
import string
from collections import defaultdict, OrderedDict
from itertools import chain
# Machinery
def parseInt(literal):
string = literal
sign = 0
base = 0
if string[0] == '-':
sign = -1
string = string[1:]
else:
sign = 1
if string[0] == '0' and len(string) > 1:
if string[1] == 'x' or string[1] == 'X':
base = 16
string = string[2:]
else:
base = 8
string = string[1:]
else:
base = 10
value = int(string, base)
return value * sign
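# An illustrative sketch (not part of the original parser): parseInt
# follows the WebIDL integer-literal rules, so a leading 0x/0X selects
# hex, a bare leading 0 selects octal, and a '-' prefix works with every
# base.
def _demo_parseInt():
    assert parseInt("42") == 42
    assert parseInt("0x1F") == 31
    assert parseInt("017") == 15
    assert parseInt("-0x10") == -16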
def enum(*names, **kw):
class Foo(object):
attrs = OrderedDict()
def __init__(self, names):
for v, k in enumerate(names):
self.attrs[k] = v
def __getattr__(self, attr):
if attr in self.attrs:
return self.attrs[attr]
raise AttributeError
def __setattr__(self, name, value): # this makes it read-only
raise NotImplementedError
if "base" not in kw:
return Foo(names)
return Foo(chain(list(kw["base"].attrs.keys()), names))
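# An illustrative sketch (not part of the original parser): enum() builds
# a read-only namespace whose attributes count up from zero, and base=
# lets one enum extend another's name set.
def _demo_enum():
    Colors = enum('red', 'green', 'blue')
    assert (Colors.red, Colors.blue) == (0, 2)
    MoreColors = enum('cyan', base=Colors)
    assert MoreColors.cyan == 3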
class WebIDLError(Exception):
def __init__(self, message, locations, warning=False):
self.message = message
self.locations = [str(loc) for loc in locations]
self.warning = warning
def __str__(self):
return "%s: %s%s%s" % (self.warning and 'warning' or 'error',
self.message,
", " if len(self.locations) != 0 else "",
"\n".join(self.locations))
class Location(object):
def __init__(self, lexer, lineno, lexpos, filename):
self._line = None
self._lineno = lineno
self._lexpos = lexpos
self._lexdata = lexer.lexdata
self._file = filename if filename else "<unknown>"
def __eq__(self, other):
return (self._lexpos == other._lexpos and
self._file == other._file)
def filename(self):
return self._file
def resolve(self):
if self._line:
return
startofline = self._lexdata.rfind('\n', 0, self._lexpos) + 1
endofline = self._lexdata.find('\n', self._lexpos, self._lexpos + 80)
if endofline != -1:
self._line = self._lexdata[startofline:endofline]
else:
self._line = self._lexdata[startofline:]
self._colno = self._lexpos - startofline
# Our line number seems to point to the start of self._lexdata
self._lineno += self._lexdata.count('\n', 0, startofline)
def get(self):
self.resolve()
return "%s line %s:%s" % (self._file, self._lineno, self._colno)
def _pointerline(self):
return " " * self._colno + "^"
def __str__(self):
self.resolve()
return "%s line %s:%s\n%s\n%s" % (self._file, self._lineno, self._colno,
self._line, self._pointerline())
class BuiltinLocation(object):
def __init__(self, text):
self.msg = text + "\n"
def __eq__(self, other):
return (isinstance(other, BuiltinLocation) and
self.msg == other.msg)
def __hash__(self):
return hash(self.msg)
def filename(self):
return '<builtin>'
def resolve(self):
pass
def get(self):
return self.msg
def __str__(self):
return self.get()
# Data Model
class IDLObject(object):
def __init__(self, location):
self.location = location
self.userData = dict()
def filename(self):
return self.location.filename()
def isInterface(self):
return False
def isNamespace(self):
return False
def isInterfaceMixin(self):
return False
def isEnum(self):
return False
def isCallback(self):
return False
def isType(self):
return False
def isDictionary(self):
return False
def isUnion(self):
return False
def isTypedef(self):
return False
def getUserData(self, key, default):
return self.userData.get(key, default)
def setUserData(self, key, value):
self.userData[key] = value
def addExtendedAttributes(self, attrs):
assert False # Override me!
def handleExtendedAttribute(self, attr):
assert False # Override me!
def _getDependentObjects(self):
assert False # Override me!
def getDeps(self, visited=None):
""" Return a set of files that this object depends on. If any of
these files are changed the parser needs to be rerun to regenerate
a new IDLObject.
The visited argument is a set of all the objects already visited.
We must test to see if we are in it, and if so, do nothing. This
prevents infinite recursion."""
# NB: We can't use visited=set() above because the default value is
# evaluated when the def statement is evaluated, not when the function
# is executed, so there would be one set for all invocations.
if visited is None:
visited = set()
if self in visited:
return set()
visited.add(self)
deps = set()
if self.filename() != "<builtin>":
deps.add(self.filename())
for d in self._getDependentObjects():
deps.update(d.getDeps(visited))
return deps
class IDLScope(IDLObject):
def __init__(self, location, parentScope, identifier):
IDLObject.__init__(self, location)
self.parentScope = parentScope
if identifier:
assert isinstance(identifier, IDLIdentifier)
self._name = identifier
else:
self._name = None
self._dict = {}
self.globalNames = set()
# A mapping from global name to the set of global interfaces
# that have that global name.
self.globalNameMapping = defaultdict(set)
def __str__(self):
return self.QName()
def QName(self):
# It's possible for us to be called before __init__ has been called, for
# the IDLObjectWithScope case. In that case, self._name won't be set yet.
if hasattr(self, "_name"):
name = self._name
else:
name = None
if name:
return name.QName() + "::"
return "::"
def ensureUnique(self, identifier, object):
"""
Ensure that there is at most one 'identifier' in scope ('self').
Note that object can be None. This occurs if we end up here for an
interface type we haven't seen yet.
"""
assert isinstance(identifier, IDLUnresolvedIdentifier)
assert not object or isinstance(object, IDLObjectWithIdentifier)
assert not object or object.identifier == identifier
if identifier.name in self._dict:
if not object:
return
# ensureUnique twice with the same object is not allowed
assert id(object) != id(self._dict[identifier.name])
replacement = self.resolveIdentifierConflict(self, identifier,
self._dict[identifier.name],
object)
self._dict[identifier.name] = replacement
return
assert object
self._dict[identifier.name] = object
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
if (isinstance(originalObject, IDLExternalInterface) and
isinstance(newObject, IDLExternalInterface) and
originalObject.identifier.name == newObject.identifier.name):
return originalObject
if (isinstance(originalObject, IDLExternalInterface) or
isinstance(newObject, IDLExternalInterface)):
raise WebIDLError(
"Name collision between "
"interface declarations for identifier '%s' at '%s' and '%s'"
% (identifier.name,
originalObject.location, newObject.location), [])
if (isinstance(originalObject, IDLDictionary) or
isinstance(newObject, IDLDictionary)):
raise WebIDLError(
"Name collision between dictionary declarations for "
"identifier '%s'.\n%s\n%s"
% (identifier.name,
originalObject.location, newObject.location), [])
# We do the merging of overloads here as opposed to in IDLInterface
# because we need to merge overloads of NamedConstructors and we need to
# detect conflicts in those across interfaces. See also the comment in
# IDLInterface.addExtendedAttributes for "NamedConstructor".
if (isinstance(originalObject, IDLMethod) and
isinstance(newObject, IDLMethod)):
return originalObject.addOverload(newObject)
# Default to throwing, derived classes can override.
conflictdesc = "\n\t%s at %s\n\t%s at %s" % (originalObject,
originalObject.location,
newObject,
newObject.location)
raise WebIDLError(
"Multiple unresolvable definitions of identifier '%s' in scope '%s'%s"
% (identifier.name, str(self), conflictdesc), [])
def _lookupIdentifier(self, identifier):
return self._dict[identifier.name]
def lookupIdentifier(self, identifier):
assert isinstance(identifier, IDLIdentifier)
assert identifier.scope == self
return self._lookupIdentifier(identifier)
def addIfaceGlobalNames(self, interfaceName, globalNames):
"""Record the global names (from |globalNames|) that can be used in
[Exposed] to expose things in a global named |interfaceName|"""
self.globalNames.update(globalNames)
for name in globalNames:
self.globalNameMapping[name].add(interfaceName)
class IDLIdentifier(IDLObject):
def __init__(self, location, scope, name):
IDLObject.__init__(self, location)
self.name = name
assert isinstance(scope, IDLScope)
self.scope = scope
def __str__(self):
return self.QName()
def QName(self):
return self.scope.QName() + self.name
def __hash__(self):
return self.QName().__hash__()
def __eq__(self, other):
return self.QName() == other.QName()
def object(self):
return self.scope.lookupIdentifier(self)
class IDLUnresolvedIdentifier(IDLObject):
def __init__(self, location, name, allowDoubleUnderscore=False,
allowForbidden=False):
IDLObject.__init__(self, location)
assert len(name) > 0
if name == "__noSuchMethod__":
raise WebIDLError("__noSuchMethod__ is deprecated", [location])
if name[:2] == "__" and name != "__content" and not allowDoubleUnderscore:
raise WebIDLError("Identifiers beginning with __ are reserved",
[location])
if name[0] == '_' and not allowDoubleUnderscore:
name = name[1:]
if (name in ["constructor", "toString"] and
not allowForbidden):
raise WebIDLError("Cannot use reserved identifier '%s'" % (name),
[location])
self.name = name
def __str__(self):
return self.QName()
def QName(self):
return "<unresolved scope>::" + self.name
def resolve(self, scope, object):
assert isinstance(scope, IDLScope)
assert not object or isinstance(object, IDLObjectWithIdentifier)
assert not object or object.identifier == self
scope.ensureUnique(self, object)
identifier = IDLIdentifier(self.location, scope, self.name)
if object:
object.identifier = identifier
return identifier
def finish(self):
assert False # Should replace with a resolved identifier first.
class IDLObjectWithIdentifier(IDLObject):
def __init__(self, location, parentScope, identifier):
IDLObject.__init__(self, location)
assert isinstance(identifier, IDLUnresolvedIdentifier)
self.identifier = identifier
if parentScope:
self.resolve(parentScope)
def resolve(self, parentScope):
assert isinstance(parentScope, IDLScope)
assert isinstance(self.identifier, IDLUnresolvedIdentifier)
self.identifier.resolve(parentScope, self)
class IDLObjectWithScope(IDLObjectWithIdentifier, IDLScope):
def __init__(self, location, parentScope, identifier):
assert isinstance(identifier, IDLUnresolvedIdentifier)
IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
IDLScope.__init__(self, location, parentScope, self.identifier)
class IDLIdentifierPlaceholder(IDLObjectWithIdentifier):
def __init__(self, location, identifier):
assert isinstance(identifier, IDLUnresolvedIdentifier)
IDLObjectWithIdentifier.__init__(self, location, None, identifier)
def finish(self, scope):
try:
scope._lookupIdentifier(self.identifier)
except:
raise WebIDLError("Unresolved type '%s'." % self.identifier,
[self.location])
obj = self.identifier.resolve(scope, None)
return scope.lookupIdentifier(obj)
class IDLExposureMixins():
def __init__(self, location):
# _exposureGlobalNames are the global names listed in our [Exposed]
# extended attribute. exposureSet is the exposure set as defined in the
# Web IDL spec: it contains interface names.
self._exposureGlobalNames = set()
self.exposureSet = set()
self._location = location
self._globalScope = None
def finish(self, scope):
assert scope.parentScope is None
self._globalScope = scope
# Verify that our [Exposed] value, if any, makes sense.
for globalName in self._exposureGlobalNames:
if globalName not in scope.globalNames:
raise WebIDLError("Unknown [Exposed] value %s" % globalName,
[self._location])
        # Verify that we are exposed _somewhere_ if we have some place to be
# exposed. We don't want to assert that we're definitely exposed
# because a lot of our parser tests have small-enough IDL snippets that
# they don't include any globals, and we don't really want to go through
# and add global interfaces and [Exposed] annotations to all those
# tests.
if len(scope.globalNames) != 0:
if (len(self._exposureGlobalNames) == 0):
raise WebIDLError(("'%s' is not exposed anywhere even though we have "
"globals to be exposed to") % self,
[self.location])
globalNameSetToExposureSet(scope, self._exposureGlobalNames,
self.exposureSet)
def isExposedInWindow(self):
return 'Window' in self.exposureSet
def isExposedInAnyWorker(self):
return len(self.getWorkerExposureSet()) > 0
def isExposedInWorkerDebugger(self):
return len(self.getWorkerDebuggerExposureSet()) > 0
def isExposedInAnyWorklet(self):
return len(self.getWorkletExposureSet()) > 0
def isExposedInSomeButNotAllWorkers(self):
"""
Returns true if the Exposed extended attribute for this interface
exposes it in some worker globals but not others. The return value does
not depend on whether the interface is exposed in Window or System
globals.
"""
if not self.isExposedInAnyWorker():
return False
workerScopes = self.parentScope.globalNameMapping["Worker"]
return len(workerScopes.difference(self.exposureSet)) > 0
def getWorkerExposureSet(self):
workerScopes = self._globalScope.globalNameMapping["Worker"]
return workerScopes.intersection(self.exposureSet)
def getWorkletExposureSet(self):
workletScopes = self._globalScope.globalNameMapping["Worklet"]
return workletScopes.intersection(self.exposureSet)
def getWorkerDebuggerExposureSet(self):
workerDebuggerScopes = self._globalScope.globalNameMapping["WorkerDebugger"]
return workerDebuggerScopes.intersection(self.exposureSet)
class IDLExternalInterface(IDLObjectWithIdentifier):
def __init__(self, location, parentScope, identifier):
assert isinstance(identifier, IDLUnresolvedIdentifier)
assert isinstance(parentScope, IDLScope)
self.parent = None
IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
IDLObjectWithIdentifier.resolve(self, parentScope)
def finish(self, scope):
pass
def validate(self):
pass
def isIteratorInterface(self):
return False
def isExternal(self):
return True
def isInterface(self):
return True
def addExtendedAttributes(self, attrs):
if len(attrs) != 0:
raise WebIDLError("There are no extended attributes that are "
"allowed on external interfaces",
[attrs[0].location, self.location])
def resolve(self, parentScope):
pass
def getJSImplementation(self):
return None
def isJSImplemented(self):
return False
def hasProbablyShortLivingWrapper(self):
return False
def _getDependentObjects(self):
return set()
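# Illustrative WebIDL for partial dictionaries (hypothetical names); the
# members declared here are merged into the non-partial Foo dictionary
# when it is finished:
#
#   partial dictionary Foo {
#     long extraMember;
#   };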
class IDLPartialDictionary(IDLObject):
def __init__(self, location, name, members, nonPartialDictionary):
assert isinstance(name, IDLUnresolvedIdentifier)
IDLObject.__init__(self, location)
self.identifier = name
self.members = members
self._nonPartialDictionary = nonPartialDictionary
self._finished = False
nonPartialDictionary.addPartialDictionary(self)
def addExtendedAttributes(self, attrs):
pass
def finish(self, scope):
if self._finished:
return
self._finished = True
# Need to make sure our non-partial dictionary gets
# finished so it can report cases when we only have partial
# dictionaries.
self._nonPartialDictionary.finish(scope)
def validate(self):
pass
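# Illustrative WebIDL for partial interfaces (hypothetical names); note
# that [Exposed] and [SecureContext] on a partial interface propagate to
# its members, as implemented in addExtendedAttributes() below:
#
#   [Exposed=Window]
#   partial interface Foo {
#     attribute long extraAttribute;
#   };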
class IDLPartialInterfaceOrNamespace(IDLObject):
def __init__(self, location, name, members, nonPartialInterfaceOrNamespace):
assert isinstance(name, IDLUnresolvedIdentifier)
IDLObject.__init__(self, location)
self.identifier = name
self.members = members
# propagatedExtendedAttrs are the ones that should get
# propagated to our non-partial interface.
self.propagatedExtendedAttrs = []
self._haveSecureContextExtendedAttribute = False
self._nonPartialInterfaceOrNamespace = nonPartialInterfaceOrNamespace
self._finished = False
nonPartialInterfaceOrNamespace.addPartial(self)
def addExtendedAttributes(self, attrs):
for attr in attrs:
identifier = attr.identifier()
if identifier == "NamedConstructor":
self.propagatedExtendedAttrs.append(attr)
elif identifier == "SecureContext":
self._haveSecureContextExtendedAttribute = True
# This gets propagated to all our members.
for member in self.members:
if member.getExtendedAttribute("SecureContext"):
raise WebIDLError("[SecureContext] specified on both a "
"partial interface member and on the "
"partial interface itself",
[member.location, attr.location])
member.addExtendedAttributes([attr])
elif identifier == "Exposed":
# This just gets propagated to all our members.
for member in self.members:
if len(member._exposureGlobalNames) != 0:
raise WebIDLError("[Exposed] specified on both a "
"partial interface member and on the "
"partial interface itself",
[member.location, attr.location])
member.addExtendedAttributes([attr])
else:
raise WebIDLError("Unknown extended attribute %s on partial "
"interface" % identifier,
[attr.location])
def finish(self, scope):
if self._finished:
return
self._finished = True
if (not self._haveSecureContextExtendedAttribute and
self._nonPartialInterfaceOrNamespace.getExtendedAttribute("SecureContext")):
# This gets propagated to all our members.
for member in self.members:
if member.getExtendedAttribute("SecureContext"):
raise WebIDLError("[SecureContext] specified on both a "
"partial interface member and on the "
"non-partial interface",
[member.location,
self._nonPartialInterfaceOrNamespace.location])
member.addExtendedAttributes(
[IDLExtendedAttribute(self._nonPartialInterfaceOrNamespace.location,
("SecureContext",))])
# Need to make sure our non-partial interface or namespace gets
# finished so it can report cases when we only have partial
# interfaces/namespaces.
self._nonPartialInterfaceOrNamespace.finish(scope)
def validate(self):
pass
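# The two helpers below translate [Exposed] annotations into sets.
# convertExposedAttrToGlobalNameSet accepts both syntactic forms, e.g.
# (hypothetical global names):
#
#   [Exposed=Window]           ->  {"Window"}
#   [Exposed=(Window,Worker)]  ->  {"Window", "Worker"}
#
# globalNameSetToExposureSet then maps each global name to the concrete
# interface names registered for it on the global scope.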
def convertExposedAttrToGlobalNameSet(exposedAttr, targetSet):
assert len(targetSet) == 0
if exposedAttr.hasValue():
targetSet.add(exposedAttr.value())
else:
assert exposedAttr.hasArgs()
targetSet.update(exposedAttr.args())
def globalNameSetToExposureSet(globalScope, nameSet, exposureSet):
for name in nameSet:
exposureSet.update(globalScope.globalNameMapping[name])
class IDLInterfaceOrInterfaceMixinOrNamespace(IDLObjectWithScope, IDLExposureMixins):
def __init__(self, location, parentScope, name):
assert isinstance(parentScope, IDLScope)
assert isinstance(name, IDLUnresolvedIdentifier)
self._finished = False
self.members = []
self._partials = []
self._extendedAttrDict = {}
self._isKnownNonPartial = False
IDLObjectWithScope.__init__(self, location, parentScope, name)
IDLExposureMixins.__init__(self, location)
def finish(self, scope):
if not self._isKnownNonPartial:
raise WebIDLError("%s does not have a non-partial declaration" %
str(self), [self.location])
IDLExposureMixins.finish(self, scope)
# Now go ahead and merge in our partials.
for partial in self._partials:
partial.finish(scope)
self.addExtendedAttributes(partial.propagatedExtendedAttrs)
self.members.extend(partial.members)
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
assert isinstance(scope, IDLScope)
assert isinstance(originalObject, IDLInterfaceMember)
assert isinstance(newObject, IDLInterfaceMember)
retval = IDLScope.resolveIdentifierConflict(self, scope, identifier,
originalObject, newObject)
# Might be a ctor, which isn't in self.members
if newObject in self.members:
self.members.remove(newObject)
return retval
def typeName(self):
if self.isInterface():
return "interface"
if self.isNamespace():
return "namespace"
assert self.isInterfaceMixin()
return "interface mixin"
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def setNonPartial(self, location, members):
if self._isKnownNonPartial:
raise WebIDLError("Two non-partial definitions for the "
"same %s" % self.typeName(),
[location, self.location])
self._isKnownNonPartial = True
# Now make it look like we were parsed at this new location, since
# that's the place where the interface is "really" defined
self.location = location
# Put the new members at the beginning
self.members = members + self.members
def addPartial(self, partial):
assert self.identifier.name == partial.identifier.name
self._partials.append(partial)
def getPartials(self):
# Don't let people mutate our guts.
return list(self._partials)
def finishMembers(self, scope):
# Assuming we've merged in our partials, set the _exposureGlobalNames on
# any members that don't have it set yet. Note that any partial
# interfaces that had [Exposed] set have already set up
# _exposureGlobalNames on all the members coming from them, so this is
# just implementing the "members default to interface or interface mixin
# that defined them" and "partial interfaces or interface mixins default
# to interface or interface mixin they're a partial for" rules from the
# spec.
for m in self.members:
# If m, or the partial m came from, had [Exposed]
# specified, it already has a nonempty exposure global names set.
if len(m._exposureGlobalNames) == 0:
m._exposureGlobalNames.update(self._exposureGlobalNames)
if m.isAttr() and m.stringifier:
m.expand(self.members)
# resolve() will modify self.members, so we need to iterate
# over a copy of the member list here.
for member in list(self.members):
member.resolve(self)
for member in self.members:
member.finish(scope)
# Now that we've finished our members, which has updated their exposure
# sets, make sure they aren't exposed in places where we are not.
for member in self.members:
if not member.exposureSet.issubset(self.exposureSet):
raise WebIDLError("Interface or interface mixin member has "
"larger exposure set than its container",
[member.location, self.location])
def isExternal(self):
return False
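# Illustrative WebIDL for interface mixins and the includes statements
# that apply them (hypothetical names):
#
#   interface mixin GlobalCrypto {
#     readonly attribute Crypto crypto;
#   };
#   Window includes GlobalCrypto;
#
# A mixin with no explicit [Exposed] inherits the exposure of the
# interfaces that include it; see finish() below.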
class IDLInterfaceMixin(IDLInterfaceOrInterfaceMixinOrNamespace):
def __init__(self, location, parentScope, name, members, isKnownNonPartial):
self.actualExposureGlobalNames = set()
assert isKnownNonPartial or len(members) == 0
IDLInterfaceOrInterfaceMixinOrNamespace.__init__(self, location, parentScope, name)
if isKnownNonPartial:
self.setNonPartial(location, members)
def __str__(self):
return "Interface mixin '%s'" % self.identifier.name
def isInterfaceMixin(self):
return True
def finish(self, scope):
if self._finished:
return
self._finished = True
# Expose to the globals of the interfaces that include this mixin if
# this mixin has no explicit [Exposed], so that its members can be
# exposed based on the including interfaces' exposure sets.
#
# Make sure this is done before IDLExposureMixins.finish call, since
# that converts our set of exposure global names to an actual exposure
# set.
hasImplicitExposure = len(self._exposureGlobalNames) == 0
if hasImplicitExposure:
self._exposureGlobalNames.update(self.actualExposureGlobalNames)
IDLInterfaceOrInterfaceMixinOrNamespace.finish(self, scope)
self.finishMembers(scope)
def validate(self):
for member in self.members:
if member.isAttr():
if member.inherit:
raise WebIDLError("Interface mixin member cannot include "
"an inherited attribute",
[member.location, self.location])
if member.isStatic():
raise WebIDLError("Interface mixin member cannot include "
"a static member",
[member.location, self.location])
if member.isMethod():
if member.isStatic():
raise WebIDLError("Interface mixin member cannot include "
"a static operation",
[member.location, self.location])
if (member.isGetter() or
member.isSetter() or
member.isDeleter() or
member.isLegacycaller()):
raise WebIDLError("Interface mixin member cannot include a "
"special operation",
[member.location, self.location])
def addExtendedAttributes(self, attrs):
for attr in attrs:
identifier = attr.identifier()
if identifier == "SecureContext":
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
# This gets propagated to all our members.
for member in self.members:
if member.getExtendedAttribute("SecureContext"):
raise WebIDLError("[SecureContext] specified on both "
"an interface mixin member and on"
"the interface mixin itself",
[member.location, attr.location])
member.addExtendedAttributes([attr])
elif identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr,
self._exposureGlobalNames)
else:
raise WebIDLError("Unknown extended attribute %s on interface" % identifier,
[attr.location])
attrlist = attr.listValue()
self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
def _getDependentObjects(self):
return set(self.members)
class IDLInterfaceOrNamespace(IDLInterfaceOrInterfaceMixinOrNamespace):
def __init__(self, location, parentScope, name, parent, members,
isKnownNonPartial, toStringTag):
assert isKnownNonPartial or not parent
assert isKnownNonPartial or len(members) == 0
self.parent = None
self._callback = False
self.maplikeOrSetlikeOrIterable = None
# namedConstructors needs deterministic ordering because bindings code
# outputs the constructors in the order that namedConstructors enumerates
# them.
self.namedConstructors = list()
self.legacyWindowAliases = []
self.includedMixins = set()
# self.interfacesBasedOnSelf is the set of interfaces that inherit from
# self, including self itself.
# Used for distinguishability checking.
self.interfacesBasedOnSelf = set([self])
self._hasChildInterfaces = False
self._isOnGlobalProtoChain = False
# Tracking of the number of reserved slots we need for our
# members and those of ancestor interfaces.
self.totalMembersInSlots = 0
# Tracking of the number of our own members we have in slots
self._ownMembersInSlots = 0
# If this is an iterator interface, we need to know what iterable
# interface we're iterating for in order to get its nativeType.
self.iterableInterface = None
# True if we have cross-origin members.
self.hasCrossOriginMembers = False
# True if some descendant (including ourselves) has cross-origin members
self.hasDescendantWithCrossOriginMembers = False
self.toStringTag = toStringTag
IDLInterfaceOrInterfaceMixinOrNamespace.__init__(self, location, parentScope, name)
if isKnownNonPartial:
self.setNonPartial(location, parent, members)
def ctor(self):
identifier = IDLUnresolvedIdentifier(self.location, "constructor",
allowForbidden=True)
try:
return self._lookupIdentifier(identifier)
except:
return None
def isIterable(self):
return (self.maplikeOrSetlikeOrIterable and
self.maplikeOrSetlikeOrIterable.isIterable())
def isIteratorInterface(self):
return self.iterableInterface is not None
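# Illustrative WebIDL for the declarations tracked by
# self.maplikeOrSetlikeOrIterable (hypothetical names); at most one such
# declaration is allowed per interface, which finish() enforces:
#
#   interface Foo { iterable<long>; };             // value iterator
#   interface Bar { iterable<DOMString, long>; };  // pair iterator
#   interface Baz { maplike<DOMString, long>; };
#   interface Quux { setlike<DOMString>; };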
def finish(self, scope):
if self._finished:
return
self._finished = True
IDLInterfaceOrInterfaceMixinOrNamespace.finish(self, scope)
if len(self.legacyWindowAliases) > 0:
if not self.hasInterfaceObject():
raise WebIDLError("Interface %s unexpectedly has [LegacyWindowAlias] "
"and [NoInterfaceObject] together" % self.identifier.name,
[self.location])
if not self.isExposedInWindow():
raise WebIDLError("Interface %s has [LegacyWindowAlias] "
"but not exposed in Window" % self.identifier.name,
[self.location])
# Generate maplike/setlike interface members. Since generated members
# need to be treated like regular interface members, do this before
# things like exposure setting.
for member in self.members:
if member.isMaplikeOrSetlikeOrIterable():
# Check that we only have one interface declaration (currently
# there can only be one maplike/setlike declaration per
# interface)
if self.maplikeOrSetlikeOrIterable:
raise WebIDLError("%s declaration used on "
"interface that already has %s "
"declaration" %
(member.maplikeOrSetlikeOrIterableType,
self.maplikeOrSetlikeOrIterable.maplikeOrSetlikeOrIterableType),
[self.maplikeOrSetlikeOrIterable.location,
member.location])
self.maplikeOrSetlikeOrIterable = member
# If we've got a maplike or setlike declaration, we'll be building all of
# our required methods in Codegen. Generate members now.
self.maplikeOrSetlikeOrIterable.expand(self.members, self.isJSImplemented())
assert not self.parent or isinstance(self.parent, IDLIdentifierPlaceholder)
parent = self.parent.finish(scope) if self.parent else None
if parent and isinstance(parent, IDLExternalInterface):
raise WebIDLError("%s inherits from %s which does not have "
"a definition" %
(self.identifier.name,
self.parent.identifier.name),
[self.location])
if parent and not isinstance(parent, IDLInterface):
raise WebIDLError("%s inherits from %s which is not an interface " %
(self.identifier.name,
self.parent.identifier.name),
[self.location, parent.location])
self.parent = parent
assert iter(self.members)
if self.isNamespace():
assert not self.parent
for m in self.members:
if m.isAttr() or m.isMethod():
if m.isStatic():
raise WebIDLError("Don't mark things explicitly static "
"in namespaces",
[self.location, m.location])
# Just mark all our methods/attributes as static. The other
# option is to duplicate the relevant InterfaceMembers
# production bits but modified to produce static stuff to
# start with, but that sounds annoying.
m.forceStatic()
if self.parent:
self.parent.finish(scope)
self.parent._hasChildInterfaces = True
self.totalMembersInSlots = self.parent.totalMembersInSlots
# Interfaces with [Global] must not have anything inherit from them
if self.parent.getExtendedAttribute("Global"):
# Note: This is not a self.parent.isOnGlobalProtoChain() check
# because ancestors of a [Global] interface can have other
# descendants.
raise WebIDLError("[Global] interface has another interface "
"inheriting from it",
[self.location, self.parent.location])
# Make sure that we're not exposed in places where our parent is not
if not self.exposureSet.issubset(self.parent.exposureSet):
raise WebIDLError("Interface %s is exposed in globals where its "
"parent interface %s is not exposed." %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
# Callbacks must not inherit from non-callbacks.
# XXXbz Can non-callbacks inherit from callbacks? Spec issue pending.
if self.isCallback():
if not self.parent.isCallback():
raise WebIDLError("Callback interface %s inheriting from "
"non-callback interface %s" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
elif self.parent.isCallback():
raise WebIDLError("Non-callback interface %s inheriting from "
"callback interface %s" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
# Interfaces which have interface objects can't inherit
# from [NoInterfaceObject] interfaces.
if (self.parent.getExtendedAttribute("NoInterfaceObject") and
not self.getExtendedAttribute("NoInterfaceObject")):
raise WebIDLError("Interface %s does not have "
"[NoInterfaceObject] but inherits from "
"interface %s which does" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
# Interfaces that are not [SecureContext] can't inherit
# from [SecureContext] interfaces.
if (self.parent.getExtendedAttribute("SecureContext") and
not self.getExtendedAttribute("SecureContext")):
raise WebIDLError("Interface %s does not have "
"[SecureContext] but inherits from "
"interface %s which does" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
for mixin in self.includedMixins:
mixin.finish(scope)
cycleInGraph = self.findInterfaceLoopPoint(self)
if cycleInGraph:
raise WebIDLError(
"Interface %s has itself as ancestor" % self.identifier.name,
[self.location, cycleInGraph.location])
self.finishMembers(scope)
ctor = self.ctor()
if ctor is not None:
if not self.hasInterfaceObject():
raise WebIDLError(
"Can't have both a constructor and [NoInterfaceObject]",
[self.location, ctor.location])
if self.globalNames:
raise WebIDLError(
"Can't have both a constructor and [Global]",
[self.location, ctor.location])
assert(ctor._exposureGlobalNames == self._exposureGlobalNames)
ctor._exposureGlobalNames.update(self._exposureGlobalNames)
# Remove the constructor operation from our member list so
# it doesn't get in the way later.
self.members.remove(ctor)
for ctor in self.namedConstructors:
if self.globalNames:
raise WebIDLError(
"Can't have both a named constructor and [Global]",
[self.location, ctor.location])
assert len(ctor._exposureGlobalNames) == 0
ctor._exposureGlobalNames.update(self._exposureGlobalNames)
ctor.finish(scope)
# Make a copy of our member list, so things that implement us
# can get those without all the stuff we implement ourselves
# admixed.
self.originalMembers = list(self.members)
for mixin in sorted(self.includedMixins,
key=lambda x: x.identifier.name):
for mixinMember in mixin.members:
for member in self.members:
if mixinMember.identifier.name == member.identifier.name:
raise WebIDLError(
"Multiple definitions of %s on %s coming from 'includes' statements" %
(member.identifier.name, self),
[mixinMember.location, member.location])
self.members.extend(mixin.members)
for ancestor in self.getInheritedInterfaces():
ancestor.interfacesBasedOnSelf.add(self)
if (ancestor.maplikeOrSetlikeOrIterable is not None and
self.maplikeOrSetlikeOrIterable is not None):
raise WebIDLError("Cannot have maplike/setlike on %s that "
"inherits %s, which is already "
"maplike/setlike" %
(self.identifier.name,
ancestor.identifier.name),
[self.maplikeOrSetlikeOrIterable.location,
ancestor.maplikeOrSetlikeOrIterable.location])
# Deal with interfaces marked [Unforgeable], now that we have our full
# member list, except unforgeables pulled in from parents. We want to
# do this before we set "originatingInterface" on our unforgeable
# members.
if self.getExtendedAttribute("Unforgeable"):
# Check that the interface already has all the things the
# spec would otherwise require us to synthesize and is
# missing the ones we plan to synthesize.
if not any(m.isMethod() and m.isStringifier() for m in self.members):
raise WebIDLError("Unforgeable interface %s does not have a "
"stringifier" % self.identifier.name,
[self.location])
for m in self.members:
if m.identifier.name == "toJSON":
raise WebIDLError("Unforgeable interface %s has a "
"toJSON so we won't be able to add "
"one ourselves" % self.identifier.name,
[self.location, m.location])
if m.identifier.name == "valueOf" and not m.isStatic():
raise WebIDLError("Unforgeable interface %s has a valueOf "
"member so we won't be able to add one "
"ourselves" % self.identifier.name,
[self.location, m.location])
for member in self.members:
if ((member.isAttr() or member.isMethod()) and
member.isUnforgeable() and
not hasattr(member, "originatingInterface")):
member.originatingInterface = self
for member in self.members:
if ((member.isMethod() and
member.getExtendedAttribute("CrossOriginCallable")) or
(member.isAttr() and
(member.getExtendedAttribute("CrossOriginReadable") or
member.getExtendedAttribute("CrossOriginWritable")))):
self.hasCrossOriginMembers = True
break
if self.hasCrossOriginMembers:
parent = self
while parent:
parent.hasDescendantWithCrossOriginMembers = True
parent = parent.parent
# Compute slot indices for our members before we pull in unforgeable
# members from our parent. Also, maplike/setlike declarations get a
# slot to hold their backing object.
for member in self.members:
if ((member.isAttr() and
(member.getExtendedAttribute("StoreInSlot") or
member.getExtendedAttribute("Cached"))) or
member.isMaplikeOrSetlike()):
if self.isJSImplemented() and not member.isMaplikeOrSetlike():
raise WebIDLError("Interface %s is JS-implemented and we "
"don't support [Cached] or [StoreInSlot] "
"on JS-implemented interfaces" %
self.identifier.name,
[self.location, member.location])
if member.slotIndices is None:
member.slotIndices = dict()
member.slotIndices[self.identifier.name] = self.totalMembersInSlots
self.totalMembersInSlots += 1
if member.getExtendedAttribute("StoreInSlot"):
self._ownMembersInSlots += 1
if self.parent:
# Make sure we don't shadow any of the [Unforgeable] attributes on our
# ancestor interfaces. We don't have to worry about mixins here, because
# those have already been imported into the relevant .members lists. And
# we don't have to worry about anything other than our parent, because it
# has already imported its ancestors' unforgeable attributes into its
# member list.
for unforgeableMember in (member for member in self.parent.members if
(member.isAttr() or member.isMethod()) and
member.isUnforgeable()):
shadows = [m for m in self.members if
(m.isAttr() or m.isMethod()) and
not m.isStatic() and
m.identifier.name == unforgeableMember.identifier.name]
if len(shadows) != 0:
locs = [unforgeableMember.location] + [s.location for s
in shadows]
raise WebIDLError("Interface %s shadows [Unforgeable] "
"members of %s" %
(self.identifier.name,
self.parent.identifier.name),
locs)
# And now just stick it in our members, since we won't be
# inheriting this down the proto chain. If we really cared we
# could try to do something where we set up the unforgeable
# attributes/methods of ancestor interfaces, with their
# corresponding getters, on our interface, but that gets pretty
# complicated and seems unnecessary.
self.members.append(unforgeableMember)
# At this point, we have all of our members. If the current interface
# uses maplike/setlike, check for collisions anywhere in the current
# interface or higher in the inheritance chain.
if self.maplikeOrSetlikeOrIterable:
testInterface = self
isAncestor = False
while testInterface:
self.maplikeOrSetlikeOrIterable.checkCollisions(testInterface.members,
isAncestor)
isAncestor = True
testInterface = testInterface.parent
# Ensure that there's at most one of each {named,indexed}
# {getter,setter,deleter}, at most one stringifier,
# and at most one legacycaller. Note that this last is not
# quite per spec, but in practice no one overloads
# legacycallers. Also note that in practice we disallow
# indexed deleters, but it simplifies some other code to
# treat deleter analogously to getter/setter by
# prefixing it with "named".
specialMembersSeen = {}
for member in self.members:
if not member.isMethod():
continue
if member.isGetter():
memberType = "getters"
elif member.isSetter():
memberType = "setters"
elif member.isDeleter():
memberType = "deleters"
elif member.isStringifier():
memberType = "stringifiers"
elif member.isLegacycaller():
memberType = "legacycallers"
else:
continue
if (memberType != "stringifiers" and memberType != "legacycallers"):
if member.isNamed():
memberType = "named " + memberType
else:
assert member.isIndexed()
memberType = "indexed " + memberType
if memberType in specialMembersSeen:
raise WebIDLError("Multiple " + memberType + " on %s" % (self),
[self.location,
specialMembersSeen[memberType].location,
member.location])
specialMembersSeen[memberType] = member
if self.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
# Check that we have a named getter.
if "named getters" not in specialMembersSeen:
raise WebIDLError(
"Interface with [LegacyUnenumerableNamedProperties] does "
"not have a named getter",
[self.location])
ancestor = self.parent
while ancestor:
if ancestor.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
raise WebIDLError(
"Interface with [LegacyUnenumerableNamedProperties] "
"inherits from another interface with "
"[LegacyUnenumerableNamedProperties]",
[self.location, ancestor.location])
ancestor = ancestor.parent
if self._isOnGlobalProtoChain:
# Make sure we have no named setters or deleters
for memberType in ["setter", "deleter"]:
memberId = "named " + memberType + "s"
if memberId in specialMembersSeen:
raise WebIDLError("Interface with [Global] has a named %s" %
memberType,
[self.location,
specialMembersSeen[memberId].location])
# Make sure we're not [OverrideBuiltins]
if self.getExtendedAttribute("OverrideBuiltins"):
raise WebIDLError("Interface with [Global] also has "
"[OverrideBuiltins]",
[self.location])
# Mark all of our ancestors as being on the global's proto chain too
parent = self.parent
while parent:
# Must not inherit from an interface with [OverrideBuiltins]
if parent.getExtendedAttribute("OverrideBuiltins"):
raise WebIDLError("Interface with [Global] inherits from "
"interface with [OverrideBuiltins]",
[self.location, parent.location])
parent._isOnGlobalProtoChain = True
parent = parent.parent
def validate(self):
def checkDuplicateNames(member, name, attributeName):
for m in self.members:
if m.identifier.name == name:
raise WebIDLError("[%s=%s] has same name as interface member" %
(attributeName, name),
[member.location, m.location])
if m.isMethod() and m != member and name in m.aliases:
raise WebIDLError("conflicting [%s=%s] definitions" %
(attributeName, name),
[member.location, m.location])
if m.isAttr() and m != member and name in m.bindingAliases:
raise WebIDLError("conflicting [%s=%s] definitions" %
(attributeName, name),
[member.location, m.location])
# We also don't support inheriting from unforgeable interfaces.
if self.getExtendedAttribute("Unforgeable") and self.hasChildInterfaces():
locations = ([self.location] +
list(i.location for i in
self.interfacesBasedOnSelf if i.parent == self))
raise WebIDLError("%s is an unforgeable ancestor interface" %
self.identifier.name,
locations)
ctor = self.ctor()
if ctor is not None:
ctor.validate()
for namedCtor in self.namedConstructors:
namedCtor.validate()
indexedGetter = None
hasLengthAttribute = False
for member in self.members:
member.validate()
if self.isCallback() and member.getExtendedAttribute("Replaceable"):
raise WebIDLError("[Replaceable] used on an attribute on "
"interface %s which is a callback interface" %
self.identifier.name,
[self.location, member.location])
# Check that PutForwards refers to another attribute and that no
# cycles exist in forwarded assignments. Also check for a
# integer-typed "length" attribute.
if member.isAttr():
if (member.identifier.name == "length" and
member.type.isInteger()):
hasLengthAttribute = True
iface = self
attr = member
putForwards = attr.getExtendedAttribute("PutForwards")
if putForwards and self.isCallback():
raise WebIDLError("[PutForwards] used on an attribute "
"on interface %s which is a callback "
"interface" % self.identifier.name,
[self.location, member.location])
while putForwards is not None:
forwardIface = attr.type.unroll().inner
forwardAttr = None
for forwardedMember in forwardIface.members:
if (not forwardedMember.isAttr() or
forwardedMember.identifier.name != putForwards[0]):
continue
if forwardedMember == member:
raise WebIDLError("Cycle detected in forwarded "
"assignments for attribute %s on "
"%s" %
(member.identifier.name, self),
[member.location])
forwardAttr = forwardedMember
break
if forwardAttr is None:
raise WebIDLError("Attribute %s on %s forwards to "
"missing attribute %s" %
(attr.identifier.name, iface, putForwards),
[attr.location])
iface = forwardIface
attr = forwardAttr
putForwards = attr.getExtendedAttribute("PutForwards")
# Check that the name of an [Alias] doesn't conflict with an
# interface member and whether we support indexed properties.
if member.isMethod():
if member.isGetter() and member.isIndexed():
indexedGetter = member
for alias in member.aliases:
if self.isOnGlobalProtoChain():
raise WebIDLError("[Alias] must not be used on a "
"[Global] interface operation",
[member.location])
if (member.getExtendedAttribute("Exposed") or
member.getExtendedAttribute("ChromeOnly") or
member.getExtendedAttribute("Pref") or
member.getExtendedAttribute("Func") or
member.getExtendedAttribute("SecureContext")):
raise WebIDLError("[Alias] must not be used on a "
"conditionally exposed operation",
[member.location])
if member.isStatic():
raise WebIDLError("[Alias] must not be used on a "
"static operation",
[member.location])
if member.isIdentifierLess():
raise WebIDLError("[Alias] must not be used on an "
"identifierless operation",
[member.location])
if member.isUnforgeable():
raise WebIDLError("[Alias] must not be used on an "
"[Unforgeable] operation",
[member.location])
checkDuplicateNames(member, alias, "Alias")
# Check that the name of a [BindingAlias] doesn't conflict with an
# interface member.
if member.isAttr():
for bindingAlias in member.bindingAliases:
checkDuplicateNames(member, bindingAlias, "BindingAlias")
# Conditional exposure makes no sense for interfaces with no
# interface object.
# And SecureContext makes sense for interfaces with no interface object,
# since it is also propagated to interface members.
if (self.isExposedConditionally(exclusions=["SecureContext"]) and
not self.hasInterfaceObject()):
raise WebIDLError("Interface with no interface object is "
"exposed conditionally",
[self.location])
# Value iterators are only allowed on interfaces with indexed getters,
# and pair iterators are only allowed on interfaces without indexed
# getters.
if self.isIterable():
iterableDecl = self.maplikeOrSetlikeOrIterable
if iterableDecl.isValueIterator():
if not indexedGetter:
raise WebIDLError("Interface with value iterator does not "
"support indexed properties",
[self.location, iterableDecl.location])
if iterableDecl.valueType != indexedGetter.signatures()[0][0]:
raise WebIDLError("Iterable type does not match indexed "
"getter type",
[iterableDecl.location,
indexedGetter.location])
if not hasLengthAttribute:
raise WebIDLError('Interface with value iterator does not '
'have an integer-typed "length" attribute',
[self.location, iterableDecl.location])
else:
assert iterableDecl.isPairIterator()
if indexedGetter:
raise WebIDLError("Interface with pair iterator supports "
"indexed properties",
[self.location, iterableDecl.location,
indexedGetter.location])
if indexedGetter and not hasLengthAttribute:
raise WebIDLError('Interface with an indexed getter does not have '
'an integer-typed "length" attribute',
[self.location, indexedGetter.location])
def setCallback(self, value):
self._callback = value
def isCallback(self):
return self._callback
def isSingleOperationInterface(self):
assert self.isCallback() or self.isJSImplemented()
return (
# JS-implemented things should never need the
# this-handling weirdness of single-operation interfaces.
not self.isJSImplemented() and
# Not inheriting from another interface
not self.parent and
# No attributes of any kinds
not any(m.isAttr() for m in self.members) and
# There is at least one regular operation, and all regular
# operations have the same identifier
len(set(m.identifier.name for m in self.members if
m.isMethod() and not m.isStatic())) == 1)
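# Illustrative WebIDL for a single-operation callback interface of the
# shape isSingleOperationInterface() tests for (hypothetical name,
# modeled on EventListener):
#
#   callback interface MyListener {
#     void handleEvent(Event event);
#   };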
def inheritanceDepth(self):
depth = 0
parent = self.parent
while parent:
depth = depth + 1
parent = parent.parent
return depth
def hasConstants(self):
return any(m.isConst() for m in self.members)
def hasInterfaceObject(self):
if self.isCallback():
return self.hasConstants()
return not hasattr(self, "_noInterfaceObject")
def hasInterfacePrototypeObject(self):
return (not self.isCallback() and not self.isNamespace()
and self.getUserData('hasConcreteDescendant', False))
def addIncludedMixin(self, includedMixin):
assert(isinstance(includedMixin, IDLInterfaceMixin))
self.includedMixins.add(includedMixin)
def getInheritedInterfaces(self):
"""
Returns a list of the interfaces this interface inherits from
(not including this interface itself). The list is in order
from most derived to least derived.
"""
assert(self._finished)
if not self.parent:
return []
parentInterfaces = self.parent.getInheritedInterfaces()
parentInterfaces.insert(0, self.parent)
return parentInterfaces
def findInterfaceLoopPoint(self, otherInterface):
"""
Finds an interface amongst our ancestors that inherits from otherInterface.
If there is no such interface, returns None.
"""
if self.parent:
if self.parent == otherInterface:
return self
loopPoint = self.parent.findInterfaceLoopPoint(otherInterface)
if loopPoint:
return loopPoint
return None
def setNonPartial(self, location, parent, members):
assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
IDLInterfaceOrInterfaceMixinOrNamespace.setNonPartial(self, location, members)
assert not self.parent
self.parent = parent
def getJSImplementation(self):
classId = self.getExtendedAttribute("JSImplementation")
if not classId:
return classId
assert isinstance(classId, list)
assert len(classId) == 1
return classId[0]
def isJSImplemented(self):
return bool(self.getJSImplementation())
def hasProbablyShortLivingWrapper(self):
current = self
while current:
if current.getExtendedAttribute("ProbablyShortLivingWrapper"):
return True
current = current.parent
return False
def hasChildInterfaces(self):
return self._hasChildInterfaces
def isOnGlobalProtoChain(self):
return self._isOnGlobalProtoChain
def _getDependentObjects(self):
deps = set(self.members)
deps.update(self.includedMixins)
if self.parent:
deps.add(self.parent)
return deps
def hasMembersInSlots(self):
return self._ownMembersInSlots != 0
conditionExtendedAttributes = [ "Pref", "ChromeOnly", "Func",
"SecureContext" ]
def isExposedConditionally(self, exclusions=[]):
return any(((a not in exclusions) and self.getExtendedAttribute(a)) for a in self.conditionExtendedAttributes)
class IDLInterface(IDLInterfaceOrNamespace):
def __init__(self, location, parentScope, name, parent, members,
isKnownNonPartial, classNameOverride=None,
toStringTag=None):
IDLInterfaceOrNamespace.__init__(self, location, parentScope, name,
parent, members, isKnownNonPartial,
toStringTag)
self.classNameOverride = classNameOverride
def __str__(self):
return "Interface '%s'" % self.identifier.name
def isInterface(self):
return True
def getClassName(self):
if self.classNameOverride:
return self.classNameOverride
return self.identifier.name
def addExtendedAttributes(self, attrs):
for attr in attrs:
identifier = attr.identifier()
# Special cased attrs
if identifier == "TreatNonCallableAsNull":
raise WebIDLError("TreatNonCallableAsNull cannot be specified on interfaces",
[attr.location, self.location])
if identifier == "TreatNonObjectAsNull":
raise WebIDLError("TreatNonObjectAsNull cannot be specified on interfaces",
[attr.location, self.location])
elif identifier == "NoInterfaceObject":
if not attr.noArguments():
raise WebIDLError("[NoInterfaceObject] must take no arguments",
[attr.location])
self._noInterfaceObject = True
elif identifier == "NamedConstructor":
if not attr.hasValue():
raise WebIDLError("NamedConstructor must either take an identifier or take a named argument list",
[attr.location])
args = attr.args() if attr.hasArgs() else []
retType = IDLWrapperType(self.location, self)
method = IDLConstructor(attr.location, args, attr.value())
method.reallyInit(self)
# Named constructors are always assumed to be able to
# throw (since there's no way to indicate otherwise).
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Throws",))])
# We need to detect conflicts for NamedConstructors across
# interfaces. We first call resolve on the parentScope,
# which will merge all NamedConstructors with the same
# identifier across interfaces as overloads.
method.resolve(self.parentScope)
# Then we look up the identifier on the parentScope. If the
# result is the same as the method we're adding then it
# hasn't been added as an overload and it's the first time
# we've encountered a NamedConstructor with that identifier.
# If the result is not the same as the method we're adding
# then it has been added as an overload and we need to check
# whether the result is actually one of our existing
# NamedConstructors.
newMethod = self.parentScope.lookupIdentifier(method.identifier)
if newMethod == method:
self.namedConstructors.append(method)
elif newMethod not in self.namedConstructors:
raise WebIDLError("NamedConstructor conflicts with a "
"NamedConstructor of a different interface",
[method.location, newMethod.location])
elif (identifier == "ExceptionClass"):
if not attr.noArguments():
raise WebIDLError("[ExceptionClass] must take no arguments",
[attr.location])
if self.parent:
raise WebIDLError("[ExceptionClass] must not be specified on "
"an interface with inherited interfaces",
[attr.location, self.location])
elif identifier == "Global":
if attr.hasValue():
self.globalNames = [attr.value()]
elif attr.hasArgs():
self.globalNames = attr.args()
else:
self.globalNames = [self.identifier.name]
self.parentScope.addIfaceGlobalNames(self.identifier.name,
self.globalNames)
self._isOnGlobalProtoChain = True
elif identifier == "LegacyWindowAlias":
if attr.hasValue():
self.legacyWindowAliases = [attr.value()]
elif attr.hasArgs():
self.legacyWindowAliases = attr.args()
else:
raise WebIDLError("[%s] must either take an identifier "
"or take an identifier list" % identifier,
[attr.location])
for alias in self.legacyWindowAliases:
unresolved = IDLUnresolvedIdentifier(attr.location, alias)
IDLObjectWithIdentifier(attr.location, self.parentScope, unresolved)
elif identifier == "SecureContext":
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
# This gets propagated to all our members.
for member in self.members:
if member.getExtendedAttribute("SecureContext"):
raise WebIDLError("[SecureContext] specified on both "
"an interface member and on the "
"interface itself",
[member.location, attr.location])
member.addExtendedAttributes([attr])
elif (identifier == "NeedResolve" or
identifier == "OverrideBuiltins" or
identifier == "ChromeOnly" or
identifier == "Unforgeable" or
identifier == "LegacyEventInit" or
identifier == "ProbablyShortLivingWrapper" or
identifier == "LegacyUnenumerableNamedProperties" or
identifier == "RunConstructorInCallerCompartment" or
identifier == "WantsEventListenerHooks" or
identifier == "Serializable" or
identifier == "Abstract" or
identifier == "Inline"):
# Known extended attributes that do not take values
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
elif identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr,
self._exposureGlobalNames)
elif (identifier == "Pref" or
identifier == "JSImplementation" or
identifier == "HeaderFile" or
identifier == "Func" or
identifier == "Deprecated"):
# Known extended attributes that take a string value
if not attr.hasValue():
raise WebIDLError("[%s] must have a value" % identifier,
[attr.location])
elif identifier == "InstrumentedProps":
# Known extended attributes that take a list
if not attr.hasArgs():
raise WebIDLError("[%s] must have arguments" % identifier,
[attr.location])
else:
raise WebIDLError("Unknown extended attribute %s on interface" % identifier,
[attr.location])
attrlist = attr.listValue()
self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
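# Illustrative WebIDL for [NamedConstructor], handled above; it declares
# an additional constructor exposed under its own global name (example
# modeled on the HTML Audio() constructor):
#
#   [NamedConstructor=Audio(optional DOMString src)]
#   interface HTMLAudioElement : HTMLMediaElement { /* ... */ };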
def validate(self):
IDLInterfaceOrNamespace.validate(self)
if self.parent and self.isSerializable() and not self.parent.isSerializable():
raise WebIDLError(
"Serializable interface inherits from non-serializable "
"interface. Per spec, that means the object should not be "
"serializable, so chances are someone made a mistake here "
"somewhere.",
[self.location, self.parent.location])
def isSerializable(self):
return self.getExtendedAttribute("Serializable")
def setNonPartial(self, location, parent, members):
# Before we do anything else, finish initializing any constructors that
# might be in "members", so we don't have partially-initialized objects
# hanging around. We couldn't do it before now because we needed to have
# to have the IDLInterface on hand to properly set the return type.
for member in members:
if isinstance(member, IDLConstructor):
member.reallyInit(self)
IDLInterfaceOrNamespace.setNonPartial(self, location, parent, members)
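# Illustrative WebIDL for namespaces (hypothetical names); namespace
# members are implicitly static, which IDLInterfaceOrNamespace.finish()
# enforces by calling forceStatic() on them:
#
#   namespace MathUtils {
#     double add(double a, double b);
#   };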
class IDLNamespace(IDLInterfaceOrNamespace):
def __init__(self, location, parentScope, name, members, isKnownNonPartial):
IDLInterfaceOrNamespace.__init__(self, location, parentScope, name,
None, members, isKnownNonPartial,
toStringTag=None)
def __str__(self):
return "Namespace '%s'" % self.identifier.name
def isNamespace(self):
return True
def addExtendedAttributes(self, attrs):
# The set of things namespaces support is small enough it's simpler
# to factor out into a separate method than it is to sprinkle
# isNamespace() checks all through
# IDLInterfaceOrNamespace.addExtendedAttributes.
for attr in attrs:
identifier = attr.identifier()
if identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr,
self._exposureGlobalNames)
elif identifier == "ClassString":
# Takes a string value to override the default "Object" if
# desired.
if not attr.hasValue():
raise WebIDLError("[%s] must have a value" % identifier,
[attr.location])
elif (identifier == "ProtoObjectHack" or
identifier == "ChromeOnly"):
if not attr.noArguments():
raise WebIDLError("[%s] must not have arguments" % identifier,
[attr.location])
elif (identifier == "Pref" or
identifier == "HeaderFile" or
identifier == "Func"):
# Known extended attributes that take a string value
if not attr.hasValue():
raise WebIDLError("[%s] must have a value" % identifier,
[attr.location])
else:
raise WebIDLError("Unknown extended attribute %s on namespace" %
identifier,
[attr.location])
attrlist = attr.listValue()
self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
def isSerializable(self):
return False
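# Illustrative WebIDL for dictionaries (hypothetical names); members are
# optional unless marked required, which is what canBeEmpty() checks:
#
#   dictionary Base {
#     long a = 0;
#   };
#   dictionary Derived : Base {
#     required DOMString b;
#   };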
class IDLDictionary(IDLObjectWithScope):
def __init__(self, location, parentScope, name, parent, members):
assert isinstance(parentScope, IDLScope)
assert isinstance(name, IDLUnresolvedIdentifier)
assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
self.parent = parent
self._finished = False
self.members = list(members)
self._partialDictionaries = []
self._extendedAttrDict = {}
self.needsConversionToJS = False
self.needsConversionFromJS = False
IDLObjectWithScope.__init__(self, location, parentScope, name)
def __str__(self):
return "Dictionary '%s'" % self.identifier.name
def isDictionary(self):
return True
def canBeEmpty(self):
"""
Returns true if this dictionary can be empty (that is, it has no
required members and neither do any of its ancestors).
"""
return (all(member.optional for member in self.members) and
(not self.parent or self.parent.canBeEmpty()))
def finish(self, scope):
if self._finished:
return
self._finished = True
if self.parent:
assert isinstance(self.parent, IDLIdentifierPlaceholder)
oldParent = self.parent
self.parent = self.parent.finish(scope)
if not isinstance(self.parent, IDLDictionary):
raise WebIDLError("Dictionary %s has parent that is not a dictionary" %
self.identifier.name,
[oldParent.location, self.parent.location])
# Make sure the parent resolves all its members before we start
# looking at them.
self.parent.finish(scope)
# Now go ahead and merge in our partial dictionaries.
for partial in self._partialDictionaries:
partial.finish(scope)
self.members.extend(partial.members)
for member in self.members:
member.resolve(self)
if not member.isComplete():
member.complete(scope)
assert member.type.isComplete()
# Members of a dictionary are sorted in lexicographic order
self.members.sort(key=lambda x: x.identifier.name)
inheritedMembers = []
ancestor = self.parent
while ancestor:
if ancestor == self:
raise WebIDLError("Dictionary %s has itself as an ancestor" %
self.identifier.name,
[self.identifier.location])
inheritedMembers.extend(ancestor.members)
ancestor = ancestor.parent
# Catch name duplication
for inheritedMember in inheritedMembers:
for member in self.members:
if member.identifier.name == inheritedMember.identifier.name:
raise WebIDLError("Dictionary %s has two members with name %s" %
(self.identifier.name, member.identifier.name),
[member.location, inheritedMember.location])
def validate(self):
def typeContainsDictionary(memberType, dictionary):
"""
Returns a tuple whose:
- First element is a Boolean value indicating whether
memberType contains dictionary.
- Second element is:
A list of locations that leads from the type that was passed in
the memberType argument, to the dictionary being validated,
if the boolean value in the first element is True.
None, if the boolean value in the first element is False.
"""
if (memberType.nullable() or
memberType.isSequence() or
memberType.isRecord()):
return typeContainsDictionary(memberType.inner, dictionary)
if memberType.isDictionary():
if memberType.inner == dictionary:
return (True, [memberType.location])
(contains, locations) = dictionaryContainsDictionary(memberType.inner,
dictionary)
if contains:
return (True, [memberType.location] + locations)
if memberType.isUnion():
for member in memberType.flatMemberTypes:
(contains, locations) = typeContainsDictionary(member, dictionary)
if contains:
return (True, locations)
return (False, None)
def dictionaryContainsDictionary(dictMember, dictionary):
for member in dictMember.members:
(contains, locations) = typeContainsDictionary(member.type, dictionary)
if contains:
return (True, [member.location] + locations)
if dictMember.parent:
if dictMember.parent == dictionary:
return (True, [dictMember.location])
else:
(contains, locations) = dictionaryContainsDictionary(dictMember.parent, dictionary)
if contains:
return (True, [dictMember.location] + locations)
return (False, None)
for member in self.members:
if member.type.isDictionary() and member.type.nullable():
raise WebIDLError("Dictionary %s has member with nullable "
"dictionary type" % self.identifier.name,
[member.location])
(contains, locations) = typeContainsDictionary(member.type, self)
if contains:
raise WebIDLError("Dictionary %s has member with itself as type." %
self.identifier.name,
[member.location] + locations)
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def addExtendedAttributes(self, attrs):
for attr in attrs:
identifier = attr.identifier()
if (identifier == "GenerateInitFromJSON" or
identifier == "GenerateInit"):
if not attr.noArguments():
raise WebIDLError("[%s] must not have arguments" % identifier,
[attr.location])
self.needsConversionFromJS = True
elif (identifier == "GenerateConversionToJS" or
identifier == "GenerateToJSON"):
if not attr.noArguments():
raise WebIDLError("[%s] must not have arguments" % identifier,
[attr.location])
# ToJSON methods require to-JS conversion, because we
# implement ToJSON by converting to a JS object and
# then using JSON.stringify.
self.needsConversionToJS = True
else:
raise WebIDLError("[%s] extended attribute not allowed on "
"dictionaries" % identifier,
[attr.location])
self._extendedAttrDict[identifier] = True
def _getDependentObjects(self):
deps = set(self.members)
if (self.parent):
deps.add(self.parent)
return deps
def addPartialDictionary(self, partial):
assert self.identifier.name == partial.identifier.name
self._partialDictionaries.append(partial)
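# Illustrative WebIDL for enums (hypothetical name); duplicate string
# values are rejected in IDLEnum.__init__ below:
#
#   enum Direction {
#     "up",
#     "down"
#   };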
class IDLEnum(IDLObjectWithIdentifier):
def __init__(self, location, parentScope, name, values):
assert isinstance(parentScope, IDLScope)
assert isinstance(name, IDLUnresolvedIdentifier)
if len(values) != len(set(values)):
raise WebIDLError("Enum %s has multiple identical strings" % name.name,
[location])
IDLObjectWithIdentifier.__init__(self, location, parentScope, name)
self._values = values
def values(self):
return self._values
def finish(self, scope):
pass
def validate(self):
pass
def isEnum(self):
return True
def addExtendedAttributes(self, attrs):
if len(attrs) != 0:
raise WebIDLError("There are no extended attributes that are "
"allowed on enums",
[attrs[0].location, self.location])
def _getDependentObjects(self):
return set()
class IDLType(IDLObject):
Tags = enum(
# The integer types
'int8',
'uint8',
'int16',
'uint16',
'int32',
'uint32',
'int64',
'uint64',
# Additional primitive types
'bool',
'unrestricted_float',
'float',
'unrestricted_double',
# "double" last primitive type to match IDLBuiltinType
'double',
# Other types
'any',
'domstring',
'bytestring',
'usvstring',
'utf8string',
'jsstring',
'object',
'void',
# Funny stuff
'interface',
'dictionary',
'enum',
'callback',
'union',
'sequence',
'record',
'promise',
)
def __init__(self, location, name):
IDLObject.__init__(self, location)
self.name = name
self.builtin = False
self.treatNullAsEmpty = False
self._clamp = False
self._enforceRange = False
self._allowShared = False
self._extendedAttrDict = {}
def __eq__(self, other):
return (other and self.builtin == other.builtin and self.name == other.name and
self._clamp == other.hasClamp() and self._enforceRange == other.hasEnforceRange() and
self.treatNullAsEmpty == other.treatNullAsEmpty and
self._allowShared == other.hasAllowShared())
def __ne__(self, other):
return not self == other
def __str__(self):
return str(self.name)
def prettyName(self):
"""
A name that looks like what this type is named in the IDL spec. By default
this is just our .name, but types that have more interesting spec
representations should override this.
"""
return str(self.name)
def isType(self):
return True
def nullable(self):
return False
def isPrimitive(self):
return False
def isBoolean(self):
return False
def isNumeric(self):
return False
def isString(self):
return False
def isByteString(self):
return False
def isDOMString(self):
return False
def isUSVString(self):
return False
def isUTF8String(self):
return False
def isJSString(self):
return False
def isVoid(self):
return self.name == "Void"
def isSequence(self):
return False
def isRecord(self):
return False
def isReadableStream(self):
return False
def isArrayBuffer(self):
return False
def isArrayBufferView(self):
return False
def isTypedArray(self):
return False
def isBufferSource(self):
return self.isArrayBuffer() or self.isArrayBufferView() or self.isTypedArray()
def isCallbackInterface(self):
return False
def isNonCallbackInterface(self):
return False
def isGeckoInterface(self):
""" Returns a boolean indicating whether this type is an 'interface'
type that is implemented in Gecko. At the moment, this returns
true for all interface types that are not types from the TypedArray
spec."""
return self.isInterface() and not self.isSpiderMonkeyInterface()
def isSpiderMonkeyInterface(self):
""" Returns a boolean indicating whether this type is an 'interface'
type that is implemented in SpiderMonkey. """
return self.isInterface() and (self.isBufferSource() or
self.isReadableStream())
def isDictionary(self):
return False
def isInterface(self):
return False
def isAny(self):
return self.tag() == IDLType.Tags.any
def isObject(self):
return self.tag() == IDLType.Tags.object
def isPromise(self):
return False
def isComplete(self):
return True
def includesRestrictedFloat(self):
return False
def isFloat(self):
return False
def isUnrestricted(self):
# Should only call this on float types
assert self.isFloat()
def isJSONType(self):
return False
def hasClamp(self):
return self._clamp
def hasEnforceRange(self):
return self._enforceRange
def hasAllowShared(self):
return self._allowShared
def tag(self):
assert False # Override me!
def treatNonCallableAsNull(self):
assert self.tag() == IDLType.Tags.callback
return self.nullable() and self.inner.callback._treatNonCallableAsNull
def treatNonObjectAsNull(self):
assert self.tag() == IDLType.Tags.callback
return self.nullable() and self.inner.callback._treatNonObjectAsNull
def withExtendedAttributes(self, attrs):
if len(attrs) > 0:
raise WebIDLError("Extended attributes on types only supported for builtins",
[attrs[0].location, self.location])
return self
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def resolveType(self, parentScope):
pass
def unroll(self):
return self
def isDistinguishableFrom(self, other):
raise TypeError("Can't tell whether a generic type is or is not "
"distinguishable from other things")
def isExposedInAllOf(self, exposureSet):
return True
class IDLUnresolvedType(IDLType):
"""
Unresolved types are interface types
"""
def __init__(self, location, name, attrs=[]):
IDLType.__init__(self, location, name)
self.extraTypeAttributes = attrs
def isComplete(self):
return False
def complete(self, scope):
obj = None
try:
obj = scope._lookupIdentifier(self.name)
except:
raise WebIDLError("Unresolved type '%s'." % self.name,
[self.location])
assert obj
# Identifier lookup should never produce a bare type here; typedefs
# and callback types are handled by the branches below.
assert not obj.isType(), "Unexpected type from lookup: %s" % obj
if obj.isTypedef():
assert self.name.name == obj.identifier.name
typedefType = IDLTypedefType(self.location, obj.innerType,
obj.identifier)
assert not typedefType.isComplete()
return typedefType.complete(scope).withExtendedAttributes(self.extraTypeAttributes)
elif obj.isCallback() and not obj.isInterface():
assert self.name.name == obj.identifier.name
return IDLCallbackType(obj.location, obj)
# Resolve the identifier for its side effect of registering it in
# the scope; the returned name itself is not needed here.
self.name.resolve(scope, None)
return IDLWrapperType(self.location, obj)
def withExtendedAttributes(self, attrs):
return IDLUnresolvedType(self.location, self.name, attrs)
def isDistinguishableFrom(self, other):
raise TypeError("Can't tell whether an unresolved type is or is not "
"distinguishable from other things")
class IDLParametrizedType(IDLType):
def __init__(self, location, name, innerType):
IDLType.__init__(self, location, name)
self.builtin = False
self.inner = innerType
def includesRestrictedFloat(self):
return self.inner.includesRestrictedFloat()
def resolveType(self, parentScope):
assert isinstance(parentScope, IDLScope)
self.inner.resolveType(parentScope)
def isComplete(self):
return self.inner.isComplete()
def unroll(self):
return self.inner.unroll()
def _getDependentObjects(self):
return self.inner._getDependentObjects()
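# Illustrative WebIDL for nullable types, written with a trailing "?"
# (hypothetical member name):
#
#   attribute long? maybeNumber;
#
# A nullable type whose inner type is itself nullable is rejected in
# complete() below.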
class IDLNullableType(IDLParametrizedType):
def __init__(self, location, innerType):
assert not innerType.isVoid()
assert not innerType == BuiltinTypes[IDLBuiltinType.Types.any]
IDLParametrizedType.__init__(self, location, None, innerType)
def __eq__(self, other):
return isinstance(other, IDLNullableType) and self.inner == other.inner
def __hash__(self):
return hash(self.inner)
def __str__(self):
return self.inner.__str__() + "OrNull"
def prettyName(self):
return self.inner.prettyName() + "?"
def nullable(self):
return True
def isCallback(self):
return self.inner.isCallback()
def isPrimitive(self):
return self.inner.isPrimitive()
def isBoolean(self):
return self.inner.isBoolean()
def isNumeric(self):
return self.inner.isNumeric()
def isString(self):
return self.inner.isString()
def isByteString(self):
return self.inner.isByteString()
def isDOMString(self):
return self.inner.isDOMString()
def isUSVString(self):
return self.inner.isUSVString()
def isUTF8String(self):
return self.inner.isUTF8String()
def isJSString(self):
return self.inner.isJSString()
def isFloat(self):
return self.inner.isFloat()
def isUnrestricted(self):
return self.inner.isUnrestricted()
def isInteger(self):
return self.inner.isInteger()
def isVoid(self):
return False
def isSequence(self):
return self.inner.isSequence()
def isRecord(self):
return self.inner.isRecord()
def isReadableStream(self):
return self.inner.isReadableStream()
def isArrayBuffer(self):
return self.inner.isArrayBuffer()
def isArrayBufferView(self):
return self.inner.isArrayBufferView()
def isTypedArray(self):
return self.inner.isTypedArray()
def isDictionary(self):
return self.inner.isDictionary()
def isInterface(self):
return self.inner.isInterface()
def isPromise(self):
# There is no such thing as a nullable Promise.
assert not self.inner.isPromise()
return False
def isCallbackInterface(self):
return self.inner.isCallbackInterface()
def isNonCallbackInterface(self):
return self.inner.isNonCallbackInterface()
def isEnum(self):
return self.inner.isEnum()
def isUnion(self):
return self.inner.isUnion()
def isJSONType(self):
return self.inner.isJSONType()
def hasClamp(self):
return self.inner.hasClamp()
def hasEnforceRange(self):
return self.inner.hasEnforceRange()
def hasAllowShared(self):
return self.inner.hasAllowShared()
def isComplete(self):
return self.name is not None
def tag(self):
return self.inner.tag()
def complete(self, scope):
if not self.inner.isComplete():
self.inner = self.inner.complete(scope)
assert self.inner.isComplete()
if self.inner.nullable():
raise WebIDLError("The inner type of a nullable type must not be "
"a nullable type",
[self.location, self.inner.location])
if self.inner.isUnion():
if self.inner.hasNullableType:
raise WebIDLError("The inner type of a nullable type must not "
"be a union type that itself has a nullable "
"type as a member type", [self.location])
if self.inner.isDOMString():
if self.inner.treatNullAsEmpty:
raise WebIDLError("[TreatNullAs] not allowed on a nullable DOMString",
[self.location, self.inner.location])
self.name = self.inner.name + "OrNull"
return self
def isDistinguishableFrom(self, other):
if (other.nullable() or
other.isDictionary() or
(other.isUnion() and
(other.hasNullableType or other.hasDictionaryType()))):
# Can't tell which type null should become
return False
return self.inner.isDistinguishableFrom(other)
def withExtendedAttributes(self, attrs):
# See https://github.com/heycam/webidl/issues/827#issuecomment-565131350
# Allowing extended attributes to apply to a nullable type is an intermediate solution.
# A potential longer term solution is to introduce a null type and get rid of nullables.
# For example, we could do `([Clamp] long or null) foo` in the future.
return IDLNullableType(self.location, self.inner.withExtendedAttributes(attrs))
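# Hedged example of nullable naming (illustrative, not normative): for the
# IDL type "long?", the inner builtin is named "Long", so after complete()
# the nullable's name is "LongOrNull" and prettyName() yields "long?".
# Wrapping an already-nullable inner type, or a union that itself contains
# a nullable member, is rejected in complete() above.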
class IDLSequenceType(IDLParametrizedType):
def __init__(self, location, parameterType):
assert not parameterType.isVoid()
IDLParametrizedType.__init__(self, location, parameterType.name, parameterType)
# Need to set self.name up front if our inner type is already complete,
# since in that case our .complete() won't be called.
if self.inner.isComplete():
self.name = self.inner.name + "Sequence"
def __eq__(self, other):
return isinstance(other, IDLSequenceType) and self.inner == other.inner
def __hash__(self):
return hash(self.inner)
def __str__(self):
return self.inner.__str__() + "Sequence"
def prettyName(self):
return "sequence<%s>" % self.inner.prettyName()
def nullable(self):
return False
def isPrimitive(self):
return False
def isString(self):
return False
def isByteString(self):
return False
def isDOMString(self):
return False
def isUSVString(self):
return False
def isUTF8String(self):
return False
def isJSString(self):
return False
def isVoid(self):
return False
def isSequence(self):
return True
def isDictionary(self):
return False
def isInterface(self):
return False
def isEnum(self):
return False
def isJSONType(self):
return self.inner.isJSONType()
def tag(self):
return IDLType.Tags.sequence
def complete(self, scope):
self.inner = self.inner.complete(scope)
self.name = self.inner.name + "Sequence"
return self
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isInterface() or other.isDictionary() or
other.isCallback() or other.isRecord())
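# Hedged example of sequence naming: for "sequence<long>" the inner builtin
# is named "Long", so the sequence's name becomes "LongSequence" and
# prettyName() yields "sequence<long>"; tag() is IDLType.Tags.sequence.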
class IDLRecordType(IDLParametrizedType):
def __init__(self, location, keyType, valueType):
assert keyType.isString()
assert keyType.isComplete()
assert not valueType.isVoid()
IDLParametrizedType.__init__(self, location, valueType.name, valueType)
self.keyType = keyType
# Need to set self.name up front if our inner type is already complete,
# since in that case our .complete() won't be called.
if self.inner.isComplete():
self.name = self.keyType.name + self.inner.name + "Record"
def __eq__(self, other):
return isinstance(other, IDLRecordType) and self.inner == other.inner
def __str__(self):
return self.keyType.__str__() + self.inner.__str__() + "Record"
def prettyName(self):
return "record<%s, %s>" % (self.keyType.prettyName(), self.inner.prettyName())
def isRecord(self):
return True
def isJSONType(self):
return self.inner.isJSONType()
def tag(self):
return IDLType.Tags.record
def complete(self, scope):
self.inner = self.inner.complete(scope)
self.name = self.keyType.name + self.inner.name + "Record"
return self
def unroll(self):
# We do not unroll our inner. Just stop at ourselves. That
# lets us add headers for both ourselves and our inner as
# needed.
return self
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isNonCallbackInterface() or other.isSequence())
def isExposedInAllOf(self, exposureSet):
return self.inner.unroll().isExposedInAllOf(exposureSet)
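# Hedged example of record naming: for "record<DOMString, long>" the key
# builtin is named "String" and the value builtin "Long", so the record's
# name becomes "StringLongRecord" and prettyName() yields
# "record<DOMString, long>".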
class IDLUnionType(IDLType):
def __init__(self, location, memberTypes):
IDLType.__init__(self, location, "")
self.memberTypes = memberTypes
self.hasNullableType = False
self._dictionaryType = None
self.flatMemberTypes = None
self.builtin = False
def __eq__(self, other):
return isinstance(other, IDLUnionType) and self.memberTypes == other.memberTypes
def __hash__(self):
assert self.isComplete()
return self.name.__hash__()
def prettyName(self):
return "(" + " or ".join(m.prettyName() for m in self.memberTypes) + ")"
def isVoid(self):
return False
def isUnion(self):
return True
def isJSONType(self):
return all(m.isJSONType() for m in self.memberTypes)
def includesRestrictedFloat(self):
return any(t.includesRestrictedFloat() for t in self.memberTypes)
def tag(self):
return IDLType.Tags.union
def resolveType(self, parentScope):
assert isinstance(parentScope, IDLScope)
for t in self.memberTypes:
t.resolveType(parentScope)
def isComplete(self):
return self.flatMemberTypes is not None
def complete(self, scope):
def typeName(type):
if isinstance(type, IDLNullableType):
return typeName(type.inner) + "OrNull"
if isinstance(type, IDLWrapperType):
return typeName(type._identifier.object())
if isinstance(type, IDLObjectWithIdentifier):
return typeName(type.identifier)
if isinstance(type, IDLBuiltinType) and type.hasAllowShared():
assert type.isBufferSource()
return "MaybeShared" + type.name
return type.name
for (i, type) in enumerate(self.memberTypes):
# Exclude typedefs because if given "typedef (B or C) test",
# we want AOrTest, not AOrBOrC
if not type.isComplete() and not isinstance(type, IDLTypedefType):
self.memberTypes[i] = type.complete(scope)
self.name = "Or".join(typeName(type) for type in self.memberTypes)
# We do this again to complete the typedef types
for (i, type) in enumerate(self.memberTypes):
if not type.isComplete():
self.memberTypes[i] = type.complete(scope)
self.flatMemberTypes = list(self.memberTypes)
i = 0
while i < len(self.flatMemberTypes):
if self.flatMemberTypes[i].nullable():
if self.hasNullableType:
raise WebIDLError("Can't have more than one nullable types in a union",
[nullableType.location, self.flatMemberTypes[i].location])
if self.hasDictionaryType():
raise WebIDLError("Can't have a nullable type and a "
"dictionary type in a union",
[self._dictionaryType.location,
self.flatMemberTypes[i].location])
self.hasNullableType = True
nullableType = self.flatMemberTypes[i]
self.flatMemberTypes[i] = self.flatMemberTypes[i].inner
continue
if self.flatMemberTypes[i].isDictionary():
if self.hasNullableType:
raise WebIDLError("Can't have a nullable type and a "
"dictionary type in a union",
[nullableType.location,
self.flatMemberTypes[i].location])
self._dictionaryType = self.flatMemberTypes[i]
elif self.flatMemberTypes[i].isUnion():
self.flatMemberTypes[i:i + 1] = self.flatMemberTypes[i].memberTypes
continue
i += 1
for (i, t) in enumerate(self.flatMemberTypes[:-1]):
for u in self.flatMemberTypes[i + 1:]:
if not t.isDistinguishableFrom(u):
raise WebIDLError("Flat member types of a union should be "
"distinguishable, " + str(t) + " is not "
"distinguishable from " + str(u),
[self.location, t.location, u.location])
return self
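    # Hedged example of union naming and flattening: "(long or DOMString)"
    # completes with name "LongOrString"; "(long? or DOMString)" completes
    # with name "LongOrNullOrString", sets hasNullableType, and stores the
    # unwrapped long in flatMemberTypes. Nested unions are spliced into
    # flatMemberTypes by the loop above.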
def isDistinguishableFrom(self, other):
if self.hasNullableType and other.nullable():
# Can't tell which type null should become
return False
if other.isUnion():
otherTypes = other.unroll().memberTypes
else:
otherTypes = [other]
# For every type in otherTypes, check that it's distinguishable from
# every type in our types
for u in otherTypes:
if any(not t.isDistinguishableFrom(u) for t in self.memberTypes):
return False
return True
def isExposedInAllOf(self, exposureSet):
        # We could have different member types in different globals.
        # Just make sure that each thing in exposureSet has one of our
        # member types exposed in it.
for globalName in exposureSet:
if not any(t.unroll().isExposedInAllOf(set([globalName])) for t
in self.flatMemberTypes):
return False
return True
def hasDictionaryType(self):
return self._dictionaryType is not None
def hasPossiblyEmptyDictionaryType(self):
return (self._dictionaryType is not None and
self._dictionaryType.inner.canBeEmpty())
def _getDependentObjects(self):
return set(self.memberTypes)
class IDLTypedefType(IDLType):
def __init__(self, location, innerType, name):
IDLType.__init__(self, location, name)
self.inner = innerType
self.builtin = False
def __eq__(self, other):
return isinstance(other, IDLTypedefType) and self.inner == other.inner
def __str__(self):
return self.name
def nullable(self):
return self.inner.nullable()
def isPrimitive(self):
return self.inner.isPrimitive()
def isBoolean(self):
return self.inner.isBoolean()
def isNumeric(self):
return self.inner.isNumeric()
def isString(self):
return self.inner.isString()
def isByteString(self):
return self.inner.isByteString()
def isDOMString(self):
return self.inner.isDOMString()
def isUSVString(self):
return self.inner.isUSVString()
def isUTF8String(self):
return self.inner.isUTF8String()
def isJSString(self):
return self.inner.isJSString()
def isVoid(self):
return self.inner.isVoid()
def isJSONType(self):
return self.inner.isJSONType()
def isSequence(self):
return self.inner.isSequence()
def isRecord(self):
return self.inner.isRecord()
def isReadableStream(self):
return self.inner.isReadableStream()
def isDictionary(self):
return self.inner.isDictionary()
def isArrayBuffer(self):
return self.inner.isArrayBuffer()
def isArrayBufferView(self):
return self.inner.isArrayBufferView()
def isTypedArray(self):
return self.inner.isTypedArray()
def isInterface(self):
return self.inner.isInterface()
def isCallbackInterface(self):
return self.inner.isCallbackInterface()
def isNonCallbackInterface(self):
return self.inner.isNonCallbackInterface()
def isComplete(self):
return False
def complete(self, parentScope):
if not self.inner.isComplete():
self.inner = self.inner.complete(parentScope)
assert self.inner.isComplete()
return self.inner
# Do we need a resolveType impl? I don't think it's particularly useful....
def tag(self):
return self.inner.tag()
def unroll(self):
return self.inner.unroll()
def isDistinguishableFrom(self, other):
return self.inner.isDistinguishableFrom(other)
def _getDependentObjects(self):
return self.inner._getDependentObjects()
def withExtendedAttributes(self, attrs):
return IDLTypedefType(self.location, self.inner.withExtendedAttributes(attrs), self.name)
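# Hedged note on typedef completion: IDLTypedefType.complete() returns the
# completed *inner* type rather than self, so after parsing finishes no
# IDLTypedefType instances remain in member signatures; e.g. a member typed
# with "typedef sequence<long> LongSeq;" ends up holding the sequence type
# directly. The typedef name is hypothetical, for illustration only.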
class IDLTypedef(IDLObjectWithIdentifier):
def __init__(self, location, parentScope, innerType, name):
# Set self.innerType first, because IDLObjectWithIdentifier.__init__
# will call our __str__, which wants to use it.
self.innerType = innerType
identifier = IDLUnresolvedIdentifier(location, name)
IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
def __str__(self):
return "Typedef %s %s" % (self.identifier.name, self.innerType)
def finish(self, parentScope):
if not self.innerType.isComplete():
self.innerType = self.innerType.complete(parentScope)
def validate(self):
pass
def isTypedef(self):
return True
def addExtendedAttributes(self, attrs):
if len(attrs) != 0:
raise WebIDLError("There are no extended attributes that are "
"allowed on typedefs",
[attrs[0].location, self.location])
def _getDependentObjects(self):
return self.innerType._getDependentObjects()
class IDLWrapperType(IDLType):
def __init__(self, location, inner):
IDLType.__init__(self, location, inner.identifier.name)
self.inner = inner
self._identifier = inner.identifier
self.builtin = False
def __eq__(self, other):
return (isinstance(other, IDLWrapperType) and
self._identifier == other._identifier and
self.builtin == other.builtin)
def __hash__(self):
return hash((self._identifier, self.builtin))
def __str__(self):
return str(self.name) + " (Wrapper)"
def nullable(self):
return False
def isPrimitive(self):
return False
def isString(self):
return False
def isByteString(self):
return False
def isDOMString(self):
return False
def isUSVString(self):
return False
def isUTF8String(self):
return False
def isJSString(self):
return False
def isVoid(self):
return False
def isSequence(self):
return False
def isDictionary(self):
return isinstance(self.inner, IDLDictionary)
def isInterface(self):
return (isinstance(self.inner, IDLInterface) or
isinstance(self.inner, IDLExternalInterface))
def isCallbackInterface(self):
return self.isInterface() and self.inner.isCallback()
def isNonCallbackInterface(self):
return self.isInterface() and not self.inner.isCallback()
def isEnum(self):
return isinstance(self.inner, IDLEnum)
def isJSONType(self):
if self.isInterface():
if self.inner.isExternal():
return False
iface = self.inner
while iface:
if any(m.isMethod() and m.isToJSON() for m in iface.members):
return True
iface = iface.parent
return False
elif self.isEnum():
return True
elif self.isDictionary():
dictionary = self.inner
while dictionary:
if not all(m.type.isJSONType() for m in dictionary.members):
return False
dictionary = dictionary.parent
return True
else:
raise WebIDLError("IDLWrapperType wraps type %s that we don't know if "
"is serializable" % type(self.inner), [self.location])
def resolveType(self, parentScope):
assert isinstance(parentScope, IDLScope)
self.inner.resolve(parentScope)
def isComplete(self):
return True
def tag(self):
if self.isInterface():
return IDLType.Tags.interface
elif self.isEnum():
return IDLType.Tags.enum
elif self.isDictionary():
return IDLType.Tags.dictionary
else:
assert False
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
assert self.isInterface() or self.isEnum() or self.isDictionary()
if self.isEnum():
return (other.isPrimitive() or other.isInterface() or other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord())
if self.isDictionary() and other.nullable():
return False
if (other.isPrimitive() or other.isString() or other.isEnum() or
other.isSequence()):
return True
if self.isDictionary():
return other.isNonCallbackInterface()
assert self.isInterface()
if other.isInterface():
if other.isSpiderMonkeyInterface():
# Just let |other| handle things
return other.isDistinguishableFrom(self)
assert self.isGeckoInterface() and other.isGeckoInterface()
if self.inner.isExternal() or other.unroll().inner.isExternal():
return self != other
return (len(self.inner.interfacesBasedOnSelf &
other.unroll().inner.interfacesBasedOnSelf) == 0 and
(self.isNonCallbackInterface() or
other.isNonCallbackInterface()))
if (other.isDictionary() or other.isCallback() or
other.isRecord()):
return self.isNonCallbackInterface()
# Not much else |other| can be
assert other.isObject()
return False
def isExposedInAllOf(self, exposureSet):
if not self.isInterface():
return True
iface = self.inner
if iface.isExternal():
# Let's say true, so we don't have to implement exposure mixins on
# external interfaces and sprinkle [Exposed=Window] on every single
# external interface declaration.
return True
return iface.exposureSet.issuperset(exposureSet)
def _getDependentObjects(self):
# NB: The codegen for an interface type depends on
# a) That the identifier is in fact an interface (as opposed to
# a dictionary or something else).
# b) The native type of the interface.
# If we depend on the interface object we will also depend on
# anything the interface depends on which is undesirable. We
# considered implementing a dependency just on the interface type
# file, but then every modification to an interface would cause this
# to be regenerated which is still undesirable. We decided not to
# depend on anything, reasoning that:
# 1) Changing the concrete type of the interface requires modifying
# Bindings.conf, which is still a global dependency.
# 2) Changing an interface to a dictionary (or vice versa) with the
# same identifier should be incredibly rare.
#
# On the other hand, if our type is a dictionary, we should
# depend on it, because the member types of a dictionary
# affect whether a method taking the dictionary as an argument
# takes a JSContext* argument or not.
if self.isDictionary():
return set([self.inner])
return set()
class IDLPromiseType(IDLParametrizedType):
def __init__(self, location, innerType):
IDLParametrizedType.__init__(self, location, "Promise", innerType)
def __eq__(self, other):
return (isinstance(other, IDLPromiseType) and
self.promiseInnerType() == other.promiseInnerType())
def __str__(self):
return self.inner.__str__() + "Promise"
def prettyName(self):
return "Promise<%s>" % self.inner.prettyName()
def isPromise(self):
return True
def promiseInnerType(self):
return self.inner
def tag(self):
return IDLType.Tags.promise
def complete(self, scope):
self.inner = self.promiseInnerType().complete(scope)
return self
def unroll(self):
# We do not unroll our inner. Just stop at ourselves. That
# lets us add headers for both ourselves and our inner as
# needed.
return self
def isDistinguishableFrom(self, other):
# Promises are not distinguishable from anything.
return False
def isExposedInAllOf(self, exposureSet):
# Check the internal type
return self.promiseInnerType().unroll().isExposedInAllOf(exposureSet)
class IDLBuiltinType(IDLType):
Types = enum(
# The integer types
'byte',
'octet',
'short',
'unsigned_short',
'long',
'unsigned_long',
'long_long',
'unsigned_long_long',
# Additional primitive types
'boolean',
'unrestricted_float',
'float',
'unrestricted_double',
# IMPORTANT: "double" must be the last primitive type listed
'double',
# Other types
'any',
'domstring',
'bytestring',
'usvstring',
'utf8string',
'jsstring',
'object',
'void',
# Funny stuff
'ArrayBuffer',
'ArrayBufferView',
'Int8Array',
'Uint8Array',
'Uint8ClampedArray',
'Int16Array',
'Uint16Array',
'Int32Array',
'Uint32Array',
'Float32Array',
'Float64Array',
'ReadableStream',
)
TagLookup = {
Types.byte: IDLType.Tags.int8,
Types.octet: IDLType.Tags.uint8,
Types.short: IDLType.Tags.int16,
Types.unsigned_short: IDLType.Tags.uint16,
Types.long: IDLType.Tags.int32,
Types.unsigned_long: IDLType.Tags.uint32,
Types.long_long: IDLType.Tags.int64,
Types.unsigned_long_long: IDLType.Tags.uint64,
Types.boolean: IDLType.Tags.bool,
Types.unrestricted_float: IDLType.Tags.unrestricted_float,
Types.float: IDLType.Tags.float,
Types.unrestricted_double: IDLType.Tags.unrestricted_double,
Types.double: IDLType.Tags.double,
Types.any: IDLType.Tags.any,
Types.domstring: IDLType.Tags.domstring,
Types.bytestring: IDLType.Tags.bytestring,
Types.usvstring: IDLType.Tags.usvstring,
Types.utf8string: IDLType.Tags.utf8string,
Types.jsstring: IDLType.Tags.jsstring,
Types.object: IDLType.Tags.object,
Types.void: IDLType.Tags.void,
Types.ArrayBuffer: IDLType.Tags.interface,
Types.ArrayBufferView: IDLType.Tags.interface,
Types.Int8Array: IDLType.Tags.interface,
Types.Uint8Array: IDLType.Tags.interface,
Types.Uint8ClampedArray: IDLType.Tags.interface,
Types.Int16Array: IDLType.Tags.interface,
Types.Uint16Array: IDLType.Tags.interface,
Types.Int32Array: IDLType.Tags.interface,
Types.Uint32Array: IDLType.Tags.interface,
Types.Float32Array: IDLType.Tags.interface,
Types.Float64Array: IDLType.Tags.interface,
Types.ReadableStream: IDLType.Tags.interface,
}
PrettyNames = {
Types.byte: "byte",
Types.octet: "octet",
Types.short: "short",
Types.unsigned_short: "unsigned short",
Types.long: "long",
Types.unsigned_long: "unsigned long",
Types.long_long: "long long",
Types.unsigned_long_long: "unsigned long long",
Types.boolean: "boolean",
Types.unrestricted_float: "unrestricted float",
Types.float: "float",
Types.unrestricted_double: "unrestricted double",
Types.double: "double",
Types.any: "any",
Types.domstring: "DOMString",
Types.bytestring: "ByteString",
Types.usvstring: "USVString",
Types.utf8string: "USVString", # That's what it is in spec terms
Types.jsstring: "USVString", # Again, that's what it is in spec terms
Types.object: "object",
Types.void: "void",
Types.ArrayBuffer: "ArrayBuffer",
Types.ArrayBufferView: "ArrayBufferView",
Types.Int8Array: "Int8Array",
Types.Uint8Array: "Uint8Array",
Types.Uint8ClampedArray: "Uint8ClampedArray",
Types.Int16Array: "Int16Array",
Types.Uint16Array: "Uint16Array",
Types.Int32Array: "Int32Array",
Types.Uint32Array: "Uint32Array",
Types.Float32Array: "Float32Array",
Types.Float64Array: "Float64Array",
Types.ReadableStream: "ReadableStream",
}
def __init__(self, location, name, type, clamp=False, enforceRange=False, treatNullAsEmpty=False,
allowShared=False, attrLocation=[]):
"""
The mutually exclusive clamp/enforceRange/treatNullAsEmpty/allowShared arguments are used
to create instances of this type with the appropriate attributes attached. Use .clamped(),
.rangeEnforced(), .withTreatNullAs() and .withAllowShared().
attrLocation is an array of source locations of these attributes for error reporting.
"""
IDLType.__init__(self, location, name)
self.builtin = True
self._typeTag = type
self._clamped = None
self._rangeEnforced = None
self._withTreatNullAs = None
        self._withAllowShared = None
if self.isInteger():
if clamp:
self._clamp = True
self.name = "Clamped" + self.name
self._extendedAttrDict["Clamp"] = True
elif enforceRange:
self._enforceRange = True
self.name = "RangeEnforced" + self.name
self._extendedAttrDict["EnforceRange"] = True
elif clamp or enforceRange:
raise WebIDLError("Non-integer types cannot be [Clamp] or [EnforceRange]", attrLocation)
if self.isDOMString() or self.isUTF8String():
if treatNullAsEmpty:
self.treatNullAsEmpty = True
self.name = "NullIsEmpty" + self.name
self._extendedAttrDict["TreatNullAs"] = ["EmptyString"]
elif treatNullAsEmpty:
raise WebIDLError("Non-string types cannot be [TreatNullAs]", attrLocation)
if self.isBufferSource():
if allowShared:
self._allowShared = True
self._extendedAttrDict["AllowShared"] = True
elif allowShared:
raise WebIDLError("Types that are not buffer source types cannot be [AllowShared]", attrLocation)
def __str__(self):
if self._allowShared:
assert self.isBufferSource()
return "MaybeShared" + str(self.name)
return str(self.name)
def __eq__(self, other):
        return (other and self.location == other.location and
                self.name == other.name and self._typeTag == other._typeTag)
def __hash__(self):
return hash((self.location, self.name, self._typeTag))
def prettyName(self):
return IDLBuiltinType.PrettyNames[self._typeTag]
def clamped(self, attrLocation):
if not self._clamped:
self._clamped = IDLBuiltinType(self.location, self.name,
self._typeTag, clamp=True,
attrLocation=attrLocation)
return self._clamped
def rangeEnforced(self, attrLocation):
if not self._rangeEnforced:
self._rangeEnforced = IDLBuiltinType(self.location, self.name,
self._typeTag, enforceRange=True,
attrLocation=attrLocation)
return self._rangeEnforced
def withTreatNullAs(self, attrLocation):
if not self._withTreatNullAs:
self._withTreatNullAs = IDLBuiltinType(self.location, self.name,
self._typeTag, treatNullAsEmpty=True,
attrLocation=attrLocation)
return self._withTreatNullAs
def withAllowShared(self, attrLocation):
if not self._withAllowShared:
self._withAllowShared = IDLBuiltinType(self.location, self.name,
self._typeTag, allowShared=True,
attrLocation=attrLocation)
return self._withAllowShared
def isPrimitive(self):
return self._typeTag <= IDLBuiltinType.Types.double
def isBoolean(self):
return self._typeTag == IDLBuiltinType.Types.boolean
def isNumeric(self):
return self.isPrimitive() and not self.isBoolean()
def isString(self):
return (self._typeTag == IDLBuiltinType.Types.domstring or
self._typeTag == IDLBuiltinType.Types.bytestring or
self._typeTag == IDLBuiltinType.Types.usvstring or
self._typeTag == IDLBuiltinType.Types.utf8string or
self._typeTag == IDLBuiltinType.Types.jsstring)
def isByteString(self):
return self._typeTag == IDLBuiltinType.Types.bytestring
def isDOMString(self):
return self._typeTag == IDLBuiltinType.Types.domstring
def isUSVString(self):
return self._typeTag == IDLBuiltinType.Types.usvstring
def isUTF8String(self):
return self._typeTag == IDLBuiltinType.Types.utf8string
def isJSString(self):
return self._typeTag == IDLBuiltinType.Types.jsstring
def isInteger(self):
return self._typeTag <= IDLBuiltinType.Types.unsigned_long_long
def isArrayBuffer(self):
return self._typeTag == IDLBuiltinType.Types.ArrayBuffer
def isArrayBufferView(self):
return self._typeTag == IDLBuiltinType.Types.ArrayBufferView
def isTypedArray(self):
return (self._typeTag >= IDLBuiltinType.Types.Int8Array and
self._typeTag <= IDLBuiltinType.Types.Float64Array)
def isReadableStream(self):
return self._typeTag == IDLBuiltinType.Types.ReadableStream
def isInterface(self):
# TypedArray things are interface types per the TypedArray spec,
# but we handle them as builtins because SpiderMonkey implements
# all of it internally.
return (self.isArrayBuffer() or
self.isArrayBufferView() or
self.isTypedArray() or
self.isReadableStream())
def isNonCallbackInterface(self):
# All the interfaces we can be are non-callback
return self.isInterface()
def isFloat(self):
return (self._typeTag == IDLBuiltinType.Types.float or
self._typeTag == IDLBuiltinType.Types.double or
self._typeTag == IDLBuiltinType.Types.unrestricted_float or
self._typeTag == IDLBuiltinType.Types.unrestricted_double)
def isUnrestricted(self):
assert self.isFloat()
return (self._typeTag == IDLBuiltinType.Types.unrestricted_float or
self._typeTag == IDLBuiltinType.Types.unrestricted_double)
def isJSONType(self):
return self.isPrimitive() or self.isString() or self.isObject()
def includesRestrictedFloat(self):
return self.isFloat() and not self.isUnrestricted()
def tag(self):
return IDLBuiltinType.TagLookup[self._typeTag]
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
if self.isBoolean():
return (other.isNumeric() or other.isString() or other.isEnum() or
other.isInterface() or other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord())
if self.isNumeric():
return (other.isBoolean() or other.isString() or other.isEnum() or
other.isInterface() or other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord())
if self.isString():
return (other.isPrimitive() or other.isInterface() or
other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord())
if self.isAny():
# Can't tell "any" apart from anything
return False
if self.isObject():
return other.isPrimitive() or other.isString() or other.isEnum()
if self.isVoid():
return not other.isVoid()
# Not much else we could be!
assert self.isSpiderMonkeyInterface()
# Like interfaces, but we know we're not a callback
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord() or
(other.isInterface() and (
# ArrayBuffer is distinguishable from everything
# that's not an ArrayBuffer or a callback interface
(self.isArrayBuffer() and not other.isArrayBuffer()) or
(self.isReadableStream() and not other.isReadableStream()) or
# ArrayBufferView is distinguishable from everything
# that's not an ArrayBufferView or typed array.
(self.isArrayBufferView() and not other.isArrayBufferView() and
not other.isTypedArray()) or
# Typed arrays are distinguishable from everything
# except ArrayBufferView and the same type of typed
# array
(self.isTypedArray() and not other.isArrayBufferView() and not
(other.isTypedArray() and other.name == self.name)))))
def _getDependentObjects(self):
return set()
def withExtendedAttributes(self, attrs):
ret = self
for attribute in attrs:
identifier = attribute.identifier()
if identifier == "Clamp":
if not attribute.noArguments():
raise WebIDLError("[Clamp] must take no arguments",
[attribute.location])
if ret.hasEnforceRange() or self._enforceRange:
raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
[self.location, attribute.location])
ret = self.clamped([self.location, attribute.location])
elif identifier == "EnforceRange":
if not attribute.noArguments():
raise WebIDLError("[EnforceRange] must take no arguments",
[attribute.location])
if ret.hasClamp() or self._clamp:
raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
[self.location, attribute.location])
ret = self.rangeEnforced([self.location, attribute.location])
elif identifier == "TreatNullAs":
if not (self.isDOMString() or self.isUTF8String()):
raise WebIDLError("[TreatNullAs] only allowed on DOMStrings and UTF8Strings",
[self.location, attribute.location])
assert not self.nullable()
if not attribute.hasValue():
raise WebIDLError("[TreatNullAs] must take an identifier argument",
[attribute.location])
value = attribute.value()
if value != 'EmptyString':
raise WebIDLError("[TreatNullAs] must take the identifier "
"'EmptyString', not '%s'" % value,
[attribute.location])
ret = self.withTreatNullAs([self.location, attribute.location])
elif identifier == "AllowShared":
if not attribute.noArguments():
raise WebIDLError("[AllowShared] must take no arguments",
[attribute.location])
if not self.isBufferSource():
raise WebIDLError("[AllowShared] only allowed on buffer source types",
[self.location, attribute.location])
ret = self.withAllowShared([self.location, attribute.location])
else:
raise WebIDLError("Unhandled extended attribute on type",
[self.location, attribute.location])
return ret
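# Hedged example of extended-attribute application: calling
# withExtendedAttributes with [Clamp] on the builtin named "Long" returns
# the cached clamped() instance named "ClampedLong"; applying [Clamp] again
# to the same base type returns the same cached object, and combining it
# with [EnforceRange] raises a WebIDLError per the mutual-exclusion checks
# above.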
BuiltinTypes = {
IDLBuiltinType.Types.byte:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Byte",
IDLBuiltinType.Types.byte),
IDLBuiltinType.Types.octet:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Octet",
IDLBuiltinType.Types.octet),
IDLBuiltinType.Types.short:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Short",
IDLBuiltinType.Types.short),
IDLBuiltinType.Types.unsigned_short:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedShort",
IDLBuiltinType.Types.unsigned_short),
IDLBuiltinType.Types.long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Long",
IDLBuiltinType.Types.long),
IDLBuiltinType.Types.unsigned_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLong",
IDLBuiltinType.Types.unsigned_long),
IDLBuiltinType.Types.long_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "LongLong",
IDLBuiltinType.Types.long_long),
IDLBuiltinType.Types.unsigned_long_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLongLong",
IDLBuiltinType.Types.unsigned_long_long),
IDLBuiltinType.Types.boolean:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Boolean",
IDLBuiltinType.Types.boolean),
IDLBuiltinType.Types.float:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float",
IDLBuiltinType.Types.float),
IDLBuiltinType.Types.unrestricted_float:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnrestrictedFloat",
IDLBuiltinType.Types.unrestricted_float),
IDLBuiltinType.Types.double:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Double",
IDLBuiltinType.Types.double),
IDLBuiltinType.Types.unrestricted_double:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnrestrictedDouble",
IDLBuiltinType.Types.unrestricted_double),
IDLBuiltinType.Types.any:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Any",
IDLBuiltinType.Types.any),
IDLBuiltinType.Types.domstring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "String",
IDLBuiltinType.Types.domstring),
IDLBuiltinType.Types.bytestring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ByteString",
IDLBuiltinType.Types.bytestring),
IDLBuiltinType.Types.usvstring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "USVString",
IDLBuiltinType.Types.usvstring),
IDLBuiltinType.Types.utf8string:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UTF8String",
IDLBuiltinType.Types.utf8string),
IDLBuiltinType.Types.jsstring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "JSString",
IDLBuiltinType.Types.jsstring),
IDLBuiltinType.Types.object:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Object",
IDLBuiltinType.Types.object),
IDLBuiltinType.Types.void:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Void",
IDLBuiltinType.Types.void),
IDLBuiltinType.Types.ArrayBuffer:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBuffer",
IDLBuiltinType.Types.ArrayBuffer),
IDLBuiltinType.Types.ArrayBufferView:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBufferView",
IDLBuiltinType.Types.ArrayBufferView),
IDLBuiltinType.Types.Int8Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int8Array",
IDLBuiltinType.Types.Int8Array),
IDLBuiltinType.Types.Uint8Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8Array",
IDLBuiltinType.Types.Uint8Array),
IDLBuiltinType.Types.Uint8ClampedArray:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8ClampedArray",
IDLBuiltinType.Types.Uint8ClampedArray),
IDLBuiltinType.Types.Int16Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int16Array",
IDLBuiltinType.Types.Int16Array),
IDLBuiltinType.Types.Uint16Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint16Array",
IDLBuiltinType.Types.Uint16Array),
IDLBuiltinType.Types.Int32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int32Array",
IDLBuiltinType.Types.Int32Array),
IDLBuiltinType.Types.Uint32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint32Array",
IDLBuiltinType.Types.Uint32Array),
IDLBuiltinType.Types.Float32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float32Array",
IDLBuiltinType.Types.Float32Array),
IDLBuiltinType.Types.Float64Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float64Array",
IDLBuiltinType.Types.Float64Array),
IDLBuiltinType.Types.ReadableStream:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ReadableStream",
IDLBuiltinType.Types.ReadableStream),
}
integerTypeSizes = {
IDLBuiltinType.Types.byte: (-128, 127),
IDLBuiltinType.Types.octet: (0, 255),
IDLBuiltinType.Types.short: (-32768, 32767),
IDLBuiltinType.Types.unsigned_short: (0, 65535),
IDLBuiltinType.Types.long: (-2147483648, 2147483647),
IDLBuiltinType.Types.unsigned_long: (0, 4294967295),
IDLBuiltinType.Types.long_long: (-9223372036854775808, 9223372036854775807),
IDLBuiltinType.Types.unsigned_long_long: (0, 18446744073709551615)
}
def matchIntegerValueToType(value):
    for type, (minValue, maxValue) in integerTypeSizes.items():
        if minValue <= value <= maxValue:
return BuiltinTypes[type]
return None
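# Hedged illustration of integer matching (values are examples only):
#
#     matchIntegerValueToType(-5)          # -> BuiltinTypes[...Types.byte]
#     matchIntegerValueToType(200)         # -> BuiltinTypes[...Types.octet]
#     matchIntegerValueToType(300)         # -> BuiltinTypes[...Types.short]
#     matchIntegerValueToType(3000000000)  # -> BuiltinTypes[...Types.unsigned_long]
#
# The dict is ordered from narrowest to widest, so the first (narrowest)
# type whose range contains the value wins.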
class NoCoercionFoundError(WebIDLError):
"""
A class we use to indicate generic coercion failures because none of the
types worked out in IDLValue.coerceToType.
"""
class IDLValue(IDLObject):
def __init__(self, location, type, value):
IDLObject.__init__(self, location)
self.type = type
assert isinstance(type, IDLType)
self.value = value
def coerceToType(self, type, location):
if type == self.type:
return self # Nothing to do
# We first check for unions to ensure that even if the union is nullable
# we end up with the right flat member type, not the union's type.
if type.isUnion():
# We use the flat member types here, because if we have a nullable
# member type, or a nested union, we want the type the value
# actually coerces to, not the nullable or nested union type.
for subtype in type.unroll().flatMemberTypes:
try:
coercedValue = self.coerceToType(subtype, location)
# Create a new IDLValue to make sure that we have the
# correct float/double type. This is necessary because we
# use the value's type when it is a default value of a
# union, and the union cares about the exact float type.
return IDLValue(self.location, subtype, coercedValue.value)
except Exception as e:
# Make sure to propagate out WebIDLErrors that are not the
# generic "hey, we could not coerce to this type at all"
# exception, because those are specific "coercion failed for
# reason X" exceptions. Note that we want to swallow
# non-WebIDLErrors here, because those can just happen if
# "type" is not something that can have a default value at
# all.
if (isinstance(e, WebIDLError) and
not isinstance(e, NoCoercionFoundError)):
raise e
# If the type allows null, rerun this matching on the inner type, except
# nullable enums. We handle those specially, because we want our
# default string values to stay strings even when assigned to a nullable
# enum.
elif type.nullable() and not type.isEnum():
innerValue = self.coerceToType(type.inner, location)
return IDLValue(self.location, type, innerValue.value)
elif self.type.isInteger() and type.isInteger():
# We're both integer types. See if we fit.
(min, max) = integerTypeSizes[type._typeTag]
if self.value <= max and self.value >= min:
# Promote
return IDLValue(self.location, type, self.value)
else:
raise WebIDLError("Value %s is out of range for type %s." %
(self.value, type), [location])
elif self.type.isInteger() and type.isFloat():
# Convert an integer literal into float
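            # 2**24 (16777216) is the largest integer magnitude that a
            # 32-bit IEEE float represents exactly; the same bound is
            # applied even when the target is a double, which is stricter
            # than necessary but always safe.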
if -2**24 <= self.value <= 2**24:
return IDLValue(self.location, type, float(self.value))
else:
raise WebIDLError("Converting value %s to %s will lose precision." %
(self.value, type), [location])
elif self.type.isString() and type.isEnum():
# Just keep our string, but make sure it's a valid value for this enum
enum = type.unroll().inner
if self.value not in list(enum.values()):
raise WebIDLError("'%s' is not a valid default value for enum %s"
% (self.value, enum.identifier.name),
[location, enum.location])
return self
elif self.type.isFloat() and type.isFloat():
if (not type.isUnrestricted() and
(self.value == float("inf") or self.value == float("-inf") or
math.isnan(self.value))):
raise WebIDLError("Trying to convert unrestricted value %s to non-unrestricted"
% self.value, [location])
return IDLValue(self.location, type, self.value)
elif self.type.isString() and type.isUSVString():
# Allow USVStrings to use default value just like
# DOMString. No coercion is required in this case as Codegen.py
# treats USVString just like DOMString, but with an
# extra normalization step.
assert self.type.isDOMString()
return self
elif self.type.isDOMString() and type.treatNullAsEmpty:
# TreatNullAsEmpty is a different type for resolution reasons,
# however once you have a value it doesn't matter
return self
elif self.type.isString() and (type.isByteString() or type.isJSString() or type.isUTF8String()):
# Allow ByteStrings, UTF8String, and JSStrings to use a default
# value like DOMString.
# No coercion is required as Codegen.py will handle the
# extra steps. We want to make sure that our string contains
# only valid characters, so we check that here.
valid_ascii_lit = " " + string.ascii_letters + string.digits + string.punctuation
for idx, c in enumerate(self.value):
if c not in valid_ascii_lit:
raise WebIDLError("Coercing this string literal %s to a ByteString is not supported yet. "
"Coercion failed due to an unsupported byte %d at index %d."
% (self.value.__repr__(), ord(c), idx), [location])
return IDLValue(self.location, type, self.value)
raise NoCoercionFoundError("Cannot coerce type %s to type %s." %
(self.type, type), [location])
def _getDependentObjects(self):
return set()
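# Hedged coercion examples (illustrative literals): an integer default of
# 300 coerces to "unsigned short" by promotion, but raises a WebIDLError if
# the declared type is "byte"; an integer default on a "double" member is
# converted to a float value as long as its magnitude stays within 2**24.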
class IDLNullValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if (not isinstance(type, IDLNullableType) and
not (type.isUnion() and type.hasNullableType) and
not type.isAny()):
raise WebIDLError("Cannot coerce null value to type %s." % type,
[location])
nullValue = IDLNullValue(self.location)
if type.isUnion() and not type.nullable() and type.hasDictionaryType():
# We're actually a default value for the union's dictionary member.
# Use its type.
for t in type.flatMemberTypes:
if t.isDictionary():
nullValue.type = t
return nullValue
nullValue.type = type
return nullValue
def _getDependentObjects(self):
return set()
class IDLEmptySequenceValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if type.isUnion():
# We use the flat member types here, because if we have a nullable
# member type, or a nested union, we want the type the value
# actually coerces to, not the nullable or nested union type.
for subtype in type.unroll().flatMemberTypes:
try:
return self.coerceToType(subtype, location)
                except Exception:
pass
if not type.isSequence():
raise WebIDLError("Cannot coerce empty sequence value to type %s." % type,
[location])
emptySequenceValue = IDLEmptySequenceValue(self.location)
emptySequenceValue.type = type
return emptySequenceValue
def _getDependentObjects(self):
return set()
class IDLDefaultDictionaryValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if type.isUnion():
# We use the flat member types here, because if we have a nullable
# member type, or a nested union, we want the type the value
# actually coerces to, not the nullable or nested union type.
for subtype in type.unroll().flatMemberTypes:
try:
return self.coerceToType(subtype, location)
                except Exception:
pass
if not type.isDictionary():
raise WebIDLError("Cannot coerce default dictionary value to type %s." % type,
[location])
defaultDictionaryValue = IDLDefaultDictionaryValue(self.location)
defaultDictionaryValue.type = type
return defaultDictionaryValue
def _getDependentObjects(self):
return set()
class IDLUndefinedValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if not type.isAny():
raise WebIDLError("Cannot coerce undefined value to type %s." % type,
[location])
undefinedValue = IDLUndefinedValue(self.location)
undefinedValue.type = type
return undefinedValue
def _getDependentObjects(self):
return set()
class IDLInterfaceMember(IDLObjectWithIdentifier, IDLExposureMixins):
Tags = enum(
'Const',
'Attr',
'Method',
'MaplikeOrSetlike',
'Iterable'
)
Special = enum(
'Static',
'Stringifier'
)
AffectsValues = ("Nothing", "Everything")
DependsOnValues = ("Nothing", "DOMState", "DeviceState", "Everything")
def __init__(self, location, identifier, tag, extendedAttrDict=None):
IDLObjectWithIdentifier.__init__(self, location, None, identifier)
IDLExposureMixins.__init__(self, location)
self.tag = tag
if extendedAttrDict is None:
self._extendedAttrDict = {}
else:
self._extendedAttrDict = extendedAttrDict
def isMethod(self):
return self.tag == IDLInterfaceMember.Tags.Method
def isAttr(self):
return self.tag == IDLInterfaceMember.Tags.Attr
def isConst(self):
return self.tag == IDLInterfaceMember.Tags.Const
def isMaplikeOrSetlikeOrIterable(self):
return (self.tag == IDLInterfaceMember.Tags.MaplikeOrSetlike or
self.tag == IDLInterfaceMember.Tags.Iterable)
def isMaplikeOrSetlike(self):
return self.tag == IDLInterfaceMember.Tags.MaplikeOrSetlike
def addExtendedAttributes(self, attrs):
for attr in attrs:
self.handleExtendedAttribute(attr)
attrlist = attr.listValue()
self._extendedAttrDict[attr.identifier()] = attrlist if len(attrlist) else True
def handleExtendedAttribute(self, attr):
pass
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def finish(self, scope):
IDLExposureMixins.finish(self, scope)
def validate(self):
if self.isAttr() or self.isMethod():
if self.affects == "Everything" and self.dependsOn != "Everything":
raise WebIDLError("Interface member is flagged as affecting "
"everything but not depending on everything. "
"That seems rather unlikely.",
[self.location])
if self.getExtendedAttribute("NewObject"):
if self.dependsOn == "Nothing" or self.dependsOn == "DOMState":
raise WebIDLError("A [NewObject] method is not idempotent, "
"so it has to depend on something other than DOM state.",
[self.location])
if (self.getExtendedAttribute("Cached") or
self.getExtendedAttribute("StoreInSlot")):
raise WebIDLError("A [NewObject] attribute shouldnt be "
"[Cached] or [StoreInSlot], since the point "
"of those is to keep returning the same "
"thing across multiple calls, which is not "
"what [NewObject] does.",
[self.location])
def _setDependsOn(self, dependsOn):
if self.dependsOn != "Everything":
raise WebIDLError("Trying to specify multiple different DependsOn, "
"Pure, or Constant extended attributes for "
"attribute", [self.location])
if dependsOn not in IDLInterfaceMember.DependsOnValues:
raise WebIDLError("Invalid [DependsOn=%s] on attribute" % dependsOn,
[self.location])
self.dependsOn = dependsOn
def _setAffects(self, affects):
if self.affects != "Everything":
raise WebIDLError("Trying to specify multiple different Affects, "
"Pure, or Constant extended attributes for "
"attribute", [self.location])
if affects not in IDLInterfaceMember.AffectsValues:
raise WebIDLError("Invalid [Affects=%s] on attribute" % dependsOn,
[self.location])
self.affects = affects
def _addAlias(self, alias):
if alias in self.aliases:
raise WebIDLError("Duplicate [Alias=%s] on attribute" % alias,
[self.location])
self.aliases.append(alias)
def _addBindingAlias(self, bindingAlias):
if bindingAlias in self.bindingAliases:
raise WebIDLError("Duplicate [BindingAlias=%s] on attribute" % bindingAlias,
[self.location])
self.bindingAliases.append(bindingAlias)
class IDLMaplikeOrSetlikeOrIterableBase(IDLInterfaceMember):
def __init__(self, location, identifier, ifaceType, keyType, valueType, ifaceKind):
IDLInterfaceMember.__init__(self, location, identifier, ifaceKind)
if keyType is not None:
assert isinstance(keyType, IDLType)
else:
assert valueType is not None
assert ifaceType in ['maplike', 'setlike', 'iterable']
if valueType is not None:
assert isinstance(valueType, IDLType)
self.keyType = keyType
self.valueType = valueType
self.maplikeOrSetlikeOrIterableType = ifaceType
self.disallowedMemberNames = []
self.disallowedNonMethodNames = []
def isMaplike(self):
return self.maplikeOrSetlikeOrIterableType == "maplike"
def isSetlike(self):
return self.maplikeOrSetlikeOrIterableType == "setlike"
def isIterable(self):
return self.maplikeOrSetlikeOrIterableType == "iterable"
def hasKeyType(self):
return self.keyType is not None
def hasValueType(self):
return self.valueType is not None
def checkCollisions(self, members, isAncestor):
for member in members:
# Check that there are no disallowed members
if (member.identifier.name in self.disallowedMemberNames and
not ((member.isMethod() and member.isMaplikeOrSetlikeOrIterableMethod()) or
(member.isAttr() and member.isMaplikeOrSetlikeAttr()))):
raise WebIDLError("Member '%s' conflicts "
"with reserved %s name." %
(member.identifier.name,
self.maplikeOrSetlikeOrIterableType),
[self.location, member.location])
# Check that there are no disallowed non-method members.
# Ancestor members are always disallowed here; own members
# are disallowed only if they're non-methods.
if ((isAncestor or member.isAttr() or member.isConst()) and
member.identifier.name in self.disallowedNonMethodNames):
raise WebIDLError("Member '%s' conflicts "
"with reserved %s method." %
(member.identifier.name,
self.maplikeOrSetlikeOrIterableType),
[self.location, member.location])
def addMethod(self, name, members, allowExistingOperations, returnType, args=[],
chromeOnly=False, isPure=False, affectsNothing=False, newObject=False,
isIteratorAlias=False):
"""
Create an IDLMethod based on the parameters passed in.
- members is the member list to add this function to, since this is
called during the member expansion portion of interface object
building.
- chromeOnly is only True for read-only js implemented classes, to
implement underscore prefixed convenience functions which would
otherwise not be available, unlike the case of C++ bindings.
- isPure is only True for idempotent functions, so it is not valid for
things like keys, values, etc. that return a new object every time.
- affectsNothing means that nothing changes due to this method, which
affects JIT optimization behavior
- newObject means the method creates and returns a new object.
"""
# Only add name to lists for collision checks if it's not chrome
# only.
if chromeOnly:
name = "__" + name
else:
if not allowExistingOperations:
self.disallowedMemberNames.append(name)
else:
self.disallowedNonMethodNames.append(name)
# If allowExistingOperations is True, and another operation exists
# with the same name as the one we're trying to add, don't add the
# maplike/setlike operation. However, if the operation is static,
# then fail by way of creating the function, which will cause a
# naming conflict, per the spec.
if allowExistingOperations:
for m in members:
if m.identifier.name == name and m.isMethod() and not m.isStatic():
return
method = IDLMethod(self.location,
IDLUnresolvedIdentifier(self.location, name, allowDoubleUnderscore=chromeOnly),
returnType, args, maplikeOrSetlikeOrIterable=self)
# We need to be able to throw from declaration methods
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Throws",))])
if chromeOnly:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("ChromeOnly",))])
if isPure:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Pure",))])
# Following attributes are used for keys/values/entries. Can't mark
# them pure, since they return a new object each time they are run.
if affectsNothing:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("DependsOn", "Everything")),
IDLExtendedAttribute(self.location, ("Affects", "Nothing"))])
if newObject:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NewObject",))])
if isIteratorAlias:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Alias", "@@iterator"))])
# Methods generated for iterables should be enumerable, but the ones for
# maplike/setlike should not be.
if not self.isIterable():
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NonEnumerable",))])
members.append(method)
def resolve(self, parentScope):
if self.keyType:
self.keyType.resolveType(parentScope)
if self.valueType:
self.valueType.resolveType(parentScope)
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
if self.keyType and not self.keyType.isComplete():
t = self.keyType.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.keyType = t
if self.valueType and not self.valueType.isComplete():
t = self.valueType.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.valueType = t
def validate(self):
IDLInterfaceMember.validate(self)
def handleExtendedAttribute(self, attr):
IDLInterfaceMember.handleExtendedAttribute(self, attr)
def _getDependentObjects(self):
deps = set()
if self.keyType:
deps.add(self.keyType)
if self.valueType:
deps.add(self.valueType)
return deps
def getForEachArguments(self):
return [IDLArgument(self.location,
IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"callback"),
BuiltinTypes[IDLBuiltinType.Types.object]),
IDLArgument(self.location,
IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"thisArg"),
BuiltinTypes[IDLBuiltinType.Types.any],
optional=True)]
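    # Hedged note: the arguments above produce the generated signature
    # "void forEach(object callback, optional any thisArg)", matching the
    # forEach methods added by both iterable and maplike/setlike expansion.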
# Iterable adds ES6 iterator style functions and traits
# (keys/values/entries/@@iterator) to an interface.
class IDLIterable(IDLMaplikeOrSetlikeOrIterableBase):
def __init__(self, location, identifier, keyType, valueType=None, scope=None):
IDLMaplikeOrSetlikeOrIterableBase.__init__(self, location, identifier,
"iterable", keyType, valueType,
IDLInterfaceMember.Tags.Iterable)
self.iteratorType = None
def __str__(self):
return "declared iterable with key '%s' and value '%s'" % (self.keyType, self.valueType)
def expand(self, members, isJSImplemented):
"""
In order to take advantage of all of the method machinery in Codegen,
we generate our functions as if they were part of the interface
specification during parsing.
"""
# We only need to add entries/keys/values here if we're a pair iterator.
# Value iterators just copy these from %ArrayPrototype% instead.
if not self.isPairIterator():
return
# object entries()
self.addMethod("entries", members, False, self.iteratorType,
affectsNothing=True, newObject=True,
isIteratorAlias=True)
# object keys()
self.addMethod("keys", members, False, self.iteratorType,
affectsNothing=True, newObject=True)
# object values()
self.addMethod("values", members, False, self.iteratorType,
affectsNothing=True, newObject=True)
# void forEach(callback(valueType, keyType), optional any thisArg)
self.addMethod("forEach", members, False,
BuiltinTypes[IDLBuiltinType.Types.void],
self.getForEachArguments())
def isValueIterator(self):
return not self.isPairIterator()
def isPairIterator(self):
return self.hasKeyType()
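# Hedged example: "iterable<long>" parses as a value iterator (keyType is
# None), so expand() adds nothing and entries/keys/values come from
# %ArrayPrototype%; "iterable<DOMString, long>" is a pair iterator and
# expand() generates entries()/keys()/values()/forEach() as above.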
# MaplikeOrSetlike adds ES6 map-or-set-like traits to an interface.
class IDLMaplikeOrSetlike(IDLMaplikeOrSetlikeOrIterableBase):
def __init__(self, location, identifier, maplikeOrSetlikeType,
readonly, keyType, valueType):
IDLMaplikeOrSetlikeOrIterableBase.__init__(self, location, identifier, maplikeOrSetlikeType,
keyType, valueType, IDLInterfaceMember.Tags.MaplikeOrSetlike)
self.readonly = readonly
self.slotIndices = None
# When generating JSAPI access code, we need to know the backing object
# type prefix to create the correct function. Generate here for reuse.
if self.isMaplike():
self.prefix = 'Map'
elif self.isSetlike():
self.prefix = 'Set'
def __str__(self):
return "declared '%s' with key '%s'" % (self.maplikeOrSetlikeOrIterableType, self.keyType)
def expand(self, members, isJSImplemented):
"""
In order to take advantage of all of the method machinery in Codegen,
we generate our functions as if they were part of the interface
specification during parsing.
"""
# Both maplike and setlike have a size attribute
sizeAttr = IDLAttribute(self.location,
IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"), "size"),
BuiltinTypes[IDLBuiltinType.Types.unsigned_long],
True,
maplikeOrSetlike=self)
# This should be non-enumerable.
sizeAttr.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NonEnumerable",))])
members.append(sizeAttr)
self.reserved_ro_names = ["size"]
self.disallowedMemberNames.append("size")
# object entries()
self.addMethod("entries", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
affectsNothing=True, isIteratorAlias=self.isMaplike())
# object keys()
self.addMethod("keys", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
affectsNothing=True)
# object values()
self.addMethod("values", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
affectsNothing=True, isIteratorAlias=self.isSetlike())
# void forEach(callback(valueType, keyType), thisVal)
self.addMethod("forEach", members, False, BuiltinTypes[IDLBuiltinType.Types.void],
self.getForEachArguments())
def getKeyArg():
return IDLArgument(self.location,
IDLUnresolvedIdentifier(self.location, "key"),
self.keyType)
# boolean has(keyType key)
self.addMethod("has", members, False, BuiltinTypes[IDLBuiltinType.Types.boolean],
[getKeyArg()], isPure=True)
if not self.readonly:
# void clear()
self.addMethod("clear", members, True, BuiltinTypes[IDLBuiltinType.Types.void],
[])
# boolean delete(keyType key)
self.addMethod("delete", members, True,
BuiltinTypes[IDLBuiltinType.Types.boolean], [getKeyArg()])
# Always generate underscored functions (e.g. __add, __clear) for js
# implemented interfaces as convenience functions.
if isJSImplemented:
# void clear()
self.addMethod("clear", members, True, BuiltinTypes[IDLBuiltinType.Types.void],
[], chromeOnly=True)
# boolean delete(keyType key)
self.addMethod("delete", members, True,
BuiltinTypes[IDLBuiltinType.Types.boolean], [getKeyArg()],
chromeOnly=True)
if self.isSetlike():
if not self.readonly:
# Add returns the set object it just added to.
# object add(keyType key)
self.addMethod("add", members, True,
BuiltinTypes[IDLBuiltinType.Types.object], [getKeyArg()])
if isJSImplemented:
self.addMethod("add", members, True,
BuiltinTypes[IDLBuiltinType.Types.object], [getKeyArg()],
chromeOnly=True)
return
# If we get this far, we're a maplike declaration.
# valueType get(keyType key)
#
# Note that instead of the value type, we're using any here. The
# validity checks should happen as things are inserted into the map,
# and using any as the return type makes code generation much simpler.
#
# TODO: Bug 1155340 may change this to use specific type to provide
# more info to JIT.
self.addMethod("get", members, False, BuiltinTypes[IDLBuiltinType.Types.any],
[getKeyArg()], isPure=True)
def getValueArg():
return IDLArgument(self.location,
IDLUnresolvedIdentifier(self.location, "value"),
self.valueType)
if not self.readonly:
self.addMethod("set", members, True, BuiltinTypes[IDLBuiltinType.Types.object],
[getKeyArg(), getValueArg()])
if isJSImplemented:
self.addMethod("set", members, True, BuiltinTypes[IDLBuiltinType.Types.object],
[getKeyArg(), getValueArg()], chromeOnly=True)
class IDLConst(IDLInterfaceMember):
def __init__(self, location, identifier, type, value):
IDLInterfaceMember.__init__(self, location, identifier,
IDLInterfaceMember.Tags.Const)
assert isinstance(type, IDLType)
if type.isDictionary():
raise WebIDLError("A constant cannot be of a dictionary type",
[self.location])
if type.isRecord():
raise WebIDLError("A constant cannot be of a record type",
[self.location])
self.type = type
self.value = value
if identifier.name == "prototype":
raise WebIDLError("The identifier of a constant must not be 'prototype'",
[location])
def __str__(self):
return "'%s' const '%s'" % (self.type, self.identifier)
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
if not self.type.isComplete():
type = self.type.complete(scope)
if not type.isPrimitive() and not type.isString():
locations = [self.type.location, type.location]
try:
locations.append(type.inner.location)
            except AttributeError:
                # Not all types have an inner type.
                pass
raise WebIDLError("Incorrect type for constant", locations)
self.type = type
# The value might not match the type
coercedValue = self.value.coerceToType(self.type, self.location)
assert coercedValue
self.value = coercedValue
def validate(self):
IDLInterfaceMember.validate(self)
def handleExtendedAttribute(self, attr):
identifier = attr.identifier()
if identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
elif (identifier == "Pref" or
identifier == "ChromeOnly" or
identifier == "Func" or
identifier == "SecureContext" or
identifier == "NonEnumerable" or
identifier == "NeedsWindowsUndef"):
# Known attributes that we don't need to do anything with here
pass
else:
raise WebIDLError("Unknown extended attribute %s on constant" % identifier,
[attr.location])
IDLInterfaceMember.handleExtendedAttribute(self, attr)
def _getDependentObjects(self):
return set([self.type, self.value])
class IDLAttribute(IDLInterfaceMember):
def __init__(self, location, identifier, type, readonly, inherit=False,
static=False, stringifier=False, maplikeOrSetlike=None,
extendedAttrDict=None):
IDLInterfaceMember.__init__(self, location, identifier,
IDLInterfaceMember.Tags.Attr,
extendedAttrDict=extendedAttrDict)
assert isinstance(type, IDLType)
self.type = type
self.readonly = readonly
self.inherit = inherit
self._static = static
self.lenientThis = False
self._unforgeable = False
self.stringifier = stringifier
self.slotIndices = None
assert maplikeOrSetlike is None or isinstance(maplikeOrSetlike, IDLMaplikeOrSetlike)
self.maplikeOrSetlike = maplikeOrSetlike
self.dependsOn = "Everything"
self.affects = "Everything"
self.bindingAliases = []
if static and identifier.name == "prototype":
raise WebIDLError("The identifier of a static attribute must not be 'prototype'",
[location])
if readonly and inherit:
raise WebIDLError("An attribute cannot be both 'readonly' and 'inherit'",
[self.location])
def isStatic(self):
return self._static
def forceStatic(self):
self._static = True
def __str__(self):
return "'%s' attribute '%s'" % (self.type, self.identifier)
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
if not self.type.isComplete():
t = self.type.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.type = t
if self.readonly and (self.type.hasClamp() or self.type.hasEnforceRange() or
self.type.hasAllowShared() or self.type.treatNullAsEmpty):
raise WebIDLError("A readonly attribute cannot be [Clamp] or [EnforceRange] or [AllowShared]",
[self.location])
if self.type.isDictionary() and not self.getExtendedAttribute("Cached"):
raise WebIDLError("An attribute cannot be of a dictionary type",
[self.location])
if self.type.isSequence() and not self.getExtendedAttribute("Cached"):
raise WebIDLError("A non-cached attribute cannot be of a sequence "
"type", [self.location])
if self.type.isRecord() and not self.getExtendedAttribute("Cached"):
raise WebIDLError("A non-cached attribute cannot be of a record "
"type", [self.location])
if self.type.isUnion():
for f in self.type.unroll().flatMemberTypes:
if f.isDictionary():
raise WebIDLError("An attribute cannot be of a union "
"type if one of its member types (or "
"one of its member types's member "
"types, and so on) is a dictionary "
"type", [self.location, f.location])
if f.isSequence():
raise WebIDLError("An attribute cannot be of a union "
"type if one of its member types (or "
"one of its member types's member "
"types, and so on) is a sequence "
"type", [self.location, f.location])
if f.isRecord():
raise WebIDLError("An attribute cannot be of a union "
"type if one of its member types (or "
"one of its member types's member "
"types, and so on) is a record "
"type", [self.location, f.location])
if not self.type.isInterface() and self.getExtendedAttribute("PutForwards"):
raise WebIDLError("An attribute with [PutForwards] must have an "
"interface type as its type", [self.location])
if (not self.type.isInterface() and
self.getExtendedAttribute("SameObject")):
raise WebIDLError("An attribute with [SameObject] must have an "
"interface type as its type", [self.location])
if self.type.isPromise() and not self.readonly:
raise WebIDLError("Promise-returning attributes must be readonly",
[self.location])
def validate(self):
def typeContainsChromeOnlyDictionaryMember(type):
if (type.nullable() or
type.isSequence() or
type.isRecord()):
return typeContainsChromeOnlyDictionaryMember(type.inner)
if type.isUnion():
for memberType in type.flatMemberTypes:
(contains, location) = typeContainsChromeOnlyDictionaryMember(memberType)
if contains:
return (True, location)
if type.isDictionary():
dictionary = type.inner
while dictionary:
(contains, location) = dictionaryContainsChromeOnlyMember(dictionary)
if contains:
return (True, location)
dictionary = dictionary.parent
return (False, None)
def dictionaryContainsChromeOnlyMember(dictionary):
for member in dictionary.members:
if member.getExtendedAttribute("ChromeOnly"):
return (True, member.location)
(contains, location) = typeContainsChromeOnlyDictionaryMember(member.type)
if contains:
return (True, location)
return (False, None)
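        # Sketch: for an attribute of type "sequence<Foo>" where dictionary
        # Foo (or an ancestor dictionary of Foo) has a [ChromeOnly] member,
        # the helpers above unwrap the sequence layer, walk the dictionary
        # chain, and report (True, location-of-that-member).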
IDLInterfaceMember.validate(self)
if (self.getExtendedAttribute("Cached") or
self.getExtendedAttribute("StoreInSlot")):
            if self.affects != "Nothing":
raise WebIDLError("Cached attributes and attributes stored in "
"slots must be Constant or Pure or "
"Affects=Nothing, since the getter won't always "
"be called.",
[self.location])
(contains, location) = typeContainsChromeOnlyDictionaryMember(self.type)
if contains:
raise WebIDLError("[Cached] and [StoreInSlot] must not be used "
"on an attribute whose type contains a "
"[ChromeOnly] dictionary member",
[self.location, location])
if self.getExtendedAttribute("Frozen"):
if (not self.type.isSequence() and not self.type.isDictionary() and
not self.type.isRecord()):
raise WebIDLError("[Frozen] is only allowed on "
"sequence-valued, dictionary-valued, and "
"record-valued attributes",
[self.location])
if not self.type.unroll().isExposedInAllOf(self.exposureSet):
raise WebIDLError("Attribute returns a type that is not exposed "
"everywhere where the attribute is exposed",
[self.location])
if self.getExtendedAttribute("CEReactions"):
if self.readonly:
raise WebIDLError("[CEReactions] is not allowed on "
"readonly attributes",
[self.location])
def handleExtendedAttribute(self, attr):
identifier = attr.identifier()
if ((identifier == "SetterThrows" or identifier == "SetterCanOOM" or
identifier == "SetterNeedsSubjectPrincipal")
and self.readonly):
raise WebIDLError("Readonly attributes must not be flagged as "
"[%s]" % identifier,
[self.location])
elif identifier == "BindingAlias":
if not attr.hasValue():
raise WebIDLError("[BindingAlias] takes an identifier or string",
[attr.location])
self._addBindingAlias(attr.value())
elif (((identifier == "Throws" or identifier == "GetterThrows" or
identifier == "CanOOM" or identifier == "GetterCanOOM") and
self.getExtendedAttribute("StoreInSlot")) or
(identifier == "StoreInSlot" and
(self.getExtendedAttribute("Throws") or
self.getExtendedAttribute("GetterThrows") or
self.getExtendedAttribute("CanOOM") or
self.getExtendedAttribute("GetterCanOOM")))):
raise WebIDLError("Throwing things can't be [StoreInSlot]",
[attr.location])
elif identifier == "LenientThis":
if not attr.noArguments():
raise WebIDLError("[LenientThis] must take no arguments",
[attr.location])
if self.isStatic():
raise WebIDLError("[LenientThis] is only allowed on non-static "
"attributes", [attr.location, self.location])
if self.getExtendedAttribute("CrossOriginReadable"):
raise WebIDLError("[LenientThis] is not allowed in combination "
"with [CrossOriginReadable]",
[attr.location, self.location])
if self.getExtendedAttribute("CrossOriginWritable"):
raise WebIDLError("[LenientThis] is not allowed in combination "
"with [CrossOriginWritable]",
[attr.location, self.location])
self.lenientThis = True
elif identifier == "Unforgeable":
if self.isStatic():
raise WebIDLError("[Unforgeable] is only allowed on non-static "
"attributes", [attr.location, self.location])
self._unforgeable = True
elif identifier == "SameObject" and not self.readonly:
raise WebIDLError("[SameObject] only allowed on readonly attributes",
[attr.location, self.location])
elif identifier == "Constant" and not self.readonly:
raise WebIDLError("[Constant] only allowed on readonly attributes",
[attr.location, self.location])
elif identifier == "PutForwards":
if not self.readonly:
raise WebIDLError("[PutForwards] is only allowed on readonly "
"attributes", [attr.location, self.location])
if self.type.isPromise():
raise WebIDLError("[PutForwards] is not allowed on "
"Promise-typed attributes",
[attr.location, self.location])
if self.isStatic():
raise WebIDLError("[PutForwards] is only allowed on non-static "
"attributes", [attr.location, self.location])
if self.getExtendedAttribute("Replaceable") is not None:
raise WebIDLError("[PutForwards] and [Replaceable] can't both "
"appear on the same attribute",
[attr.location, self.location])
if not attr.hasValue():
raise WebIDLError("[PutForwards] takes an identifier",
[attr.location, self.location])
elif identifier == "Replaceable":
if not attr.noArguments():
raise WebIDLError("[Replaceable] must take no arguments",
[attr.location])
if not self.readonly:
raise WebIDLError("[Replaceable] is only allowed on readonly "
"attributes", [attr.location, self.location])
if self.type.isPromise():
raise WebIDLError("[Replaceable] is not allowed on "
"Promise-typed attributes",
[attr.location, self.location])
if self.isStatic():
raise WebIDLError("[Replaceable] is only allowed on non-static "
"attributes", [attr.location, self.location])
if self.getExtendedAttribute("PutForwards") is not None:
raise WebIDLError("[PutForwards] and [Replaceable] can't both "
"appear on the same attribute",
[attr.location, self.location])
elif identifier == "LenientSetter":
if not attr.noArguments():
raise WebIDLError("[LenientSetter] must take no arguments",
[attr.location])
if not self.readonly:
raise WebIDLError("[LenientSetter] is only allowed on readonly "
"attributes", [attr.location, self.location])
if self.type.isPromise():
raise WebIDLError("[LenientSetter] is not allowed on "
"Promise-typed attributes",
[attr.location, self.location])
if self.isStatic():
raise WebIDLError("[LenientSetter] is only allowed on non-static "
"attributes", [attr.location, self.location])
if self.getExtendedAttribute("PutForwards") is not None:
raise WebIDLError("[LenientSetter] and [PutForwards] can't both "
"appear on the same attribute",
[attr.location, self.location])
if self.getExtendedAttribute("Replaceable") is not None:
raise WebIDLError("[LenientSetter] and [Replaceable] can't both "
"appear on the same attribute",
[attr.location, self.location])
elif identifier == "LenientFloat":
if self.readonly:
raise WebIDLError("[LenientFloat] used on a readonly attribute",
[attr.location, self.location])
if not self.type.includesRestrictedFloat():
raise WebIDLError("[LenientFloat] used on an attribute with a "
"non-restricted-float type",
[attr.location, self.location])
elif identifier == "StoreInSlot":
if self.getExtendedAttribute("Cached"):
raise WebIDLError("[StoreInSlot] and [Cached] must not be "
"specified on the same attribute",
[attr.location, self.location])
elif identifier == "Cached":
if self.getExtendedAttribute("StoreInSlot"):
raise WebIDLError("[Cached] and [StoreInSlot] must not be "
"specified on the same attribute",
[attr.location, self.location])
elif (identifier == "CrossOriginReadable" or
identifier == "CrossOriginWritable"):
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
if self.isStatic():
raise WebIDLError("[%s] is only allowed on non-static "
"attributes" % identifier,
[attr.location, self.location])
if self.getExtendedAttribute("LenientThis"):
raise WebIDLError("[LenientThis] is not allowed in combination "
"with [%s]" % identifier,
[attr.location, self.location])
elif identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
elif identifier == "Pure":
if not attr.noArguments():
raise WebIDLError("[Pure] must take no arguments",
[attr.location])
self._setDependsOn("DOMState")
self._setAffects("Nothing")
elif identifier == "Constant" or identifier == "SameObject":
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
self._setDependsOn("Nothing")
self._setAffects("Nothing")
elif identifier == "Affects":
if not attr.hasValue():
raise WebIDLError("[Affects] takes an identifier",
[attr.location])
self._setAffects(attr.value())
elif identifier == "DependsOn":
if not attr.hasValue():
raise WebIDLError("[DependsOn] takes an identifier",
[attr.location])
if (attr.value() != "Everything" and attr.value() != "DOMState" and
not self.readonly):
raise WebIDLError("[DependsOn=%s] only allowed on "
"readonly attributes" % attr.value(),
[attr.location, self.location])
self._setDependsOn(attr.value())
elif identifier == "UseCounter":
if self.stringifier:
raise WebIDLError("[UseCounter] must not be used on a "
"stringifier attribute",
[attr.location, self.location])
elif identifier == "Unscopable":
if not attr.noArguments():
raise WebIDLError("[Unscopable] must take no arguments",
[attr.location])
if self.isStatic():
raise WebIDLError("[Unscopable] is only allowed on non-static "
"attributes and operations",
[attr.location, self.location])
elif identifier == "CEReactions":
if not attr.noArguments():
raise WebIDLError("[CEReactions] must take no arguments",
[attr.location])
elif (identifier == "Pref" or
identifier == "Deprecated" or
identifier == "SetterThrows" or
identifier == "Throws" or
identifier == "GetterThrows" or
identifier == "SetterCanOOM" or
identifier == "CanOOM" or
identifier == "GetterCanOOM" or
identifier == "ChromeOnly" or
identifier == "Func" or
identifier == "SecureContext" or
identifier == "Frozen" or
identifier == "NewObject" or
identifier == "NeedsSubjectPrincipal" or
identifier == "SetterNeedsSubjectPrincipal" or
identifier == "GetterNeedsSubjectPrincipal" or
identifier == "NeedsCallerType" or
identifier == "ReturnValueNeedsContainsHack" or
identifier == "BinaryName" or
identifier == "NonEnumerable"):
# Known attributes that we don't need to do anything with here
pass
else:
raise WebIDLError("Unknown extended attribute %s on attribute" % identifier,
[attr.location])
IDLInterfaceMember.handleExtendedAttribute(self, attr)
def resolve(self, parentScope):
assert isinstance(parentScope, IDLScope)
self.type.resolveType(parentScope)
IDLObjectWithIdentifier.resolve(self, parentScope)
def hasLenientThis(self):
return self.lenientThis
def isMaplikeOrSetlikeAttr(self):
"""
True if this attribute was generated from an interface with
maplike/setlike (e.g. this is the size attribute for
maplike/setlike)
"""
return self.maplikeOrSetlike is not None
def isUnforgeable(self):
return self._unforgeable
def _getDependentObjects(self):
return set([self.type])
def expand(self, members):
assert self.stringifier
if not self.type.isDOMString() and not self.type.isUSVString():
raise WebIDLError("The type of a stringifer attribute must be "
"either DOMString or USVString",
[self.location])
identifier = IDLUnresolvedIdentifier(self.location, "__stringifier",
allowDoubleUnderscore=True)
method = IDLMethod(self.location,
identifier,
returnType=self.type, arguments=[],
stringifier=True, underlyingAttr=self)
allowedExtAttrs = ["Throws", "NeedsSubjectPrincipal", "Pure"]
# Safe to ignore these as they are only meaningful for attributes
attributeOnlyExtAttrs = [
"CEReactions",
"CrossOriginWritable",
"SetterThrows",
]
for (key, value) in list(self._extendedAttrDict.items()):
if key in allowedExtAttrs:
if value is not True:
raise WebIDLError("[%s] with a value is currently "
"unsupported in stringifier attributes, "
"please file a bug to add support" % key,
[self.location])
method.addExtendedAttributes([IDLExtendedAttribute(self.location, (key,))])
            elif key not in attributeOnlyExtAttrs:
raise WebIDLError("[%s] is currently unsupported in "
"stringifier attributes, please file a bug "
"to add support" % key,
[self.location])
members.append(method)
class IDLArgument(IDLObjectWithIdentifier):
    def __init__(self, location, identifier, type, optional=False,
                 defaultValue=None, variadic=False, dictionaryMember=False,
                 allowTypeAttributes=False):
IDLObjectWithIdentifier.__init__(self, location, None, identifier)
assert isinstance(type, IDLType)
self.type = type
self.optional = optional
self.defaultValue = defaultValue
self.variadic = variadic
self.dictionaryMember = dictionaryMember
self._isComplete = False
self._allowTreatNonCallableAsNull = False
self._extendedAttrDict = {}
self.allowTypeAttributes = allowTypeAttributes
assert not variadic or optional
assert not variadic or not defaultValue
def addExtendedAttributes(self, attrs):
for attribute in attrs:
identifier = attribute.identifier()
if self.allowTypeAttributes and (identifier == "EnforceRange" or identifier == "Clamp" or
identifier == "TreatNullAs" or identifier == "AllowShared"):
self.type = self.type.withExtendedAttributes([attribute])
elif identifier == "TreatNonCallableAsNull":
self._allowTreatNonCallableAsNull = True
elif (self.dictionaryMember and
(identifier == "ChromeOnly" or
identifier == "Func" or
identifier == "Pref")):
if not self.optional:
raise WebIDLError("[%s] must not be used on a required "
"dictionary member" % identifier,
[attribute.location])
else:
raise WebIDLError("Unhandled extended attribute on %s" %
("a dictionary member" if self.dictionaryMember else
"an argument"),
[attribute.location])
attrlist = attribute.listValue()
self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def isComplete(self):
return self._isComplete
def complete(self, scope):
if self._isComplete:
return
self._isComplete = True
if not self.type.isComplete():
type = self.type.complete(scope)
assert not isinstance(type, IDLUnresolvedType)
assert not isinstance(type, IDLTypedefType)
assert not isinstance(type.name, IDLUnresolvedIdentifier)
self.type = type
if self.type.isAny():
assert (self.defaultValue is None or
isinstance(self.defaultValue, IDLNullValue))
# optional 'any' values always have a default value
if self.optional and not self.defaultValue and not self.variadic:
# Set the default value to undefined, for simplicity, so the
# codegen doesn't have to special-case this.
self.defaultValue = IDLUndefinedValue(self.location)
if self.dictionaryMember and self.type.treatNullAsEmpty:
raise WebIDLError("Dictionary members cannot be [TreatNullAs]", [self.location])
# Now do the coercing thing; this needs to happen after the
# above creation of a default value.
if self.defaultValue:
self.defaultValue = self.defaultValue.coerceToType(self.type,
self.location)
assert self.defaultValue
def allowTreatNonCallableAsNull(self):
return self._allowTreatNonCallableAsNull
def _getDependentObjects(self):
deps = set([self.type])
if self.defaultValue:
deps.add(self.defaultValue)
return deps
def canHaveMissingValue(self):
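        # For example: "optional long x" can be missing at the call site,
        # while "optional long x = 0" cannot, since the default fills it in.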
return self.optional and not self.defaultValue
class IDLCallback(IDLObjectWithScope):
def __init__(self, location, parentScope, identifier, returnType, arguments, isConstructor):
assert isinstance(returnType, IDLType)
self._returnType = returnType
# Clone the list
self._arguments = list(arguments)
IDLObjectWithScope.__init__(self, location, parentScope, identifier)
for (returnType, arguments) in self.signatures():
for argument in arguments:
argument.resolve(self)
self._treatNonCallableAsNull = False
self._treatNonObjectAsNull = False
self._isRunScriptBoundary = False
self._isConstructor = isConstructor
def isCallback(self):
return True
def isConstructor(self):
return self._isConstructor
def signatures(self):
return [(self._returnType, self._arguments)]
def finish(self, scope):
if not self._returnType.isComplete():
type = self._returnType.complete(scope)
assert not isinstance(type, IDLUnresolvedType)
assert not isinstance(type, IDLTypedefType)
assert not isinstance(type.name, IDLUnresolvedIdentifier)
self._returnType = type
for argument in self._arguments:
if argument.type.isComplete():
continue
type = argument.type.complete(scope)
assert not isinstance(type, IDLUnresolvedType)
assert not isinstance(type, IDLTypedefType)
assert not isinstance(type.name, IDLUnresolvedIdentifier)
argument.type = type
def validate(self):
pass
def addExtendedAttributes(self, attrs):
unhandledAttrs = []
for attr in attrs:
if attr.identifier() == "TreatNonCallableAsNull":
self._treatNonCallableAsNull = True
elif attr.identifier() == "TreatNonObjectAsNull":
if self._isConstructor:
raise WebIDLError("[TreatNonObjectAsNull] is not supported "
"on constructors", [self.location])
self._treatNonObjectAsNull = True
elif attr.identifier() == "MOZ_CAN_RUN_SCRIPT_BOUNDARY":
if self._isConstructor:
raise WebIDLError("[MOZ_CAN_RUN_SCRIPT_BOUNDARY] is not "
"permitted on constructors",
[self.location])
self._isRunScriptBoundary = True
else:
unhandledAttrs.append(attr)
if self._treatNonCallableAsNull and self._treatNonObjectAsNull:
raise WebIDLError("Cannot specify both [TreatNonCallableAsNull] "
"and [TreatNonObjectAsNull]", [self.location])
if len(unhandledAttrs) != 0:
IDLType.addExtendedAttributes(self, unhandledAttrs)
def _getDependentObjects(self):
return set([self._returnType] + self._arguments)
def isRunScriptBoundary(self):
        return self._isRunScriptBoundary
class IDLCallbackType(IDLType):
def __init__(self, location, callback):
IDLType.__init__(self, location, callback.identifier.name)
self.callback = callback
def isCallback(self):
return True
def tag(self):
return IDLType.Tags.callback
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isNonCallbackInterface() or other.isSequence())
def _getDependentObjects(self):
return self.callback._getDependentObjects()
class IDLMethodOverload:
"""
A class that represents a single overload of a WebIDL method. This is not
quite the same as an element of the "effective overload set" in the spec,
because separate IDLMethodOverloads are not created based on arguments being
optional. Rather, when multiple methods have the same name, there is an
IDLMethodOverload for each one, all hanging off an IDLMethod representing
the full set of overloads.
"""
def __init__(self, returnType, arguments, location):
self.returnType = returnType
# Clone the list of arguments, just in case
self.arguments = list(arguments)
self.location = location
def _getDependentObjects(self):
deps = set(self.arguments)
deps.add(self.returnType)
return deps
def includesRestrictedFloatArgument(self):
return any(arg.type.includesRestrictedFloat() for arg in self.arguments)
class IDLMethod(IDLInterfaceMember, IDLScope):
Special = enum(
'Getter',
'Setter',
'Deleter',
'LegacyCaller',
base=IDLInterfaceMember.Special
)
NamedOrIndexed = enum(
'Neither',
'Named',
'Indexed'
)
def __init__(self, location, identifier, returnType, arguments,
static=False, getter=False, setter=False,
deleter=False, specialType=NamedOrIndexed.Neither,
legacycaller=False, stringifier=False,
maplikeOrSetlikeOrIterable=None,
underlyingAttr=None):
# REVIEW: specialType is NamedOrIndexed -- wow, this is messed up.
IDLInterfaceMember.__init__(self, location, identifier,
IDLInterfaceMember.Tags.Method)
self._hasOverloads = False
assert isinstance(returnType, IDLType)
# self._overloads is a list of IDLMethodOverloads
self._overloads = [IDLMethodOverload(returnType, arguments, location)]
assert isinstance(static, bool)
self._static = static
assert isinstance(getter, bool)
self._getter = getter
assert isinstance(setter, bool)
self._setter = setter
assert isinstance(deleter, bool)
self._deleter = deleter
assert isinstance(legacycaller, bool)
self._legacycaller = legacycaller
assert isinstance(stringifier, bool)
self._stringifier = stringifier
assert maplikeOrSetlikeOrIterable is None or isinstance(maplikeOrSetlikeOrIterable, IDLMaplikeOrSetlikeOrIterableBase)
self.maplikeOrSetlikeOrIterable = maplikeOrSetlikeOrIterable
self._htmlConstructor = False
self.underlyingAttr = underlyingAttr
self._specialType = specialType
self._unforgeable = False
self.dependsOn = "Everything"
self.affects = "Everything"
self.aliases = []
if static and identifier.name == "prototype":
raise WebIDLError("The identifier of a static operation must not be 'prototype'",
[location])
self.assertSignatureConstraints()
def __str__(self):
return "Method '%s'" % self.identifier
def assertSignatureConstraints(self):
if self._getter or self._deleter:
assert len(self._overloads) == 1
overload = self._overloads[0]
arguments = overload.arguments
assert len(arguments) == 1
assert (arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or
arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long])
assert not arguments[0].optional and not arguments[0].variadic
assert not self._getter or not overload.returnType.isVoid()
if self._setter:
assert len(self._overloads) == 1
arguments = self._overloads[0].arguments
assert len(arguments) == 2
assert (arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or
arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long])
assert not arguments[0].optional and not arguments[0].variadic
assert not arguments[1].optional and not arguments[1].variadic
if self._stringifier:
assert len(self._overloads) == 1
overload = self._overloads[0]
assert len(overload.arguments) == 0
if not self.underlyingAttr:
assert overload.returnType == BuiltinTypes[IDLBuiltinType.Types.domstring]
def isStatic(self):
return self._static
def forceStatic(self):
self._static = True
def isGetter(self):
return self._getter
def isSetter(self):
return self._setter
def isDeleter(self):
return self._deleter
def isNamed(self):
assert (self._specialType == IDLMethod.NamedOrIndexed.Named or
self._specialType == IDLMethod.NamedOrIndexed.Indexed)
return self._specialType == IDLMethod.NamedOrIndexed.Named
def isIndexed(self):
assert (self._specialType == IDLMethod.NamedOrIndexed.Named or
self._specialType == IDLMethod.NamedOrIndexed.Indexed)
return self._specialType == IDLMethod.NamedOrIndexed.Indexed
def isLegacycaller(self):
return self._legacycaller
def isStringifier(self):
return self._stringifier
def isToJSON(self):
return self.identifier.name == "toJSON"
def isDefaultToJSON(self):
return self.isToJSON() and self.getExtendedAttribute("Default")
def isMaplikeOrSetlikeOrIterableMethod(self):
"""
True if this method was generated as part of a
maplike/setlike/etc interface (e.g. has/get methods)
"""
return self.maplikeOrSetlikeOrIterable is not None
def isSpecial(self):
return (self.isGetter() or
self.isSetter() or
self.isDeleter() or
self.isLegacycaller() or
self.isStringifier())
def isHTMLConstructor(self):
return self._htmlConstructor
def hasOverloads(self):
return self._hasOverloads
def isIdentifierLess(self):
"""
True if the method name started with __, and if the method is not a
maplike/setlike method. Interfaces with maplike/setlike will generate
        methods starting with __ for chrome-only backing object access in
        JS-implemented interfaces, so while these functions use what is
        considered a non-identifier name, they actually DO have an identifier.
"""
        return (self.identifier.name.startswith("__") and
not self.isMaplikeOrSetlikeOrIterableMethod())
def resolve(self, parentScope):
assert isinstance(parentScope, IDLScope)
IDLObjectWithIdentifier.resolve(self, parentScope)
IDLScope.__init__(self, self.location, parentScope, self.identifier)
for (returnType, arguments) in self.signatures():
for argument in arguments:
argument.resolve(self)
def addOverload(self, method):
assert len(method._overloads) == 1
if self._extendedAttrDict != method._extendedAttrDict:
extendedAttrDiff = set(self._extendedAttrDict.keys()) ^ set(method._extendedAttrDict.keys())
            if extendedAttrDiff == {"LenientFloat"}:
if "LenientFloat" not in self._extendedAttrDict:
for overload in self._overloads:
if overload.includesRestrictedFloatArgument():
raise WebIDLError("Restricted float behavior differs on different "
"overloads of %s" % method.identifier,
[overload.location, method.location])
self._extendedAttrDict["LenientFloat"] = method._extendedAttrDict["LenientFloat"]
elif method._overloads[0].includesRestrictedFloatArgument():
raise WebIDLError("Restricted float behavior differs on different "
"overloads of %s" % method.identifier,
[self.location, method.location])
else:
raise WebIDLError("Extended attributes differ on different "
"overloads of %s" % method.identifier,
[self.location, method.location])
self._overloads.extend(method._overloads)
self._hasOverloads = True
if self.isStatic() != method.isStatic():
raise WebIDLError("Overloaded identifier %s appears with different values of the 'static' attribute" % method.identifier,
[method.location])
if self.isLegacycaller() != method.isLegacycaller():
raise WebIDLError("Overloaded identifier %s appears with different values of the 'legacycaller' attribute" % method.identifier,
[method.location])
# Can't overload special things!
assert not self.isGetter()
assert not method.isGetter()
assert not self.isSetter()
assert not method.isSetter()
assert not self.isDeleter()
assert not method.isDeleter()
assert not self.isStringifier()
assert not method.isStringifier()
assert not self.isHTMLConstructor()
assert not method.isHTMLConstructor()
return self
def signatures(self):
return [(overload.returnType, overload.arguments) for overload in
self._overloads]
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
for overload in self._overloads:
returnType = overload.returnType
if not returnType.isComplete():
returnType = returnType.complete(scope)
assert not isinstance(returnType, IDLUnresolvedType)
assert not isinstance(returnType, IDLTypedefType)
assert not isinstance(returnType.name, IDLUnresolvedIdentifier)
overload.returnType = returnType
for argument in overload.arguments:
if not argument.isComplete():
argument.complete(scope)
assert argument.type.isComplete()
# Now compute various information that will be used by the
# WebIDL overload resolution algorithm.
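        # Sketch: for overloads f() and f(long a, optional long b) this
        # yields maxArgCount == 2 and allowedArgCounts == [0, 1, 2]; an
        # argument count with no matching signature would be omitted.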
self.maxArgCount = max(len(s[1]) for s in self.signatures())
self.allowedArgCounts = [i for i in range(self.maxArgCount+1)
if len(self.signaturesForArgCount(i)) != 0]
def validate(self):
IDLInterfaceMember.validate(self)
# Make sure our overloads are properly distinguishable and don't have
# different argument types before the distinguishing args.
for argCount in self.allowedArgCounts:
possibleOverloads = self.overloadsForArgCount(argCount)
if len(possibleOverloads) == 1:
continue
distinguishingIndex = self.distinguishingIndexForArgCount(argCount)
for idx in range(distinguishingIndex):
firstSigType = possibleOverloads[0].arguments[idx].type
for overload in possibleOverloads[1:]:
if overload.arguments[idx].type != firstSigType:
raise WebIDLError(
"Signatures for method '%s' with %d arguments have "
"different types of arguments at index %d, which "
"is before distinguishing index %d" %
(self.identifier.name, argCount, idx,
distinguishingIndex),
[self.location, overload.location])
overloadWithPromiseReturnType = None
overloadWithoutPromiseReturnType = None
for overload in self._overloads:
returnType = overload.returnType
if not returnType.unroll().isExposedInAllOf(self.exposureSet):
raise WebIDLError("Overload returns a type that is not exposed "
"everywhere where the method is exposed",
[overload.location])
variadicArgument = None
arguments = overload.arguments
for (idx, argument) in enumerate(arguments):
assert argument.type.isComplete()
if ((argument.type.isDictionary() and
argument.type.unroll().inner.canBeEmpty()) or
(argument.type.isUnion() and
argument.type.unroll().hasPossiblyEmptyDictionaryType())):
# Optional dictionaries and unions containing optional
# dictionaries at the end of the list or followed by
# optional arguments must be optional.
if (not argument.optional and
all(arg.optional for arg in arguments[idx+1:])):
raise WebIDLError("Dictionary argument without any "
"required fields or union argument "
"containing such dictionary not "
"followed by a required argument "
"must be optional",
[argument.location])
if (not argument.defaultValue and
all(arg.optional for arg in arguments[idx+1:])):
raise WebIDLError("Dictionary argument without any "
"required fields or union argument "
"containing such dictionary not "
"followed by a required argument "
"must have a default value",
[argument.location])
# An argument cannot be a nullable dictionary or a
# nullable union containing a dictionary.
if (argument.type.nullable() and
(argument.type.isDictionary() or
(argument.type.isUnion() and
argument.type.unroll().hasDictionaryType()))):
raise WebIDLError("An argument cannot be a nullable "
"dictionary or nullable union "
"containing a dictionary",
[argument.location])
# Only the last argument can be variadic
if variadicArgument:
raise WebIDLError("Variadic argument is not last argument",
[variadicArgument.location])
if argument.variadic:
variadicArgument = argument
if returnType.isPromise():
overloadWithPromiseReturnType = overload
else:
overloadWithoutPromiseReturnType = overload
# Make sure either all our overloads return Promises or none do
if overloadWithPromiseReturnType and overloadWithoutPromiseReturnType:
raise WebIDLError("We have overloads with both Promise and "
"non-Promise return types",
[overloadWithPromiseReturnType.location,
overloadWithoutPromiseReturnType.location])
if overloadWithPromiseReturnType and self._legacycaller:
raise WebIDLError("May not have a Promise return type for a "
"legacycaller.",
[overloadWithPromiseReturnType.location])
if self.getExtendedAttribute("StaticClassOverride") and not \
(self.identifier.scope.isJSImplemented() and self.isStatic()):
raise WebIDLError("StaticClassOverride can be applied to static"
" methods on JS-implemented classes only.",
[self.location])
# Ensure that toJSON methods satisfy the spec constraints on them.
if self.identifier.name == "toJSON":
if len(self.signatures()) != 1:
raise WebIDLError("toJSON method has multiple overloads",
[self._overloads[0].location,
self._overloads[1].location])
if len(self.signatures()[0][1]) != 0:
raise WebIDLError("toJSON method has arguments",
[self.location])
if not self.signatures()[0][0].isJSONType():
raise WebIDLError("toJSON method has non-JSON return type",
[self.location])
def overloadsForArgCount(self, argc):
return [overload for overload in self._overloads if
len(overload.arguments) == argc or
(len(overload.arguments) > argc and
all(arg.optional for arg in overload.arguments[argc:])) or
(len(overload.arguments) < argc and
len(overload.arguments) > 0 and
overload.arguments[-1].variadic)]
def signaturesForArgCount(self, argc):
return [(overload.returnType, overload.arguments) for overload
in self.overloadsForArgCount(argc)]
def locationsForArgCount(self, argc):
return [overload.location for overload in self.overloadsForArgCount(argc)]
def distinguishingIndexForArgCount(self, argc):
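        # Sketch: for overloads f(long x, long y) and f(long x, DOMString y),
        # the distinguishing index for two arguments is 1: the index-0 types
        # are identical, while long and DOMString are distinguishable.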
def isValidDistinguishingIndex(idx, signatures):
for (firstSigIndex, (firstRetval, firstArgs)) in enumerate(signatures[:-1]):
for (secondRetval, secondArgs) in signatures[firstSigIndex+1:]:
if idx < len(firstArgs):
firstType = firstArgs[idx].type
else:
assert(firstArgs[-1].variadic)
firstType = firstArgs[-1].type
if idx < len(secondArgs):
secondType = secondArgs[idx].type
else:
assert(secondArgs[-1].variadic)
secondType = secondArgs[-1].type
if not firstType.isDistinguishableFrom(secondType):
return False
return True
signatures = self.signaturesForArgCount(argc)
for idx in range(argc):
if isValidDistinguishingIndex(idx, signatures):
return idx
# No valid distinguishing index. Time to throw
locations = self.locationsForArgCount(argc)
raise WebIDLError("Signatures with %d arguments for method '%s' are not "
"distinguishable" % (argc, self.identifier.name),
locations)
def handleExtendedAttribute(self, attr):
identifier = attr.identifier()
if (identifier == "GetterThrows" or
identifier == "SetterThrows" or
identifier == "GetterCanOOM" or
identifier == "SetterCanOOM" or
identifier == "SetterNeedsSubjectPrincipal" or
identifier == "GetterNeedsSubjectPrincipal"):
raise WebIDLError("Methods must not be flagged as "
"[%s]" % identifier,
[attr.location, self.location])
elif identifier == "Unforgeable":
if self.isStatic():
raise WebIDLError("[Unforgeable] is only allowed on non-static "
"methods", [attr.location, self.location])
self._unforgeable = True
elif identifier == "SameObject":
raise WebIDLError("Methods must not be flagged as [SameObject]",
[attr.location, self.location])
elif identifier == "Constant":
raise WebIDLError("Methods must not be flagged as [Constant]",
[attr.location, self.location])
elif identifier == "PutForwards":
raise WebIDLError("Only attributes support [PutForwards]",
[attr.location, self.location])
elif identifier == "LenientSetter":
raise WebIDLError("Only attributes support [LenientSetter]",
[attr.location, self.location])
elif identifier == "LenientFloat":
# This is called before we've done overload resolution
overloads = self._overloads
assert len(overloads) == 1
if not overloads[0].returnType.isVoid():
raise WebIDLError("[LenientFloat] used on a non-void method",
[attr.location, self.location])
if not overloads[0].includesRestrictedFloatArgument():
raise WebIDLError("[LenientFloat] used on an operation with no "
"restricted float type arguments",
[attr.location, self.location])
elif identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
elif (identifier == "CrossOriginCallable" or
identifier == "WebGLHandlesContextLoss"):
# Known no-argument attributes.
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
if identifier == "CrossOriginCallable" and self.isStatic():
raise WebIDLError("[CrossOriginCallable] is only allowed on non-static "
"attributes",
[attr.location, self.location])
elif identifier == "Pure":
if not attr.noArguments():
raise WebIDLError("[Pure] must take no arguments",
[attr.location])
self._setDependsOn("DOMState")
self._setAffects("Nothing")
elif identifier == "Affects":
if not attr.hasValue():
raise WebIDLError("[Affects] takes an identifier",
[attr.location])
self._setAffects(attr.value())
elif identifier == "DependsOn":
if not attr.hasValue():
raise WebIDLError("[DependsOn] takes an identifier",
[attr.location])
self._setDependsOn(attr.value())
elif identifier == "Alias":
if not attr.hasValue():
raise WebIDLError("[Alias] takes an identifier or string",
[attr.location])
self._addAlias(attr.value())
elif identifier == "UseCounter":
if self.isSpecial():
raise WebIDLError("[UseCounter] must not be used on a special "
"operation",
[attr.location, self.location])
elif identifier == "Unscopable":
if not attr.noArguments():
raise WebIDLError("[Unscopable] must take no arguments",
[attr.location])
if self.isStatic():
raise WebIDLError("[Unscopable] is only allowed on non-static "
"attributes and operations",
[attr.location, self.location])
elif identifier == "CEReactions":
if not attr.noArguments():
raise WebIDLError("[CEReactions] must take no arguments",
[attr.location])
if self.isSpecial() and not self.isSetter() and not self.isDeleter():
raise WebIDLError("[CEReactions] is only allowed on operation, "
"attribute, setter, and deleter",
[attr.location, self.location])
elif identifier == "Default":
if not attr.noArguments():
raise WebIDLError("[Default] must take no arguments",
[attr.location])
if not self.isToJSON():
raise WebIDLError("[Default] is only allowed on toJSON operations",
[attr.location, self.location])
if self.signatures()[0][0] != BuiltinTypes[IDLBuiltinType.Types.object]:
raise WebIDLError("The return type of the default toJSON "
"operation must be 'object'",
[attr.location, self.location])
elif (identifier == "Throws" or
identifier == "CanOOM" or
identifier == "NewObject" or
identifier == "ChromeOnly" or
identifier == "Pref" or
identifier == "Deprecated" or
identifier == "Func" or
identifier == "SecureContext" or
identifier == "BinaryName" or
identifier == "NeedsSubjectPrincipal" or
identifier == "NeedsCallerType" or
identifier == "StaticClassOverride" or
identifier == "NonEnumerable" or
identifier == "Unexposed"):
# Known attributes that we don't need to do anything with here
pass
else:
raise WebIDLError("Unknown extended attribute %s on method" % identifier,
[attr.location])
IDLInterfaceMember.handleExtendedAttribute(self, attr)
def returnsPromise(self):
return self._overloads[0].returnType.isPromise()
def isUnforgeable(self):
return self._unforgeable
def _getDependentObjects(self):
deps = set()
for overload in self._overloads:
deps.update(overload._getDependentObjects())
return deps
class IDLConstructor(IDLMethod):
def __init__(self, location, args, name):
# We can't actually init our IDLMethod yet, because we do not know the
# return type yet. Just save the info we have for now and we will init
# it later.
self._initLocation = location
self._initArgs = args
self._initName = name
self._inited = False
self._initExtendedAttrs = []
def addExtendedAttributes(self, attrs):
if self._inited:
return IDLMethod.addExtendedAttributes(self, attrs)
self._initExtendedAttrs.extend(attrs)
def handleExtendedAttribute(self, attr):
identifier = attr.identifier()
if (identifier == "BinaryName" or
identifier == "ChromeOnly" or
identifier == "NewObject" or
identifier == "SecureContext" or
identifier == "Throws" or
identifier == "Func" or
identifier == "Pref"):
IDLMethod.handleExtendedAttribute(self, attr)
elif identifier == "HTMLConstructor":
if not attr.noArguments():
raise WebIDLError("[HTMLConstructor] must take no arguments",
[attr.location])
# We shouldn't end up here for named constructors.
assert(self.identifier.name == "constructor")
if any(len(sig[1]) != 0 for sig in self.signatures()):
raise WebIDLError("[HTMLConstructor] must not be applied to a "
"constructor operation that has arguments.",
[attr.location])
self._htmlConstructor = True
else:
raise WebIDLError("Unknown extended attribute %s on method" % identifier,
[attr.location])
def reallyInit(self, parentInterface):
name = self._initName
location = self._initLocation
identifier = IDLUnresolvedIdentifier(location, name, allowForbidden=True)
retType = IDLWrapperType(parentInterface.location, parentInterface)
IDLMethod.__init__(self, location, identifier, retType, self._initArgs,
static=True)
        self._inited = True
# Propagate through whatever extended attributes we already had
self.addExtendedAttributes(self._initExtendedAttrs)
self._initExtendedAttrs = []
# Constructors are always NewObject. Whether they throw or not is
# indicated by [Throws] annotations in the usual way.
self.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NewObject",))])
class IDLIncludesStatement(IDLObject):
def __init__(self, location, interface, mixin):
IDLObject.__init__(self, location)
self.interface = interface
self.mixin = mixin
self._finished = False
def finish(self, scope):
if self._finished:
return
self._finished = True
assert(isinstance(self.interface, IDLIdentifierPlaceholder))
assert(isinstance(self.mixin, IDLIdentifierPlaceholder))
interface = self.interface.finish(scope)
mixin = self.mixin.finish(scope)
# NOTE: we depend on not setting self.interface and
# self.mixin here to keep track of the original
# locations.
if not isinstance(interface, IDLInterface):
raise WebIDLError("Left-hand side of 'includes' is not an "
"interface",
[self.interface.location, interface.location])
if interface.isCallback():
raise WebIDLError("Left-hand side of 'includes' is a callback "
"interface",
[self.interface.location, interface.location])
if not isinstance(mixin, IDLInterfaceMixin):
raise WebIDLError("Right-hand side of 'includes' is not an "
"interface mixin",
[self.mixin.location, mixin.location])
mixin.actualExposureGlobalNames.update(interface._exposureGlobalNames)
interface.addIncludedMixin(mixin)
self.interface = interface
self.mixin = mixin
def validate(self):
pass
def addExtendedAttributes(self, attrs):
if len(attrs) != 0:
raise WebIDLError("There are no extended attributes that are "
"allowed on includes statements",
[attrs[0].location, self.location])
class IDLExtendedAttribute(IDLObject):
"""
A class to represent IDL extended attributes so we can give them locations
"""
def __init__(self, location, tuple):
IDLObject.__init__(self, location)
self._tuple = tuple
def identifier(self):
return self._tuple[0]
def noArguments(self):
return len(self._tuple) == 1
def hasValue(self):
return len(self._tuple) >= 2 and isinstance(self._tuple[1], str)
def value(self):
assert(self.hasValue())
return self._tuple[1]
def hasArgs(self):
        return ((len(self._tuple) == 2 and isinstance(self._tuple[1], list)) or
                len(self._tuple) == 3)
def args(self):
assert(self.hasArgs())
# Our args are our last element
return self._tuple[-1]
def listValue(self):
"""
Backdoor for storing random data in _extendedAttrDict
"""
return list(self._tuple)[1:]
# Parser
class Tokenizer(object):
tokens = [
"INTEGER",
"FLOATLITERAL",
"IDENTIFIER",
"STRING",
"COMMENTS",
"WHITESPACE",
"OTHER"
]
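    # Sketch: lexing "interface Foo { attribute long x; };" yields the
    # token stream INTERFACE IDENTIFIER LBRACE ATTRIBUTE LONG IDENTIFIER
    # SEMICOLON RBRACE SEMICOLON, with whitespace and comments discarded
    # by t_WHITESPACE and t_COMMENTS below.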
def t_FLOATLITERAL(self, t):
r'(-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+|Infinity))|NaN'
t.value = float(t.value)
return t
def t_INTEGER(self, t):
r'-?(0([0-7]+|[Xx][0-9A-Fa-f]+)?|[1-9][0-9]*)'
try:
# Can't use int(), because that doesn't handle octal properly.
t.value = parseInt(t.value)
        except Exception:
raise WebIDLError("Invalid integer literal",
[Location(lexer=self.lexer,
lineno=self.lexer.lineno,
lexpos=self.lexer.lexpos,
filename=self._filename)])
return t
def t_IDENTIFIER(self, t):
r'[_-]?[A-Za-z][0-9A-Z_a-z-]*'
t.type = self.keywords.get(t.value, 'IDENTIFIER')
return t
def t_STRING(self, t):
r'"[^"]*"'
t.value = t.value[1:-1]
return t
def t_COMMENTS(self, t):
r'(\/\*(.|\n)*?\*\/)|(\/\/.*)'
pass
def t_WHITESPACE(self, t):
r'[\t\n\r ]+'
pass
def t_ELLIPSIS(self, t):
r'\.\.\.'
t.type = self.keywords.get(t.value)
return t
def t_OTHER(self, t):
r'[^\t\n\r 0-9A-Z_a-z]'
t.type = self.keywords.get(t.value, 'OTHER')
return t
keywords = {
"interface": "INTERFACE",
"partial": "PARTIAL",
"mixin": "MIXIN",
"dictionary": "DICTIONARY",
"exception": "EXCEPTION",
"enum": "ENUM",
"callback": "CALLBACK",
"typedef": "TYPEDEF",
"includes": "INCLUDES",
"const": "CONST",
"null": "NULL",
"true": "TRUE",
"false": "FALSE",
"serializer": "SERIALIZER",
"stringifier": "STRINGIFIER",
"unrestricted": "UNRESTRICTED",
"attribute": "ATTRIBUTE",
"readonly": "READONLY",
"inherit": "INHERIT",
"static": "STATIC",
"getter": "GETTER",
"setter": "SETTER",
"deleter": "DELETER",
"legacycaller": "LEGACYCALLER",
"optional": "OPTIONAL",
"...": "ELLIPSIS",
"::": "SCOPE",
"DOMString": "DOMSTRING",
"ByteString": "BYTESTRING",
"USVString": "USVSTRING",
"JSString": "JSSTRING",
"UTF8String": "UTF8STRING",
"any": "ANY",
"boolean": "BOOLEAN",
"byte": "BYTE",
"double": "DOUBLE",
"float": "FLOAT",
"long": "LONG",
"object": "OBJECT",
"octet": "OCTET",
"Promise": "PROMISE",
"required": "REQUIRED",
"sequence": "SEQUENCE",
"record": "RECORD",
"short": "SHORT",
"unsigned": "UNSIGNED",
"void": "VOID",
":": "COLON",
";": "SEMICOLON",
"{": "LBRACE",
"}": "RBRACE",
"(": "LPAREN",
")": "RPAREN",
"[": "LBRACKET",
"]": "RBRACKET",
"?": "QUESTIONMARK",
",": "COMMA",
"=": "EQUALS",
"<": "LT",
">": "GT",
"ArrayBuffer": "ARRAYBUFFER",
"or": "OR",
"maplike": "MAPLIKE",
"setlike": "SETLIKE",
"iterable": "ITERABLE",
"namespace": "NAMESPACE",
"ReadableStream": "READABLESTREAM",
"constructor": "CONSTRUCTOR",
"symbol": "SYMBOL",
"async": "ASYNC",
}
tokens.extend(list(keywords.values()))
def t_error(self, t):
raise WebIDLError("Unrecognized Input",
[Location(lexer=self.lexer,
lineno=self.lexer.lineno,
lexpos=self.lexer.lexpos,
filename=self.filename)])
def __init__(self, lexer=None):
if lexer:
self.lexer = lexer
else:
self.lexer = lex.lex(object=self)
class SqueakyCleanLogger(object):
errorWhitelist = [
        # Web IDL defines the WHITESPACE and COMMENTS tokens, but doesn't
        # actually use them ... so far.
"Token 'WHITESPACE' defined, but not used",
"Token 'COMMENTS' defined, but not used",
# And that means we have unused tokens
"There are 2 unused tokens",
# Web IDL defines a OtherOrComma rule that's only used in
# ExtendedAttributeInner, which we don't use yet.
"Rule 'OtherOrComma' defined, but not used",
# And an unused rule
"There is 1 unused rule",
# And the OtherOrComma grammar symbol is unreachable.
"Symbol 'OtherOrComma' is unreachable",
# Which means the Other symbol is unreachable.
"Symbol 'Other' is unreachable",
]
def __init__(self):
self.errors = []
def debug(self, msg, *args, **kwargs):
pass
info = debug
def warning(self, msg, *args, **kwargs):
        if msg in ("%s:%d: Rule %r defined, but not used",
                   "%s:%d: Rule '%s' defined, but not used"):
# Munge things so we don't have to hardcode filenames and
# line numbers in our whitelist.
whitelistmsg = "Rule %r defined, but not used"
whitelistargs = args[2:]
else:
whitelistmsg = msg
whitelistargs = args
if (whitelistmsg % whitelistargs) not in SqueakyCleanLogger.errorWhitelist:
self.errors.append(msg % args)
error = warning
def reportGrammarErrors(self):
if self.errors:
raise WebIDLError("\n".join(self.errors), [])
class Parser(Tokenizer):
def getLocation(self, p, i):
return Location(self.lexer, p.lineno(i), p.lexpos(i), self._filename)
def globalScope(self):
return self._globalScope
# The p_Foo functions here must match the WebIDL spec's grammar.
# It's acceptable to split things at '|' boundaries.
def p_Definitions(self, p):
"""
Definitions : ExtendedAttributeList Definition Definitions
"""
if p[2]:
p[0] = [p[2]]
p[2].addExtendedAttributes(p[1])
else:
assert not p[1]
p[0] = []
p[0].extend(p[3])
def p_DefinitionsEmpty(self, p):
"""
Definitions :
"""
p[0] = []
def p_Definition(self, p):
"""
Definition : CallbackOrInterfaceOrMixin
| Namespace
| Partial
| Dictionary
| Exception
| Enum
| Typedef
| IncludesStatement
"""
p[0] = p[1]
assert p[1] # We might not have implemented something ...
def p_CallbackOrInterfaceOrMixinCallback(self, p):
"""
CallbackOrInterfaceOrMixin : CALLBACK CallbackRestOrInterface
"""
if p[2].isInterface():
assert isinstance(p[2], IDLInterface)
p[2].setCallback(True)
p[0] = p[2]
def p_CallbackOrInterfaceOrMixinInterfaceOrMixin(self, p):
"""
CallbackOrInterfaceOrMixin : INTERFACE InterfaceOrMixin
"""
p[0] = p[2]
def p_CallbackRestOrInterface(self, p):
"""
CallbackRestOrInterface : CallbackRest
| CallbackConstructorRest
| CallbackInterface
"""
assert p[1]
p[0] = p[1]
def handleNonPartialObject(self, location, identifier, constructor,
constructorArgs, nonPartialArgs):
"""
This handles non-partial objects (interfaces, namespaces and
dictionaries) by checking for an existing partial object, and promoting
it to non-partial as needed. The return value is the non-partial
object.
constructorArgs are all the args for the constructor except the last
one: isKnownNonPartial.
nonPartialArgs are the args for the setNonPartial call.
"""
# The name of the class starts with "IDL", so strip that off.
# Also, starts with a capital letter after that, so nix that
# as well.
prettyname = constructor.__name__[3:].lower()
try:
existingObj = self.globalScope()._lookupIdentifier(identifier)
if existingObj:
if not isinstance(existingObj, constructor):
raise WebIDLError("%s has the same name as "
"non-%s object" %
(prettyname.capitalize(), prettyname),
[location, existingObj.location])
existingObj.setNonPartial(*nonPartialArgs)
return existingObj
        except WebIDLError:
            raise
        except Exception:
            pass
# True for isKnownNonPartial
return constructor(*(constructorArgs + [True]))
def p_InterfaceOrMixin(self, p):
"""
InterfaceOrMixin : InterfaceRest
| MixinRest
"""
p[0] = p[1]
def p_CallbackInterface(self, p):
"""
CallbackInterface : INTERFACE InterfaceRest
"""
p[0] = p[2]
def p_InterfaceRest(self, p):
"""
InterfaceRest : IDENTIFIER Inheritance LBRACE InterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(location, p[1])
members = p[4]
parent = p[2]
p[0] = self.handleNonPartialObject(
location, identifier, IDLInterface,
[location, self.globalScope(), identifier, parent, members],
[location, parent, members])
def p_InterfaceForwardDecl(self, p):
"""
InterfaceRest : IDENTIFIER SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(location, p[1])
try:
if self.globalScope()._lookupIdentifier(identifier):
p[0] = self.globalScope()._lookupIdentifier(identifier)
if not isinstance(p[0], IDLExternalInterface):
raise WebIDLError("Name collision between external "
"interface declaration for identifier "
"%s and %s" % (identifier.name, p[0]),
[location, p[0].location])
return
        except WebIDLError:
            raise
        except Exception:
            pass
p[0] = IDLExternalInterface(location, self.globalScope(), identifier)
def p_MixinRest(self, p):
"""
MixinRest : MIXIN IDENTIFIER LBRACE MixinMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handleNonPartialObject(
location, identifier, IDLInterfaceMixin,
[location, self.globalScope(), identifier, members],
[location, members])
def p_Namespace(self, p):
"""
Namespace : NAMESPACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handleNonPartialObject(
location, identifier, IDLNamespace,
[location, self.globalScope(), identifier, members],
[location, None, members])
def p_Partial(self, p):
"""
Partial : PARTIAL PartialDefinition
"""
p[0] = p[2]
def p_PartialDefinitionInterface(self, p):
"""
PartialDefinition : INTERFACE PartialInterfaceOrPartialMixin
"""
p[0] = p[2]
def p_PartialDefinition(self, p):
"""
PartialDefinition : PartialNamespace
| PartialDictionary
"""
p[0] = p[1]
def handlePartialObject(self, location, identifier, nonPartialConstructor,
nonPartialConstructorArgs,
partialConstructorArgs):
"""
This handles partial objects (interfaces, namespaces and dictionaries)
by checking for an existing non-partial object, and adding ourselves to
it as needed. The return value is our partial object. We use
IDLPartialInterfaceOrNamespace for partial interfaces or namespaces,
and IDLPartialDictionary for partial dictionaries.
nonPartialConstructorArgs are all the args for the non-partial
constructor except the last two: members and isKnownNonPartial.
partialConstructorArgs are the arguments for the partial object
constructor, except the last one (the non-partial object).
"""
        # The class name starts with "IDL"; strip that prefix and lowercase
        # the rest to get a readable name for error messages.
prettyname = nonPartialConstructor.__name__[3:].lower()
nonPartialObject = None
try:
nonPartialObject = self.globalScope()._lookupIdentifier(identifier)
if nonPartialObject:
if not isinstance(nonPartialObject, nonPartialConstructor):
raise WebIDLError("Partial %s has the same name as "
"non-%s object" %
(prettyname, prettyname),
[location, nonPartialObject.location])
        except WebIDLError:
            raise
        except Exception:
            pass
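        # A failed lookup just means no non-partial object exists yet; a
        # placeholder is constructed below.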
if not nonPartialObject:
nonPartialObject = nonPartialConstructor(
# No members, False for isKnownNonPartial
*(nonPartialConstructorArgs), members=[], isKnownNonPartial=False)
partialObject = None
if isinstance(nonPartialObject, IDLDictionary):
partialObject = IDLPartialDictionary(
*(partialConstructorArgs + [nonPartialObject]))
elif isinstance(nonPartialObject, (IDLInterface, IDLInterfaceMixin, IDLNamespace)):
partialObject = IDLPartialInterfaceOrNamespace(
*(partialConstructorArgs + [nonPartialObject]))
else:
raise WebIDLError("Unknown partial object type %s" %
type(partialObject),
[location])
return partialObject
def p_PartialInterfaceOrPartialMixin(self, p):
"""
PartialInterfaceOrPartialMixin : PartialInterfaceRest
| PartialMixinRest
"""
p[0] = p[1]
def p_PartialInterfaceRest(self, p):
"""
PartialInterfaceRest : IDENTIFIER LBRACE PartialInterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(location, p[1])
members = p[3]
p[0] = self.handlePartialObject(
location, identifier, IDLInterface,
[location, self.globalScope(), identifier, None],
[location, identifier, members])
def p_PartialMixinRest(self, p):
"""
PartialMixinRest : MIXIN IDENTIFIER LBRACE MixinMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handlePartialObject(
location, identifier, IDLInterfaceMixin,
[location, self.globalScope(), identifier],
[location, identifier, members])
def p_PartialNamespace(self, p):
"""
PartialNamespace : NAMESPACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handlePartialObject(
location, identifier, IDLNamespace,
[location, self.globalScope(), identifier],
[location, identifier, members])
def p_PartialDictionary(self, p):
"""
PartialDictionary : DICTIONARY IDENTIFIER LBRACE DictionaryMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handlePartialObject(
location, identifier, IDLDictionary,
[location, self.globalScope(), identifier],
[location, identifier, members])
def p_Inheritance(self, p):
"""
Inheritance : COLON ScopedName
"""
p[0] = IDLIdentifierPlaceholder(self.getLocation(p, 2), p[2])
def p_InheritanceEmpty(self, p):
"""
Inheritance :
"""
pass
def p_InterfaceMembers(self, p):
"""
InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
"""
p[0] = [p[2]]
assert not p[1] or p[2]
p[2].addExtendedAttributes(p[1])
p[0].extend(p[3])
def p_InterfaceMembersEmpty(self, p):
"""
InterfaceMembers :
"""
p[0] = []
def p_InterfaceMember(self, p):
"""
InterfaceMember : PartialInterfaceMember
| Constructor
"""
p[0] = p[1]
def p_Constructor(self, p):
"""
Constructor : CONSTRUCTOR LPAREN ArgumentList RPAREN SEMICOLON
"""
p[0] = IDLConstructor(self.getLocation(p, 1), p[3], "constructor")
def p_PartialInterfaceMembers(self, p):
"""
PartialInterfaceMembers : ExtendedAttributeList PartialInterfaceMember PartialInterfaceMembers
"""
p[0] = [p[2]]
assert not p[1] or p[2]
p[2].addExtendedAttributes(p[1])
p[0].extend(p[3])
def p_PartialInterfaceMembersEmpty(self, p):
"""
PartialInterfaceMembers :
"""
p[0] = []
def p_PartialInterfaceMember(self, p):
"""
PartialInterfaceMember : Const
| AttributeOrOperationOrMaplikeOrSetlikeOrIterable
"""
p[0] = p[1]
def p_MixinMembersEmpty(self, p):
"""
MixinMembers :
"""
p[0] = []
def p_MixinMembers(self, p):
"""
MixinMembers : ExtendedAttributeList MixinMember MixinMembers
"""
p[0] = [p[2]]
assert not p[1] or p[2]
p[2].addExtendedAttributes(p[1])
p[0].extend(p[3])
def p_MixinMember(self, p):
"""
MixinMember : Const
| Attribute
| Operation
"""
p[0] = p[1]
def p_Dictionary(self, p):
"""
Dictionary : DICTIONARY IDENTIFIER Inheritance LBRACE DictionaryMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[5]
p[0] = IDLDictionary(location, self.globalScope(), identifier, p[3], members)
def p_DictionaryMembers(self, p):
"""
DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
|
"""
if len(p) == 1:
# We're at the end of the list
p[0] = []
return
p[2].addExtendedAttributes(p[1])
p[0] = [p[2]]
p[0].extend(p[3])
def p_DictionaryMemberRequired(self, p):
"""
DictionaryMember : REQUIRED TypeWithExtendedAttributes IDENTIFIER SEMICOLON
"""
# These quack a lot like required arguments, so just treat them that way.
t = p[2]
assert isinstance(t, IDLType)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
p[0] = IDLArgument(self.getLocation(p, 3), identifier, t,
optional=False,
defaultValue=None, variadic=False,
dictionaryMember=True)
def p_DictionaryMember(self, p):
"""
DictionaryMember : Type IDENTIFIER Default SEMICOLON
"""
# These quack a lot like optional arguments, so just treat them that way.
t = p[1]
assert isinstance(t, IDLType)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
defaultValue = p[3]
# Any attributes that precede this may apply to the type, so
# we configure the argument to forward type attributes down instead of producing
# a parse error
p[0] = IDLArgument(self.getLocation(p, 2), identifier, t,
optional=True,
defaultValue=defaultValue, variadic=False,
dictionaryMember=True, allowTypeAttributes=True)
def p_Default(self, p):
"""
Default : EQUALS DefaultValue
|
"""
if len(p) > 1:
p[0] = p[2]
else:
p[0] = None
def p_DefaultValue(self, p):
"""
DefaultValue : ConstValue
| LBRACKET RBRACKET
| LBRACE RBRACE
"""
if len(p) == 2:
p[0] = p[1]
else:
assert len(p) == 3 # Must be [] or {}
if p[1] == "[":
p[0] = IDLEmptySequenceValue(self.getLocation(p, 1))
else:
assert p[1] == "{"
p[0] = IDLDefaultDictionaryValue(self.getLocation(p, 1))
def p_DefaultValueNull(self, p):
"""
DefaultValue : NULL
"""
p[0] = IDLNullValue(self.getLocation(p, 1))
def p_Exception(self, p):
"""
Exception : EXCEPTION IDENTIFIER Inheritance LBRACE ExceptionMembers RBRACE SEMICOLON
"""
pass
def p_Enum(self, p):
"""
Enum : ENUM IDENTIFIER LBRACE EnumValueList RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
values = p[4]
assert values
p[0] = IDLEnum(location, self.globalScope(), identifier, values)
def p_EnumValueList(self, p):
"""
EnumValueList : STRING EnumValueListComma
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_EnumValueListComma(self, p):
"""
EnumValueListComma : COMMA EnumValueListString
"""
p[0] = p[2]
def p_EnumValueListCommaEmpty(self, p):
"""
EnumValueListComma :
"""
p[0] = []
def p_EnumValueListString(self, p):
"""
EnumValueListString : STRING EnumValueListComma
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_EnumValueListStringEmpty(self, p):
"""
EnumValueListString :
"""
p[0] = []
def p_CallbackRest(self, p):
"""
CallbackRest : IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
p[0] = IDLCallback(self.getLocation(p, 1), self.globalScope(),
identifier, p[3], p[5], isConstructor=False)
def p_CallbackConstructorRest(self, p):
"""
CallbackConstructorRest : CONSTRUCTOR IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
p[0] = IDLCallback(self.getLocation(p, 2), self.globalScope(),
identifier, p[4], p[6], isConstructor=True)
def p_ExceptionMembers(self, p):
"""
ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers
|
"""
pass
def p_Typedef(self, p):
"""
Typedef : TYPEDEF TypeWithExtendedAttributes IDENTIFIER SEMICOLON
"""
typedef = IDLTypedef(self.getLocation(p, 1), self.globalScope(),
p[2], p[3])
p[0] = typedef
def p_IncludesStatement(self, p):
"""
IncludesStatement : ScopedName INCLUDES ScopedName SEMICOLON
"""
        assert p[2] == "includes"
interface = IDLIdentifierPlaceholder(self.getLocation(p, 1), p[1])
mixin = IDLIdentifierPlaceholder(self.getLocation(p, 3), p[3])
p[0] = IDLIncludesStatement(self.getLocation(p, 1), interface, mixin)
def p_Const(self, p):
"""
Const : CONST ConstType IDENTIFIER EQUALS ConstValue SEMICOLON
"""
location = self.getLocation(p, 1)
type = p[2]
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
value = p[5]
p[0] = IDLConst(location, identifier, type, value)
def p_ConstValueBoolean(self, p):
"""
ConstValue : BooleanLiteral
"""
location = self.getLocation(p, 1)
booleanType = BuiltinTypes[IDLBuiltinType.Types.boolean]
p[0] = IDLValue(location, booleanType, p[1])
def p_ConstValueInteger(self, p):
"""
ConstValue : INTEGER
"""
location = self.getLocation(p, 1)
# We don't know ahead of time what type the integer literal is.
# Determine the smallest type it could possibly fit in and use that.
integerType = matchIntegerValueToType(p[1])
if integerType is None:
raise WebIDLError("Integer literal out of range", [location])
p[0] = IDLValue(location, integerType, p[1])
def p_ConstValueFloat(self, p):
"""
ConstValue : FLOATLITERAL
"""
location = self.getLocation(p, 1)
p[0] = IDLValue(location, BuiltinTypes[IDLBuiltinType.Types.unrestricted_float], p[1])
def p_ConstValueString(self, p):
"""
ConstValue : STRING
"""
location = self.getLocation(p, 1)
stringType = BuiltinTypes[IDLBuiltinType.Types.domstring]
p[0] = IDLValue(location, stringType, p[1])
def p_BooleanLiteralTrue(self, p):
"""
BooleanLiteral : TRUE
"""
p[0] = True
def p_BooleanLiteralFalse(self, p):
"""
BooleanLiteral : FALSE
"""
p[0] = False
def p_AttributeOrOperationOrMaplikeOrSetlikeOrIterable(self, p):
"""
AttributeOrOperationOrMaplikeOrSetlikeOrIterable : Attribute
| Maplike
| Setlike
| Iterable
| Operation
"""
p[0] = p[1]
def p_Iterable(self, p):
"""
Iterable : ITERABLE LT TypeWithExtendedAttributes GT SEMICOLON
| ITERABLE LT TypeWithExtendedAttributes COMMA TypeWithExtendedAttributes GT SEMICOLON
"""
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__iterable",
allowDoubleUnderscore=True)
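        # len(p) is 8 for the pair-iterator form (two types) and 6 for the
        # single-type value-iterator form.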
        if len(p) > 6:
keyType = p[3]
valueType = p[5]
else:
keyType = None
valueType = p[3]
p[0] = IDLIterable(location, identifier, keyType, valueType, self.globalScope())
def p_Setlike(self, p):
"""
Setlike : ReadOnly SETLIKE LT TypeWithExtendedAttributes GT SEMICOLON
"""
readonly = p[1]
maplikeOrSetlikeType = p[2]
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__setlike",
allowDoubleUnderscore=True)
keyType = p[4]
valueType = keyType
p[0] = IDLMaplikeOrSetlike(location, identifier, maplikeOrSetlikeType,
readonly, keyType, valueType)
def p_Maplike(self, p):
"""
Maplike : ReadOnly MAPLIKE LT TypeWithExtendedAttributes COMMA TypeWithExtendedAttributes GT SEMICOLON
"""
readonly = p[1]
maplikeOrSetlikeType = p[2]
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__maplike",
allowDoubleUnderscore=True)
keyType = p[4]
valueType = p[6]
p[0] = IDLMaplikeOrSetlike(location, identifier, maplikeOrSetlikeType,
readonly, keyType, valueType)
def p_AttributeWithQualifier(self, p):
"""
Attribute : Qualifier AttributeRest
"""
static = IDLInterfaceMember.Special.Static in p[1]
stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
(location, identifier, type, readonly) = p[2]
p[0] = IDLAttribute(location, identifier, type, readonly,
static=static, stringifier=stringifier)
def p_AttributeInherited(self, p):
"""
Attribute : INHERIT AttributeRest
"""
(location, identifier, type, readonly) = p[2]
p[0] = IDLAttribute(location, identifier, type, readonly, inherit=True)
def p_Attribute(self, p):
"""
Attribute : AttributeRest
"""
(location, identifier, type, readonly) = p[1]
p[0] = IDLAttribute(location, identifier, type, readonly, inherit=False)
def p_AttributeRest(self, p):
"""
AttributeRest : ReadOnly ATTRIBUTE TypeWithExtendedAttributes AttributeName SEMICOLON
"""
location = self.getLocation(p, 2)
readonly = p[1]
t = p[3]
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 4), p[4])
p[0] = (location, identifier, t, readonly)
def p_ReadOnly(self, p):
"""
ReadOnly : READONLY
"""
p[0] = True
def p_ReadOnlyEmpty(self, p):
"""
ReadOnly :
"""
p[0] = False
def p_Operation(self, p):
"""
Operation : Qualifiers OperationRest
"""
qualifiers = p[1]
# Disallow duplicates in the qualifier set
        if len(set(qualifiers)) != len(qualifiers):
raise WebIDLError("Duplicate qualifiers are not allowed",
[self.getLocation(p, 1)])
static = IDLInterfaceMember.Special.Static in p[1]
# If static is there that's all that's allowed. This is disallowed
# by the parser, so we can assert here.
assert not static or len(qualifiers) == 1
stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
# If stringifier is there that's all that's allowed. This is disallowed
# by the parser, so we can assert here.
assert not stringifier or len(qualifiers) == 1
        getter = IDLMethod.Special.Getter in p[1]
        setter = IDLMethod.Special.Setter in p[1]
        deleter = IDLMethod.Special.Deleter in p[1]
        legacycaller = IDLMethod.Special.LegacyCaller in p[1]
if getter or deleter:
if setter:
raise WebIDLError("getter and deleter are incompatible with setter",
[self.getLocation(p, 1)])
(returnType, identifier, arguments) = p[2]
assert isinstance(returnType, IDLType)
specialType = IDLMethod.NamedOrIndexed.Neither
if getter or deleter:
if len(arguments) != 1:
raise WebIDLError("%s has wrong number of arguments" %
("getter" if getter else "deleter"),
[self.getLocation(p, 2)])
argType = arguments[0].type
if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
specialType = IDLMethod.NamedOrIndexed.Named
elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
specialType = IDLMethod.NamedOrIndexed.Indexed
if deleter:
raise WebIDLError("There is no such thing as an indexed deleter.",
[self.getLocation(p, 1)])
else:
raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
("getter" if getter else "deleter"),
[arguments[0].location])
if arguments[0].optional or arguments[0].variadic:
raise WebIDLError("%s cannot have %s argument" %
("getter" if getter else "deleter",
"optional" if arguments[0].optional else "variadic"),
[arguments[0].location])
if getter:
if returnType.isVoid():
raise WebIDLError("getter cannot have void return type",
[self.getLocation(p, 2)])
if setter:
if len(arguments) != 2:
raise WebIDLError("setter has wrong number of arguments",
[self.getLocation(p, 2)])
argType = arguments[0].type
if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
specialType = IDLMethod.NamedOrIndexed.Named
elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
specialType = IDLMethod.NamedOrIndexed.Indexed
else:
raise WebIDLError("settter has wrong argument type (must be DOMString or UnsignedLong)",
[arguments[0].location])
if arguments[0].optional or arguments[0].variadic:
raise WebIDLError("setter cannot have %s argument" %
("optional" if arguments[0].optional else "variadic"),
[arguments[0].location])
if arguments[1].optional or arguments[1].variadic:
raise WebIDLError("setter cannot have %s argument" %
("optional" if arguments[1].optional else "variadic"),
[arguments[1].location])
if stringifier:
if len(arguments) != 0:
raise WebIDLError("stringifier has wrong number of arguments",
[self.getLocation(p, 2)])
if not returnType.isDOMString():
raise WebIDLError("stringifier must have DOMString return type",
[self.getLocation(p, 2)])
# identifier might be None. This is only permitted for special methods.
if not identifier:
if (not getter and not setter and
not deleter and not legacycaller and not stringifier):
raise WebIDLError("Identifier required for non-special methods",
[self.getLocation(p, 2)])
location = BuiltinLocation("<auto-generated-identifier>")
identifier = IDLUnresolvedIdentifier(
location,
"__%s%s%s%s%s%s" %
("named" if specialType == IDLMethod.NamedOrIndexed.Named else
"indexed" if specialType == IDLMethod.NamedOrIndexed.Indexed else "",
"getter" if getter else "",
"setter" if setter else "",
"deleter" if deleter else "",
"legacycaller" if legacycaller else "",
"stringifier" if stringifier else ""),
allowDoubleUnderscore=True)
method = IDLMethod(self.getLocation(p, 2), identifier, returnType, arguments,
static=static, getter=getter, setter=setter,
deleter=deleter, specialType=specialType,
legacycaller=legacycaller, stringifier=stringifier)
p[0] = method
def p_Stringifier(self, p):
"""
Operation : STRINGIFIER SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"__stringifier",
allowDoubleUnderscore=True)
method = IDLMethod(self.getLocation(p, 1),
identifier,
returnType=BuiltinTypes[IDLBuiltinType.Types.domstring],
arguments=[],
stringifier=True)
p[0] = method
def p_QualifierStatic(self, p):
"""
Qualifier : STATIC
"""
p[0] = [IDLInterfaceMember.Special.Static]
def p_QualifierStringifier(self, p):
"""
Qualifier : STRINGIFIER
"""
p[0] = [IDLInterfaceMember.Special.Stringifier]
def p_Qualifiers(self, p):
"""
Qualifiers : Qualifier
| Specials
"""
p[0] = p[1]
def p_Specials(self, p):
"""
Specials : Special Specials
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_SpecialsEmpty(self, p):
"""
Specials :
"""
p[0] = []
def p_SpecialGetter(self, p):
"""
Special : GETTER
"""
p[0] = IDLMethod.Special.Getter
def p_SpecialSetter(self, p):
"""
Special : SETTER
"""
p[0] = IDLMethod.Special.Setter
def p_SpecialDeleter(self, p):
"""
Special : DELETER
"""
p[0] = IDLMethod.Special.Deleter
def p_SpecialLegacyCaller(self, p):
"""
Special : LEGACYCALLER
"""
p[0] = IDLMethod.Special.LegacyCaller
def p_OperationRest(self, p):
"""
OperationRest : ReturnType OptionalIdentifier LPAREN ArgumentList RPAREN SEMICOLON
"""
p[0] = (p[1], p[2], p[4])
def p_OptionalIdentifier(self, p):
"""
OptionalIdentifier : IDENTIFIER
"""
p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
def p_OptionalIdentifierEmpty(self, p):
"""
OptionalIdentifier :
"""
pass
def p_ArgumentList(self, p):
"""
ArgumentList : Argument Arguments
"""
p[0] = [p[1]] if p[1] else []
p[0].extend(p[2])
def p_ArgumentListEmpty(self, p):
"""
ArgumentList :
"""
p[0] = []
def p_Arguments(self, p):
"""
Arguments : COMMA Argument Arguments
"""
p[0] = [p[2]] if p[2] else []
p[0].extend(p[3])
def p_ArgumentsEmpty(self, p):
"""
Arguments :
"""
p[0] = []
def p_Argument(self, p):
"""
Argument : ExtendedAttributeList ArgumentRest
"""
p[0] = p[2]
p[0].addExtendedAttributes(p[1])
def p_ArgumentRestOptional(self, p):
"""
ArgumentRest : OPTIONAL TypeWithExtendedAttributes ArgumentName Default
"""
t = p[2]
assert isinstance(t, IDLType)
# Arg names can be reserved identifiers
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3],
allowForbidden=True)
defaultValue = p[4]
# We can't test t.isAny() here and give it a default value as needed,
# since at this point t is not a fully resolved type yet (e.g. it might
# be a typedef). We'll handle the 'any' case in IDLArgument.complete.
        p[0] = IDLArgument(self.getLocation(p, 3), identifier, t,
                           optional=True, defaultValue=defaultValue,
                           variadic=False)
def p_ArgumentRest(self, p):
"""
ArgumentRest : Type Ellipsis ArgumentName
"""
t = p[1]
assert isinstance(t, IDLType)
# Arg names can be reserved identifiers
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3],
allowForbidden=True)
variadic = p[2]
# We can't test t.isAny() here and give it a default value as needed,
# since at this point t is not a fully resolved type yet (e.g. it might
# be a typedef). We'll handle the 'any' case in IDLArgument.complete.
# variadic implies optional
# Any attributes that precede this may apply to the type, so
# we configure the argument to forward type attributes down instead of producing
# a parse error
        p[0] = IDLArgument(self.getLocation(p, 3), identifier, t,
                           optional=variadic, defaultValue=None,
                           variadic=variadic, allowTypeAttributes=True)
def p_ArgumentName(self, p):
"""
ArgumentName : IDENTIFIER
| ArgumentNameKeyword
"""
p[0] = p[1]
def p_ArgumentNameKeyword(self, p):
"""
ArgumentNameKeyword : ASYNC
| ATTRIBUTE
| CALLBACK
| CONST
| CONSTRUCTOR
| DELETER
| DICTIONARY
| ENUM
| EXCEPTION
| GETTER
| INCLUDES
| INHERIT
| INTERFACE
| ITERABLE
| LEGACYCALLER
| MAPLIKE
| MIXIN
| NAMESPACE
| PARTIAL
| READONLY
| REQUIRED
| SERIALIZER
| SETLIKE
| SETTER
| STATIC
| STRINGIFIER
| TYPEDEF
| UNRESTRICTED
"""
p[0] = p[1]
def p_AttributeName(self, p):
"""
AttributeName : IDENTIFIER
| AttributeNameKeyword
"""
p[0] = p[1]
def p_AttributeNameKeyword(self, p):
"""
AttributeNameKeyword : ASYNC
| REQUIRED
"""
p[0] = p[1]
def p_Ellipsis(self, p):
"""
Ellipsis : ELLIPSIS
"""
p[0] = True
def p_EllipsisEmpty(self, p):
"""
Ellipsis :
"""
p[0] = False
def p_ExceptionMember(self, p):
"""
ExceptionMember : Const
| ExceptionField
"""
pass
def p_ExceptionField(self, p):
"""
ExceptionField : Type IDENTIFIER SEMICOLON
"""
pass
def p_ExtendedAttributeList(self, p):
"""
ExtendedAttributeList : LBRACKET ExtendedAttribute ExtendedAttributes RBRACKET
"""
p[0] = [p[2]]
if p[3]:
p[0].extend(p[3])
def p_ExtendedAttributeListEmpty(self, p):
"""
ExtendedAttributeList :
"""
p[0] = []
def p_ExtendedAttribute(self, p):
"""
ExtendedAttribute : ExtendedAttributeNoArgs
| ExtendedAttributeArgList
| ExtendedAttributeIdent
| ExtendedAttributeNamedArgList
| ExtendedAttributeIdentList
"""
p[0] = IDLExtendedAttribute(self.getLocation(p, 1), p[1])
def p_ExtendedAttributeEmpty(self, p):
"""
ExtendedAttribute :
"""
pass
def p_ExtendedAttributes(self, p):
"""
ExtendedAttributes : COMMA ExtendedAttribute ExtendedAttributes
"""
p[0] = [p[2]] if p[2] else []
p[0].extend(p[3])
def p_ExtendedAttributesEmpty(self, p):
"""
ExtendedAttributes :
"""
p[0] = []
def p_Other(self, p):
"""
Other : INTEGER
| FLOATLITERAL
| IDENTIFIER
| STRING
| OTHER
| ELLIPSIS
| COLON
| SCOPE
| SEMICOLON
| LT
| EQUALS
| GT
| QUESTIONMARK
| DOMSTRING
| BYTESTRING
| USVSTRING
| UTF8STRING
| JSSTRING
| PROMISE
| ANY
| BOOLEAN
| BYTE
| DOUBLE
| FALSE
| FLOAT
| LONG
| NULL
| OBJECT
| OCTET
| OR
| OPTIONAL
| RECORD
| SEQUENCE
| SHORT
| SYMBOL
| TRUE
| UNSIGNED
| VOID
| ArgumentNameKeyword
"""
pass
def p_OtherOrComma(self, p):
"""
OtherOrComma : Other
| COMMA
"""
pass
def p_TypeSingleType(self, p):
"""
Type : SingleType
"""
p[0] = p[1]
def p_TypeUnionType(self, p):
"""
Type : UnionType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_TypeWithExtendedAttributes(self, p):
"""
TypeWithExtendedAttributes : ExtendedAttributeList Type
"""
p[0] = p[2].withExtendedAttributes(p[1])
def p_SingleTypeDistinguishableType(self, p):
"""
SingleType : DistinguishableType
"""
p[0] = p[1]
def p_SingleTypeAnyType(self, p):
"""
SingleType : ANY
"""
p[0] = BuiltinTypes[IDLBuiltinType.Types.any]
# Note: Promise<void> is allowed, so we want to parametrize on ReturnType,
# not Type. Promise types can't be null, hence no "Null" in there.
def p_SingleTypePromiseType(self, p):
"""
SingleType : PROMISE LT ReturnType GT
"""
p[0] = IDLPromiseType(self.getLocation(p, 1), p[3])
def p_UnionType(self, p):
"""
UnionType : LPAREN UnionMemberType OR UnionMemberType UnionMemberTypes RPAREN
"""
types = [p[2], p[4]]
types.extend(p[5])
p[0] = IDLUnionType(self.getLocation(p, 1), types)
def p_UnionMemberTypeDistinguishableType(self, p):
"""
UnionMemberType : ExtendedAttributeList DistinguishableType
"""
p[0] = p[2].withExtendedAttributes(p[1])
def p_UnionMemberType(self, p):
"""
UnionMemberType : UnionType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_UnionMemberTypes(self, p):
"""
UnionMemberTypes : OR UnionMemberType UnionMemberTypes
"""
p[0] = [p[2]]
p[0].extend(p[3])
def p_UnionMemberTypesEmpty(self, p):
"""
UnionMemberTypes :
"""
p[0] = []
def p_DistinguishableType(self, p):
"""
DistinguishableType : PrimitiveType Null
| ARRAYBUFFER Null
| READABLESTREAM Null
| OBJECT Null
"""
if p[1] == "object":
type = BuiltinTypes[IDLBuiltinType.Types.object]
elif p[1] == "ArrayBuffer":
type = BuiltinTypes[IDLBuiltinType.Types.ArrayBuffer]
elif p[1] == "ReadableStream":
type = BuiltinTypes[IDLBuiltinType.Types.ReadableStream]
else:
type = BuiltinTypes[p[1]]
p[0] = self.handleNullable(type, p[2])
def p_DistinguishableTypeStringType(self, p):
"""
DistinguishableType : StringType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_DistinguishableTypeSequenceType(self, p):
"""
DistinguishableType : SEQUENCE LT TypeWithExtendedAttributes GT Null
"""
innerType = p[3]
type = IDLSequenceType(self.getLocation(p, 1), innerType)
p[0] = self.handleNullable(type, p[5])
def p_DistinguishableTypeRecordType(self, p):
"""
DistinguishableType : RECORD LT StringType COMMA TypeWithExtendedAttributes GT Null
"""
keyType = p[3]
valueType = p[5]
type = IDLRecordType(self.getLocation(p, 1), keyType, valueType)
p[0] = self.handleNullable(type, p[7])
def p_DistinguishableTypeScopedName(self, p):
"""
DistinguishableType : ScopedName Null
"""
assert isinstance(p[1], IDLUnresolvedIdentifier)
if p[1].name == "Promise":
raise WebIDLError("Promise used without saying what it's "
"parametrized over",
[self.getLocation(p, 1)])
type = None
try:
if self.globalScope()._lookupIdentifier(p[1]):
obj = self.globalScope()._lookupIdentifier(p[1])
assert not obj.isType()
if obj.isTypedef():
type = IDLTypedefType(self.getLocation(p, 1), obj.innerType,
obj.identifier.name)
elif obj.isCallback() and not obj.isInterface():
type = IDLCallbackType(obj.location, obj)
else:
type = IDLWrapperType(self.getLocation(p, 1), p[1])
p[0] = self.handleNullable(type, p[2])
return
        except Exception:
            # The lookup failed; fall through and treat the name as an
            # unresolved type.
            pass
type = IDLUnresolvedType(self.getLocation(p, 1), p[1])
p[0] = self.handleNullable(type, p[2])
def p_ConstType(self, p):
"""
ConstType : PrimitiveType
"""
p[0] = BuiltinTypes[p[1]]
def p_ConstTypeIdentifier(self, p):
"""
ConstType : IDENTIFIER
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
p[0] = IDLUnresolvedType(self.getLocation(p, 1), identifier)
def p_PrimitiveTypeUint(self, p):
"""
PrimitiveType : UnsignedIntegerType
"""
p[0] = p[1]
def p_PrimitiveTypeBoolean(self, p):
"""
PrimitiveType : BOOLEAN
"""
p[0] = IDLBuiltinType.Types.boolean
def p_PrimitiveTypeByte(self, p):
"""
PrimitiveType : BYTE
"""
p[0] = IDLBuiltinType.Types.byte
def p_PrimitiveTypeOctet(self, p):
"""
PrimitiveType : OCTET
"""
p[0] = IDLBuiltinType.Types.octet
def p_PrimitiveTypeFloat(self, p):
"""
PrimitiveType : FLOAT
"""
p[0] = IDLBuiltinType.Types.float
def p_PrimitiveTypeUnrestictedFloat(self, p):
"""
PrimitiveType : UNRESTRICTED FLOAT
"""
p[0] = IDLBuiltinType.Types.unrestricted_float
def p_PrimitiveTypeDouble(self, p):
"""
PrimitiveType : DOUBLE
"""
p[0] = IDLBuiltinType.Types.double
def p_PrimitiveTypeUnrestictedDouble(self, p):
"""
PrimitiveType : UNRESTRICTED DOUBLE
"""
p[0] = IDLBuiltinType.Types.unrestricted_double
def p_StringType(self, p):
"""
StringType : BuiltinStringType
"""
p[0] = BuiltinTypes[p[1]]
def p_BuiltinStringTypeDOMString(self, p):
"""
BuiltinStringType : DOMSTRING
"""
p[0] = IDLBuiltinType.Types.domstring
def p_BuiltinStringTypeBytestring(self, p):
"""
BuiltinStringType : BYTESTRING
"""
p[0] = IDLBuiltinType.Types.bytestring
def p_BuiltinStringTypeUSVString(self, p):
"""
BuiltinStringType : USVSTRING
"""
p[0] = IDLBuiltinType.Types.usvstring
def p_BuiltinStringTypeUTF8String(self, p):
"""
BuiltinStringType : UTF8STRING
"""
p[0] = IDLBuiltinType.Types.utf8string
def p_BuiltinStringTypeJSString(self, p):
"""
BuiltinStringType : JSSTRING
"""
p[0] = IDLBuiltinType.Types.jsstring
def p_UnsignedIntegerTypeUnsigned(self, p):
"""
UnsignedIntegerType : UNSIGNED IntegerType
"""
# Adding one to a given signed integer type gets you the unsigned type:
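        # (This relies on IDLBuiltinType.Types listing each unsigned integer
        # type immediately after its signed counterpart.)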
p[0] = p[2] + 1
def p_UnsignedIntegerType(self, p):
"""
UnsignedIntegerType : IntegerType
"""
p[0] = p[1]
def p_IntegerTypeShort(self, p):
"""
IntegerType : SHORT
"""
p[0] = IDLBuiltinType.Types.short
def p_IntegerTypeLong(self, p):
"""
IntegerType : LONG OptionalLong
"""
if p[2]:
p[0] = IDLBuiltinType.Types.long_long
else:
p[0] = IDLBuiltinType.Types.long
def p_OptionalLong(self, p):
"""
OptionalLong : LONG
"""
p[0] = True
def p_OptionalLongEmpty(self, p):
"""
OptionalLong :
"""
p[0] = False
def p_Null(self, p):
"""
Null : QUESTIONMARK
|
"""
if len(p) > 1:
p[0] = self.getLocation(p, 1)
else:
p[0] = None
def p_ReturnTypeType(self, p):
"""
ReturnType : Type
"""
p[0] = p[1]
def p_ReturnTypeVoid(self, p):
"""
ReturnType : VOID
"""
p[0] = BuiltinTypes[IDLBuiltinType.Types.void]
def p_ScopedName(self, p):
"""
ScopedName : AbsoluteScopedName
| RelativeScopedName
"""
p[0] = p[1]
def p_AbsoluteScopedName(self, p):
"""
AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts
"""
        # Absolute scoped names (leading "::") are not supported.
        assert False
def p_RelativeScopedName(self, p):
"""
RelativeScopedName : IDENTIFIER ScopedNameParts
"""
assert not p[2] # Not implemented!
p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
def p_ScopedNameParts(self, p):
"""
ScopedNameParts : SCOPE IDENTIFIER ScopedNameParts
"""
        # Multi-part scoped names are not supported.
        assert False
def p_ScopedNamePartsEmpty(self, p):
"""
ScopedNameParts :
"""
p[0] = None
def p_ExtendedAttributeNoArgs(self, p):
"""
ExtendedAttributeNoArgs : IDENTIFIER
"""
p[0] = (p[1],)
def p_ExtendedAttributeArgList(self, p):
"""
ExtendedAttributeArgList : IDENTIFIER LPAREN ArgumentList RPAREN
"""
p[0] = (p[1], p[3])
def p_ExtendedAttributeIdent(self, p):
"""
ExtendedAttributeIdent : IDENTIFIER EQUALS STRING
| IDENTIFIER EQUALS IDENTIFIER
"""
p[0] = (p[1], p[3])
def p_ExtendedAttributeNamedArgList(self, p):
"""
ExtendedAttributeNamedArgList : IDENTIFIER EQUALS IDENTIFIER LPAREN ArgumentList RPAREN
"""
p[0] = (p[1], p[3], p[5])
def p_ExtendedAttributeIdentList(self, p):
"""
ExtendedAttributeIdentList : IDENTIFIER EQUALS LPAREN IdentifierList RPAREN
"""
p[0] = (p[1], p[4])
def p_IdentifierList(self, p):
"""
IdentifierList : IDENTIFIER Identifiers
"""
idents = list(p[2])
# This is only used for identifier-list-valued extended attributes, and if
# we're going to restrict to IDENTIFIER here we should at least allow
# escaping with leading '_' as usual for identifiers.
ident = p[1]
if ident[0] == '_':
ident = ident[1:]
idents.insert(0, ident)
p[0] = idents
def p_IdentifiersList(self, p):
"""
Identifiers : COMMA IDENTIFIER Identifiers
"""
idents = list(p[3])
# This is only used for identifier-list-valued extended attributes, and if
# we're going to restrict to IDENTIFIER here we should at least allow
# escaping with leading '_' as usual for identifiers.
ident = p[2]
if ident[0] == '_':
ident = ident[1:]
idents.insert(0, ident)
p[0] = idents
def p_IdentifiersEmpty(self, p):
"""
Identifiers :
"""
p[0] = []
def p_error(self, p):
if not p:
raise WebIDLError("Syntax Error at end of file. Possibly due to missing semicolon(;), braces(}) or both",
[self._filename])
else:
raise WebIDLError("invalid syntax", [Location(self.lexer, p.lineno, p.lexpos, self._filename)])
def __init__(self, outputdir='', lexer=None):
Tokenizer.__init__(self, lexer)
logger = SqueakyCleanLogger()
try:
self.parser = yacc.yacc(module=self, errorlog=logger, debug=False)
finally:
logger.reportGrammarErrors()
self._globalScope = IDLScope(BuiltinLocation("<Global Scope>"), None, None)
self._installBuiltins(self._globalScope)
self._productions = []
self._filename = "<builtin>"
self.lexer.input(Parser._builtins)
self._filename = None
self.parser.parse(lexer=self.lexer, tracking=True)
def _installBuiltins(self, scope):
assert isinstance(scope, IDLScope)
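        # Register typedefs so IDL files can refer to the builtin
        # typed-array types (ArrayBuffer through Float64Array) by name.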
# range omits the last value.
for x in range(IDLBuiltinType.Types.ArrayBuffer, IDLBuiltinType.Types.Float64Array + 1):
builtin = BuiltinTypes[x]
name = builtin.name
typedef = IDLTypedef(BuiltinLocation("<builtin type>"), scope, builtin, name)
    @staticmethod
def handleNullable(type, questionMarkLocation):
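        # questionMarkLocation is the location of a trailing "?" token, or
        # None if the type was not written as nullable.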
if questionMarkLocation is not None:
type = IDLNullableType(questionMarkLocation, type)
return type
def parse(self, t, filename=None):
self._filename = filename
        self.lexer.input(t.decode(encoding='utf-8'))
        # for tok in iter(self.lexer.token, None):
        #     print(tok)
self._productions.extend(self.parser.parse(lexer=self.lexer, tracking=True))
self._filename = None
def finish(self):
# If we have interfaces that are iterable, create their
# iterator interfaces and add them to the productions array.
interfaceStatements = []
for p in self._productions:
if isinstance(p, IDLInterface):
interfaceStatements.append(p)
iterableIteratorIface = None
for iface in interfaceStatements:
iterable = None
# We haven't run finish() on the interface yet, so we don't know
# whether our interface is maplike/setlike/iterable or not. This
# means we have to loop through the members to see if we have an
# iterable member.
for m in iface.members:
if isinstance(m, IDLIterable):
iterable = m
break
if iterable and iterable.isPairIterator():
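                # For a pair iterator such as "iterable<DOMString, long>" on
                # interface Foo, synthesize a FooIterator interface whose
                # next() method drives iteration.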
                def simpleExtendedAttr(name):
                    return IDLExtendedAttribute(iface.location, (name, ))
nextMethod = IDLMethod(
iface.location,
IDLUnresolvedIdentifier(iface.location, "next"),
BuiltinTypes[IDLBuiltinType.Types.object], [])
nextMethod.addExtendedAttributes([simpleExtendedAttr("Throws")])
itr_ident = IDLUnresolvedIdentifier(iface.location,
iface.identifier.name + "Iterator")
toStringTag = iface.identifier.name + " Iterator"
itr_iface = IDLInterface(iface.location, self.globalScope(),
itr_ident, None, [nextMethod],
isKnownNonPartial=True,
classNameOverride=toStringTag,
toStringTag=toStringTag)
itr_iface.addExtendedAttributes([simpleExtendedAttr("NoInterfaceObject")])
# Make sure the exposure set for the iterator interface is the
# same as the exposure set for the iterable interface, because
# we're going to generate methods on the iterable that return
# instances of the iterator.
itr_iface._exposureGlobalNames = set(iface._exposureGlobalNames)
# Always append generated iterable interfaces after the
# interface they're a member of, otherwise nativeType generation
# won't work correctly.
itr_iface.iterableInterface = iface
self._productions.append(itr_iface)
iterable.iteratorType = IDLWrapperType(iface.location, itr_iface)
# Make sure we finish IDLIncludesStatements before we finish the
# IDLInterfaces.
# XXX khuey hates this bit and wants to nuke it from orbit.
includesStatements = [p for p in self._productions if
isinstance(p, IDLIncludesStatement)]
otherStatements = [p for p in self._productions if
not isinstance(p, IDLIncludesStatement)]
for production in includesStatements:
production.finish(self.globalScope())
for production in otherStatements:
production.finish(self.globalScope())
# Do any post-finish validation we need to do
for production in self._productions:
production.validate()
# De-duplicate self._productions, without modifying its order.
seen = set()
result = []
for p in self._productions:
if p not in seen:
seen.add(p)
result.append(p)
return result
def reset(self):
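        # Hand back a fresh Parser that reuses this one's lexer, avoiding
        # the cost of rebuilding the lex tables.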
return Parser(lexer=self.lexer)
# Builtin IDL defined by WebIDL
_builtins = """
typedef unsigned long long DOMTimeStamp;
typedef (ArrayBufferView or ArrayBuffer) BufferSource;
"""
def main():
# Parse arguments.
from optparse import OptionParser
usageString = "usage: %prog [options] files"
o = OptionParser(usage=usageString)
o.add_option("--cachedir", dest='cachedir', default=None,
help="Directory in which to cache lex/parse tables.")
o.add_option("--verbose-errors", action='store_true', default=False,
help="When an error happens, display the Python traceback.")
(options, args) = o.parse_args()
if len(args) < 1:
o.error(usageString)
fileList = args
baseDir = os.getcwd()
# Parse the WebIDL.
parser = Parser(options.cachedir)
try:
for filename in fileList:
fullPath = os.path.normpath(os.path.join(baseDir, filename))
            with open(fullPath, 'rb') as f:
                data = f.read()
            print(fullPath)
            parser.parse(data, fullPath)
parser.finish()
except WebIDLError as e:
if options.verbose_errors:
traceback.print_exc()
else:
print(e)
if __name__ == '__main__':
main()
| mpl-2.0 | 1,164,412,686,066,760,000 | 38.398934 | 158 | 0.563396 | false |