import time
import threading

import zc.twist


def _get(reactor, job, name, default, timeout, poll, deferred, start=None):
    # Poll the job's annotations from the reactor thread until the named
    # annotation appears or the timeout elapses, then hand the result to
    # the waiting ``Result``.
    now = time.time()
    if start is None:
        start = now
    if name in job.annotations:
        res = job.annotations[name]
    elif start + timeout < now:
        res = default
    else:
        partial = zc.twist.Partial(
            _get, reactor, job, name, default, timeout, poll, deferred,
            start)
        partial.setReactor(reactor)
        reactor.callLater(min(poll, start + timeout - now), partial)
        return
    deferred.setResult(res)


class Result(object):
    # A simple thread-safe container for handing a value from the reactor
    # thread back to a waiting worker thread.

    result = None

    def __init__(self):
        self._event = threading.Event()

    def setResult(self, value):
        self.result = value
        self._event.set()

    def wait(self, *args):
        self._event.wait(*args)


class Local(threading.local):
    # Thread-local state for the worker thread currently performing a job.

    job = None
    dispatcher = None
    name = None

    def getJob(self):
        return self.job

    def getQueue(self):
        return self.job.queue

    def getDispatcher(self):
        return self.dispatcher

    def getReactor(self):
        return self.dispatcher.reactor

    def getAgentName(self):
        return self.name

    def setLiveAnnotation(self, name, value, job=None):
        if self.job is None or self.dispatcher.reactor is None:
            raise ValueError('not initialized')
        if job is None:
            job = self.job
        partial = zc.twist.Partial(
            job.annotations.__setitem__, name, value)
        partial.setReactor(self.dispatcher.reactor)
        self.dispatcher.reactor.callFromThread(partial)

    def getLiveAnnotation(self, name, default=None, timeout=0,
                          poll=1, job=None):
        if self.job is None or self.dispatcher.reactor is None:
            raise ValueError('not initialized')
        if job is None:
            job = self.job
        deferred = Result()
        partial = zc.twist.Partial(
            _get, self.dispatcher.reactor, job, name, default, timeout, poll,
            deferred)
        partial.setReactor(self.dispatcher.reactor)
        self.dispatcher.reactor.callFromThread(partial)
        deferred.wait(timeout + 2)
        return deferred.result


local = Local()

[file: zc.async-1.5.4/src/zc/async/threadlocal.py]
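Worker code reaches the ``Local`` instance above through the package's
thread-local API (documented as ``zc.async.local``). A minimal sketch of a
job callable using live annotations to communicate while running; the
``import_rows`` function and the annotation keys are hypothetical::

    import zc.async.local

    def import_rows(rows):
        for i, row in enumerate(rows):
            # publish progress so other processes can read it immediately
            zc.async.local.setLiveAnnotation('progress', i)
            # check whether some other process asked us to stop
            if zc.async.local.getLiveAnnotation(
                    'stop-requested', default=False):
                break
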
import types

import zc.twist
import zope.component
import zope.event
import zope.component.event # yuck; as of this writing, this import causes
                            # the zope.component hook to be installed in
                            # zope.event.
import ZODB.interfaces

import zc.async.interfaces
import zc.async.job
import zc.async.queue
import zc.async.instanceuuid
import zc.async.subscribers

# These functions accomplish what configure.zcml does; you don't want both
# to be in play (the component registry will complain).

def minimal():
    # use this ``minimal`` function if you have the
    # zope.app.keyreference.persistent.connectionOfPersistent adapter
    # installed in your zope.component registry. Otherwise use ``base``
    # below.
    #
    # persistent object and connection -> transaction manager
    zope.component.provideAdapter(zc.twist.transactionManager)
    zope.component.provideAdapter(zc.twist.transactionManager,
                                  adapts=(ZODB.interfaces.IConnection,))
    # function and method -> job
    zope.component.provideAdapter(
        zc.async.job.Job,
        adapts=(types.FunctionType,),
        provides=zc.async.interfaces.IJob)
    zope.component.provideAdapter(
        zc.async.job.Job,
        adapts=(types.MethodType,),
        provides=zc.async.interfaces.IJob)
    zope.component.provideAdapter( # optional, rarely used
        zc.async.job.Job,
        adapts=(zc.twist.METHOD_WRAPPER_TYPE,),
        provides=zc.async.interfaces.IJob)
    zope.component.provideAdapter( # optional, rarely used
        zc.async.job.Job,
        adapts=(types.BuiltinFunctionType,),
        provides=zc.async.interfaces.IJob)
    # UUID for this instance
    zope.component.provideUtility(
        zc.async.instanceuuid.UUID, zc.async.interfaces.IUUID)

def base():
    # see comment in ``minimal``, above
    minimal()
    zope.component.provideAdapter(zc.twist.connection)

# this function installs a queue named '' (empty string), starts the
# dispatcher, and installs an agent named 'main', with default values.
# It is a convenience for quick starts.

def start(db, poll_interval=5, db_name=None, agent_chooser=None, agent_size=3,
          twisted=False):
    zope.component.provideAdapter(zc.async.queue.getDefaultQueue)
    zope.component.provideAdapter(zc.async.queue.getDefaultQueue,
                                  adapts=(ZODB.interfaces.IConnection,))
    zope.component.provideHandler(
        zc.async.subscribers.QueueInstaller(db_name=db_name))
    if twisted:
        zope.component.provideHandler(
            zc.async.subscribers.TwistedDispatcherInstaller(
                poll_interval=poll_interval))
    else:
        zope.component.provideHandler(
            zc.async.subscribers.ThreadedDispatcherInstaller(
                poll_interval=poll_interval))
    zope.component.provideHandler(
        zc.async.subscribers.AgentInstaller('main',
                                            chooser=agent_chooser,
                                            size=agent_size))
    zope.event.notify(zc.async.interfaces.DatabaseOpened(db))

[file: zc.async-1.5.4/src/zc/async/configure.py]
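To show how these pieces fit together, here is a quick-start sketch built on
the functions above. It assumes a FileStorage database and a hypothetical
importable module-level callable ``app.send_mail``; it is illustrative, not
the package's canonical setup::

    import ZODB
    import ZODB.FileStorage
    import transaction

    import zc.async.configure
    import zc.async.interfaces

    db = ZODB.DB(ZODB.FileStorage.FileStorage('Data.fs'))
    zc.async.configure.base()    # register the adapters described above
    zc.async.configure.start(db) # queue '', a dispatcher, and agent 'main'

    from app import send_mail    # hypothetical module-level callable

    conn = db.open()
    queue = conn.root()[zc.async.interfaces.KEY]['']  # the default queue
    queue.put(send_mail)
    transaction.commit()
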
import zope.interface
import zope.interface.common.mapping
import zope.interface.common.sequence
import zope.component.interfaces
import zc.queue.interfaces

from zc.async.i18n import _

# this is our only direct dependency on anything in zope.app, which is
# only used by our convenience subscribers. Since we don't really need this,
# or zope.app, we make this import optional and provide some replacements if
# necessary.
try:
    from zope.app.appsetup.interfaces import (IDatabaseOpenedEvent,
                                              DatabaseOpened)
except ImportError:
    class IDatabaseOpenedEvent(zope.interface.Interface):
        """The main database has been opened."""

        database = zope.interface.Attribute("The main database.")

    class DatabaseOpened(object):
        zope.interface.implements(IDatabaseOpenedEvent)

        def __init__(self, database):
            self.database = database

# TODO: these interfaces are not particularly complete. The other
# documentation is more accurate at the moment.

KEY = 'zc.async'

NEW = _('new-status', 'New')
PENDING = _('pending-status', 'Pending')
ASSIGNED = _('assigned-status', 'Assigned')
ACTIVE = _('active-status', 'Active')
CALLBACKS = _('callback-status', 'Performing Callbacks')
COMPLETED = _('completed-status', 'Completed')


class IReactor(zope.interface.Interface):
    """This describes what the dispatcher expects of the reactor.

    The reactor does not need to actually provide this interface."""

    def callFromThread(callable, *args, **kw):
        """have callable run in reactor's thread, by reactor, ASAP.

        Intended to be called from a thread other than the reactor's main
        loop.
        """

    def callInThread(callable, *args, **kw):
        """have callable run in a separate thread, ASAP.

        Must be called in same thread as reactor's main loop.
        """

    def callLater(seconds, callable, *args, **kw):
        """have callable run in reactor at least <seconds> from now.

        Must be called in same thread as reactor's main loop.
        """

    def addSystemEventTrigger(phase, event, callable, *args, **kw):
        """Install a callable to be run in phase of event.

        Must support phase 'before', and event 'shutdown'.
        """

    def callWhenRunning(_callable, *args, **kw):
        """run callable now if running, or when started.
        """


class IRetryPolicy(zope.interface.Interface):

    def jobError(failure, data_cache):
        """whether and how to retry after an error while performing job.

        return boolean as to whether to retry, or a datetime or timedelta
        to reschedule the job in the queue. An empty timedelta means to
        reschedule for immediately, before any pending calls in the
        queue."""

    def commitError(failure, data_cache):
        """whether to retry after trying to commit a job's successful
        result.

        return boolean as to whether to retry, or a datetime or timedelta
        to reschedule the job in the queue. An empty timedelta means to
        reschedule for immediately, before any pending calls in the
        queue."""

    def interrupted():
        """whether to retry after a dispatcher dies when job was in
        progress.

        return boolean as to whether to retry, or a datetime or timedelta
        to reschedule the job in the queue. An empty timedelta means to
        reschedule for immediately, before any pending calls in the
        queue."""

    def updateData(data_cache):
        """right before committing a job, retry is given a chance to stash
        information it has saved in the data_cache."""
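# For illustration only, a policy that never retries might look roughly
# like this (a sketch; the constructor signature is an assumption based on
# how jobs instantiate their retry-policy factories):
#
#   class NeverRetry(object):
#       zope.interface.implements(IRetryPolicy)
#
#       def __init__(self, job):
#           self.parent = self.__parent__ = job
#
#       def jobError(self, failure, data_cache):
#           return False
#
#       def commitError(self, failure, data_cache):
#           return False
#
#       def interrupted(self):
#           return False
#
#       def updateData(self, data_cache):
#           pass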
class AbstractObjectEvent(object):

    def __init__(self, object):
        self.object = object


class IDispatcherRegistered(zope.component.interfaces.IObjectEvent):
    """Dispatcher was registered"""


class DispatcherRegistered(AbstractObjectEvent):
    zope.interface.implements(IDispatcherRegistered)


class IDispatcherUnregistered(zope.component.interfaces.IObjectEvent):
    """Dispatcher was unregistered"""


class DispatcherUnregistered(AbstractObjectEvent):
    zope.interface.implements(IDispatcherUnregistered)


class IDispatcherActivated(zope.component.interfaces.IObjectEvent):
    """Dispatcher was activated"""


class DispatcherActivated(AbstractObjectEvent):
    zope.interface.implements(IDispatcherActivated)


class IDispatcherDeactivated(zope.component.interfaces.IObjectEvent):
    """Dispatcher was deactivated"""


class DispatcherDeactivated(AbstractObjectEvent):
    zope.interface.implements(IDispatcherDeactivated)


class IDispatcherReactivated(zope.component.interfaces.IObjectEvent):
    """Dispatcher was reactivated after mistaken deactivation"""


class DispatcherReactivated(AbstractObjectEvent):
    zope.interface.implements(IDispatcherReactivated)


class IObjectAdded(zope.component.interfaces.IObjectEvent):
    """Object was added to the database"""

    parent = zope.interface.Attribute(
        'container to which the object was added')

    name = zope.interface.Attribute(
        'name of the object within the container')


class ObjectAdded(AbstractObjectEvent):
    zope.interface.implements(IObjectAdded)

    def __init__(self, object, parent, name):
        super(ObjectAdded, self).__init__(object)
        self.parent = parent
        self.name = name


class AbortedError(Exception):
    """An explicit abort, as generated by the default behavior of
    IJob.handleInterrupt"""


class TimeoutError(Exception):
    """A time out caused by a ``begin_by`` value."""


class BadStatusError(Exception):
    """The job is not in the status it should be for the call being made.

    This is almost certainly a programmer error."""


class ReassignedError(Exception):
    """The job has been reassigned to another process.

    This should only happen when a polling timeout has made a not-dead
    process appear to be dead to a sibling."""


class IAbstractJob(zope.interface.Interface):

    parent = zope.interface.Attribute(
        """The current canonical location of the job""")

    status = zope.interface.Attribute(
        """One of constants defined in zc.async.interfaces:
        NEW, PENDING, ASSIGNED, ACTIVE, CALLBACKS, COMPLETED.

        NEW means not added to a queue and not yet called.
        PENDING means added to a queue but not an agent, and not yet called.
        ASSIGNED means added to an agent and not yet called.
        ACTIVE means in the process of being called.
        CALLBACKS means in the process of calling callbacks.
        COMPLETED means called.""")

    result = zope.interface.Attribute(
        """The result of the call. When state equals PENDING or ACTIVE, will
        be None. When COMPLETED, will be a twisted.python.failure.Failure
        describing the call failure or the successful result.""")

    def addCallbacks(success=None, failure=None):
        """if success or failure is not None, adds a callback job to
        self.callbacks and returns the job. Otherwise returns self.

        success and failure must be None or adaptable to IJob.
        addCallbacks may be called multiple times. Each will be called
        with the result of this job. If callback is already in COMPLETED
        state then the callback will be performed immediately."""

    def addCallback(callback):
        """callback will receive result (independent of whether it is a
        success or a failure). callback must be adaptable to IJob.
        addCallback may be called multiple times. Each will be called
        with the result of this job. If callback is already in
        COMPLETED state then the callback will be performed immediately."""

    callbacks = zope.interface.Attribute(
        """A mutable persistent list of the callback jobs added by
        addCallbacks.""")


class ICallbackProxy(IAbstractJob):
    """A proxy for jobs."""

    job = zope.interface.Attribute(
        """None, before ``getJob``, then the job calculated by
        ``getJob``""")

    def getJob(result):
        """Get the job for the given result."""


class IJob(IAbstractJob):

    callable = zope.interface.Attribute(
        """The callable object that should be called with *IJob.args and
        **IJob.kwargs when the IJob is called. Mutable.""")

    args = zope.interface.Attribute(
        """a persistent list of the args that should be applied to
        self.call. May include persistent objects (though note that, if
        passing a method is desired, it will typically need to be wrapped
        in an IJob).""")

    kwargs = zope.interface.Attribute(
        """a persistent mapping of the kwargs that should be applied to
        self.call. May include persistent objects (though note that, if
        passing a method is desired, it will typically need to be wrapped
        in an IJob).""")

    annotations = zope.interface.Attribute(
        """An OOBTree that is available for metadata use.""")

    def __call__(*args, **kwargs):
        """call the callable. Any given args are effectively appended to
        self.args for the call, and any kwargs effectively update
        self.kwargs for the call."""

    def handleInterrupt():
        """use IRetryPolicy to decide whether to abort."""

    def resumeCallbacks():
        """Make all callbacks remaining for this job. Any callbacks
        that are in PENDING state should be called normally; any callbacks
        in ACTIVE state should be `fail`ed; any callbacks in CALLBACKS state
        should `resumeCallback`; and any callbacks in COMPLETED state should
        be untouched. May only be called when job is in CALLBACKS state.
        State will be COMPLETED after this call."""

    assignerUUID = zope.interface.Attribute(
        """The UUID of the software instance that was in charge when the
        IJob was put in an IJobQueue. Should be assigned by
        IJobQueue.put.""")

    # selectedUUIDs = zope.interface.Attribute(
    #     """a set of selected worker UUIDs. If it is empty, it is
    #     interpreted as the set of all available workerUUIDs. Only
    #     workers with UUIDs in the set may perform it.
    #
    #     If a worker would have selected this job for a run, but the
    #     difference of selected_workerUUIDs and excluded_workerUUIDs
    #     stopped it, it is responsible for verifying that the effective
    #     set of workerUUIDs intersects with the available workers; if the
    #     intersection contains no possible workers, the worker should
    #     call job.fail().""")

    begin_after = zope.interface.Attribute(
        """A datetime.datetime in UTC of the first time when the
        job may run. Cannot be set after job gets a data_manager.
        """)

    begin_by = zope.interface.Attribute(
        """A datetime.timedelta of the duration after the begin_after
        value after which the job will fail, if it has not already
        begun. Cannot be set after job has begun.""")


class IAgent(zope.interface.common.sequence.IFiniteSequence):
    """Responsible for picking jobs and keeping track of them.

    An agent is a persistent object in a queue that is associated with a
    dispatcher and is responsible for picking jobs and keeping track of
    them. Zero or more agents within a queue can be associated with a
    dispatcher.

    Each agent for a given dispatcher is identified uniquely with a
    name. A fully (universally) unique identifier for the agent can be
    obtained by combining the key of the agent's queue in the main queue
    mapping at the ZODB root; the UUID of the agent's dispatcher; and
    the agent's name.
    """

    size = zope.interface.Attribute(
        """The maximum number of jobs this agent should have active at a
        time.""")

    name = zope.interface.Attribute(
        """The name for this agent. Unique within its dispatcher's jobs for
        its queue. Can be used to obtain agent with
        queue.dispatchers[*dispatcher UUID*][*name*].""")

    completed = zope.interface.Attribute(
        """an ICompleted of recent completed jobs.""")

    parent = zope.interface.Attribute(
        """a link to parent: an IDispatcherAgents container.""")

    def get():
        """get a new item, obtained from queue; or None if there are no
        items in the queue that this agent wants to take, or the agent is
        full. If an item is returned, it has also been added to the agent.
        """

    def remove(item):
        """remove item, or raise ValueError if item is not in queue"""

    def __delitem__(index):
        """delete item at index"""

    def index(item):
        """return index, or raise ValueError if item is not in queue"""


class IFilterAgent(IAgent):
    """An agent that uses a filter to claim jobs (see ``IQueue.claim``).

    This sort of agent can easily report what jobs it *could* take because
    the filter simply should be able to return a boolean and not change any
    state.
    """

    def filter(job):
        """return whether the agent could perform the job.

        This decision should ignore whether the agent has any room (that
        is, if len(agent) < agent.size).

        As a special case, if the ``filter`` attribute on the agent is
        None, this should be considered to be a do-nothing filter--that is,
        the agent accepts all jobs.
        """


class IQueue(zc.queue.interfaces.IQueue):

    parent = zope.interface.Attribute(
        """the IDataManager of which this is a part.""")

    def put(item, begin_after=None, begin_by=None):
        """Put an IJob adapted from item into the queue. Returns IJob.

        Remember that IJobs are not guaranteed to be run in the order
        added to a queue. If you need sequencing, use
        IJob.addCallbacks.

        item must be an IJob, or be adaptable to that interface.

        begin_after must be None (to leave the job's current value) or a
        datetime.datetime. begin_by must be None (to leave it alone) or a
        datetime.timedelta of the duration after the begin_after.

        If item.begin_after is None and begin_after is None, begin_after
        will effectively be now. If item.begin_by is None and begin_by is
        None, begin_by will effectively be datetime.timedelta(hours=1).

        datetime.datetimes are suggested to be in UTC. Timezone-naive
        datetimes will be interpreted as in UTC. Timezone-aware datetimes
        will be converted to UTC, and errors because of this (such as
        pytz ambiguity errors) will be raised.

        When an IJob is put in the queue, the queue puts the
        begin_after time and begin_by duration on the job,
        and the UUID of the Zope instance that put the job in the
        queue on the `assignerUUID`.
        """

    def putBack(item):
        """Return a previously claimed job to the top of the queue."""

    def pull(index=0):
        """Remove and return a job, by default from the front of the queue.

        Raise IndexError if index does not exist.

        This is the blessed way to remove an unclaimed job from the queue
        so that dispatchers will not try to perform it.
        """

    def remove(item):
        """Removes item from queue or raises LookupError if not found."""

    def claim(filter=None, default=None):
        """returns first due job that is available for the given filter,
        removing it from the queue as appropriate; or None, if none are
        available. Responsible for including jobs to fail expired
        jobs."""


class IDispatcherAgents(zope.interface.common.mapping.IMapping):
    """holds agents. contained agents get a ``name`` and ``parent``
    associated with this mapping."""


class IDispatchers(zope.interface.common.mapping.IEnumerableMapping):

    def register(UUID):
        "register UUID"

    def unregister(UUID):
        "unregister UUID"

    def ping(UUID):
        """responsible for setting ping time if necessary for this
        dispatcher agent, and for decommissioning dead dispatchers for
        the next highest dispatcher (sorted by UUID) if its
        (last_ping.value + ping_interval + ping_death_interval) < now.
        If this is the highest dispatcher UUID, cycles around to lowest."""


class IQuota(zope.interface.common.mapping.IEnumerableMapping):

    def clean():
        ''

    filled = zope.interface.Attribute(
        "")

    def add(item):
        "add a job"

    name = zope.interface.Attribute(
        "")

    parent = zope.interface.Attribute(
        "")


class FullError(Exception):
    """Container is full.
    """


class ISizedSequence(zope.interface.common.sequence.IFiniteSequence):

    size = zope.interface.Attribute(
        """an integer. If the queue's len >= size, put will raise
        FullError""")

    def add(item):
        """same contract as IQueue.put, except if queue's len >= size, put
        will raise FullError, and all objects get __parent__ set to the
        queue; and it will only store jobs."""

    __parent__ = zope.interface.Attribute(
        """a link to parent: an IWorker""")

    def remove(item):
        """remove item, or raise ValueError if item is not in queue"""

    def __delitem__(index):
        """delete item at index"""

    def index(item):
        """return index, or raise ValueError if item is not in queue"""


class ICompletedCollection(zope.interface.Interface):

    def __iter__():
        """Iterate over jobs in collection, from most recent `begin_after`
        to oldest"""

    def iter(start=None, stop=None):
        """Iterate over jobs in collection, starting and stopping with
        given timezone-aware datetime values reasonably efficiently."""

    def __len__():
        """Return number of jobs in collection"""

    def add(job):
        """Add job to collection and set __parent__ to the collection."""

    __parent__ = zope.interface.Attribute(
        """an IAgent""")

    def first(start=None):
        """Return the first (most recent) job in the collection, starting
        with optional timezone-aware datetime."""

    def last(stop=None):
        """Return the last (oldest) job in the collection, stopping
        with optional timezone-aware datetime."""

    def __nonzero__():
        "whether collection contains any jobs"


class IUUID(zope.interface.Interface):
    """A marker interface for the API of Ka-Ping Yee's uuid.UUID class.

    See http://zesty.ca/python/uuid.html """

[file: zc.async-1.5.4/src/zc/async/interfaces.py]
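Because ``configure.minimal`` (earlier in this dump) registers
``zc.async.job.Job`` as an adapter from functions and methods to ``IJob``,
application code usually obtains and extends jobs through the interface. A
minimal sketch (``notify_admin`` is a hypothetical module-level function)::

    import zc.async.interfaces

    def notify_admin(result):
        pass  # hypothetical callback body

    job = zc.async.interfaces.IJob(notify_admin)  # adapts the function
    # a callback is itself a job, so sequencing chains naturally:
    # earlier_job.addCallback(job)
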
.. _configuration-with-zope-3:
=========================
Configuration with Zope 3
=========================
Our last main section can be the shortest yet, both because we've already
introduced all of the main concepts, and because we will be leveraging
conveniences to automate much of the configuration shown in the section
discussing configuration without Zope 3.
Client Set Up
=============
If you want to set up a client alone, without a dispatcher, include the egg
in your setup.py, include the configure.zcml in your application's zcml,
make sure you share the database in which the queues will be held, and make
sure that either the zope.app.keyreference.persistent.connectionOfPersistent
adapter or zc.twist.connection is registered.

That should be it.
Client/Server Set Up
====================
For a client/server combination, use zcml something like the
basic_dispatcher_policy.zcml, make sure you have access to the database with
the queues, configure logging and monitoring as desired, configure the
``ZC_ASYNC_UUID`` environment variable in zdaemon.conf if you are in
production, and start up! Getting started is really pretty easy. You can
even start a dispatcher-only version by not starting any servers in zcml.
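For instance, the relevant part of a zdaemon.conf might look like this
sketch (the path is hypothetical, and this assumes the variable names a
file in which the instance's UUID is kept)::

    <environment>
      ZC_ASYNC_UUID /path/to/instance/uuid.txt
    </environment>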
In comparison to the non-Zope 3 usage, an important difference in your setup.py
is that, if you want the full set up described below, including zc.z3monitor,
you'll need to specify "zc.async [z3]" as the desired package in your
``install_requires``, as opposed to just "zc.async" [#extras_require]_.
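For instance, a minimal ``setup.py`` along these lines (the project name is
hypothetical; only the ``install_requires`` line matters here)::

    from setuptools import setup

    setup(
        name='myapp',
        version='1.0',
        install_requires=['zc.async [z3]'],
        )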
We'll look at this by making a zope.conf-alike and a site.zcml-alike. We'll
need a place to put some files, so we'll use a temporary directory. This, and
the comments in the files that we set up, are the primary differences between
our examples and a real set up.
We'll do this in two versions. The first version uses a single database, as
you might do to get started quickly, or for a small site. The second version
has one database for the main application, and one database for the async data,
as will be more appropriate for typical production usage.
.. toctree::
:maxdepth: 2
README_3a
README_3b
.. rubric:: Footnotes
.. [#extras_require] The "[z3]" is an "extra", defined in zc.async's setup.py
in ``extras_require``. It pulls along zc.z3monitor and simplejson in
addition to the packages described in the
:ref:`configuration-without-zope-3` section. Unfortunately, zc.z3monitor
depends on zope.app.appsetup, which as of this writing ends up depending
indirectly on many, many packages, some as far flung as zope.app.rotterdam.
[file: zc.async-1.5.4/src/zc/async/README_3.txt]
import datetime
import fnmatch
import itertools
import re
from uuid import UUID # we use this non-standard import spelling because
                      # ``uuid`` is frequently an argument

import pytz
import twisted.python.failure
import ZODB.interfaces
import ZODB.utils
import zope.component

import zc.async.dispatcher
import zc.async.interfaces
import zc.async.monitor
import zc.async.utils

_available_states = frozenset(
    ('pending', 'assigned', 'active', 'callbacks', 'completed', 'succeeded',
     'failed'))


def _get_date_filter(name, value):
    since = before = None
    for o in value.split(','):
        if o.startswith('since'):
            if since is not None:
                raise ValueError('only provide "since" once (%s)' % (name,))
            since = zc.async.monitor._dt(o[5:]).replace(tzinfo=pytz.UTC)
        elif o.startswith('before'):
            if before is not None:
                raise ValueError(
                    'only provide "before" once (%s)' % (name,))
            # len('before') == 6, so slice off six characters here
            before = zc.async.monitor._dt(o[6:]).replace(tzinfo=pytz.UTC)
    return lambda j: ((since is None or getattr(j, name) > since) and
                      (before is None or getattr(j, name) < before))


def _jobs(context, states,
          callable=None, queue=None, agent=None, requested_start=None,
          start=None, end=None, callbacks_completed=None,
          uuid=None):
    conn = ZODB.interfaces.IConnection(context)
    states = set(states)
    unknown = states - _available_states
    if unknown:
        raise ValueError('Available states are %s (unknown: %s)' %
                         (', '.join(sorted(_available_states)),
                          ', '.join(sorted(unknown))))
    completed = set(['completed', 'succeeded', 'failed']) & states
    if len(completed) > 1:
        raise ValueError(
            'can only include zero or one of '
            '"completed", "succeeded," or "failed" states.')
    elif completed:
        completed = iter(completed).next()
    if not states:
        raise ValueError(
            'Specify at least one of the available states: %s' %
            (', '.join(sorted(_available_states)),))
    pending = 'pending' in states
    assigned = 'assigned' in states
    active = 'active' in states
    callbacks = 'callbacks' in states
    agent_states = []
    if assigned:
        agent_states.append(zc.async.interfaces.ASSIGNED)
    if active:
        agent_states.append(zc.async.interfaces.ACTIVE)
    if callbacks:
        agent_states.append(zc.async.interfaces.CALLBACKS)
    if uuid is not None:
        if uuid.upper() == 'THIS':
            uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
        else:
            uuid = UUID(uuid)
    filters = []
    if callable is not None:
        regex = fnmatch.translate(callable)
        if '.' not in callable:
            regex = r'(.*\.)?%s$' % (regex,)
        callable = re.compile(regex).match
        filters.append(
            lambda j: callable(zc.async.utils.custom_repr(j.callable)))
    if requested_start:
        filters.append(_get_date_filter('begin_after', requested_start))
    if start:
        pending = False
        filters.append(_get_date_filter('active_start', start))
    if end:
        pending = assigned = active = False
        filters.append(_get_date_filter('active_end', end))
    if callbacks_completed:
        pending = assigned = active = callbacks = False
        filters.append(
            _get_date_filter('initial_callbacks_end', callbacks_completed))
    if queue is not None:
        queue = re.compile(fnmatch.translate(queue)).match
    if agent is not None:
        agent = re.compile(fnmatch.translate(agent)).match
    sources = []
    if pending:
        def pending_source(q, agent_filters, ignore_agent_filters):
            for j in q:
                if not ignore_agent_filters:
                    for f in agent_filters:
                        if f(j):
                            break # this is a positive match
                    else:
                        continue
                for f in filters:
                    if not f(j):
                        break # this is a negative match
                else:
                    yield j
        def pending_key(job):
            return job.begin_after.isoformat()
        pending_sources = []
        sources.append((pending_sources, pending_key))
    if agent_states:
        def agent_source(a):
            for j in a:
                if j.status not in agent_states:
                    continue
                for f in filters:
                    if not f(j):
                        break
                else:
                    yield j
        now = datetime.datetime.now(pytz.UTC)
        def agent_key(job):
            return (job.active_start or now).isoformat()
        agent_sources = []
        sources.append((agent_sources, agent_key))
    if completed:
        def completed_source(a):
            for j in a.completed:
                if completed != 'completed':
                    is_failure = isinstance(
                        j.result, twisted.python.failure.Failure)
                    if (completed == 'succeeded' and is_failure or
                        completed == 'failed' and not is_failure):
                        continue
                for f in filters:
                    if not f(j):
                        break
                else:
                    yield j
        def completed_key(job):
            return job.key # == reverse int of job.initial_callbacks_end
        completed_sources = []
        sources.append((completed_sources, completed_key))
    queues = conn.root()[zc.async.interfaces.KEY]
    for q_name, q in queues.items():
        if queue and not queue(q_name):
            continue
        agent_filters = []
        ignore_agent_filters = agent is None and uuid is None
        if (agent_states or completed or
            pending and not ignore_agent_filters):
            if uuid is None:
                das = q.dispatchers.values()
            else:
                das = (q.dispatchers[uuid],)
            for da in das:
                for a_name, a in da.items():
                    if agent:
                        if not agent(a_name):
                            continue
                    if agent or uuid is not None:
                        if pending and not ignore_agent_filters:
                            if zc.async.interfaces.IFilterAgent.providedBy(
                                    a):
                                agent_filters.append(a.filter)
                                ignore_agent_filters = (
                                    ignore_agent_filters or
                                    a.filter is None)
                            else:
                                raise ValueError(
                                    'can only find pending jobs for agent '
                                    'if agent provides '
                                    'zc.async.interfaces.IFilterAgent '
                                    '(%s : %s : %s)' %
                                    (q_name, da.UUID, a_name))
                    if agent_states:
                        agent_sources.append(agent_source(a))
                    if completed:
                        completed_sources.append(completed_source(a))
        if pending and (not agent or agent_filters):
            pending_sources.append(
                pending_source(q, agent_filters, ignore_agent_filters))
    return itertools.chain(
        *(zc.async.utils.sortedmerge(s, key) for s, key in sources))
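# For example, from a debugging prompt one might list the pending jobs that
# this process' dispatcher could claim (a sketch; ``conn`` is assumed to be
# an open ZODB connection, which is adaptable to IConnection):
#
#   for j in _jobs(conn, ('pending',), uuid='THIS'):
#       print repr(j)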
def jobs(context, *states, **kwargs):
    """Return jobs in one or more states.

    By default, jobs are identified by integer OID and database name.
    These identifiers can be used with the "asyncdb job" command to get
    details about the jobs. The integer OIDs can be used in a database
    connection to get the job with
    ``connection.get(ZODB.utils.p64(INTEGER_OID))``. For other display
    options for jobs, see the "display" optional argument.

    After the arguments list, this description concludes with usage
    examples.

    Arguments
    =========

    States
    ------

    You must provide at least one of the following states.

    - "pending": the job is in a queue, waiting to be started.

    - "assigned": a dispatcher has claimed the job and assigned it to one
      of its worker threads. Work has not yet begun. Jobs are in this
      state very briefly.

    - "active": A worker thread is performing this job.

    - "callbacks": the job's work is ended, and the thread is performing
      the callbacks, if any.

    - "completed": the job and its callbacks are completed. Completed jobs
      stay in the database for only a certain amount of time--between
      seven and eight days in the default agent implementation.

    - "succeeded": the job completed successfully (that is, without
      raising an unhandled exception, and without returning an explicit
      twisted.python.failure.Failure). This is a subset of "completed,"
      described above.

    - "failed": the job completed by raising an unhandled exception or by
      explicitly returning a twisted.python.failure.Failure. This is a
      subset of "completed," described above.

    You may use no more than one of the states "completed," "succeeded,"
    and "failed".

    Optional Arguments
    ------------------

    You can further filter your results with a number of optional
    arguments.

    "Shell-style glob wildcards," as referred to in this list, are "?",
    "*", "[SEQUENCE]", and "[!SEQUENCE]", as described in
    http://docs.python.org/lib/module-fnmatch.html .

    A "duration-based filter" described in this list accepts an argument
    that is of the form "sinceINTERVAL", "beforeINTERVAL", or
    "sinceINTERVAL,beforeINTERVAL" (no space!). The "INTERVAL"s are of the
    form ``[nD][nH][nM][nS]``, where "n" should be replaced with a
    positive integer, and "D," "H," "M," and "S" are literals standing for
    "days," "hours," "minutes," and "seconds." For instance, you might use
    ``5M`` for five minutes, ``20S`` for twenty seconds, or ``1H30M`` for
    an hour and a half. Thus "before30M" would mean "thirty minutes ago or
    older." "since45S" would mean "45 seconds ago or newer."
    "since1H,before30M" would mean "between thirty minutes and an hour
    ago." Note that reversing the two components in the last
    example--"before30M,since1H"--is equivalent.

    - "callable": filters by callable name. Supports shell-style glob
      wildcards. If you do not include a "." in the string, it matches
      only on the callable name. If you include a ".", it matches on the
      fully-qualified name (that is, including the module).

    - "queue": filters by queue name. Supports shell-style glob wildcards.

    - "agent": filters by agent name. Supports shell-style glob wildcards.

      When applied to jobs in the "pending" state, this restricts the jobs
      to the ones that the agent could perform, according to its filter.

    - "uuid": filters by UUID string, or the special marker "THIS",
      indicating the UUID of the current process' dispatcher. Supports
      shell-style glob wildcards.

      When applied to jobs in the "pending" state, this restricts the jobs
      to the ones that the agents for that dispatcher could perform,
      according to their filters.

    - "requested_start": a duration-based filter for when the job was
      requested to start.

      Note that, if a job is not given an explicit start time, the time
      when it was added to a queue is used. This is based on a job's
      ``begin_after`` attribute.

    - "start": a duration-based filter for when the job started work.

      Note that, if a job is restarted because of problems such as an
      interruption or a conflict error, this is the most recent time that
      the job started work. This is based on a job's ``active_start``
      attribute.

      To see the first time a job started work ever, the default retry
      policies store a 'first_active' value in their ``data`` attribute
      (``job.getRetryPolicy().data.get('first_active')``). Other
      information about retries can also be found in this data dictionary.

    - "end": a duration-based filter for when the job ended work (but not
      callbacks).

      This is based on a job's ``active_end`` attribute.

    - "callbacks_completed": a duration-based filter for when the job
      finished the callbacks it had just after it performed the job.

      If subsequent callbacks are added, they are performed immediately,
      and will not affect the value that this filter uses.

      This is based on a job's ``initial_callbacks_end`` attribute.

    - "display": By default, or with a "default" value, jobs are
      identified with integer OID and database name. If the display value
      is "repr," reprs of the jobs are used instead. If the display value
      is "details," a dictionary of details is used for each job.

    - "count": By default, or with a value of 0, this will include all
      jobs matching the filter. If you provide a count (a positive
      integer), only a maximum of the given "count" items will be listed.

    Usage Examples
    ==============

    Here are some examples of the command.

        asyncdb job pending
        (lists the job identifiers for pending jobs)

        asyncdb job active agent:instance5
        (lists the job identifiers of all jobs that any agent named
        instance5 is working on)

        asyncdb job pending agent:instance5
        (lists the job identifiers of all pending jobs that agents named
        "instance5" could perform)

        asyncdb job completed end:since1H callable:import_*
        (lists the job identifiers of completed jobs that ended within the
        last hour that called a function or method that began with the
        string "import_")

        asyncdb job pending count:3 display:repr
        (lists the job reprs for the three pending jobs next in line to be
        performed)

    Here are some examples of how the duration-based filters work.

    * If you used "start:since5s" then that could be read as "jobs that
      started five seconds ago or sooner."

    * "requested_start:before1M" could be read as "jobs that were supposed
      to begin one minute ago or longer".

    * "end:since1M,before30S" could be read as "jobs that ended their
      primary work (that is, not including callbacks) between thirty
      seconds and one minute ago."

    * "callbacks_completed:before30S,since1M" could be read as "jobs that
      completed the callbacks they had when first run between thirty
      seconds and one minute ago." (This also shows that the order of
      "before" and "since" do not matter.)
    """
    display = kwargs.pop('display', 'default').lower()
    count = int(kwargs.pop('count', 0))
    res = _jobs(context, states, **kwargs)
    if count:
        res = zc.async.utils.takecount(res, count)
    if display == 'default':
        return res
    elif display == 'repr':
        return (repr(j) for j in res)
    elif display == 'details':
        return (jobsummary(j) for j in res)
    else:
        raise ValueError('unknown value for "display": '
                         'must be one of "default," "repr," or "details."')
def count(context, *states, **kwargs):
    """Count jobs in one or more states.

    After the arguments list, this description concludes with usage
    examples.

    Arguments
    =========

    States
    ------

    You must provide at least one of the following states.

    - "pending": the job is in a queue, waiting to be started.

    - "assigned": a dispatcher has claimed the job and assigned it to one
      of its worker threads. Work has not yet begun. Jobs are in this
      state very briefly.

    - "active": A worker thread is performing this job.

    - "callbacks": the job's work is ended, and the thread is performing
      the callbacks, if any.

    - "completed": the job and its callbacks are completed. Completed jobs
      stay in the database for only a certain amount of time--between
      seven and eight days in the default agent implementation.

    - "succeeded": the job completed successfully (that is, without
      raising an unhandled exception, and without returning an explicit
      twisted.python.failure.Failure). This is a subset of "completed,"
      described above.

    - "failed": the job completed by raising an unhandled exception or by
      explicitly returning a twisted.python.failure.Failure. This is a
      subset of "completed," described above.

    You may use no more than one of the states "completed," "succeeded,"
    and "failed".

    Optional Arguments
    ------------------

    You can further filter your results with a number of optional
    arguments.

    "Shell-style glob wildcards," as referred to in this list, are "?",
    "*", "[SEQUENCE]", and "[!SEQUENCE]", as described in
    http://docs.python.org/lib/module-fnmatch.html .

    A "duration-based filter" described in this list accepts an argument
    that is of the form "sinceINTERVAL", "beforeINTERVAL", or
    "sinceINTERVAL,beforeINTERVAL" (no space!). The "INTERVAL"s are of the
    form ``[nD][nH][nM][nS]``, where "n" should be replaced with a
    positive integer, and "D," "H," "M," and "S" are literals standing for
    "days," "hours," "minutes," and "seconds." For instance, you might use
    ``5M`` for five minutes, ``20S`` for twenty seconds, or ``1H30M`` for
    an hour and a half. Thus "before30M" would mean "thirty minutes ago or
    older." "since45S" would mean "45 seconds ago or newer."
    "since1H,before30M" would mean "between thirty minutes and an hour
    ago." Note that reversing the two components in the last
    example--"before30M,since1H"--is equivalent.

    - "callable": filters by callable name. Supports shell-style glob
      wildcards. If you do not include a "." in the string, it matches
      only on the callable name. If you include a ".", it matches on the
      fully-qualified name (that is, including the module).

    - "queue": filters by queue name. Supports shell-style glob wildcards.

    - "agent": filters by agent name. Supports shell-style glob wildcards.

      When applied to jobs in the "pending" state, this restricts the jobs
      to the ones that the agent could perform, according to its filter.

    - "uuid": filters by UUID string, or the special marker "THIS",
      indicating the UUID of the current process' dispatcher. Supports
      shell-style glob wildcards.

      When applied to jobs in the "pending" state, this restricts the jobs
      to the ones that the agents for that dispatcher could perform,
      according to their filters.

    - "requested_start": a duration-based filter for when the job was
      requested to start.

      Note that, if a job is not given an explicit start time, the time
      when it was added to a queue is used. This is based on a job's
      ``begin_after`` attribute.

    - "start": a duration-based filter for when the job started work.

      Note that, if a job is restarted because of problems such as an
      interruption or a conflict error, this is the most recent time that
      the job started work. This is based on a job's ``active_start``
      attribute.

      To see the first time a job started work ever, the default retry
      policies store a 'first_active' value in their ``data`` attribute
      (``job.getRetryPolicy().data.get('first_active')``). Other
      information about retries can also be found in this data dictionary.

    - "end": a duration-based filter for when the job ended work (but not
      callbacks).

      This is based on a job's ``active_end`` attribute.

    - "callbacks_completed": a duration-based filter for when the job
      finished the callbacks it had just after it performed the job.

      If subsequent callbacks are added, they are performed immediately,
      and will not affect the value that this filter uses.

      This is based on a job's ``initial_callbacks_end`` attribute.

    Usage Examples
    ==============

    Here are some examples of the command.

        asyncdb count pending
        (counts pending jobs)

        asyncdb count active agent:instance5
        (counts the jobs that all agents named instance5 are working on)

        asyncdb count pending agent:instance5
        (counts the pending jobs that all agents named "instance5" could
        perform)

        asyncdb count completed end:since1H callable:import_*
        (counts the completed jobs that ended within the last hour that
        called a function or method that began with the string "import_")

    Here are some examples of how the duration-based filters work.

    * If you used "start:since5s" then that could be read as "jobs that
      started five seconds ago or sooner."

    * "requested_start:before1M" could be read as "jobs that were supposed
      to begin one minute ago or longer".

    * "end:since1M,before30S" could be read as "jobs that ended their
      primary work (that is, not including callbacks) between thirty
      seconds and one minute ago."

    * "callbacks_completed:before30S,since1M" could be read as "jobs that
      completed the callbacks they had when first run between thirty
      seconds and one minute ago." (This also shows that the order of
      "before" and "since" do not matter.)
    """
    res = 0
    for j in _jobs(context, states, **kwargs):
        res += 1
    return res
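# For example (a sketch, given an open ZODB connection ``conn``):
#
#   count(conn, 'failed', end='since1H')
#
# would count the jobs that failed within the last hour.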
_status_keys = {
    zc.async.interfaces.NEW: 'new',
    zc.async.interfaces.PENDING: 'pending',
    zc.async.interfaces.ASSIGNED: 'assigned',
    zc.async.interfaces.ACTIVE: 'active',
    zc.async.interfaces.CALLBACKS: 'callbacks',
    zc.async.interfaces.COMPLETED: 'completed'}


def jobstats(context, *states, **kwargs):
    """Return statistics about jobs in one or more states.

    The report shows the following statistics.

    - The number of jobs that match the search in each of these states:
      "pending," "assigned," "active," "callbacks," "succeeded," and
      "failed".

    - "longest wait" and "shortest wait" give the wait duration and
      identifier of the job with the longest and shortest wait interval,
      respectively.

    - "longest active" and "shortest active" give the active duration and
      identifier of the job with the longest and shortest active duration,
      respectively.

    By default, jobs are identified by integer OID and database name.
    These identifiers can be used with the "asyncdb job" command to get
    details about the jobs. The integer OIDs can be used in a database
    connection to get the job with
    ``connection.get(ZODB.utils.p64(INTEGER_OID))``. Alternatively, for
    other display options for jobs, see the "display" optional argument.

    After the arguments list, this description concludes with usage
    examples.

    Arguments
    =========

    States
    ------

    You must provide at least one of the following states.

    - "pending": the job is in a queue, waiting to be started.

    - "assigned": a dispatcher has claimed the job and assigned it to one
      of its worker threads. Work has not yet begun. Jobs are in this
      state very briefly.

    - "active": A worker thread is performing this job.

    - "callbacks": the job's work is ended, and the thread is performing
      the callbacks, if any.

    - "completed": the job and its callbacks are completed. Completed jobs
      stay in the database for only a certain amount of time--between
      seven and eight days in the default agent implementation.

    - "succeeded": the job completed successfully (that is, without
      raising an unhandled exception, and without returning an explicit
      twisted.python.failure.Failure). This is a subset of "completed,"
      described above.

    - "failed": the job completed by raising an unhandled exception or by
      explicitly returning a twisted.python.failure.Failure. This is a
      subset of "completed," described above.

    You may use no more than one of the states "completed," "succeeded,"
    and "failed".

    Optional Arguments
    ------------------

    You can further filter your results with a number of optional
    arguments.

    "Shell-style glob wildcards," as referred to in this list, are "?",
    "*", "[SEQUENCE]", and "[!SEQUENCE]", as described in
    http://docs.python.org/lib/module-fnmatch.html .

    A "duration-based filter" described in this list accepts an argument
    that is of the form "sinceINTERVAL", "beforeINTERVAL", or
    "sinceINTERVAL,beforeINTERVAL" (no space!). The "INTERVAL"s are of the
    form ``[nD][nH][nM][nS]``, where "n" should be replaced with a
    positive integer, and "D," "H," "M," and "S" are literals standing for
    "days," "hours," "minutes," and "seconds." For instance, you might use
    ``5M`` for five minutes, ``20S`` for twenty seconds, or ``1H30M`` for
    an hour and a half. Thus "before30M" would mean "thirty minutes ago or
    older." "since45S" would mean "45 seconds ago or newer."
    "since1H,before30M" would mean "between thirty minutes and an hour
    ago." Note that reversing the two components in the last
    example--"before30M,since1H"--is equivalent.

    - "callable": filters by callable name. Supports shell-style glob
      wildcards. If you do not include a "." in the string, it matches
      only on the callable name. If you include a ".", it matches on the
      fully-qualified name (that is, including the module).

    - "queue": filters by queue name. Supports shell-style glob wildcards.

    - "agent": filters by agent name. Supports shell-style glob wildcards.

      When applied to jobs in the "pending" state, this restricts the jobs
      to the ones that the agent could perform, according to its filter.

    - "uuid": filters by UUID string, or the special marker "THIS",
      indicating the UUID of the current process' dispatcher. Supports
      shell-style glob wildcards.

      When applied to jobs in the "pending" state, this restricts the jobs
      to the ones that the agents for that dispatcher could perform,
      according to their filters.

    - "requested_start": a duration-based filter for when the job was
      requested to start.

      Note that, if a job is not given an explicit start time, the time
      when it was added to a queue is used. This is based on a job's
      ``begin_after`` attribute.

    - "start": a duration-based filter for when the job started work.

      Note that, if a job is restarted because of problems such as an
      interruption or a conflict error, this is the most recent time that
      the job started work. This is based on a job's ``active_start``
      attribute.

      To see the first time a job started work ever, the default retry
      policies store a 'first_active' value in their ``data`` attribute
      (``job.getRetryPolicy().data.get('first_active')``). Other
      information about retries can also be found in this data dictionary.

    - "end": a duration-based filter for when the job ended work (but not
      callbacks).

      This is based on a job's ``active_end`` attribute.

    - "callbacks_completed": a duration-based filter for when the job
      finished the callbacks it had just after it performed the job.

      If subsequent callbacks are added, they are performed immediately,
      and will not affect the value that this filter uses.

      This is based on a job's ``initial_callbacks_end`` attribute.

    - "display": By default, or with a "default" value, jobs are
      identified with integer OID and database name. If the display value
      is "repr," reprs of the jobs are used instead. If the display value
      is "details," a dictionary of details is used for each job.

    Usage Examples
    ==============

    Here are some examples of the command.

        asyncdb jobstats pending
        (gives statistics about the pending jobs)

        asyncdb jobstats active agent:instance5
        (gives statistics about all jobs that any agent named instance5 is
        working on)

        asyncdb jobstats pending agent:instance5
        (gives statistics about all pending jobs that agents named
        "instance5" could perform)

        asyncdb jobstats completed end:since1H callable:import_*
        (gives statistics about completed jobs that ended within the last
        hour that called a function or method that began with the string
        "import_")

    Here are some examples of how the duration-based filters work.

    * If you used "start:since5s" then that could be read as "jobs that
      started five seconds ago or sooner."

    * "requested_start:before1M" could be read as "jobs that were supposed
      to begin one minute ago or longer".

    * "end:since1M,before30S" could be read as "jobs that ended their
      primary work (that is, not including callbacks) between thirty
      seconds and one minute ago."

    * "callbacks_completed:before30S,since1M" could be read as "jobs that
      completed the callbacks they had when first run between thirty
      seconds and one minute ago." (This also shows that the order of
      "before" and "since" do not matter.)
    """
    now = datetime.datetime.now(pytz.UTC)
    d = {'pending': 0, 'assigned': 0, 'active': 0, 'callbacks': 0,
         'succeeded': 0, 'failed': 0}
    longest_wait = longest_active = None
    shortest_wait = shortest_active = None
    display = kwargs.pop('display', 'default').lower()
    if display == 'default':
        job_display = lambda j: j
    elif display == 'repr':
        job_display = lambda j: j is not None and repr(j) or j
    elif display == 'details':
        job_display = lambda j: j is not None and jobsummary(j) or j
    else:
        raise ValueError('unknown value for "display": '
                         'must be one of "default," "repr," or "details."')
    for j in _jobs(context, states, **kwargs):
        status = j.status
        if status == zc.async.interfaces.COMPLETED:
            if isinstance(j.result, twisted.python.failure.Failure):
                d['failed'] += 1
            else:
                d['succeeded'] += 1
        else:
            d[_status_keys[status]] += 1
        wait = active = None
        if j.active_start:
            if j.active_end:
                active = j.active_end - j.active_start
            else:
                active = now - j.active_start
            if (longest_active is None or
                longest_active[0] < active):
                longest_active = active, j
            if (shortest_active is None or
                shortest_active[0] > active): # ``>`` so we keep the minimum
                shortest_active = active, j
            wait = j.active_start - j.begin_after
        else:
            wait = now - j.begin_after
        if (longest_wait is None or
            longest_wait[0] < wait):
            longest_wait = wait, j
        if (shortest_wait is None or
            shortest_wait[0] > wait): # ``>`` so we keep the minimum
            shortest_wait = wait, j
    d['longest wait'] = (
        longest_wait is not None and
        (longest_wait[0], job_display(longest_wait[1])) or
        longest_wait)
    d['longest active'] = (
        longest_active is not None and
        (longest_active[0], job_display(longest_active[1])) or
        longest_active)
    d['shortest wait'] = (
        shortest_wait is not None and
        (shortest_wait[0], job_display(shortest_wait[1])) or
        shortest_wait)
    d['shortest active'] = (
        shortest_active is not None and
        (shortest_active[0], job_display(shortest_active[1])) or
        shortest_active)
    return d
def jobsummary(job):
    now = datetime.datetime.now(pytz.UTC)
    wait = active = None
    if job.active_start:
        if job.active_end:
            active = job.active_end - job.active_start
        else:
            active = now - job.active_start
        wait = job.active_start - job.begin_after
    else:
        wait = now - job.begin_after
    if isinstance(job.result, twisted.python.failure.Failure):
        failed = True
        result = job.result.getBriefTraceback()
    else:
        failed = False
        result = zc.async.utils.custom_repr(job.result)
    a = job.agent
    if a:
        agent = job.agent.name
        dispatcher = a.parent.UUID
    else:
        agent = dispatcher = None
    q = job.queue
    if q:
        queue = q.name
    else:
        queue = None
    return {'repr': repr(job),
            'args': list(repr(a) for a in job.args),
            'kwargs': dict((k, repr(v)) for k, v in job.kwargs.items()),
            'begin after': job.begin_after,
            'active start': job.active_start,
            'active end': job.active_end,
            'initial callbacks end': job.initial_callbacks_end,
            'wait': wait,
            'active': active,
            'status': _status_keys[job.status],
            'failed': failed,
            'result': result,
            'quota names': job.quota_names,
            'agent': agent,
            'dispatcher': dispatcher,
            'queue': queue,
            'callbacks': list(job.callbacks)}


def _get_job(context, oid, database=None):
    conn = ZODB.interfaces.IConnection(context)
    if database is None:
        local_conn = conn
    else:
        local_conn = conn.get_connection(database)
    return local_conn.get(ZODB.utils.p64(int(oid)))
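# As the docstrings in this module note, a job can also be fetched by hand
# from an open ZODB connection (a sketch; 12345 stands in for an integer
# OID reported by the "asyncdb" commands):
#
#   import ZODB.utils
#   job = conn.get(ZODB.utils.p64(12345))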
def traceback(context, oid, database=None, detail='default'):
    """Return the traceback for the job identified by integer oid."""
    detail = detail.lower()
    if detail not in ('brief', 'default', 'verbose'):
        raise ValueError(
            'detail must be one of "brief," "default," "verbose"')
    job = _get_job(context, oid, database)
    if not isinstance(job.result, twisted.python.failure.Failure):
        return None
    return job.result.getTraceback(detail=detail)


def job(context, oid, database=None):
    """Return details of job identified by integer oid.

    The result includes the following information:

    - "active": how long the job was, or has been, active.

    - "active end": when the job ended its work (before callbacks).

    - "active start": when the job started its work.

    - "agent": the name of the agent that performed this job.

    - "args": the standard repr of each argument to this job.

    - "begin after": when the job was requested to begin.

    - "callbacks": identifiers of the callbacks to this job.

    - "dispatcher": the UUID of the dispatcher that performed this job.

    - "failed": whether the job failed (raised an unhandled exception).

    - "initial callbacks end": when the callbacks were first completed.

    - "kwargs": standard reprs of each keyword argument to this job.

    - "queue": the name of the queue that performed this job.

    - "quota names": the quota names of this job.

    - "repr": a repr of this job (includes its integer OID and database
      name).

    - "result": a repr of the result of the job; OR a brief traceback.

    - "status": the status of the job.

    - "wait": how long the job was, or has been, waiting.

    Times are in UTC.
    """
    return jobsummary(_get_job(context, oid, database))
def nextpending(context, **kwargs):
"""Return details of the next job in queue to be performed.
The result includes the following information:
- "active": how long the job was,or has been active.
- "active end": when the job ended its work (before callbacks).
- "active start": when the job started its work.
- "agent": the name of the agent that performed this job.
- "args": the standard repr of each argument to this job.
- "begin after": when the job was requested to begin.
- "callbacks": identifiers of the callbacks to this job.
- "dispatcher": the UUID of the dispatcher that performed this job.
- "failed": whether the job failed (raised an unhandled exception).
- "initial callbacks end": when the callbacks were first completed.
- "kwargs": standard reprs of each keyword argument to this job.
- "queue": the name of the queue that performed this job.
- "quota names": the quota names of this job.
- "repr": a repr of this job (includes its integer OID and database name).
- "result": a repr of the result of the job; OR a brief traceback.
- "status": the status of the job.
- "wait": how long the job was, or has been waiting.
Times are in UTC.
After the arguments list, this description concludes with usage examples.
Arguments
=========
Optional Arguments
------------------
You can filter your results with a number of optional arguments.
"Shell-style glob wildcards," as referred to in this list, are "?", "*",
"[SEQUENCE]", and "[!SEQUENCE]", as described in
http://docs.python.org/lib/module-fnmatch.html .
A "duration-based filter" described in this list accepts an argument that
is of the form "sinceINTERVAL", "beforeINTERVAL", or
"sinceINTERVAL,beforeINTERVAL" (no space!). The "INTERVAL"s are of the
form ``[nD][nH][nM][nS]``, where "n" should be replaced with a positive
integer, and "D," "H," "M," and "S" are literals standing for "days,"
"hours," "minutes," and "seconds." For instance, you might use ``5M`` for
five minutes, ``20S`` for twenty seconds, or ``1H30M`` for an hour and a
half. Thus "before30M" would mean "thirty minutes ago or older."
"since45S" would mean "45 seconds ago or newer." "since1H,before30M" would
mean "between thirty minutes and an hour ago." Note that reversing the two
components in the last example--"before30M,since1H"--is equivalent.
- "callable": filters by callable name. Supports shell-style glob
wildcards. If you do not include a "." in the string, it matches only on
the callable name. If you include a ".", it matches on the
fully-qualified name (that is, including the module).
- "queue": filters by queue name. Supports shell-style glob wildcards.
- "agent": filters by agent name. Supports shell-style glob wildcards.
This restricts the jobs to the ones that the agent could perform,
according to its filter.
- "uuid": filters by UUID string, or the special marker "THIS", indicating
the UUID of the current process' dispatcher. Supports shell-style glob
wildcards. This restricts the jobs to the ones that the agents for that
dispatcher could perform, according to their filters.
- "requested_start": a duration-based filter for when the job was requested
to start.
Note that, if a job is not given an explicit start time, the time when it
was added to a queue is used. This is based on a job's ``begin_after``
attribute.
Usage Examples
==============
Here are some examples of the command.
asyncdb nextpending
(gives details of next pending job)
asyncdb nextpending agent:instance5
(gives details of the next pending job that any of the "instance5"
agents could work on)
asyncdb nextpending callable:import_*
(gives details about the next pending job with a callable beginning
with the name "import_")
asyncdb nextpending requested_start:before1M
(gives details of the next pending job that was requested to begin
one minute ago or older)
"""
unsupported = set(['start', 'end', 'callbacks_completed']).intersection(
kwargs)
if unsupported:
raise ValueError('unsupported filters: %s' %
(', '.join(sorted(unsupported)),))
for j in _jobs(context, ('pending',), **kwargs):
return jobsummary(j)
return None
def lastcompleted(context, **kwargs):
"""Return details of the most recently completed job.
The result includes the following information:
- "active": how long the job was,or has been active.
- "active end": when the job ended its work (before callbacks).
- "active start": when the job started its work.
- "agent": the name of the agent that performed this job.
- "args": the standard repr of each argument to this job.
- "begin after": when the job was requested to begin.
- "callbacks": identifiers of the callbacks to this job.
- "dispatcher": the UUID of the dispatcher that performed this job.
- "failed": whether the job failed (raised an unhandled exception).
- "initial callbacks end": when the callbacks were first completed.
- "kwargs": standard reprs of each keyword argument to this job.
- "queue": the name of the queue that performed this job.
- "quota names": the quota names of this job.
- "repr": a repr of this job (includes its integer OID and database name).
- "result": a repr of the result of the job; OR a brief traceback.
- "status": the status of the job.
- "wait": how long the job was, or has been waiting.
Times are in UTC.
After the arguments list, this description concludes with usage examples.
Arguments
=========
Optional Arguments
------------------
You can filter your results with a number of optional arguments.
"Shell-style glob wildcards," as referred to in this list, are "?", "*",
"[SEQUENCE]", and "[!SEQUENCE]", as described in
http://docs.python.org/lib/module-fnmatch.html .
A "duration-based filter" described in this list accepts an argument that
is of the form "sinceINTERVAL", "beforeINTERVAL", or
"sinceINTERVAL,beforeINTERVAL" (no space!). The "INTERVAL"s are of the
form ``[nD][nH][nM][nS]``, where "n" should be replaced with a positive
integer, and "D," "H," "M," and "S" are literals standing for "days,"
"hours," "minutes," and "seconds." For instance, you might use ``5M`` for
five minutes, ``20S`` for twenty seconds, or ``1H30M`` for an hour and a
half. Thus "before30M" would mean "thirty minutes ago or older."
"since45S" would mean "45 seconds ago or newer." "since1H,before30M" would
mean "between thirty minutes and an hour ago." Note that reversing the two
components in the last example--"before30M,since1H"--is equivalent.
- "callable": filters by callable name. Supports shell-style glob
wildcards. If you do not include a "." in the string, it matches only on
the callable name. If you include a ".", it matches on the
fully-qualified name (that is, including the module).
- "queue": filters by queue name. Supports shell-style glob wildcards.
- "agent": filters by agent name. Supports shell-style glob wildcards.
When applied to jobs in the "pending" state, this restricts the jobs to
the ones that the agent could perform, according to its filter.
- "uuid": filters by UUID string, or the special marker "THIS", indicating
the UUID of the current process' dispatcher. Supports shell-style glob
wildcards.
When applied to jobs in the "pending" state, this restricts the jobs to
the ones that the agents for that dispatcher could perform, according to
their filters.
- "requested_start": a duration-based filter for when the job was requested
to start.
Note that, if a job is not given an explicit start time, the time when it
was added to a queue is used. This is based on a job's ``begin_after``
attribute.
- "start": a duration-based filter for when the job started work.
Note that, if a job is restarted because of problems such as an
interruption or a conflict error, this is the most recent time that the
job started work. This is based on a job's ``active_start`` attribute.
To see the first time a job started work ever, the default retry policies
store a 'first_active' value in their ``data`` attribute
(``job.getRetryPolicy().data.get('first_active')``). Other information
about retries can also be found in this data dictionary.
- "end": a duration-based filter for when the job ended work (but not
callbacks).
This is based on a job's ``active_end`` attribute.
- "callbacks_completed": a duration-based filter for when the job
finished the callbacks it had just after it performed the job.
If subsequent callbacks are added, they are performed immediately, and
will not affect the value that this filter uses.
This is based on a job's ``initial_callbacks_end`` attribute.
Usage Examples
==============
Here are some examples of the command.
asyncdb lastcompleted
(gives details about the most recently completed job)
asyncdb lastcompleted agent:instance5
(gives details about the most recently completed job that any agent
named "instance5" has worked on)
asyncdb lastcompleted end:since1H callable:import_*
(gives details about the most recently completed job that ended within
the last hour that called a function or method that began with the
string "import_")
Here are some examples of how the duration-based filters work.
* If you used "start:since5s" then that could be read as "jobs that
started five seconds ago or sooner."
* "requested_start:before1M" could be read as "jobs that were supposed to
begin one minute ago or longer".
* "end:since1M,before30S" could be read as "jobs that ended their
primary work (that is, not including callbacks) between thirty seconds
and one minute ago."
* "callbacks_completed:before30S,since1M" could be read as "jobs that
completed the callbacks they had when first run between thirty seconds
and one minute ago." (This also shows that the order of "before" and
"since" do not matter.)
"""
for j in _jobs(context, ('completed',), **kwargs):
return jobsummary(j)
return None
def UUIDs(context):
"""Return all active UUIDs."""
conn = ZODB.interfaces.IConnection(context)
queues = conn.root()[zc.async.interfaces.KEY]
if not len(queues):
return []
queue = iter(queues.values()).next()
return [str(UUID) for UUID, da in queue.dispatchers.items()
if da.activated]
def status(context, queue=None, agent=None, uuid=None):
"""Return status of the agents of all queues and all active UUIDs.
Times are in UTC.
"""
conn = ZODB.interfaces.IConnection(context)
if uuid is not None:
if uuid.upper() == 'THIS':
uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
else:
uuid = UUID(uuid)
if queue is not None:
queue = re.compile(fnmatch.translate(queue)).match
if agent is not None:
agent = re.compile(fnmatch.translate(agent)).match
queues = conn.root()[zc.async.interfaces.KEY]
res = {}
if not len(queues):
return res
for q_name, q in queues.items():
if queue is None or queue(q_name):
das = {}
res[q_name] = {'len': len(q), 'dispatchers': das}
for da_uuid, da in q.dispatchers.items():
if da.activated and (uuid is None or da_uuid == uuid):
agents = {}
das[str(da_uuid)] = da_data = {
'last ping': da.last_ping.value,
'since ping': (datetime.datetime.now(pytz.UTC) -
da.last_ping.value),
'dead': da.dead,
'ping interval': da.ping_interval,
'ping death interval': da.ping_death_interval,
'agents': agents
}
for a_name, a in da.items():
if agent is None or agent(a_name):
agents[a_name] = d = {
'size': a.size,
'len': len(a)
}
if zc.async.interfaces.IFilterAgent.providedBy(a):
d['filter'] = a.filter
else:
d['chooser'] = a.chooser
return res
funcs = {}
def help(context, cmd=None):
"""Get help on an asyncdb monitor tool.
Usage is 'asyncdb help <tool name>' or 'asyncdb help'."""
if cmd is None:
res = [
"These are the tools available. Usage for each tool is \n"
"'asyncdb <tool name> [modifiers...]'. Learn more about each \n"
"tool using 'asyncdb help <tool name>'.\n"]
for nm, func in sorted(funcs.items()):
res.append('%s: %s' % (
nm, func.__doc__.split('\n', 1)[0]))
return '\n'.join(res)
f = funcs.get(cmd)
if f is None:
return 'Unknown asyncdb tool'
return f.__doc__
for f in (count, jobs, job, nextpending, lastcompleted, traceback, jobstats,
UUIDs, status, help):
name = f.__name__
funcs[name] = f
def asyncdb(connection, cmd=None, *raw):
"""Monitor and introspect zc.async activity in the database.
To see a list of asyncdb tools, use 'asyncdb help'.
To learn more about an asyncdb tool, use 'asyncdb help <tool name>'.
``asyncdb`` tools differ from ``async`` tools in that ``asyncdb`` tools
access the database, and ``async`` tools do not."""
zc.async.monitor.monitor(
funcs, asyncdb.__doc__, connection, cmd, raw, needs_db_connection=True) | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/monitordb.py | monitordb.py |
============
Introduction
============
Goals
=====
The zc.async package provides a way to schedule jobs to be performed
out-of-band from your current thread. The job might be done in another thread
or another process, possibly on another machine. Here are some example core
use cases.
- You want to let users do something that requires a lot of system
resources from your application, such as creating a large PDF. Naively
done, six or seven simultaneous PDF requests will consume your
application thread pool and could make your application unresponsive to
any other users.
- You want to let users spider a web site; communicate with a credit card
company; query a large, slow LDAP database on another machine; or do
some other action that generates network requests from the server.
System resources might not be a problem, but, again, if something goes
wrong, several requests could make your application unresponsive.
- Perhaps because of resource contention, you want to serialize work
that can be done asynchronously, such as updating a single data structure
like a catalog index.
- You want to decompose and parallelize a single job across many machines so
it can be finished faster.
- You have an application job that you discover is taking longer than users can
handle, even after you optimize it. You want a quick fix to move the work
out-of-band.
Many of these core use cases involve end-users being able to start potentially
expensive processes, on demand. Basic scheduled tasks are also provided by this
package, though recurrence must be something you arrange.
History
=======
This is a second-generation design. The first generation was `zasync`,
a mission-critical and successful Zope 2 product in use for a number of
high-volume Zope 2 installations. [#async_history]_ It's worthwhile noting
that zc.async has absolutely no backwards compatibility with zasync and
zc.async does not require Zope (although it can be used in conjunction with
it).
Design Overview
===============
---------------
Overview: Usage
---------------
Looking at the design from the perspective of regular usage, your code obtains
a ``queue``, which is a place to register jobs to be performed asynchronously.
Your application calls ``put`` on the queue to register a job. The job must be
a pickleable, callable object. A global function, a callable persistent
object, a method of a persistent object, or a special zc.async.job.Job object
(discussed later) are all examples of suitable objects. The job by default is
registered to be performed as soon as possible, but can be registered to be
called at a certain time.
The ``put`` call will return a zc.async.job.Job object. This object represents
both the callable and its deferred result. It has information about the job
requested, the current state of the job, and the result of performing the job.
An example spelling for registering a job might be ``self.pending_result =
queue.put(self.performSpider)``. The returned object can be stored and polled
to see when the job is complete; or the job can be configured to do additional
work when it completes (such as storing the result in a data structure).
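For example, a registration and callback hookup might look like this (a
schematic sketch, not a complete session: ``a_persistent_object``,
``send_message``, and ``store_result`` stand in for objects you supply, and
the adaptation to ``IQueue`` assumes the standard zc.async component
registrations shown later in this documentation)::

    >>> import zc.async.interfaces
    >>> queue = zc.async.interfaces.IQueue(a_persistent_object)
    >>> job = queue.put(send_message)
    >>> job.addCallback(store_result)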
-------------------
Overview: Mechanism
-------------------
Multiple processes, typically spread across multiple machines, can
connect to the queue and claim and perform work. As with other
collections of processes that share pickled objects, these processes
generally should share the same software (though some variations on this
constraint should be possible).
A process that should claim and perform work, in addition to a database
connection and the necessary software, needs a ``dispatcher`` with a
``reactor`` to provide a heartbeat. The dispatcher will rely on one or more
persistent ``agents`` in the queue (in the database) to determine which jobs
it should perform.
A ``dispatcher`` is in charge of dispatching queued work for a given
process to worker threads. It works with one or more queues and a
single reactor. It has a universally unique identifier (UUID), which is
usually an identifier of the application instance in which it is
running. The dispatcher starts jobs in dedicated threads.
A ``reactor`` is something that can provide an eternal loop, or heartbeat,
to power the dispatcher. It can be the main twisted reactor (in the
main thread); another instance of a twisted reactor (in a child thread);
or any object that implements a small subset of the twisted reactor
interface (see discussion in dispatcher.txt, and example testing reactor in
testing.py, used below).
An ``agent`` is a persistent object in a queue that is associated with a
dispatcher and is responsible for picking jobs and keeping track of
them. Zero or more agents within a queue can be associated with a
dispatcher. Each agent for a given dispatcher in a given queue is
identified uniquely with a name [#identifying_agent]_.
Generally, these work together as follows. The reactor calls the
dispatcher. The dispatcher tries to find the mapping of queues in the
database root under a key of ``zc.async`` (see constant
zc.async.interfaces.KEY). If it finds the mapping, it iterates
over the queues (the mapping's values) and asks each queue for the
agents associated with the dispatcher's UUID. The dispatcher then is
responsible for seeing what jobs its agents want to do from the queue,
and providing threads and connections for the work to be done. The
dispatcher then asks the reactor to call itself again in a few seconds.
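In rough pseudocode, one cycle of this cooperation looks like the following
(a simplified sketch, not the actual dispatcher implementation; ``perform``
stands in for the hand-off to a worker thread)::

    def poll(dispatcher, conn):
        queues = conn.root().get(zc.async.interfaces.KEY)
        if queues is not None:
            for queue in queues.values():
                for agent in queue.dispatchers[dispatcher.UUID].values():
                    job = agent.claimJob()
                    while job is not None:
                        dispatcher.perform(job)  # run in a worker thread
                        job = agent.claimJob()
        dispatcher.reactor.callLater(
            dispatcher.poll_interval, poll, dispatcher, conn)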
.. rubric:: Footnotes
.. [#async_history] The first generation, ``zasync``, had the following goals:
- be scalable, so that another process or machine could do the asynchronous
work;
- support lengthy jobs outside of the ZODB;
- support lengthy jobs inside the ZODB;
- be recoverable, so that crashes would not lose work;
- be discoverable, so that logs and web interfaces give a view into the
work being done asynchronously;
- be easily extendible, to do new jobs; and
- support graceful job expiration and cancellation.
It met its goals well in some areas and adequately in others.
Based on experience with the first generation, this second generation
identifies several areas of improvement from the first design, and adds
several goals.
- Improvements
* More carefully delineate the roles of the comprising components.
The zc.async design has three main components, as divided by their
roles: persistent deferreds, now called jobs; job queues (the original
zasync's "asynchronous call manager"); and dispatchers (the original
zasync ZEO client). The zasync 1.x design blurred the lines between the
three components such that the component parts could only be replaced
with difficulty, if at all. A goal for the 2.x design is to clearly
define the role for each of three components such that, for instance, a
user of a queue does not need to know about the dispatcher or the
agents.
* Improve scalability of asynchronous workers.
The 1.x line was initially designed for a single asynchronous worker,
which could be put on another machine thanks to ZEO. Tarek Ziade of
Nuxeo wrote zasyncdispatcher, which allowed multiple asynchronous
workers to accept work, allowing multiple processes and multiple
machines to divide and conquer. It worked around the limitations of the
original zasync design to provide even more scalability. However, it
was forced to divide up work well before a given worker looks at the
queue.
While dividing work earlier allows guesses and heuristics a chance to
predict what worker might be more free in the future, a more reliable
approach is to let the worker gauge whether it should take a job at the
time the job is taken. Perhaps the worker will choose based on the
worker's load, or other concurrent jobs in the process, or other
details. A goal for the 2.x line is to more directly support this type
of scalability.
* Improve scalability of registering jobs.
The 1.x line initially wasn't concerned about very many concurrent
asynchronous requests. When this situation was encountered, it caused
ConflictErrors between the worker process reading the deferred queue
and the code that was adding the deferreds. Thanks to Nuxeo, this
problem was addressed in the 1.x line. A goal for the new version is to
include and improve upon the 1.x solution.
* Make it even simpler to provide new jobs.
In the first version, `plugins` performed jobs. They had a specific API
and they had to be configured. A goal for the new version is to require
no specific API for jobs, and to not require any configuration.
* Improve report information, especially through the web.
The component that the first version of zasync provided to do the
asynchronous work, the zasync client, provided very verbose logs of the
jobs done, but they were hard to read and also did not have a through-
the-web parallel. Two goals for the new version are to improve the
usefulness of the filesystem logs and to include more complete
visibility of the status of the provided asynchronous clients.
* Make it easier to configure and start, especially for small
deployments.
A significant barrier to experimentation and deployment of the 1.x line
was the difficulty in configuration. The 1.x line relied on ZConfig for
zasync client configuration, demanding non-extensible
similar-yet-subtly-different .conf files like the Zope conf files. The
2.x line provides code that Zope 3 can configure to run in the same
process as a standard Zope 3 application. This means that development
instances can start a zasync quickly and easily. It also means that
processes can be reallocated on the fly during production use, so that
a machine being used as a zasync process can quickly be converted to a
web server, if needed, and vice versa.
- New goals
* Support intermediate return calls so that jobs can report back how they
are doing.
A frequent request from users of zasync 1.x was the ability for a long-
running asynchronous process to report back progress to the original
requester. The 2.x line addresses this with three changes:
+ jobs are annotatable;
+ jobs should not be modified in an asynchronous worker that does work
(though they may be read);
+ jobs can request another job in a synchronous process that annotates
the job with progress status or other information.
Because of relatively recent changes in ZODB--multi version concurrency
control--this simple pattern should not generate conflict errors.
* Support time-delayed calls.
Retries and other use cases make time-delayed deferred calls desirable.
The new design supports these sort of calls.
.. [#identifying_agent] The combination of a queue name plus a
dispatcher UUID plus an agent name uniquely identifies an agent.
| zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/README.txt | README.txt |
-----------------------------
Shared Single Database Set Up
-----------------------------
As described above, using a shared single database will probably be the
quickest way to get started. Large-scale production usage will probably prefer
to use the :ref:`two-database-set-up` described later.
So, without further ado, here is the text of our zope.conf-alike, and of our
site.zcml-alike [#get_vals]_.
>>> zope_conf = """
... site-definition %(site_zcml_file)s
...
... <zodb main>
... <filestorage>
... create true
... path %(main_storage_path)s
... </filestorage>
... </zodb>
...
... <product-config zc.z3monitor>
... port %(monitor_port)s
... </product-config>
...
... <logger>
... level debug
... name zc.async
... propagate no
...
... <logfile>
... path %(async_event_log)s
... </logfile>
... </logger>
...
... <logger>
... level debug
... name zc.async.trace
... propagate no
...
... <logfile>
... path %(async_trace_log)s
... </logfile>
... </logger>
...
... <eventlog>
... <logfile>
... formatter zope.exceptions.log.Formatter
... path STDOUT
... </logfile>
... <logfile>
... formatter zope.exceptions.log.Formatter
... path %(event_log)s
... </logfile>
... </eventlog>
... """ % {'site_zcml_file': site_zcml_file,
... 'main_storage_path': os.path.join(dir, 'main.fs'),
... 'async_storage_path': os.path.join(dir, 'async.fs'),
... 'monitor_port': monitor_port,
... 'event_log': os.path.join(dir, 'z3.log'),
... 'async_event_log': os.path.join(dir, 'async.log'),
... 'async_trace_log': os.path.join(dir, 'async_trace.log'),}
...
In a non-trivial production system, you will also probably want to replace
the file storage with a <zeoclient> stanza.
Also note that an open monitor port should be behind a firewall, of course.
We'll assume that zdaemon.conf has been set up to put ZC_ASYNC_UUID in the
proper place too. It would have looked something like this in the
zdaemon.conf::
<environment>
ZC_ASYNC_UUID /path/to/uuid.txt
</environment>
(Other tools, such as supervisor, also can work, of course; their spellings are
different and are "left as an exercise to the reader" at the moment.)
We'll do that by hand:
>>> os.environ['ZC_ASYNC_UUID'] = os.path.join(dir, 'uuid.txt')
Now let's define our site-zcml-alike.
>>> site_zcml = """
... <configure xmlns='http://namespaces.zope.org/zope'
... xmlns:meta="http://namespaces.zope.org/meta"
... >
... <include package="zope.component" file="meta.zcml" />
... <include package="zope.component" />
... <include package="zc.z3monitor" />
... <include package="zc.async" file="basic_dispatcher_policy.zcml" />
...
... <!-- this is usually handled in Zope applications by the
... zope.app.keyreference.persistent.connectionOfPersistent adapter -->
... <adapter factory="zc.twist.connection" />
... </configure>
... """
Now we're done.
If we process these files, and wait for a poll, we've got a working
set up [#process]_.
>>> import zc.async.dispatcher
>>> dispatcher = zc.async.dispatcher.get()
>>> import pprint
>>> pprint.pprint(get_poll(dispatcher, 0))
{'': {'main': {'active jobs': [],
'error': None,
'len': 0,
'new jobs': [],
'size': 3}}}
>>> bool(dispatcher.activated)
True
We can ask for a job to be performed, and get the result.
>>> conn = db.open()
>>> root = conn.root()
>>> import zc.async.interfaces
>>> queue = zc.async.interfaces.IQueue(root)
>>> import operator
>>> import zc.async.job
>>> job = queue.put(zc.async.job.Job(operator.mul, 21, 2))
>>> import transaction
>>> transaction.commit()
>>> wait_for_result(job)
42
We can connect to the monitor server with telnet.
>>> import telnetlib
>>> tn = telnetlib.Telnet('127.0.0.1', monitor_port)
>>> tn.write('async status\n') # immediately disconnects
>>> print tn.read_all() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
{
"poll interval": {
"seconds": ...
},
"status": "RUNNING",
"time since last poll": {
"seconds": ...
},
"uptime": {
"seconds": ...
},
"uuid": "..."
}
<BLANKLINE>
Now we'll "shut down" with a CTRL-C, or SIGINT, and clean up.
>>> import signal
>>> if getattr(os, 'getpid', None) is not None: # UNIXEN, not Windows
... pid = os.getpid()
... try:
... os.kill(pid, signal.SIGINT)
... except KeyboardInterrupt:
... if dispatcher.activated:
... assert False, 'dispatcher did not deactivate'
... else:
... print "failed to send SIGINT, or something"
... else:
... dispatcher.reactor.callFromThread(dispatcher.reactor.stop)
... for i in range(30):
... if not dispatcher.activated:
... break
... time.sleep(0.1)
... else:
... assert False, 'dispatcher did not deactivate'
...
>>> import transaction
>>> t = transaction.begin() # sync
>>> import zope.component
>>> import zc.async.interfaces
>>> uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
>>> da = queue.dispatchers[uuid]
>>> bool(da.activated)
False
>>> db.close()
>>> import shutil
>>> shutil.rmtree(dir)
These instructions are very similar to the :ref:`two-database-set-up`.
.. rubric:: Footnotes
.. [#get_vals]
>>> import errno, os, random, socket, tempfile
>>> dir = tempfile.mkdtemp()
>>> site_zcml_file = os.path.join(dir, 'site.zcml')
>>> s = socket.socket()
>>> for i in range(20):
... monitor_port = random.randint(20000, 49151)
... try:
... s.bind(('127.0.0.1', monitor_port))
... except socket.error, e:
... if e.args[0] == errno.EADDRINUSE:
... pass
... else:
... raise
... else:
... s.close()
... break
... else:
... assert False, 'could not find available port'
... monitor_port = None
...
.. [#process]
>>> zope_conf_file = os.path.join(dir, 'zope.conf')
>>> f = open(zope_conf_file, 'w')
>>> f.write(zope_conf)
>>> f.close()
>>> f = open(site_zcml_file, 'w')
>>> f.write(site_zcml)
>>> f.close()
>>> import zdaemon.zdoptions
>>> import zope.app.appsetup
>>> options = zdaemon.zdoptions.ZDOptions()
>>> options.schemadir = os.path.join(
... os.path.dirname(os.path.abspath(zope.app.appsetup.__file__)),
... 'schema')
>>> options.realize(['-C', zope_conf_file])
>>> config = options.configroot
>>> import zope.app.appsetup.product
>>> zope.app.appsetup.product.setProductConfigurations(
... config.product_config)
>>> ignore = zope.app.appsetup.config(config.site_definition)
>>> import zope.app.appsetup.appsetup
>>> db = zope.app.appsetup.appsetup.multi_database(config.databases)[0][0]
>>> import zope.event
>>> import zc.async.interfaces
>>> zope.event.notify(zc.async.interfaces.DatabaseOpened(db))
>>> from zc.async.testing import get_poll, wait_for_result
| zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/README_3a.txt | README_3a.txt |
import datetime
import re
import types
import pytz
from uuid import UUID as uuid_UUID # we use this non-standard import spelling
# because ``uuid`` is frequently an argument and UUID is a function defined
# locally.
import simplejson
import zope.component
import persistent.interfaces
import zc.async.dispatcher
import zc.async.interfaces
import zc.async.utils
_marker = object()
class Encoder(simplejson.JSONEncoder):
def default(self, obj):
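        # Illustrative encodings (added comment): a timedelta of 90 seconds
        # becomes {"minutes": 1, "seconds": 30.0}; a timezone-aware datetime
        # becomes an ISO 8601 UTC string ending in "Z"; persistent objects
        # and functions become their custom reprs.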
if isinstance(obj, datetime.timedelta):
tmp = {'days': obj.days,
'hours': obj.seconds // (60*60),
'minutes': (obj.seconds % (60*60)) // 60,
'seconds': float(
obj.seconds % 60) + obj.microseconds/1000000
}
res = dict((k, v) for k, v in tmp.items() if v)
if not res:
res['seconds'] = 0.0
return res
# TODO the spelling of this conditional is to support our test setup
# shenanigans. originally was ``isinstance(obj, datetime.datetime)``.
# Would be nice to fix, though the duck typing is Pythonic at least.
elif (getattr(obj, 'tzinfo', _marker) is not _marker and
getattr(obj, 'astimezone', _marker) is not _marker):
if obj.tzinfo is not None:
obj = obj.astimezone(pytz.UTC).replace(tzinfo=None)
return obj.isoformat() + "Z"
elif isinstance(obj, uuid_UUID):
return str(obj)
elif zc.async.interfaces.IJob.providedBy(obj):
return zc.async.dispatcher.getId(obj)
elif getattr(obj, 'next', _marker) is not _marker:
# iterator. Duck typing too fuzzy, practically?
return tuple(obj)
# isinstance and providedBy are *not* redundant
# it's a performance optimization
elif (isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,
persistent.Persistent)) or
persistent.interfaces.IPersistent.providedBy(obj)):
return zc.async.utils.custom_repr(obj)
try:
return simplejson.JSONEncoder.default(self, obj)
except TypeError:
return zc.async.utils.custom_repr(obj)
encoder = Encoder(sort_keys=True, indent=4)
def monitor(funcs, help_text, connection, cmd, raw, needs_db_connection=False):
if cmd is None:
res = help_text
else:
f = funcs.get(cmd)
if f is None:
res = '[Unknown tool name for this command: %s]' % (cmd,)
else:
args = []
kwargs = {}
for val in raw:
if ':' in val:
key, val = val.split(':', 1)
kwargs[key] = val
else:
if kwargs:
raise ValueError(
'placeful modifiers must come before named '
'modifiers')
args.append(val)
if needs_db_connection:
dispatcher = zc.async.dispatcher.get()
conn = dispatcher.db.open()
try:
res = f(conn, *args, **kwargs)
if not isinstance(res, str):
res = encoder.encode(res)
finally:
conn.close()
else:
res = f(*args, **kwargs)
if not isinstance(res, str):
res = encoder.encode(res)
connection.write(res)
connection.write('\n')
def status(uuid=None):
"""Get a mapping of general zc.async dispatcher information.
'status' is one of 'STUCK', 'STARTING', 'RUNNING', or 'STOPPED', where
'STUCK' means the poll is past due."""
if uuid is not None:
uuid = uuid_UUID(uuid)
return zc.async.dispatcher.get(uuid).getStatusInfo()
def jobs(queue=None, agent=None, uuid=None):
"""Show active jobs in worker threads as of the instant.
Usage:
jobs
(returns active jobs as of last poll, newest to oldest)
jobs queue:<queue name>
(jobs are filtered to those coming from the named queue)
jobs agent:<agent name>
(jobs are filtered to those coming from agents with given name)
"queue:" and "agent:" modifiers may be combined.
Example:
async jobs queue: agent:main
(results filtered to queue named '' and agent named 'main')"""
if uuid is not None:
uuid = uuid_UUID(uuid)
return zc.async.dispatcher.get(uuid).getActiveJobIds(queue, agent)
def job(OID, database=None, uuid=None):
"""Local information about a job as of last poll, if known.
Does not consult ZODB, but in-memory information.
Usage:
job <job id>
(returns information about the job)
job <job id> database:<database name>
(returns job information, with job id disambiguated by database name)
The job id in this case is an integer such as those returned by the
``async jobs`` command or in the ``longest ...`` and ``shortest ...``
values of the ``async jobstats`` command. It is the integer version of the
oid of the job, and can be converted to an oid with ``ZODB.utils.p64``, and
converted back to an integer with ``ZODB.utils.u64``.
"""
if uuid is not None:
uuid = uuid_UUID(uuid)
return zc.async.dispatcher.get(uuid).getJobInfo(long(OID), database)
_find = re.compile(r'\d+[DHMS]').findall
def _dt(s):
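    # Illustrative behavior (added comment): _dt('300') -> the integer poll
    # key 300; _dt('1H30M') -> utcnow() minus 90 minutes; _dt(None) -> None.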
if s is None:
res = s
else:
try:
res = int(s)
except ValueError:
vals = {}
for val in _find(s.upper()):
vals[val[-1]] = int(val[:-1])
res = datetime.datetime.utcnow() - datetime.timedelta(
days=vals.get('D', 0),
hours=vals.get('H', 0),
minutes=vals.get('M', 0),
seconds=vals.get('S', 0))
return res
def jobstats(at=None, before=None, since=None, queue=None, agent=None,
uuid=None):
"""Statistics on historical jobs as of last poll.
Usage:
jobstats
(returns statistics on historical jobs as of last poll)
jobstats queue:<queue name>
(statistics are filtered to those coming from the named queue)
jobstats agent:<agent name>
(statistics are filtered to those coming from agents with given name)
jobstats at:<poll key or interval>
(statistics are collected at or before the poll key or interval)
jobstats before:<pollkey or interval>
(statistics are collected before the poll key or interval)
jobstats since:<pollkey or interval>
(statistics are collected since poll key or interval, inclusive)
The modifiers "queue:", "agent:", "since:", and one of "at:" or "before:"
may be combined.
Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
be replaced with a positive integer, and "D," "H," "M," and "S" are
literals standing for "days," "hours," "minutes," and "seconds."
For instance, you might use ``5M`` for five minutes, ``20S`` for
twenty seconds, or ``1H30M`` for an hour and a half.
Poll keys are the values shown as "key" from the ``poll`` or ``polls``
command.
Example:
async jobstats queue: agent:main since:1H
(results filtered to queue named '' and agent named 'main' from
one hour ago till now)"""
if uuid is not None:
uuid = uuid_UUID(uuid)
return zc.async.dispatcher.get(uuid).getStatistics(
_dt(at), _dt(before), _dt(since), queue, agent)
def poll(at=None, before=None, uuid=None):
"""Get information about a single poll, defaulting to most recent.
Usage:
poll
(returns most recent poll)
poll at:<poll key or interval>
(returns poll at or before the poll key or interval)
poll before:<poll key or interval>
(returns poll before the poll key or interval)
Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
be replaced with a positive integer, and "D," "H," "M," and "S" are
literals standing for "days," "hours," "minutes," and "seconds."
For instance, you might use ``5M`` for five minutes, ``20S`` for
twenty seconds, or ``1H30M`` for an hour and a half.
Example:
async poll at:5M
(get the poll information at five minutes ago or before)"""
# TODO: parse at and before to datetimes
if uuid is not None:
uuid = uuid_UUID(uuid)
info = zc.async.dispatcher.get(uuid).getPollInfo(_dt(at), _dt(before))
return {'key': info.key, 'time': info.utc_timestamp.isoformat() + "Z",
'results': info}
def polls(at=None, before=None, since=None, count=None, uuid=None):
"""Get information about recent polls, defaulting to most recent.
Usage:
polls
(returns the most recent 3 polls)
polls at:<poll key or interval>
(returns up to 3 polls at or before the poll key or interval)
polls before:<poll key or interval>
(returns up to 3 polls before the poll key or interval)
polls since:<poll key or interval>
(returns polls since the poll key or interval, inclusive)
polls count:<positive integer>
(returns the given number of the most recent polls)
The modifiers "since:", "count:", and one of "at:" or "before:" may
be combined.
Intervals are of the format ``[nD][nH][nM][nS]``, where "n" should
be replaced with a positive integer, and "D," "H," "M," and "S" are
literals standing for "days," "hours," "minutes," and "seconds."
For instance, you might use ``5M`` for five minutes, ``20S`` for
twenty seconds, or ``1H30M`` for an hour and a half.
Example:
async polls since:10M before:5M
(get the poll information from 10 to 5 minutes ago)"""
if uuid is not None:
uuid = uuid_UUID(uuid)
if count is None:
if since is None:
count = 3
else:
count = int(count)
return [{'key': p.key, 'time': p.utc_timestamp.isoformat() + "Z",
'results': p}
for p in zc.async.dispatcher.get(uuid).iterPolls(
_dt(at), _dt(before), _dt(since), count)]
# provide in async and separately:
def utcnow():
"""Return the current time in UTC, in ISO 8601 format."""
return datetime.datetime.utcnow().isoformat() + "Z"
def UUID():
"""Get instance UUID in hex."""
return str(zope.component.getUtility(zc.async.interfaces.IUUID))
funcs = {}
def help(cmd=None):
"""Get help on an async monitor tool.
Usage is 'async help <tool name>' or 'async help'."""
if cmd is None:
res = [
"These are the tools available. Usage for each tool is \n"
"'async <tool name> [modifiers...]'. Learn more about each \n"
"tool using 'async help <tool name>'.\n"]
for nm, func in sorted(funcs.items()):
res.append('%s: %s' % (
nm, func.__doc__.split('\n', 1)[0]))
return '\n'.join(res)
f = funcs.get(cmd)
if f is None:
return 'Unknown async tool'
return f.__doc__
for f in status, jobs, job, jobstats, poll, polls, utcnow, UUID, help:
funcs[f.__name__] = f
def async(connection, cmd=None, *raw):
"""Monitor zc.async activity in this process.
To see a list of async tools, use 'async help'.
To learn more about an async monitor tool, use 'async help <tool name>'."""
monitor(funcs, async.__doc__, connection, cmd, raw) | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/monitor.py | monitor.py |
import persistent
import datetime
import rwproperty
import zope.interface
import zope.component
import zc.async.interfaces
import zc.async.utils
import zc.queue
from zc.async.legacy import chooseFirst
class Agent(zc.async.utils.Base):
zope.interface.implements(zc.async.interfaces.IAgent)
_chooser = _filter = None
@property
def filter(self):
return self._filter
@rwproperty.setproperty
def filter(self, value):
if value is not None and self.chooser is not None:
raise ValueError('cannot set both chooser and filter to non-None')
self._filter = value
@property
def chooser(self):
res = self._chooser
if res is None: # legacy support
res = self.__dict__.get('chooser')
return res
@rwproperty.setproperty
def chooser(self, value):
if value is not None and self.filter is not None:
raise ValueError('cannot set both chooser and filter to non-None')
self._chooser = value
if 'chooser' in self.__dict__:
del self.__dict__['chooser']
if value is None:
zope.interface.alsoProvides(self, zc.async.interfaces.IFilterAgent)
else:
zope.interface.directlyProvides(self,
zope.interface.directlyProvidedBy(self) -
zc.async.interfaces.IFilterAgent)
def __init__(self, chooser=None, filter=None, size=3):
self.chooser = chooser
self.filter = filter
self.size = size
self._data = zc.queue.PersistentQueue()
self._data.__parent__ = self
self.completed = zc.async.utils.Periodic(
period=datetime.timedelta(days=7),
buckets=7)
zope.interface.alsoProvides(
self.completed, zc.async.interfaces.ICompletedCollection)
self.completed.__parent__ = self
@property
def queue(self):
if self.parent is not None:
return self.parent.parent
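    # Delegate read-only sequence behavior to the internal queue.  Assigning
    # into ``locals()`` here works because, during class construction, the
    # class body's namespace is an ordinary dict (a CPython detail).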
for nm in ('__len__', '__iter__', '__getitem__', '__nonzero__'):
locals()[nm] = zc.async.utils.simpleWrapper(nm)
def index(self, item):
for ix, i in enumerate(self):
if i is item:
return ix
raise ValueError("%r not in %s" % (item, self.__class__.__name__))
def remove(self, item):
self.pull(self.index(item))
def __delitem__(self, ix):
self.pull(ix)
def pull(self, index=0):
res = self._data.pull(index)
res.parent = None
return res
def claimJob(self):
if not self.parent.activated or self.parent.dead:
# we don't want to claim a job unless we are activated.
# Normally, this should be the case, but in unusual
# circumstances, such as very long commits causing the
# ping to not be able to commit, we might get in this
# unfortunate circumstance.
# TODO: we would like to have a read conflict error if we read
# activated but it changed beneath us. If the ZODB grows a gesture
# to cause this, use it.
return None
if len(self._data) < self.size: # MVCC can cause error here...
res = self._choose()
if res is not None:
res.parent = self
self._data.put(res)
else:
res = None
return res
def _choose(self): # hook point for subclass. Override if desired.
if self.chooser is not None:
return self.chooser(self)
else:
return self.queue.claim(self.filter)
def jobCompleted(self, job):
self.remove(job)
self.completed.add(job)
@zope.component.adapter(zc.async.interfaces.IDispatcherActivated)
def addMainAgentActivationHandler(event):
da = event.object
if 'main' not in da:
da['main'] = Agent() | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/agent.py | agent.py |
import bisect
import datetime
import logging
import sys
import time
import types
import persistent.interfaces
import BTrees
import ZEO.Exceptions
import ZODB.POSException
import ZODB.utils
import rwproperty
import persistent
import zope.minmax
import zc.dict
import zc.twist
import pytz
import zope.bforest.periodic
import zc.async.interfaces
EXPLOSIVE_ERRORS = (SystemExit, KeyboardInterrupt,
zc.async.interfaces.ReassignedError)
SYSTEM_ERRORS = (ZEO.Exceptions.ClientDisconnected,
ZODB.POSException.POSKeyError)
INITIAL_BACKOFF = 5
MAX_BACKOFF = 60
BACKOFF_INCREMENT = 5
def simpleWrapper(name):
# notice use of "simple" in function name! A sure sign of trouble!
def wrapper(self, *args, **kwargs):
return getattr(self._data, name)(*args, **kwargs)
return wrapper
log = logging.getLogger('zc.async.events')
tracelog = logging.getLogger('zc.async.trace')
class Base(persistent.Persistent):
_z_parent__ = parent = None
# we use ``parent`` for our data structures. As a convenience, we
# support the ``__parent__`` attribute used by most security policies so
# that ``__parent__`` uses ``parent`` unless __parent__ is explicitly set.
@property
def __parent__(self):
if self._z_parent__ is not None:
return self._z_parent__
return self.parent
@rwproperty.setproperty
def __parent__(self, value):
self._z_parent__ = None
# for legacy databases
Atom = zope.minmax.Maximum
class Dict(zc.dict.Dict, Base):
copy = None # mask
def __setitem__(self, key, value):
previous = self.get(key)
super(Dict, self).__setitem__(key, value)
value.name = key
value.parent = self
if previous is not None:
previous.parent = previous.name = None
def pop(self, key, *args):
try:
res = super(Dict, self).pop(key)
except KeyError:
if args:
return args[0]
else:
raise
res.parent = None
res.name = None
return res
def dt_to_long(dt):
# 4 low bits, 0-15, will be discarded and can be set, if desired
# newer dates are smaller than older, so BTrees sort from newer to older
if dt.tzinfo is not None:
dt = dt.astimezone(pytz.UTC).replace(tzinfo=None)
delta = datetime.datetime.max - dt
return long((delta.days << 41 | delta.seconds << 24 |
delta.microseconds << 4))
def long_to_dt(l):
microseconds = (l >> 4) & (2**20-1)
seconds = (l >> 24) & (2**17-1)
days = (l >> 41)
return (datetime.datetime.max -
datetime.timedelta(days, seconds, microseconds))
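# Round-trip note for dt_to_long/long_to_dt (added comment): for a naive UTC
# datetime ``dt``, ``long_to_dt(dt_to_long(dt)) == dt``.  Because the delta
# is taken from datetime.max, an *older* datetime maps to a *larger* key, so
# BTrees sort from newest to oldest, and the 4 low bits are left free for
# callers to disambiguate keys generated within the same microsecond.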
class AbstractSet(persistent.Persistent):
__parent__ = None
def __init__(self):
self._data = BTrees.family64.IO.BTree()
def clear(self):
self._data.clear()
def add(self, item):
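        # start at the top of the 16 values left open by the low 4 bits of
        # the key (see dt_to_long) and count down to avoid collisions among
        # items added within the same microsecond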
key = dt_to_long(datetime.datetime.utcnow()) + 15
while key in self._data:
key -= 1
self._data[key] = item
assert self.__parent__ is not None
item.parent = self.__parent__
item.key = key
def __iter__(self):
return self._data.itervalues()
def __len__(self):
return len(self._data)
def __nonzero__(self):
return bool(self._data)
def first(self, start=None):
if start is not None:
if isinstance(start, (int, long)):
args = (start,)
else:
args = (dt_to_long(start),)
else:
args = ()
return self._data[self._data.minKey(*args)]
def last(self, stop=None):
if stop is not None:
if isinstance(stop, (int, long)):
args = (stop,)
else:
args = (dt_to_long(stop),)
else:
args = ()
return self._data[self._data.maxKey(*args)]
def iter(self, start=None, stop=None):
if start is not None:
start = dt_to_long(start)
if stop is not None:
stop = dt_to_long(stop)
return self._data.itervalues(start, stop)
class Periodic(AbstractSet):
# sorts on begin_after from newest to oldest
def __init__(self, period, buckets):
self._data = zope.bforest.periodic.LOBForest(period, count=buckets)
@property
def period(self):
return self._data.period
@rwproperty.setproperty
def period(self, value):
self._data.period = value
class RollingSet(AbstractSet):
size = 100
def add(self, item):
super(RollingSet, self).add(item)
diff = len(self._data) - self.size
while diff > 0:
self._data.pop(self._data.maxKey())
diff -= 1
class RollingMapping(zc.dict.OrderedDict):
size = 100
def __setitem__(self, key, value):
super(RollingMapping, self).__setitem__(key, value)
diff = len(self) - self.size
if diff > 0:
for key in self._order[:diff]:
self._data.pop(key)
del self._order[:diff]
self._len.change(-diff)
def maxKey(self, key=None):
if key is None:
args = ()
else:
args = (key,)
return self._data.maxKey(*args)
def minKey(self, key=None):
if key is None:
args = ()
else:
args = (key,)
return self._data.minKey(*args)
def never_fail(call, identifier, tm):
# forever for TransactionErrors; forever, with backoff, for anything else
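    # Typical (illustrative) usage: never_fail(lambda: poll(), 'polling', tm),
    # where ``poll`` and ``tm`` stand in for a caller-supplied callable and
    # transaction manager.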
trans_ct = 0
backoff_ct = 0
backoff = INITIAL_BACKOFF
res = None
while 1:
try:
res = call()
tm.commit()
except ZODB.POSException.TransactionError:
tm.abort()
trans_ct += 1
if not trans_ct % 5:
log.warning(
'%d consecutive transaction errors while %s',
trans_ct, identifier, exc_info=True)
res = None
except EXPLOSIVE_ERRORS:
tm.abort()
raise
except Exception, e:
if isinstance(e, SYSTEM_ERRORS):
level = logging.ERROR
else:
level = logging.CRITICAL
tm.abort()
backoff_ct += 1
if backoff_ct == 1:
log.log(level,
'first error while %s; will continue in %d seconds',
identifier, backoff, exc_info=True)
elif not backoff_ct % 10:
log.log(level,
'%d consecutive errors while %s; '
'will continue in %d seconds',
backoff_ct, identifier, backoff, exc_info=True)
res = None
time.sleep(backoff)
backoff = min(MAX_BACKOFF, backoff + BACKOFF_INCREMENT)
else:
return res
def wait_for_system_recovery(call, identifier, tm):
# forever for TransactionErrors; forever, with backoff, for SYSTEM_ERRORS
trans_ct = 0
backoff_ct = 0
backoff = INITIAL_BACKOFF
res = None
while 1:
try:
res = call()
tm.commit()
except ZODB.POSException.TransactionError:
tm.abort()
trans_ct += 1
if not trans_ct % 5:
log.warning(
'%d consecutive transaction errors while %s',
trans_ct, identifier, exc_info=True)
res = None
except EXPLOSIVE_ERRORS:
tm.abort()
raise
except SYSTEM_ERRORS:
tm.abort()
backoff_ct += 1
if backoff_ct == 1:
log.error('first error while %s; will continue in %d seconds',
identifier, backoff, exc_info=True)
elif not backoff_ct % 5:
log.error('%d consecutive errors while %s; '
'will continue in %d seconds',
backoff_ct, identifier, backoff, exc_info=True)
res = None
time.sleep(backoff)
backoff = min(MAX_BACKOFF, backoff + BACKOFF_INCREMENT)
except:
log.error('Error while %s', identifier, exc_info=True)
tm.abort()
return zc.twist.Failure()
else:
return res
def try_five_times(call, identifier, tm, commit=True):
ct = 0
res = None
while 1:
try:
res = call()
if commit:
tm.commit()
except ZODB.POSException.TransactionError:
tm.abort()
ct += 1
if ct >= 5:
log.critical('Five consecutive transaction errors while %s',
identifier, exc_info=True)
res = zc.twist.Failure()
else:
continue
except EXPLOSIVE_ERRORS:
tm.abort()
raise
except:
tm.abort()
log.critical('Error while %s', identifier, exc_info=True)
res = zc.twist.Failure()
return res
def custom_repr(obj):
# isinstance and providedBy are *not* redundant
# it's a performance optimization
if (isinstance(obj, persistent.Persistent) or
persistent.interfaces.IPersistent.providedBy(obj)):
dbname = "?"
if obj._p_jar is not None:
dbname = getattr(obj._p_jar.db(), 'database_name', "?")
if dbname != '?':
dbname = repr(dbname)
if obj._p_oid is not None:
oid = ZODB.utils.u64(obj._p_oid)
else:
oid = '?'
return '%s.%s (oid %s, db %s)' % (
obj.__class__.__module__,
obj.__class__.__name__,
oid,
dbname)
elif isinstance(obj, (types.FunctionType, types.BuiltinFunctionType)):
return '%s.%s' % (obj.__module__, obj.__name__)
else:
return repr(obj)
def sortedmerge(sources, key=None):
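    # Merge already-sorted iterables into one sorted stream.  Illustrative
    # usage (added comment): list(sortedmerge([(1, 4), (2, 3)])) == [1, 2, 3, 4]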
if key is None:
key = lambda item: item
sorted_sources = []
for src in sources:
iterator = iter(src)
try:
first = iterator.next()
except StopIteration:
pass
else:
sorted_sources.append((key(first), first, iterator))
sorted_sources.sort()
while sorted_sources:
ignore, result, iterator = sorted_sources.pop(0)
yield result
try:
next = iterator.next()
except StopIteration:
pass
else:
bisect.insort(sorted_sources, (key(next), next, iterator))
def takecount(res, count):
if count < 0:
    raise ValueError('count must be a non-negative integer')
if count == 0:
return
ct = 0
for val in res:
yield val
ct += 1
if ct >= count:
break | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/utils.py | utils.py |
import time
import types
import datetime
import logging
import BTrees.OOBTree
import ZODB.POSException
import ZEO.Exceptions
import transaction.interfaces
import persistent
import persistent.list
import persistent.mapping
import twisted.internet.defer
import twisted.python.failure
import zope.interface
import zc.queue
import zc.twist
import rwproperty
import pytz
import zc.async.interfaces
import zc.async.utils
import zc.async
from zc.async.legacy import success_or_failure
class RetryCommonFourTimes(persistent.Persistent): # default
zope.component.adapts(zc.async.interfaces.IJob)
zope.interface.implements(zc.async.interfaces.IRetryPolicy)
# exceptions, data_cache key, max retry, initial backoff seconds,
# incremental backoff seconds, max backoff seconds
internal_exceptions = (
((ZEO.Exceptions.ClientDisconnected,), 'zeo_disconnected',
None, 5, 5, 60),
((ZODB.POSException.TransactionError,), 'transaction_error',
5, 0, 0, 0),
)
transaction_exceptions = internal_exceptions
max_interruptions = 9
log_every = 5
def __init__(self, job):
self.parent = self.__parent__ = job
self.data = BTrees.family32.OO.BTree()
def updateData(self, data_cache):
if 'first_active' in self.data and 'first_active' in data_cache:
data_cache.pop('first_active')
self.data.update(data_cache)
def jobError(self, failure, data_cache):
return self._process(failure, data_cache, self.internal_exceptions)
def commitError(self, failure, data_cache):
return self._process(failure, data_cache, self.transaction_exceptions)
def _process(self, failure, data_cache, exceptions):
for (exc, key, max_count, init_backoff,
incr_backoff, max_backoff) in exceptions:
if failure.check(*exc) is not None:
count = data_cache.get(key, 0) + 1
if max_count is not None and count >= max_count:
zc.async.utils.tracelog.warning(
'Retry policy for job %r is not retrying after %d '
'counts of %s occurrences', self.parent, count, key)
return False
elif count==1 or not count % self.log_every:
zc.async.utils.tracelog.warning(
'Retry policy for job %r requests another attempt '
'after %d counts of %s occurrences', self.parent,
count, key, exc_info=True)
backoff = min(max_backoff,
(init_backoff + (count-1) * incr_backoff))
if backoff:
time.sleep(backoff)
data_cache[key] = count
data_cache['last_' + key] = failure
if 'first_active' not in data_cache:
data_cache['first_active'] = self.parent.active_start
return True
return False
def interrupted(self):
if 'first_active' not in self.data:
self.data['first_active'] = self.parent.active_start
count = self.data['interruptions'] = self.data.get('interruptions', 0) + 1
if self.max_interruptions is None or count <= self.max_interruptions:
if count==1 or not count % self.log_every:
zc.async.utils.tracelog.info(
'Retry policy for job %r requests another attempt '
'after %d interrupts', self.parent, count)
return True
else:
zc.async.utils.tracelog.info(
'Retry policy for job %r is not retrying after %d '
'interrupts', self.parent, count)
return False
class RetryCommonForever(RetryCommonFourTimes):
# retry on ZEO failures and Transaction errors during the job forever
# retry on commitErrors and interrupteds forever.
internal_exceptions = (
((ZEO.Exceptions.ClientDisconnected,), 'zeo_disconnected',
None, 5, 5, 60),
((ZODB.POSException.TransactionError,), 'transaction_error',
None, 0, 0, 0),
)
max_interruptions = None
other_commit_initial_backoff = 0
other_commit_incremental_backoff = 1
other_commit_max_backoff = 60
def commitError(self, failure, data_cache):
res = super(RetryCommonForever, self).commitError(failure, data_cache)
if not res:
# that just means we didn't record it. We actually are going to
# retry. However, we are going to back these off.
key = 'other'
count = data_cache['other'] = data_cache.get('other', 0) + 1
data_cache['last_other'] = failure
if 'first_active' not in data_cache:
data_cache['first_active'] = self.parent.active_start
backoff = min(self.other_commit_max_backoff,
(self.other_commit_initial_backoff +
(count-1) * self.other_commit_incremental_backoff))
if count==1 or not count % self.log_every:
# this is critical because it is unexpected. Someone probably
# needs to see this. We can't move on until it is dealt with.
zc.async.utils.log.critical(
'Retry policy for job %r requests another attempt in %d '
'seconds after %d counts of %s occurrences',
self.parent, backoff, count, key, exc_info=True)
if backoff:
time.sleep(backoff)
return True # always retry
class NeverRetry(persistent.Persistent):
zope.component.adapts(zc.async.interfaces.IJob)
zope.interface.implements(zc.async.interfaces.IRetryPolicy)
def __init__(self, job):
self.parent = self.__parent__ = job
def updateData(self, data_cache):
pass
def jobError(self, failure, data_cache):
return False
def commitError(self, failure, data_cache):
return False
def interrupted(self):
return False
def callback_retry_policy_factory(job):
res = zope.component.queryAdapter(
job, zc.async.interfaces.IRetryPolicy, 'callback')
if res is None:
res = RetryCommonForever(job)
return res
def isFailure(value):
return isinstance(value, twisted.python.failure.Failure)
def _prepare_callback(callback, failure_log_level=None,
retry_policy_factory=None, parent=None):
if not zc.async.interfaces.ICallbackProxy.providedBy(callback):
callback = zc.async.interfaces.IJob(callback)
if failure_log_level is not None:
callback.failure_log_level = failure_log_level
elif callback.failure_log_level is None:
callback.failure_log_level = logging.CRITICAL
if retry_policy_factory is not None:
callback.retry_policy_factory = retry_policy_factory
elif callback.retry_policy_factory is None:
callback.retry_policy_factory = callback_retry_policy_factory
callback.parent = parent
return callback
class ConditionalCallbackProxy(zc.async.utils.Base):
zope.interface.implements(zc.async.interfaces.ICallbackProxy)
job = None
@property
def status(self):
# NEW -> (PENDING -> ASSIGNED ->) ACTIVE -> CALLBACKS -> COMPLETED
if self.job is None:
ob = self.parent
while (ob is not None and
zc.async.interfaces.IJob.providedBy(ob)):
ob = ob.parent
if zc.async.interfaces.IAgent.providedBy(ob):
return zc.async.interfaces.ASSIGNED
elif zc.async.interfaces.IQueue.providedBy(ob):
return zc.async.interfaces.PENDING
return zc.async.interfaces.NEW
return self.job.status
@property
def result(self):
if self.job is None:
return None
return self.job.result
def __init__(self, *args, **kwargs):
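        # Illustrative construction (added comment):
        #   ConditionalCallbackProxy((isFailure, on_error), on_success)
        # runs on_error when the result is a Failure and on_success
        # otherwise; on_error and on_success stand in for caller-supplied
        # callables, and a trailing positional without __len__ is treated
        # as the default.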
kwargs['parent'] = self
default = None
if not args:
pass
elif args[-1] is None:
args = args[:-1]
elif getattr(args[-1], '__len__', None) is None:
default = _prepare_callback(args[-1], **kwargs)
args = args[:-1]
self.default = default
self.conditionals = persistent.list.PersistentList()
for condition, job in args:
if job is not None:
job = _prepare_callback(job, **kwargs)
self.conditionals.append((condition, job))
self.callbacks = zc.queue.PersistentQueue()
def getJob(self, result):
if self.job is None:
for condition, callable in self.conditionals:
if condition(result):
break
else:
callable = self.default
if callable is None:
callable = _prepare_callback(_transparent, None, None, self)
self.job = callable
else:
callable = self.job
while self.callbacks:
callable.addCallback(self.callbacks.pull())
return callable
def addCallbacks(self, success=None, failure=None,
failure_log_level=None, retry_policy_factory=None):
return self.addCallback(SuccessFailureCallbackProxy(
success, failure,
failure_log_level=failure_log_level,
retry_policy_factory=retry_policy_factory))
def addCallback(self, callback, failure_log_level=None,
retry_policy_factory=None):
callback = _prepare_callback(
callback, failure_log_level, retry_policy_factory, self)
if self.job is None:
self.callbacks.put(callback)
else:
self.job.addCallback(callback)
return callback
class SuccessFailureCallbackProxy(ConditionalCallbackProxy):
@property
def success(self):
return self.default
@property
def failure(self):
return self.conditionals[0][1]
def __init__(self, success, failure, failure_log_level=None,
retry_policy_factory=None):
super(SuccessFailureCallbackProxy, self).__init__(
(isFailure, failure), success,
failure_log_level=failure_log_level,
retry_policy_factory=retry_policy_factory)
_status_mapping = {
0: zc.async.interfaces.NEW,
# calculated: zc.async.interfaces.PENDING,
# calculated: zc.async.interfaces.ASSIGNED,
1: zc.async.interfaces.ACTIVE,
2: zc.async.interfaces.CALLBACKS,
3: zc.async.interfaces.COMPLETED}
class Job(zc.async.utils.Base):
zope.interface.implements(zc.async.interfaces.IJob)
_callable_root = _callable_name = _result = None
_status_id = None
_status = None # legacy; we use _status_id now
_begin_after = _begin_by = _active_start = _active_end = None
key = None
_retry_policy = None
retry_policy_factory = None # effectively "look up IRetryPolicy adapter
# for '' (empty string) name, and use RetryCommonFourTimes if the adapter
# doesn't exist"
failure_log_level = None # effectively logging.ERROR
assignerUUID = None
_quota_names = ()
def __init__(self, *args, **kwargs):
self._status_id = 0 # we do this here rather than in the class because
# the attribute is new; if _status_id is set, we know we can ignore
# the legacy _status value.
self.args = persistent.list.PersistentList(args) # TODO: blist
self.callable = self.args.pop(0)
self.kwargs = persistent.mapping.PersistentMapping(kwargs)
self.callbacks = zc.queue.PersistentQueue()
self.annotations = BTrees.OOBTree.OOBTree()
def setUp(self):
# a hook (see z3.py, for instance) used in __call__
pass
def tearDown(self, setup_info):
# a hook (see z3.py, for instance) used in __call__
pass
@property
def active_start(self):
return self._active_start
@property
def active_end(self):
return self._active_end
@property
def initial_callbacks_end(self):
return self.key and zc.async.utils.long_to_dt(self.key).replace(
tzinfo=pytz.UTC)
@property
def quota_names(self):
return self._quota_names
@rwproperty.setproperty
def quota_names(self, value):
if isinstance(value, basestring):
raise TypeError('provide an iterable of names')
status = self.status
if status != zc.async.interfaces.NEW:
if status == zc.async.interfaces.PENDING:
quotas = self.queue.quotas
for name in value:
if name not in quotas:
raise ValueError('unknown quota name', name)
else:
raise zc.async.interfaces.BadStatusError(
'can only set quota_names when a job has NEW or PENDING '
'status')
self._quota_names = tuple(value)
@property
def begin_after(self):
return self._begin_after
@rwproperty.setproperty
def begin_after(self, value):
if self.status != zc.async.interfaces.NEW:
raise zc.async.interfaces.BadStatusError(
'can only set begin_after when a job has NEW status')
if value is not None:
if value.tzinfo is None:
raise ValueError('cannot use timezone-naive values')
else:
value = value.astimezone(pytz.UTC)
self._begin_after = value
@property
def begin_by(self):
return self._begin_by
@rwproperty.setproperty
def begin_by(self, value):
if self.status not in (zc.async.interfaces.PENDING,
zc.async.interfaces.NEW):
raise zc.async.interfaces.BadStatusError(
'can only set begin_by when a job has NEW or PENDING status')
if value is not None:
if value < datetime.timedelta():
raise ValueError('negative values are not allowed')
self._begin_by = value
@property
def queue(self):
ob = self.parent
while (ob is not None and
(zc.async.interfaces.IJob.providedBy(ob) or
zc.async.interfaces.IAgent.providedBy(ob) or
zc.async.interfaces.IDispatcherAgents.providedBy(ob))):
ob = ob.parent
if not zc.async.interfaces.IQueue.providedBy(ob):
ob = None
return ob
@property
def agent(self):
ob = self.parent
while (ob is not None and
zc.async.interfaces.IJob.providedBy(ob)):
ob = ob.parent
if not zc.async.interfaces.IAgent.providedBy(ob):
ob = None
return ob
@property
def result(self):
return self._result
@property
def status(self):
# NEW -> (PENDING -> ASSIGNED ->) ACTIVE -> CALLBACKS -> COMPLETED
if self._status_id is None: # legacy
res = self._status
else:
res = _status_mapping[self._status_id]
if res == zc.async.interfaces.NEW:
ob = self.parent
while (ob is not None and ob is not self and
zc.async.interfaces.IJob.providedBy(ob)):
ob = ob.parent
if zc.async.interfaces.IAgent.providedBy(ob):
res = zc.async.interfaces.ASSIGNED
elif zc.async.interfaces.IQueue.providedBy(ob):
res = zc.async.interfaces.PENDING
return res
@classmethod
def bind(klass, *args, **kwargs):
res = klass(*args, **kwargs)
res.args.insert(0, res)
return res
def __repr__(self):
try:
call = zc.async.utils.custom_repr(self._callable_root)
if self._callable_name is not None:
call += ' :' + self._callable_name
args = ', '.join(zc.async.utils.custom_repr(a) for a in self.args)
kwargs = ', '.join(
k + "=" + zc.async.utils.custom_repr(v)
for k, v in self.kwargs.items())
if args:
if kwargs:
args += ", " + kwargs
else:
args = kwargs
return '<%s ``%s(%s)``>' % (
zc.async.utils.custom_repr(self), call, args)
except (TypeError, ValueError, AttributeError):
# broken reprs are a bad idea; they obscure problems
return super(Job, self).__repr__()
@property
def callable(self):
if self._callable_name is None:
return self._callable_root
else:
return getattr(self._callable_root, self._callable_name)
@rwproperty.setproperty
def callable(self, value):
# can't pickle/persist methods by default as of this writing, so we
# add the sugar ourselves. In future, would like for args to be
# potentially methods of persistent objects too...
        if self.status not in (zc.async.interfaces.NEW,
                               zc.async.interfaces.PENDING,
                               zc.async.interfaces.ASSIGNED):
raise zc.async.interfaces.BadStatusError(
'can only set callable when a job has NEW, PENDING, or '
'ASSIGNED status')
if isinstance(value, types.MethodType):
self._callable_root = value.im_self
self._callable_name = value.__name__
elif isinstance(value, zc.twist.METHOD_WRAPPER_TYPE):
self._callable_root = zc.twist.get_self(value)
self._callable_name = value.__name__
elif (isinstance(value, types.BuiltinMethodType) and
getattr(value, '__self__', None) is not None):
self._callable_root = value.__self__
self._callable_name = value.__name__
else:
self._callable_root, self._callable_name = value, None
if (zc.async.interfaces.IJob.providedBy(self._callable_root) and
self._callable_root.parent is None):
# if the parent is already set, that is probably an agent or
# something like that. Don't override, or else the agent won't
# get cleaned out.
self._callable_root.parent = self
def addCallbacks(self, success=None, failure=None,
failure_log_level=None, retry_policy_factory=None):
return self.addCallback(SuccessFailureCallbackProxy(
success, failure,
failure_log_level=failure_log_level,
retry_policy_factory=retry_policy_factory))
def addCallback(self, callback, failure_log_level=None,
retry_policy_factory=None):
callback = _prepare_callback(
callback, failure_log_level, retry_policy_factory, self)
self.callbacks.put(callback)
if self.status == zc.async.interfaces.COMPLETED:
if zc.async.interfaces.ICallbackProxy.providedBy(callback):
call = callback.getJob(self.result)
else:
call = callback
call(self.result) # this commits transactions!
else:
self._p_changed = True # to try and fire conflict errors if
# our reading of self.status has changed beneath us
return callback
def getRetryPolicy(self):
if self._retry_policy is not None:
return self._retry_policy
if self.retry_policy_factory is None:
# first try to look up adapter with name of ''; then if that fails
# use RetryCommonFourTimes
res = zope.component.queryAdapter(
self, zc.async.interfaces.IRetryPolicy, '')
if res is None:
res = RetryCommonFourTimes(self)
elif isinstance(self.retry_policy_factory, basestring):
res = zope.component.getAdapter(
self, zc.async.interfaces.IRetryPolicy,
self.retry_policy_factory)
# this may cause an error. We can't proceed because we don't know
# what to do, and it may be *critical* to know. Therefore, in
# _getRetry, we rely on never_fail to keep on sending critical
# errors in the log, and never stopping.
else:
res = self.retry_policy_factory(self)
self._retry_policy = res
return res
def _getRetry(self, call_name, tm, *args):
# if we are after the time that we are supposed to begin_by, no retry
if (self.begin_by is not None and self.begin_after is not None and
            self.begin_by + self.begin_after < datetime.datetime.now(pytz.UTC)):
return False
# we divide up the two ``never_fail`` calls so that retries in getting
# the policy don't affect actually calling the method.
identifier = 'getting retry policy for %r' % (self,)
policy = zc.async.utils.never_fail(self.getRetryPolicy, identifier, tm)
call = getattr(policy, call_name, None)
if call is None:
zc.async.utils.log.error(
'retry policy %r for %r does not have required %s method',
policy, self, call_name)
return None
identifier = 'getting result for %s retry for %r' % (call_name, self)
res = zc.async.utils.never_fail(lambda: call(*args), identifier, tm)
self._check_reassigned((zc.async.interfaces.ACTIVE,)) # will raise
# exception if necessary
return res
def __call__(self, *args, **kwargs):
statuses = (zc.async.interfaces.NEW, zc.async.interfaces.ASSIGNED)
if self.status not in statuses:
raise zc.async.interfaces.BadStatusError(
'can only call a job with NEW or ASSIGNED status')
tm = transaction.interfaces.ITransactionManager(self)
def prepare():
self._check_reassigned(statuses)
self._status_id = 1 # ACTIVE
self._active_start = datetime.datetime.now(pytz.UTC)
effective_args = list(args)
effective_args[0:0] = self.args
effective_kwargs = dict(self.kwargs)
effective_kwargs.update(kwargs)
return effective_args, effective_kwargs
identifier = 'preparing for call of %r' % (self,)
effective_args, effective_kwargs = zc.async.utils.never_fail(
prepare, identifier, tm)
# this is the calling code. It is complex and long because it is
# trying both to handle exceptions reasonably, and to honor the
# IRetryPolicy interface for those exceptions.
data_cache = {}
res = None
while 1:
zc.async.local.job = self # we do this in the loop for paranoia
try:
setup_info = self.setUp()
res = self.callable(*effective_args, **effective_kwargs)
except zc.async.utils.EXPLOSIVE_ERRORS:
tm.abort()
zc.async.utils.try_five_times(
lambda: self.tearDown(setup_info),
'tearDown for %r' % self, tm, commit=False)
raise
except:
res = zc.twist.Failure()
tm.abort()
zc.async.utils.try_five_times(
lambda: self.tearDown(setup_info),
'tearDown for %r' % self, tm, commit=False)
retry = self._getRetry('jobError', tm, res, data_cache)
if isinstance(retry, (datetime.timedelta, datetime.datetime)):
identifier = (
'rescheduling %r as requested by '
'associated IRetryPolicy %r' % (
self, self.getRetryPolicy()))
if self is zc.async.utils.never_fail(
lambda: self._reschedule(retry, data_cache),
identifier, tm):
zc.async.local.job = None
return self
elif retry:
continue
# policy didn't exist or returned False or couldn't reschedule
try:
callback = self._set_result(res, tm, data_cache)
except zc.async.utils.EXPLOSIVE_ERRORS:
tm.abort()
zc.async.utils.try_five_times(
lambda: self.tearDown(setup_info),
'tearDown for %r' % self, tm, commit=False)
raise
except:
failure = zc.twist.Failure()
tm.abort()
zc.async.utils.try_five_times(
lambda: self.tearDown(setup_info),
'tearDown for %r' % self, tm, commit=False)
retry = self._getRetry('commitError', tm, failure, data_cache)
if isinstance(retry, (datetime.timedelta, datetime.datetime)):
identifier = (
'rescheduling %r as requested by '
'associated IRetryPolicy %r' % (
self, self.getRetryPolicy()))
if self is zc.async.utils.never_fail(
lambda: self._reschedule(retry, data_cache),
identifier, tm):
zc.async.local.job = None
return self
elif retry:
continue
# policy didn't exist or returned False or couldn't reschedule
if isinstance(res, twisted.python.failure.Failure):
log_level = self.failure_log_level
if log_level is None:
log_level = logging.ERROR
zc.async.utils.log.log(
log_level,
'Commit failed for %r (see subsequent traceback). '
'Prior to this, job failed with traceback:\n%s',
self,
res.getTraceback(
elideFrameworkCode=True, detail='verbose'))
else:
zc.async.utils.log.info(
'Commit failed for %r (see subsequent traceback). '
'Prior to this, job succeeded with result: %r',
self, res)
res = failure
def complete():
self._check_reassigned((zc.async.interfaces.ACTIVE,))
self._result = res
self._status_id = 2 # CALLBACKS
self._active_end = datetime.datetime.now(pytz.UTC)
policy = self.getRetryPolicy()
if data_cache and self._retry_policy is not None:
self._retry_policy.updateData(data_cache)
identifier = 'storing failure at commit of %r' % (self,)
zc.async.utils.never_fail(complete, identifier, tm)
callback = True
else:
zc.async.utils.try_five_times(
lambda: self.tearDown(setup_info),
'tearDown for %r' % self, tm, commit=False)
if callback:
self._log_completion(res)
identifier = 'performing callbacks of %r' % (self,)
zc.async.utils.never_fail(self.resumeCallbacks, identifier, tm)
zc.async.local.job = None
return res
def handleInterrupt(self):
# should be called within a job that has a RetryCommonForever policy
tm = transaction.interfaces.ITransactionManager(self)
if self.status == zc.async.interfaces.ACTIVE:
retry = self._getRetry('interrupted', tm)
if isinstance(retry, (datetime.datetime, datetime.timedelta)):
self._reschedule(retry, queue=self.queue)
elif retry:
self._reschedule(datetime.timedelta(), queue=self.queue)
else:
res = zc.twist.Failure(zc.async.interfaces.AbortedError())
if self._set_result(res, tm):
self.resumeCallbacks()
self._log_completion(res)
elif self.status != zc.async.interfaces.CALLBACKS:
# we have to allow CALLBACKS or else some retries will fall over,
# because handleInterrupt may fail after a commit of the aborted
# error
raise zc.async.interfaces.BadStatusError(
'can only call ``handleInterrupt`` on a job with ACTIVE '
'status') # um...or CALLBACKS, but that's a secret :-D
else:
self.resumeCallbacks()
def fail(self, e=None):
# something may have fallen over the last time this was called, so we
# are careful to only store the error if we're not in the CALLBACKS
# status.
callback = True
status = self.status
if status in (zc.async.interfaces.COMPLETED,
zc.async.interfaces.ACTIVE):
raise zc.async.interfaces.BadStatusError(
'can only call fail on a job with NEW, PENDING, or ASSIGNED '
'status') # ...or CALLBACKS, but that's because of
# retries, and is semantically incorrect
if status != zc.async.interfaces.CALLBACKS:
if e is None:
e = zc.async.interfaces.TimeoutError()
res = zc.twist.Failure(e)
callback = self._set_result(
res, transaction.interfaces.ITransactionManager(self))
self._log_completion(res)
if callback:
self.resumeCallbacks()
def _reschedule(self, when, data_cache=None, queue=None):
if not isinstance(when, (datetime.datetime, datetime.timedelta)):
raise TypeError('``when`` must be datetime or timedelta')
in_agent = zc.async.interfaces.IAgent.providedBy(self.parent)
if queue is None:
# this is a reschedule from jobError or commitError
if not in_agent:
zc.async.utils.log.critical(
'error for IRetryPolicy %r on %r: '
'can only reschedule a job directly in an agent',
self.getRetryPolicy(), self)
return None
queue = self.queue
if data_cache is not None and self._retry_policy is not None:
self._retry_policy.updateData(data_cache)
self._status_id = 0 # NEW
self._active_start = None
if in_agent:
self.parent.remove(self)
else:
self.parent = None
now = datetime.datetime.now(pytz.UTC)
if isinstance(when, datetime.datetime):
if when.tzinfo is None:
when = when.replace(tzinfo=pytz.UTC)
if when <= now:
queue.putBack(self)
else:
queue.put(self, begin_after=when)
elif isinstance(when, datetime.timedelta):
if when <= datetime.timedelta():
queue.putBack(self)
else:
queue.put(self, begin_after=now+when)
return self
def _check_reassigned(self, expected_statuses):
agent = self.agent
res = self.status not in expected_statuses or (
zc.async.interfaces.IAgent.providedBy(agent) and
not zc.async.interfaces.IJob.providedBy(self._result) and
zc.async.local.getAgentName() is not None and
(zc.async.local.getAgentName() != agent.name or
zc.async.local.getDispatcher().UUID != agent.parent.UUID))
if res:
# the only known scenario for this to occur is the following.
# agent took job. dispatcher gave it to a thread. While
# performing the job, the poll was unable to write to the db,
# perhaps because of a database disconnect or because of a
# too-long commit in another process or thread. Therefore,
# A sibling has noticed that this agent seems to have died
# and put this job back in the queue, where it has been claimed
# by another process/agent.
# It's debatable whether this is CRITICAL or ERROR level. We'll
# go with ERROR for now.
zc.async.utils.log.error(
'%r was reassigned. Likely cause was that polling was '
'unable to occur as regularly as expected, perhaps because of '
'long commit times in the application.', self)
raise zc.async.interfaces.ReassignedError()
def _set_result(self, res, tm, data_cache=None):
# returns whether to call ``resumeCallbacks``
callback = True
if zc.async.interfaces.IJob.providedBy(res):
res.addCallback(self._callback)
self._result = res # temporary
callback = False
elif isinstance(res, twisted.internet.defer.Deferred):
partial = zc.twist.Partial(self._callback)
partial.max_transaction_errors = None # retry conflicts forever
res.addBoth(partial)
callback = False
else:
if isinstance(res, twisted.python.failure.Failure):
res = zc.twist.sanitize(res)
self._result = res
self._status_id = 2 # CALLBACKS
self._active_end = datetime.datetime.now(pytz.UTC)
if self._retry_policy is not None and data_cache:
self._retry_policy.updateData(data_cache)
tm.commit() # this should raise a ConflictError if the job has been
# reassigned.
return callback
def _log_completion(self, res):
if isinstance(res, twisted.python.failure.Failure):
log_level = self.failure_log_level
if log_level is None:
log_level = logging.ERROR
zc.async.utils.log.log(
log_level,
'%r failed with traceback:\n%s',
self,
res.getTraceback(
elideFrameworkCode=True, detail='verbose'))
else:
zc.async.utils.tracelog.info(
'%r succeeded with result: %r',
self, res)
def _callback(self, res):
# done within a job or partial, so we can rely on their retry bits to
# some degree. However, we commit transactions ourselves, so we have
# to be a bit careful that the result hasn't been set already.
callback = True
if self.status == zc.async.interfaces.ACTIVE:
callback = self._set_result(
res, transaction.interfaces.ITransactionManager(self))
self._log_completion(res)
if callback:
self.resumeCallbacks()
def handleCallbackInterrupt(self, caller):
if self.status != zc.async.interfaces.ACTIVE:
raise zc.async.interfaces.BadStatusError(
'can only handleCallbackInterrupt on a job with ACTIVE status')
if caller.status != zc.async.interfaces.CALLBACKS:
raise zc.async.interfaces.BadStatusError(
'can only handleCallbackInterrupt with caller in CALLBACKS '
'status')
result = caller.result
if self.result is not None:
if not zc.async.interfaces.IJob.providedBy(self.result):
msg = ('Callback %r is in an apparently insane state: result '
'has been set (%r), the result is not a job, and yet '
'the status is ACTIVE. This should not be possible. ')
if self.result == result:
zc.async.utils.log.error(
msg + 'Stored result is equivalent to currently '
'received result, so will '
'change status to CALLBACKS and '
'run callbacks, for no clear "right" action.',
self, self.result)
self._status_id = 2 # CALLBACKS
self._active_end = datetime.datetime.now(pytz.UTC)
self.resumeCallbacks()
return
else:
zc.async.utils.log.error(
msg + 'Stored result is not equivalent to currently '
'received result (%r), so will '
'(re?)run this job with new result, for no clear '
'"right" action.',
self, self.result, result)
# fall through
elif self.result.status == zc.async.interfaces.COMPLETED:
            zc.async.utils.log.warning(
                'Callback %r is in an apparently insane state: inner job '
                'result has been completed, including callbacks, but '
                'this job has not been '
                'completed. This should not be possible. Will set '
                'result and run callbacks, for no clear "right" action.',
                self)
            callback = self._set_result(
                self.result.result,
                transaction.interfaces.ITransactionManager(self))
self._log_completion(self.result.result)
if callback:
self.resumeCallbacks()
return
else:
return # we are going to hope that the job works; it should,
# and there's no way for us to know that it won't here.
tm = transaction.interfaces.ITransactionManager(self)
retry = self._getRetry('interrupted', tm)
istime = isinstance(
retry, (datetime.timedelta, datetime.datetime))
if istime:
zc.async.utils.log.error(
'error for IRetryPolicy %r on %r: '
'cannot reschedule a callback, only retry. '
'We will retry now, for no clear "right" action.',
self.getRetryPolicy(), self)
if retry or istime:
zc.async.utils.tracelog.debug(
'retrying interrupted callback '
'%r to %r', self, caller)
self._status_id = 0 # NEW
self._active_start = None
self(result)
else:
zc.async.utils.tracelog.debug(
'aborting interrupted callback '
'%r to %r', self, caller)
self.fail(zc.async.interfaces.AbortedError())
def resumeCallbacks(self):
# should be called within a job that has a RetryCommonForever policy
if self.status != zc.async.interfaces.CALLBACKS:
raise zc.async.interfaces.BadStatusError(
'can only resumeCallbacks on a job with CALLBACKS status')
self._check_reassigned((zc.async.interfaces.CALLBACKS,))
callbacks = list(self.callbacks)
tm = transaction.interfaces.ITransactionManager(self)
length = 0
while 1:
for j in callbacks:
self._check_reassigned((zc.async.interfaces.CALLBACKS,))
if zc.async.interfaces.ICallbackProxy.providedBy(j):
j = j.getJob(self.result)
status = j.status
if status in (zc.async.interfaces.NEW,
zc.async.interfaces.ASSIGNED,
zc.async.interfaces.PENDING):
if (j.begin_by is not None and
(j.begin_after + j.begin_by) <
datetime.datetime.now(pytz.UTC)):
zc.async.utils.log.error(
'failing expired callback %r to %r', j, self)
j.fail()
else:
zc.async.utils.tracelog.debug(
'starting callback %r to %r', j, self)
j(self.result)
elif status == zc.async.interfaces.ACTIVE:
j.handleCallbackInterrupt(self)
elif status == zc.async.interfaces.CALLBACKS:
j.resumeCallbacks()
# TODO: this shouldn't raise anything we want to catch, right?
# now, this should catch all the errors except EXPLOSIVE_ERRORS
# cleaning up dead jobs should look something like the above.
tm.begin() # syncs
# it's possible that someone added some callbacks, so run until
# we're exhausted.
length += len(callbacks)
callbacks = list(self.callbacks)[length:]
if not callbacks:
# this whole method is called within a never_fail...
self._status_id = 3 # COMPLETED
if zc.async.interfaces.IAgent.providedBy(self.parent):
self.parent.jobCompleted(self)
tm.commit()
return
# conveniences for serial and parallel jobs
def _transparent(*results):
return results
def _serial_or_parallel(scheduler, jobs, kw):
if kw and (len(kw) > 1 or kw.keys()[0] != 'postprocess'):
raise TypeError('only accepts one keyword argument, ``postprocess``')
postprocess = zc.async.interfaces.IJob(kw.get('postprocess', _transparent))
result = Job(scheduler,
*(zc.async.interfaces.IJob(j) for j in jobs),
**dict(postprocess=postprocess))
return result
def _queue_next(main_job, ix=0, ignored_result=None):
jobs = main_job.args
queue = main_job.queue
if ix < len(jobs):
next = jobs[ix]
queue.put(next)
next.addCallback(Job(_queue_next, main_job, ix+1))
else:
postprocess = main_job.kwargs['postprocess']
if postprocess.status == zc.async.interfaces.NEW:
# will not be NEW if this is a retry
postprocess.args.extend(jobs)
queue.put(postprocess)
def _schedule_serial(*jobs, **kw):
for ix, job in enumerate(jobs): # important for interrupts
if job.status == zc.async.interfaces.NEW:
break
else:
ix += 1
_queue_next(zc.async.local.getJob(), ix)
return kw['postprocess']
def serial(*jobs, **kw):
return _serial_or_parallel(_schedule_serial, jobs, kw)
def _queue_all(main_job, ignored_result=None):
jobs = main_job.args
queue = main_job.queue
complete = True
for job in jobs:
status = job.status
if status == zc.async.interfaces.NEW:
queue.put(job)
job.addCallback(Job(_queue_all, main_job))
complete = False
elif status not in (zc.async.interfaces.COMPLETED,
zc.async.interfaces.CALLBACKS):
complete = False
if complete:
postprocess = main_job.kwargs['postprocess']
if postprocess.status == zc.async.interfaces.NEW:
# will not be NEW if this is a retry
postprocess.args.extend(jobs)
queue.put(postprocess)
def _schedule_parallel(*jobs, **kw):
_queue_all(zc.async.local.getJob())
return kw['postprocess']
def parallel(*jobs, **kw):
return _serial_or_parallel(_schedule_parallel, jobs, kw) | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/job.py | job.py |
import time
import datetime
import bisect
import Queue
import thread
import threading
import twisted.python.failure
import twisted.internet.defer
import ZODB.POSException
import ZEO.Exceptions
import ZODB.utils
import BTrees
import transaction
import transaction.interfaces
import zope.component
import zope.bforest.periodic
import zc.twist
import zc.async
import zc.async.utils
import zc.async.interfaces
class PollInfo(dict):
key = None
@property
def utc_timestamp(self):
if self.key is not None:
return zc.async.utils.long_to_dt(self.key)
class AgentThreadPool(object):
_size = 0
initial_backoff = 5
incremental_backoff = 5
maximum_backoff = 60
jobid = None
def __init__(self, dispatcher, name, size):
self.dispatcher = dispatcher
self.name = name
self.queue = Queue.Queue(0)
self.threads = []
self.jobids = {}
self.setSize(size)
def getSize(self):
return self._size
def perform_thread(self):
thread_id = str(thread.get_ident())
threading.currentThread().setName(thread_id)
self.jobids[thread_id] = None
zc.async.local.dispatcher = self.dispatcher
zc.async.local.name = self.name # this is the name of this pool's agent
conn = self.dispatcher.db.open()
try:
job_info = self.queue.get()
while job_info is not None:
identifier, dbname, info = job_info
self.jobids[thread_id] = (ZODB.utils.u64(identifier), dbname)
info['thread'] = thread_id
info['started'] = datetime.datetime.utcnow()
zc.async.utils.tracelog.info(
'starting in thread %s: %s',
info['thread'], info['call'])
backoff = self.initial_backoff
conflict_retry_count = 0
try:
while 1:
try:
transaction.begin()
if dbname is None:
local_conn = conn
else:
local_conn = conn.get_connection(dbname)
job = local_conn.get(identifier)
# this setstate should trigger any initial problems
# within the try/except retry structure here.
local_conn.setstate(job)
# this is handled in job.__call__: local.job = job
except ZEO.Exceptions.ClientDisconnected:
zc.async.utils.log.info(
'ZEO client disconnected while trying to '
'get job %d in db %s; retrying in %d seconds',
ZODB.utils.u64(identifier), dbname or '',
backoff)
time.sleep(backoff)
backoff = min(self.maximum_backoff,
backoff + self.incremental_backoff)
except ZODB.POSException.TransactionError:
# continue, i.e., try again
conflict_retry_count += 1
if (conflict_retry_count == 1 or
not conflict_retry_count % 5):
zc.async.utils.log.warning(
'%d transaction error(s) while trying to '
'get job %d in db %s',
conflict_retry_count,
ZODB.utils.u64(identifier), dbname or '',
exc_info=True)
# now ``while 1`` loop will continue, to retry
else:
break
try:
job() # this does the committing and retrying, largely
except zc.async.interfaces.BadStatusError:
transaction.abort()
zc.async.utils.log.error( # notice, not tracelog
'job already completed?', exc_info=True)
if job.status == zc.async.interfaces.CALLBACKS:
job.resumeCallbacks() # moves the job off the agent
else:
count = 0
while 1:
status = job.status
if status == zc.async.interfaces.COMPLETED:
if zc.async.interfaces.IAgent.providedBy(
job.parent):
job.parent.jobCompleted(job)
# moves the job off the agent
else:
job.fail() # moves the job off the agent
try:
transaction.commit()
except (ZODB.POSException.TransactionError,
ZODB.POSException.POSError):
if count and not count % 10:
zc.async.utils.log.critical(
'frequent database errors! '
'I retry forever...',
exc_info=True)
time.sleep(1)
transaction.abort() # retry forever (!)
else:
break
except zc.async.interfaces.ReassignedError:
transaction.abort()
info['reassigned'] = True
# will need to get next job_info and continue
# EXPLOSIVE_ERRORS includes Reassigned: order is important
except zc.async.utils.EXPLOSIVE_ERRORS:
transaction.abort()
raise
except:
# all errors should have been handled by the job at
# this point, so anything other than BadStatusError,
# SystemExit and KeyboardInterrupt are bad surprises.
transaction.abort()
zc.async.utils.log.critical(
'unexpected error', exc_info=True)
raise
# should come before 'completed' for threading dance
if isinstance(job.result, twisted.python.failure.Failure):
info['failed'] = True
info['result'] = job.result.getTraceback(
elideFrameworkCode=True)
else:
info['result'] = repr(job.result)
if len(info['result']) > 10000:
info['result'] = (
info['result'][:10000] + '\n[...TRUNCATED...]')
info['completed'] = datetime.datetime.utcnow()
finally:
zc.async.local.job = None # also in job (here for paranoia)
transaction.abort() # (also paranoia)
zc.async.utils.tracelog.info(
'completed in thread %s: %s',
info['thread'], info['call'])
self.jobids[thread_id] = None
job_info = self.queue.get()
finally:
conn.close()
if self.dispatcher.activated:
# this may cause some bouncing, but we don't ever want to end
# up with fewer than needed.
self.dispatcher.reactor.callFromThread(self.setSize)
del self.jobids[thread_id]
def setSize(self, size=None):
# this should only be called from the thread in which the reactor runs
# (otherwise it needs locks)
old = self._size
if size is None:
size = old
else:
self._size = size
res = []
ct = 0
for t in self.threads:
if t.isAlive():
res.append(t)
ct += 1
self.threads[:] = res
if ct < size:
for i in range(max(size - ct, 0)):
t = threading.Thread(target=self.perform_thread)
t.setDaemon(True)
self.threads.append(t)
t.start()
elif ct > size:
# this may cause some bouncing, but hopefully nothing too bad.
for i in range(ct - size):
self.queue.put(None)
return size - old # size difference
def getId(obj):
dbname = getattr(obj._p_jar.db(), 'database_name', None)
return (ZODB.utils.u64(obj._p_oid), dbname)
# this is mostly for testing, though ``get`` comes in handy generally
_dispatchers = {}
def get(uuid=None, default=None):
if uuid is None:
uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
return _dispatchers.get(uuid, default)
def pop(uuid=None):
if uuid is None:
uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
return _dispatchers.pop(uuid)
clear = _dispatchers.clear
# end of testing bits
class Dispatcher(object):
activated = False
conn = None
thread = None # this is just a placeholder that other code can use the
# way that zc.async.subscribers.ThreadedDispatcherInstaller.__call__ does.
def __init__(self, db, reactor=None, poll_interval=5, uuid=None,
jobs_size=200, polls_size=400):
if uuid is None:
uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
if uuid in _dispatchers:
raise ValueError('dispatcher for this UUID is already registered')
_dispatchers[uuid] = self
self.db = db
self._reactor = reactor
self.poll_interval = poll_interval
self.UUID = uuid
# Let's talk about jobs_size and polls_size.
#
# Let's take a random guess that data for a job might be about 1K on
# average. That would mean that the default value (keep 200 jobs)
# would mean about 200K.
#
        # Let's randomly guess that a poll record averages 300 bytes.
        # That would mean that the default value (keep 400 polls)
# would mean (400*300 bytes == 120000 bytes == ) about 120K. That
# would cover (400 polls * 5 seconds/poll * 1 min/60 seconds == )
# just over 33 minutes of polling at the default poll_interval.
#
        # These memory usages should not really be noticeable on typical
        # production machines. On the other hand, if they cause you memory
        # problems, reduce these values when you instantiate your dispatcher.
self.polls = zc.async.utils.RollingSet()
self.polls.size = polls_size
self.polls.__parent__ = self
self.jobs = zc.async.utils.RollingMapping()
self.jobs.size = jobs_size
self.jobs.__parent__ = self
self._activated = set()
self.queues = {}
self.dead_pools = []
@property
def reactor(self):
res = self._reactor
if res is None:
# importing this the first time is kinda slow so we're lazy
import twisted.internet.reactor
res = self._reactor = twisted.internet.reactor
return res
def _getJob(self, agent):
identifier = (
'getting job for UUID %s from agent %s (oid %d) '
'in queue %s (oid %d)' % (
self.UUID, agent.name, ZODB.utils.u64(agent._p_oid),
agent.queue.name, ZODB.utils.u64(agent.queue._p_oid)))
res = zc.async.utils.try_five_times(
agent.claimJob, identifier, transaction)
if isinstance(res, twisted.python.failure.Failure):
identifier = 'stashing failure on agent %s (oid %s)' % (
agent.name, ZODB.utils.u64(agent._p_oid))
def setFailure():
agent.failure = res
zc.async.utils.try_five_times(
setFailure, identifier, transaction)
return res
def poll(self):
poll_info = PollInfo()
started_jobs = []
transaction.begin() # sync and clear
try:
queues = self.conn.root().get(zc.async.interfaces.KEY)
if queues is None:
transaction.abort()
return
for queue in queues.values():
poll_info[queue.name] = None
if self.UUID not in queue.dispatchers:
queue.dispatchers.register(self.UUID)
da = queue.dispatchers[self.UUID]
if queue._p_oid not in self._activated:
identifier = (
'activating dispatcher UUID %s in queue %s (oid %d)' %
(self.UUID, queue.name, ZODB.utils.u64(queue._p_oid)))
def activate():
if da.activated:
if da.dead:
da.deactivate()
else:
zc.async.utils.log.error(
'UUID %s already activated in queue %s '
'(oid %d): another process? (To stop '
'poll attempts in this process, set '
'``zc.async.dispatcher.get().activated = '
"False``. To stop polls permanently, don't "
'start a zc.async.dispatcher!)',
self.UUID, queue.name,
ZODB.utils.u64(queue._p_oid))
return False
da.activate()
return True
if zc.async.utils.try_five_times(
activate, identifier, transaction) is True:
self._activated.add(queue._p_oid)
else:
continue
identifier = 'committing ping for UUID %s' % (self.UUID,)
zc.async.utils.try_five_times(
lambda: queue.dispatchers.ping(self.UUID), identifier,
transaction)
queue_info = poll_info[queue.name] = {}
pools = self.queues.get(queue.name)
if pools is None:
pools = self.queues[queue.name] = {}
for name, agent in da.items():
job_info = []
active_jobs = [getId(job) for job in agent]
agent_info = queue_info[name] = {
'size': None, 'len': None, 'error': None,
'new jobs': job_info, 'active jobs': active_jobs}
try:
agent_info['size'] = agent.size
agent_info['len'] = len(agent)
except zc.async.utils.EXPLOSIVE_ERRORS:
raise
except:
agent_info['error'] = zc.twist.Failure()
transaction.abort()
continue
pool = pools.get(name)
if pool is None:
pool = pools[name] = AgentThreadPool(
self, name, agent_info['size'])
conn_delta = agent_info['size']
else:
conn_delta = pool.setSize(agent_info['size'])
if conn_delta:
db = queues._p_jar.db()
db.setPoolSize(db.getPoolSize() + conn_delta)
job = self._getJob(agent)
while job is not None:
if isinstance(job, twisted.python.failure.Failure):
agent_info['error'] = job
job = None
else:
info = {'result': None,
'failed': False,
'agent': name,
'queue': queue.name,
'poll id': None,
'quota names': job.quota_names,
'call': repr(job),
'started': None,
'completed': None,
'thread': None,
'reassigned': False}
started_jobs.append(info)
jobid = uoid, dbname = getId(job)
self.jobs[jobid] = info
job_info.append(jobid)
pool.queue.put(
(job._p_oid, dbname, info))
job = self._getJob(agent)
if len(pools) > len(queue_info):
conn_delta = 0
for name, pool in pools.items():
                            if name not in queue_info:
conn_delta += pool.setSize(0)
self.dead_pools.append(pools.pop(name))
if conn_delta:
db = queues._p_jar.db()
# this is a bit premature--it should really happen when
# all threads are complete--but since the pool just
# complains if the size is not honored, and this
# approach is easier, we're doing this.
db.setPoolSize(db.getPoolSize() + conn_delta)
if len(self.queues) > len(poll_info):
conn_delta = 0
                for queue_name, queue_pools in self.queues.items():
                    if queue_name not in poll_info:
                        for name, pool in queue_pools.items():
                            conn_delta += pool.setSize(0)
                            self.dead_pools.append(queue_pools.pop(name))
if conn_delta:
# this is a bit premature--it should really happen
# when all threads are complete--but since the pool just
# complains if the size is not honored, and this approach
# is easier, we're doing this.
self.db.setPoolSize(self.db.getPoolSize() + conn_delta)
finally:
transaction.abort()
try:
last = self.polls.first()
except ValueError:
last = None
self.polls.add(poll_info)
for info in started_jobs:
info['poll id'] = poll_info.key
if last is None or last != poll_info:
zc.async.utils.tracelog.debug(
'poll %s: %r', poll_info.key, poll_info)
def directPoll(self):
if not self.activated:
return
try:
self.poll()
finally:
self.reactor.callLater(self.poll_interval, self.directPoll)
def _inThreadPoll(self, deferred):
self.conn = self.db.open()
try:
self.poll()
finally:
self.conn.close()
self.reactor.callFromThread(deferred.callback, None)
def threadedPoll(self):
if not self.activated:
return
deferred = twisted.internet.defer.Deferred()
deferred.addCallback(
lambda result: self.reactor.callLater(
self.poll_interval, self.threadedPoll))
self.reactor.callInThread(self._inThreadPoll, deferred)
def activate(self, threaded=False):
if self.activated:
raise ValueError('already activated')
zc.async.utils.log.info('attempting to activate dispatcher %s',
self.UUID)
self.activated = datetime.datetime.utcnow()
# in case this is a restart, we clear old data
self.polls.clear()
self.jobs.clear()
# increase pool size to account for the dispatcher poll
self.db.setPoolSize(self.db.getPoolSize() + 1)
if not threaded:
self.conn = self.db.open() # we keep the same connection for all
# polls as an optimization
if threaded:
self.reactor.callWhenRunning(self.threadedPoll)
else:
self.reactor.callWhenRunning(self.directPoll)
self.reactor.addSystemEventTrigger(
'before', 'shutdown', self.deactivate)
def deactivate(self):
if not self.activated:
raise ValueError('not activated')
self.activated = None # "in progress"
try:
# Note: we do not want to clear jobs and polls, because they can
# be helpful diagnostic information (particularly in the use of
# zc.async.testing.tear_down_dispatcher to identify jobs that won't
# stop).
transaction.begin()
try:
identifier = 'cleanly deactivating UUID %s' % (self.UUID,)
def deactivate_das():
queues = self.conn.root().get(zc.async.interfaces.KEY)
if queues is not None:
for queue in queues.values():
da = queue.dispatchers.get(self.UUID)
if da is not None and da.activated:
da.deactivate()
zc.async.utils.try_five_times(
deactivate_das, identifier, transaction)
finally:
transaction.abort()
self.conn.close()
conn_delta = 0
for queue_pools in self.queues.values():
for name, pool in queue_pools.items():
conn_delta += pool.setSize(0)
self.dead_pools.append(queue_pools.pop(name))
conn_delta -= 1
self.db.setPoolSize(self.db.getPoolSize() + conn_delta)
zc.async.utils.log.info('deactivated dispatcher %s',
self.UUID)
finally:
self.activated = False # "completed" (can distinguish for tests)
# these methods are used for monitoring and analysis
STOPPED = 'STOPPED'
RUNNING = 'RUNNING'
STUCK = 'STUCK'
STARTING = 'STARTING'
def getStatusInfo(self):
res = {'time since last poll': None, 'uptime': None, 'uuid': self.UUID}
poll_interval = res['poll interval'] = datetime.timedelta(
seconds=self.poll_interval)
if not self.activated:
res['status'] = self.STOPPED
else:
now = datetime.datetime.utcnow()
try:
poll = self.polls.first()
except ValueError:
# no polls
next = self.activated + poll_interval
if next < now:
res['status'] = self.STUCK
else:
res['status'] = self.STARTING
res['time since last poll'] = now - self.activated
else:
next = poll.utc_timestamp + poll_interval
if next < now:
res['status'] = self.STUCK
else:
res['status'] = self.RUNNING
res['time since last poll'] = now - poll.utc_timestamp
res['uptime'] = now - self.activated
return res
def getJobInfo(self, oid, database_name=None):
if database_name is None:
# these will raise ValueErrors for unknown oids. We'll let 'em.
minKey = self.jobs.minKey((oid,))
maxKey = self.jobs.maxKey((oid+1,))
if minKey != maxKey:
raise ValueError('ambiguous database name')
else:
database_name = minKey[1]
res = self.jobs[(oid, database_name)]
if res['completed'] is None:
jobid = (oid, database_name)
info = self.polls.first()[res['queue']][res['agent']]
if (jobid not in info['active jobs'] and
jobid not in info['new jobs']):
res = res.copy()
res['reassigned'] = True
return res
def getActiveJobIds(self, queue=None, agent=None):
"""returns active jobs from newest to oldest"""
res = []
for queue_name, agents in self.queues.items():
if queue is None or queue_name == queue:
for agent_name, pool in agents.items():
if agent is None or agent_name == agent:
res.extend(val for val in pool.jobids.values()
if val is not None)
return res
def getPollInfo(self, at=None, before=None):
if at is not None:
if before is not None:
raise ValueError('may only provide one of `at` and `before`')
if isinstance(at, datetime.datetime):
at = zc.async.utils.dt_to_long(at)
elif before is not None:
if isinstance(before, datetime.datetime):
at = zc.async.utils.dt_to_long(before) + 16
else:
at = before + 1
return self.polls.first(at)
def iterPolls(self, at=None, before=None, since=None, count=None):
# `polls` may be mutated during iteration so we don't iterate over it
if at is not None and before is not None:
raise ValueError('may only provide one of `at` and `before`')
if isinstance(since, datetime.datetime):
since = zc.async.utils.dt_to_long(since) + 15
ct = 0
while 1:
if count is not None and ct >= count:
break
try:
info = self.getPollInfo(at=at, before=before)
except ValueError:
break
else:
                if since is None or info.key <= since:
yield info
ct += 1
before = info.key
at = None
else:
break
def getStatistics(self, at=None, before=None, since=None, queue=None,
agent=None):
if at is not None and before is not None:
raise ValueError('may only provide one of `at` and `before`')
res = {
'started': 0,
'successful': 0,
'failed': 0,
'unknown': 0
}
_pair = (None, None)
successful_extremes = [_pair, _pair]
failed_extremes = [_pair, _pair]
active_extremes = [_pair, _pair]
now = datetime.datetime.utcnow()
first = True
poll = first_poll = None
def process(jobs):
for jobid in jobs:
jobinfo = self.jobs.get(jobid)
if jobinfo is None:
res['unknown'] += 1
continue
if jobinfo['completed']:
if jobinfo['failed']:
pair = failed_extremes
res['failed'] += 1
else:
pair = successful_extremes
res['successful'] += 1
else:
pair = active_extremes
start = jobinfo['started'] or poll_time
stop = jobinfo['completed'] or now
duration = stop - start
if pair[0][0] is None or pair[0][0] > duration:
pair[0] = (duration, jobid)
if pair[1][0] is None or pair[1][0] < duration:
pair[1] = (duration, jobid)
for poll in self.iterPolls(at=at, before=before, since=since):
poll_time = poll.utc_timestamp
for agent_info in _iter_info(poll, queue, agent):
res['started'] += len(agent_info['new jobs'])
process(agent_info['new jobs'])
if first:
first = False
first_poll = poll
if poll is not None:
for agent_info in _iter_info(poll, queue, agent):
process(agent_info['active jobs'])
if first_poll is not None:
stat_start = first_poll.utc_timestamp
stat_end = poll.utc_timestamp
else:
stat_start = None
stat_end = None
res.update({
'shortest successful': successful_extremes[0][1],
'longest successful': successful_extremes[1][1],
'shortest failed': failed_extremes[0][1],
'longest failed': failed_extremes[1][1],
'shortest active': active_extremes[0][1],
'longest active': active_extremes[1][1],
'statistics start': stat_start,
'statistics end': stat_end,
})
return res
def _iter_info(poll, queue, agent):
if queue is None:
queues = poll.values()
elif queue not in poll:
queues = []
else:
queues = [poll[queue]]
for q in queues:
if agent is None:
for i in q.values():
yield i
elif agent in q:
yield q[agent] | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/dispatcher.py | dispatcher.py |
.. _configuration-without-zope-3:
==============================
Configuration (without Zope 3)
==============================
This section discusses setting up zc.async without Zope 3. Since Zope 3 is
ill-defined, we will be more specific: this describes setting up zc.async
without ZCML, without any zope.app packages, and with as few dependencies as
possible. A casual way of describing the dependencies is "ZODB, Twisted, and
zope.component," though we directly depend on some smaller packages and
indirectly on others [#specific_dependencies]_.
You may have one or two kinds of configurations for your software using
zc.async. The simplest approach is to have all processes able both to put items
in queues, and to perform them with a dispatcher. You can then use on-the-fly
ZODB configuration to determine what jobs, if any, each process' dispatcher
performs. If a dispatcher has no agents in a given queue, as we'll discuss
below, the dispatcher will not perform any job for that queue.
However, if you want to create some processes that can only put items in a
queue, and do not have a dispatcher at all, that is easy to do. We'll call this
a "client" process, and the full configuration a "client/server process". As
you might expect, the configuration of a client process is a subset of the
configuration of the client/server process.
The ``zc.async.configure`` module helps with basic configuration. The
:ref:`quickstart-with-virtualenv` shows an example of using this for a very
quick start. The current text uses some of those conveniences, but focuses
more on understanding the underlying patterns than on the conveniences.
We will first describe setting up a client, non-dispatcher process, in which
you only can put items in a zc.async queue; and then describe setting up a
dispatcher client/server process that can be used both to request and to
perform jobs.
Configuring a Client Process
============================
Generally, zc.async configuration has four basic parts: component
registrations, ZODB setup, ZODB configuration, and process configuration. For
a client process, we'll discuss required component registrations; ZODB
setup; minimal ZODB configuration; process configuration; and then circle
back around for some optional component registrations.
--------------------------------
Required Component Registrations
--------------------------------
The required registrations can be installed for you by the
``zc.async.configure.base`` function. Most other examples in this package,
such as those in the :ref:`usage` section, use this in their
test setup.
Again, for a quick start, you might just want to use the helper
``zc.async.configure.base`` function, and move on to the `Required ZODB Set
Up`_ section below.
Here, though, we will go over each required registration to briefly explain
what they are.
You must have three adapter registrations: IConnection to
ITransactionManager, IPersistent to IConnection, and IPersistent to
ITransactionManager.
The ``zc.twist`` package provides all of these adapters. However,
zope.app.keyreference also provides a version of the ``connection`` adapter
that is identical or very similar, and that should work fine if you are
already using that package in your application.
>>> import zc.twist
>>> import zope.component
>>> zope.component.provideAdapter(zc.twist.transactionManager)
>>> zope.component.provideAdapter(zc.twist.connection)
>>> import ZODB.interfaces
>>> zope.component.provideAdapter(
... zc.twist.transactionManager, adapts=(ZODB.interfaces.IConnection,))
We also need to be able to adapt functions and methods to jobs. The
zc.async.job.Job class is the expected implementation.
>>> import types
>>> import zc.async.interfaces
>>> import zc.async.job
>>> zope.component.provideAdapter(
... zc.async.job.Job,
... adapts=(types.FunctionType,),
... provides=zc.async.interfaces.IJob)
>>> zope.component.provideAdapter(
... zc.async.job.Job,
... adapts=(types.MethodType,),
... provides=zc.async.interfaces.IJob)
>>> zope.component.provideAdapter( # optional, rarely used
... zc.async.job.Job,
... adapts=(zc.twist.METHOD_WRAPPER_TYPE,),
... provides=zc.async.interfaces.IJob)
The queue looks for the UUID utility to set the ``assignerUUID`` job attribute,
and may want to use it to optionally filter jobs during ``claim`` in the
future. Also, the dispatcher will look for a UUID utility if a UUID is not
specifically provided to its constructor.
>>> from zc.async.instanceuuid import UUID
>>> zope.component.provideUtility(
... UUID, zc.async.interfaces.IUUID, '')
The UUID we register here is a UUID of the instance, which is expected
to uniquely identify the process when in production. It is stored in
the file specified by the ``ZC_ASYNC_UUID`` environment variable (or in
``os.path.join(os.getcwd(), 'uuid.txt')`` if this is not specified, for easy
initial experimentation with the package).
>>> import uuid
>>> import os
>>> f = open(os.environ["ZC_ASYNC_UUID"])
>>> uuid_hex = f.readline().strip()
>>> f.close()
>>> uuid = uuid.UUID(uuid_hex)
>>> UUID == uuid
True
The uuid.txt file is intended to stay in the instance home as a persistent
identifier.
Again, all of the required registrations above can be accomplished quickly with
``zc.async.configure.base``.
--------------------
Required ZODB Set Up
--------------------
On a basic level, zc.async needs a setup that supports good conflict
resolution. Most or all production ZODB storages now have the necessary
APIs to support MVCC.
Of course, if you want to run multiple processes, you need ZEO. You should also
then make sure that your ZEO server installation has all the code that includes
conflict resolution, such as zc.queue, because, as of this writing, conflict
resolution happens in the ZEO server, not in clients.
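
For illustration, here is a minimal, untested sketch of opening a ZEO client
storage instead of the file storages used below (the server address is
hypothetical)::

    import ZEO.ClientStorage
    from ZODB.DB import DB

    # connect to a hypothetical ZEO server on localhost port 8100
    storage = ZEO.ClientStorage.ClientStorage(('127.0.0.1', 8100))
    db = DB(storage)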
A more subtle decision is whether to use multiple databases. The zc.async
dispatcher can generate a lot of database churn. It may be wise to put the
queue in a separate database from your content database(s).
The downsides to this option include the fact that you must be careful to
specify to which database objects belong; and that broken cross-database
references are not handled gracefully in the ZODB as of this writing.
We will use multiple databases for our example here, because we are trying to
demonstrate production-quality examples. We will show this with a pure-Python
approach, rather than the ZConfig approach usually used by Zope. If you know
ZConfig, that will be a reasonable approach as well; see zope.app.appsetup
for how Zope uses ZConfig to set up multidatabases.
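
For comparison, an untested ZConfig sketch of a similar two-database
arrangement might look like the following (the section names and paths are
hypothetical; consult the ZODB configuration schema for the full details)::

    <zodb main>
      <filestorage>
        path main.fs
      </filestorage>
    </zodb>
    <zodb async>
      database-name async
      <filestorage>
        path async.fs
      </filestorage>
    </zodb>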
In our example, we create two file storages. In production, you would likely
use ZEO; hooking ClientStorage up instead of FileStorage, as sketched above,
should be straightforward.
>>> databases = {}
>>> import ZODB.FileStorage
>>> storage = ZODB.FileStorage.FileStorage(
... 'main.fs', create=True)
>>> async_storage = ZODB.FileStorage.FileStorage(
... 'async.fs', create=True)
>>> from ZODB.DB import DB
>>> databases[''] = db = DB(storage)
>>> databases['async'] = async_db = DB(async_storage)
>>> async_db.databases = db.databases = databases
>>> db.database_name = ''
>>> async_db.database_name = 'async'
>>> conn = db.open()
>>> root = conn.root()
------------------
ZODB Configuration
------------------
A Queue
-------
All we must have for a client to be able to put jobs in a queue is ... a queue.
For a quick start, the ``zc.async.subscribers`` module provides a subscriber to
a DatabaseOpened event that does the right dance. See
``multidb_queue_installer`` and ``queue_installer`` in that module, and you can
see that in use in :ref:`configuration-with-zope-3`. For now, though, we're taking
things step by step and explaining what's going on.
Dispatchers look for queues in a mapping off the root of the database in
a key defined as a constant: zc.async.interfaces.KEY. This mapping should
generally be a zc.async.queue.Queues object.
If we were not using a multi-database for our example, we could simply install
the queues mapping with this line:
``root[zc.async.interfaces.KEY] = zc.async.queue.Queues()``. We will need
something a bit more baroque. We will add the queues mapping to the 'async'
database, and then make it available in the main database ('') with the proper
key.
>>> conn2 = conn.get_connection('async')
>>> import zc.async.queue
>>> queues = conn2.root()['mounted_queues'] = zc.async.queue.Queues()
Note that the 'mounted_queues' key in the async database is arbitrary:
what we care about is the key in the database that the dispatcher will
see.
Now we add the object explicitly to conn2, so that the ZODB will know the
"real" database in which the object lives, even though it will be also
accessible from the main database.
>>> conn2.add(queues)
>>> root[zc.async.interfaces.KEY] = queues
>>> import transaction
>>> transaction.commit()
Now we need to put a queue in the queues collection. We can have more than
one, as discussed below, but we suggest a convention of the primary queue
being available in a key of '' (empty string).
>>> queue = queues[''] = zc.async.queue.Queue()
>>> transaction.commit()
Quotas
------
We touched on quotas in the usage section. Some jobs will need to
access resources that are shared across processes. A central data
structure such as an index in the ZODB is a prime example, but other
examples might include a network service that only allows a certain
number of concurrent connections. These scenarios can be helped by
quotas.
Quotas are demonstrated in the usage section. For configuration, you
should know these characteristics (a brief sketch follows the list):
- you cannot add a job with a quota name that is not defined in the
queue [#undefined_quota_name]_;
- you cannot add a quota name to a job in a queue if the quota name is not
defined in the queue [#no_mutation_to_undefined]_;
- you can create and remove quotas on the queue [#create_remove_quotas]_;
- you can remove quotas if pending jobs have their quota names--the quota name
is then ignored [#remove_quotas]_;
- quotas default to a size of 1 [#default_size]_;
- this can be changed at creation or later [#change_size]_; and
- decreasing the size of a quota while the old quota size is filled will
not affect the currently running jobs [#decreasing_affects_future]_.
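
For illustration, an untested sketch of configuring a quota, using the
``queue`` created above and a hypothetical function ``my_job``::

    queue.quotas.create('database')        # quota size defaults to 1
    queue.quotas['database'].size = 2      # sizes may be changed later
    job = queue.put(my_job)
    job.quota_names = ('database',)        # allowed while NEW or PENDING
    transaction.commit()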
Multiple Queues
---------------
Since we put our queues in a mapping of them, we can also create multiple
queues. This can make some scenarios more convenient and simpler to reason
about. For instance, while you might have agents filtering jobs as we
describe above, it might be simpler to say that you have a queue for one kind
of job--say, processing a video file or an audio file--and a queue for other
kinds of jobs. Then it is easy and obvious to set up simple FIFO agents
as desired for different dispatchers. The same kind of logic could be
accomplished with agents, but it is easier to picture the multiple queues.
Another use case for multiple queues might be for specialized queues, like ones
that broadcast jobs. You could write a queue subclass that broadcasts copies of
the jobs it receives to all dispatchers, aggregating results. This could be used to
send "events" to all processes, or to gather statistics on certain processes,
and so on.
Generally, any time the application wants to be able to assert a kind of job
rather than letting the agents decide what to do, having separate queues is
a reasonable tool.
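
Continuing our example, adding a second, specialized queue is simply another
entry in the queues mapping (the 'media' name is arbitrary)::

    queues['media'] = zc.async.queue.Queue()
    transaction.commit()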
---------------------
Process Configuration
---------------------
Daemonization
-------------
You often want to daemonize your software, so that you can restart it if
there's a problem, keep track of it and monitor it, and so on. ZDaemon
(http://pypi.python.org/pypi/zdaemon) and Supervisor (http://supervisord.org/)
are two fairly simple-to-use ways of doing this for both client and
client/server processes. If your main application can be packaged as a
setuptools distribution (egg or source release or even development egg) then
you can have your main application as a zc.async client and your dispatchers
running a separate zc.async-only main loop that simply includes your main
application as a dependency, so the necessary software is around. You may have
to do a bit more configuration on the client/server side to mimic global
registries such as zope.component registrations and so on between the client
and the client/servers, but this shouldn't be too bad.
UUID File Location
------------------
As discussed above, the instanceuuid module will look for an environmental
variable ``ZC_ASYNC_UUID`` to find the file name to use, and failing that will
use ``os.path.join(os.getcwd(), 'uuid.txt')``. It's worth noting that
daemonization tools such as ZDaemon and Supervisor (3 or greater) make it easy
(and repeatable) to set environment variables for child processes in their
configuration files.
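
For example, an untested zdaemon configuration sketch (the program and file
paths are hypothetical)::

    <runner>
      program bin/async-dispatcher
      transcript var/log/async-dispatcher.log
    </runner>
    <environment>
      ZC_ASYNC_UUID var/uuid.txt
    </environment>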
-----------------------------------------------------
Optional Component Registrations for a Client Process
-----------------------------------------------------
The only optional component registration potentially valuable for client
instances that only put jobs in the queue is registering an adapter from
persistent objects to a queue. The ``zc.async.queue.getDefaultQueue`` adapter
does this for an adapter to the queue named '' (empty string). Since that's
what we have from the `ZODB Configuration`_ above section, we'll register it.
Writing your own adapter is trivial, as you can see if you look at the
implementation of this function.
>>> zope.component.provideAdapter(zc.async.queue.getDefaultQueue)
>>> zc.async.interfaces.IQueue(root) is queue
True
Configuring a Client/Server Process
===================================
Configuring a client/server process--something that includes a running
dispatcher--means doing everything described above, plus a bit more. You
need to set up and start a reactor and dispatcher; configure agents as desired
to get the dispatcher to do some work; and optionally configure logging.
For a quick start, the ``zc.async.subscribers`` module has some conveniences
to start a threaded reactor and dispatcher, and to install agents. You might
want to look at those to get started. They are also used in the Zope 3
configuration (README_3). Meanwhile, this document continues to go
step-by-step instead, to try and explain the components and configuration.
Even though it seems reasonable to first start a dispatcher and then set up its
agents, we'll first define a subscriber to create an agent. As we'll see below,
the dispatcher fires an event when it registers with a queue, and another when
it activates the queue. These events give you the opportunity to register
subscribers to add one or more agents to a queue, to tell the dispatcher what
jobs to perform. ``zc.async.agent.addMainAgentActivationHandler`` is a
reasonable starter: it adds a single agent named 'main' if one does not
already exist. The agent has a simple indiscriminate FIFO policy for the
queue. If you want to write your own subscriber, look at this one, or at the
more generic subscriber in the ``zc.async.subscribers`` module.
Agents are an important part of the ZODB configuration, and so are described
in more depth below.
>>> import zc.async.agent
>>> zope.component.provideHandler(
... zc.async.agent.addMainAgentActivationHandler)
This subscriber is registered for the IDispatcherActivated event; another
approach might use the IDispatcherRegistered event.
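If you do write your own, a rough sketch might look like the following. It
assumes, as the shipped handler does, that the event's ``object`` attribute is
the dispatcher's collection of agents; the agent name is illustrative. ::

    import zope.component
    import zc.async.agent
    import zc.async.interfaces

    @zope.component.adapter(zc.async.interfaces.IDispatcherActivated)
    def addReportAgent(event):
        # ``event.object`` is assumed to be the DispatcherAgents mapping
        # of the newly activated dispatcher.
        agents = event.object
        if 'report' not in agents:
            # A filter callable could be passed to the agent to restrict
            # which jobs it claims; see the Agents section below.
            agents['report'] = zc.async.agent.Agent()

    zope.component.provideHandler(addReportAgent)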
-----------------------
Starting the Dispatcher
-----------------------
Now we can start the reactor, and start the dispatcher.
In some applications this may be done with an event subscriber to
DatabaseOpened, as is done in ``zc.async.subscribers``. Here, we will do it
inline.
Any object that conforms to the specification of ``zc.async.interfaces.IReactor``
will be usable by the dispatcher. For our example, we will use our own instance
of the Twisted select-based reactor running in a separate thread. This is
separate from the Twisted reactor installed in twisted.internet.reactor, and
so this approach can be used with an application that does not otherwise use
Twisted (for instance, a Zope application using the "classic" zope publisher).
The testing module also has a reactor, on which the :ref:`usage` section
relies, if you would like to see a minimal implementation of the contract.
Configuring the basics is fairly simple, as we'll see in a moment. The
trickiest part, handling signals cleanly, is also optional: the dispatcher
will eventually figure out that there was not a clean shutdown before, and
take care of it. Here, though, essentially as an optimization, we install
signal handlers in the main thread using ``reactor._handleSignals``.
``reactor._handleSignals`` may work in some real-world applications, but if
your application already needs to handle signals you may need a more careful
approach. Again, see ``zc.async.subscribers`` for some options you can explore.
>>> import twisted.internet.selectreactor
>>> reactor = twisted.internet.selectreactor.SelectReactor()
>>> reactor._handleSignals()
Now we are ready to instantiate our dispatcher.
>>> import zc.async.dispatcher
>>> dispatcher = zc.async.dispatcher.Dispatcher(db, reactor)
Notice it has the uuid defined in instanceuuid.
>>> dispatcher.UUID == UUID
True
Now we can start the reactor and the dispatcher in a thread.
>>> import threading
>>> def start():
... dispatcher.activate()
... reactor.run(installSignalHandlers=0)
...
>>> thread = threading.Thread(target=start)
>>> thread.setDaemon(True)
>>> thread.start()
The dispatcher should be starting up now. Let's wait for it to activate.
We're using a test convenience, ``get_poll``, defined in the testing module.
>>> from zc.async.testing import get_poll
>>> poll = get_poll(dispatcher, 0)
We're off! The events have been fired for registering and activating the
dispatcher. Therefore, our subscriber to add our agent has fired.
We need to begin our transaction to synchronize our view of the database.
>>> t = transaction.begin()
We get the collection of dispatcher agents from the queue, using the UUID.
>>> dispatcher_agents = queue.dispatchers[UUID]
It has one agent--the one placed by our subscriber.
>>> dispatcher_agents.keys()
['main']
>>> agent = dispatcher_agents['main']
Now we have our agent! But...what is it [#stop_config_reactor]_?
------
Agents
------
Agents are the way you control what a dispatcher's worker threads do. They
pick the jobs and assign them to their dispatcher when the dispatcher asks.
*If a dispatcher does not have any agents in a given queue, it will not perform
any tasks for that queue.*
We currently have an agent that simply asks for the next available FIFO job.
We are using an agent implementation that allows you to specify a callable
to filter the jobs the agent will accept. That callable is currently None.
>>> agent.filter is None
True
What does a filter do? A filter takes a job and returns a value evaluated as a
boolean. For instance, let's say we always wanted a certain number of threads
available for working on a particular call; for the purpose of example, we'll
use ``operator.mul``, though a more real-world example might be a network call
or a particular call in your application.
>>> import operator
>>> def chooseMul(job):
... return job.callable == operator.mul
...
You might want something more sophisticated, such as preferring
``operator.mul`` jobs but accepting any job when none is in the queue, or
some other priority variation. To do this, you'll want to write your own
agent--possibly inheriting from the provided one and overriding ``_choose``.
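As a rough, hypothetical sketch only (the exact name and signature of the
method to override should be verified against the ``zc.async.agent`` source
for your version), such an agent might look like this::

    import zc.async.agent

    class PreferMulAgent(zc.async.agent.Agent):
        # Hypothetical override: try to claim an ``operator.mul`` job
        # first; if none is available, fall back to claiming any job.
        def _choose(self, queue):
            job = queue.claim(chooseMul)
            if job is None:
                job = queue.claim()
            return job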
Let's set up another agent, in addition to the default one, that has
the ``chooseMul`` policy.
>>> agent2 = dispatcher_agents['mul'] = zc.async.agent.Agent(chooseMul)
Another characteristic of agents is their size: how many jobs they may have
in progress at a time. The dispatcher actually adjusts the size of the ZODB
connection pool to accommodate the combined sizes of its agents. The default is 3.
>>> agent.size
3
>>> agent2.size
3
We can change that at creation or later.
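For instance (treat the ``size`` keyword at creation time as an assumption to
verify against your version)::

    agent2.size = 5                                   # change it later
    agent3 = zc.async.agent.Agent(chooseMul, size=5)  # or set at creation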
Finally, it's worth noting that agents contain the jobs currently being
worked on by the dispatcher on their behalf, and have a ``completed``
collection of recently completed jobs, beginning with the most recently
completed one.
----------------------
Logging and Monitoring
----------------------
Logs are sent to the ``zc.async.events`` log for big events, like startup and
shutdown, and errors. Poll and job logs are sent to ``zc.async.trace``.
Configure the standard Python logging module as usual to send these logs where
you need. Be sure to auto-rotate the trace logs.
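For instance, a minimal sketch using only the standard library (file names
and sizes are illustrative)::

    import logging
    import logging.handlers

    # Big events and errors: a plain file is usually enough.
    logging.getLogger('zc.async.events').addHandler(
        logging.FileHandler('zc.async-events.log'))

    # Poll and job traces are chatty; rotate them automatically.
    logging.getLogger('zc.async.trace').addHandler(
        logging.handlers.RotatingFileHandler(
            'zc.async-trace.log', maxBytes=10*1024*1024, backupCount=5))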
The package supports monitoring using zc.monitor. Using it adds only a few
additional dependencies: zc.monitor, simplejson, and zc.ngi. An
example of setting it up without Zope 3 is in the end of
:ref:`quickstart-with-virtualenv`. If you would like to use it, see that
document, monitor.txt in the package, and our next section:
:ref:`configuration-with-zope-3`.
Otherwise, if you want to roll your own monitoring, glance at monitor.py and
monitordb.py--you'll see that you should be able to reuse most of the heavy
lifting, so it should be pretty easy to hook up the basic data another way.
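For instance, from within a dispatcher process, a sketch of pulling the
in-memory statistics might look like this (``zc.async.dispatcher.get`` is
assumed to return this process's dispatcher, as the monitoring code uses
it)::

    import zc.async.dispatcher

    dispatcher = zc.async.dispatcher.get()
    stats = dispatcher.getStatistics()     # aggregate poll and job numbers
    active = dispatcher.getActiveJobIds()  # jobs being worked on right now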
>>> reactor.stop()
.. rubric:: Footnotes
.. [#specific_dependencies] More specifically, as of this writing,
these are the minimal egg dependencies (including indirect
dependencies):
- pytz
A Python time zone library
- rwproperty
A small package of descriptor conveniences
- uuid
The uuid module included in Python 2.5
- zc.dict
A ZODB-aware dict implementation based on BTrees.
- zc.queue
A ZODB-aware queue
- zc.twist
Conveniences for working with Twisted and the ZODB
- twisted
The Twisted internet library.
- ZConfig
  A general configuration package coming from the Zope project, used to
  configure and test the ZODB.
- zdaemon
A general daemon tool coming from the Zope project.
- ZODB3
The Zope Object Database.
- zope.bforest
Aggregations of multiple BTrees into a single dict-like structure,
reasonable for rotating data structures, among other purposes.
- zope.component
A way to hook together code by contract.
- zope.deferredimport
A way to defer imports in Python packages, often to prevent circular
import problems.
- zope.deprecation
A small framework for deprecating features.
- zope.event
An exceedingly small event framework that derives its power from
zope.component.
- zope.i18nmessageid
A way to specify strings to be translated.
- zope.interface
A way to specify code contracts and other data structures.
- zope.proxy
A way to proxy other Python objects.
- zope.testing
Testing extensions and helpers.
The next section, :ref:`configuration-with-zope-3`, still tries to limit
dependencies--we only rely on additional packages zc.z3monitor, simplejson,
and zope.app.appsetup ourselves--but as of this writing zope.app.appsetup
ends up dragging in a large chunk of zope.app.* packages. Hopefully that
will be refactored in Zope itself, and our full Zope 3 configuration can
benefit from the reduced indirect dependencies.
.. [#undefined_quota_name]
>>> import operator
>>> import zc.async.job
>>> job = zc.async.job.Job(operator.mul, 5, 2)
>>> job.quota_names = ['content catalog']
>>> job.quota_names
('content catalog',)
>>> queue.put(job)
Traceback (most recent call last):
...
ValueError: ('unknown quota name', 'content catalog')
>>> len(queue)
0
.. [#no_mutation_to_undefined]
>>> job.quota_names = ()
>>> job is queue.put(job)
True
>>> job.quota_names = ('content catalog',)
Traceback (most recent call last):
...
ValueError: ('unknown quota name', 'content catalog')
>>> job.quota_names
()
.. [#create_remove_quotas]
>>> list(queue.quotas)
[]
>>> queue.quotas.create('testing')
>>> list(queue.quotas)
['testing']
>>> queue.quotas.remove('testing')
>>> list(queue.quotas)
[]
.. [#remove_quotas]
>>> queue.quotas.create('content catalog')
>>> job.quota_names = ('content catalog',)
>>> queue.quotas.remove('content catalog')
>>> job.quota_names
('content catalog',)
>>> job is queue.claim()
True
>>> len(queue)
0
.. [#default_size]
>>> queue.quotas.create('content catalog')
>>> queue.quotas['content catalog'].size
1
.. [#change_size]
>>> queue.quotas['content catalog'].size = 2
>>> queue.quotas['content catalog'].size
2
>>> queue.quotas.create('frobnitz account', size=3)
>>> queue.quotas['frobnitz account'].size
3
.. [#decreasing_affects_future]
>>> job1 = zc.async.job.Job(operator.mul, 5, 2)
>>> job2 = zc.async.job.Job(operator.mul, 5, 2)
>>> job3 = zc.async.job.Job(operator.mul, 5, 2)
>>> job1.quota_names = job2.quota_names = job3.quota_names = (
... 'content catalog',)
>>> job1 is queue.put(job1)
True
>>> job2 is queue.put(job2)
True
>>> job3 is queue.put(job3)
True
>>> job1 is queue.claim()
True
>>> job2 is queue.claim()
True
>>> print queue.claim()
None
>>> quota = queue.quotas['content catalog']
>>> len(quota)
2
>>> list(quota) == [job1, job2]
True
>>> quota.filled
True
>>> quota.size = 1
>>> quota.filled
True
>>> print queue.claim()
None
>>> job1()
10
>>> print queue.claim()
None
>>> len(quota)
1
>>> list(quota) == [job2]
True
>>> job2()
10
>>> job3 is queue.claim()
True
>>> list(quota) == [job3]
True
>>> len(quota)
1
>>> job3()
10
>>> print queue.claim()
None
>>> len(queue)
0
>>> quota.clean()
>>> len(quota)
0
>>> quota.filled
False
.. [#stop_config_reactor] We don't want the live dispatcher for our demos,
actually. See dispatcher.txt to see the live dispatcher actually in use.
So, here we'll stop the "real" reactor and switch to a testing one.
>>> reactor.callFromThread(reactor.stop)
>>> thread.join(3)
>>> assert not dispatcher.activated, 'dispatcher did not deactivate'
>>> import zc.async.testing
>>> reactor = zc.async.testing.Reactor()
>>> dispatcher._reactor = reactor
>>> dispatcher.activate()
>>> reactor.start()
| zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/README_2.txt | README_2.txt |
import datetime
import bisect
import logging
import pytz
import persistent
import persistent.interfaces
import ZODB.interfaces
import BTrees.OOBTree
import BTrees.Length
import zope.interface
import zope.component
import zope.event
import zope.bforest
import zope.minmax
import zc.queue
import zc.dict
import zc.async.interfaces
import zc.async.job  # used below for retry policy factories
import zc.async.utils
_marker = object()
# purely optional
@zope.interface.implementer(zc.async.interfaces.IQueue)
@zope.component.adapter(persistent.interfaces.IPersistent)
def getDefaultQueue(obj):
return ZODB.interfaces.IConnection(obj).root()[zc.async.interfaces.KEY]['']
class DispatcherAgents(zc.async.utils.Dict):
zope.interface.implements(zc.async.interfaces.IDispatcherAgents)
UUID = None
activated = None
def __init__(self, uuid):
super(DispatcherAgents, self).__init__()
self.UUID = uuid
self.last_ping = zope.minmax.Maximum()
ping_interval = datetime.timedelta(seconds=30)
ping_death_interval = datetime.timedelta(seconds=60)
@property
def dead(self):
last_ping = self.last_ping.value
if self.activated and (
last_ping is None or self.activated > last_ping):
last_ping = self.activated
elif last_ping is None:
return False
        return ((last_ping + self.ping_death_interval) <
                datetime.datetime.now(pytz.UTC))
def __setitem__(self, key, value):
if not zc.async.interfaces.IAgent.providedBy(value):
raise ValueError('value must be IAgent')
if len(value):
raise ValueError('cannot add an agent with active jobs')
current = self.get(key)
if current is not None and len(current):
raise ValueError('cannot remove an agent with active jobs')
super(DispatcherAgents, self).__setitem__(key, value)
def pop(self, key, *args):
current = self.get(key)
if current is not None and len(current):
raise ValueError('cannot remove an agent with active jobs')
return super(DispatcherAgents, self).pop(key, *args)
def activate(self):
if self.activated:
raise ValueError('Already activated')
# in exceptional circumstances, the agents may have in-progress jobs
# left in them. These will never be worked on, and will block the
# agents from using these slots in their "size", until the jobs are
# removed. This can be catastrophic. Therefore we iterate over all
# the agents to make sure they are all clean before activating.
self._clean()
self.activated = datetime.datetime.now(pytz.UTC)
zope.event.notify(
zc.async.interfaces.DispatcherActivated(self))
def _clean(self):
queue = self.parent
assert zc.async.interfaces.IQueue.providedBy(queue)
for agent in self.values():
try:
job = agent.pull()
except IndexError:
pass
else:
while job is not None:
status = job.status
if status in (zc.async.interfaces.PENDING,
zc.async.interfaces.ASSIGNED):
# odd
                        zc.async.utils.log.warning(
'unexpected job status %s for %r; treating as NEW',
status, job)
status = zc.async.interfaces.NEW
if status == zc.async.interfaces.NEW:
tmp = job.assignerUUID
job.assignerUUID = None
job.parent = None
queue.put(job)
job.assignerUUID = tmp
elif job.status == zc.async.interfaces.ACTIVE:
j = queue.put(
job.handleInterrupt,
retry_policy_factory=zc.async.job.RetryCommonForever,
failure_log_level=logging.CRITICAL)
# we don't make job's parent j because it shouldn't
# really be needed and it would be a pain to clean up
elif job.status == zc.async.interfaces.CALLBACKS:
j = queue.put(
job.resumeCallbacks,
retry_policy_factory=zc.async.job.RetryCommonForever,
failure_log_level=logging.CRITICAL)
# make job's parent j so that ``queue`` references work
# in callbacks
job.parent = j
elif job.status == zc.async.interfaces.COMPLETED:
# huh, that's odd.
agent.completed.add(job)
zc.async.utils.log.warning(
'unexpectedly had to inform agent of completion '
'of %r', job)
try:
job = agent.pull()
except IndexError:
job = None
def deactivate(self):
if not self.activated:
raise ValueError('Not activated')
self.activated = None
self._clean()
zope.event.notify(
zc.async.interfaces.DispatcherDeactivated(self))
def reactivate(self):
# this is called *only* by ``poll``. ``poll`` calls ``reactivate``
# when ``poll`` discovers that a dispatcher, thought dead, is still
# alive.
self.activated = datetime.datetime.now(pytz.UTC)
zope.event.notify(
zc.async.interfaces.DispatcherReactivated(self))
class Queues(zc.async.utils.Dict):
def __setitem__(self, key, value):
if not zc.async.interfaces.IQueue.providedBy(value):
raise ValueError('value must be IQueue')
super(Queues, self).__setitem__(key, value)
class Dispatchers(zc.dict.Dict):
zope.interface.implements(zc.async.interfaces.IDispatchers)
__setitem__ = update = pop = __delitem__ = copy = None # simple hide
def register(self, uuid):
if uuid in self:
raise ValueError('UUID already registered')
da = DispatcherAgents(uuid)
da.parent = self.__parent__ # __parent__ should be queue
super(Dispatchers, self).__setitem__(uuid, da)
zope.event.notify(
zc.async.interfaces.DispatcherRegistered(da))
def unregister(self, uuid):
da = self[uuid]
if da.activated:
raise ValueError('UUID is activated.')
da = super(Dispatchers, self).pop(uuid)
da.parent = da.name = None
zope.event.notify(
zc.async.interfaces.DispatcherUnregistered(da, self.__parent__))
return da
def ping(self, uuid):
da = self[uuid]
if not da.activated:
zc.async.utils.log.critical(
"Dispatcher %r not activated prior to ping. This can indicate "
"that the dispatcher's ping_death_interval is set too short, "
"or that some transactions in the system are taking too long "
"to commit. Activating, to correct the current problem, but "
"if the dispatcher was inappropriately viewed as ``dead`` and "
"deactivated, you should investigate the cause (be sure to "
"check that time is synced on all participating machines).",
uuid)
# we do this rather than calling ``activate`` because the semantics
# are different. ``activate`` is after a true deactivation, and
# cleans out the agents and fires off an activation event. This
# is inappropriate here, and could easily cause problems.
# ``reactivate`` is specifically for this circumstance: a
# dispatcher thought dead is discovered to be alive.
da.reactivate()
now = datetime.datetime.now(pytz.UTC)
last_ping = da.last_ping.value
if (last_ping is None or
last_ping + da.ping_interval <= now):
da.last_ping.value = now
next = self._getNextActiveSibling(uuid)
if next is not None and next.dead:
# `next` seems to be a dead dispatcher.
next.deactivate()
def _getNextActiveSibling(self, uuid):
for da in self._data.values(min=uuid, excludemin=True):
if da.activated:
return da
for da in self._data.values(max=uuid, excludemax=True):
if da.activated:
return da
class Quota(zc.async.utils.Base):
# this implementation is reasonable for relatively small (say, size<100)
# quotas.
zope.interface.implements(zc.async.interfaces.IQuota)
_data = ()
def __init__(self, name, size):
self.name = name
self.size = size
def clean(self):
now = datetime.datetime.now(pytz.UTC)
changed = False
new = []
for job in self._data:
status = job.status
if status in (zc.async.interfaces.CALLBACKS,
zc.async.interfaces.COMPLETED) or (
status == zc.async.interfaces.PENDING and
job.begin_after > now): # for a rescheduled task
changed = True # remove from quota
else:
new.append(job)
if changed:
self._data = tuple(new)
@property
def filled(self):
return len(self._data) >= self.size
def __contains__(self, item):
for i in self:
if i is item:
return True
return False
def add(self, item):
if item in self:
return
if not zc.async.interfaces.IJob.providedBy(item):
raise ValueError('must be IJob')
if self.name not in item.quota_names:
raise ValueError('quota name must be in quota_names')
if self.filled:
raise ValueError('Quota is filled')
# casting self._data to tuple for legacy instances; no-op for tuples
self._data = tuple(self._data) + (item,)
for nm in ('__len__', '__iter__', '__getitem__', '__nonzero__', 'get'):
locals()[nm] = zc.async.utils.simpleWrapper(nm)
class Quotas(zc.dict.Dict):
__setitem__ = update = pop = __delitem__ = copy = None # simple hide
def create(self, name, size=1):
res = Quota(name, size)
super(Quotas, self).__setitem__(name, res)
res.parent = self
def remove(self, name):
super(Quotas, self).pop(name)
class Queue(zc.async.utils.Base):
zope.interface.implements(zc.async.interfaces.IQueue)
_putback_queue = None
def __init__(self):
self._queue = zc.queue.CompositeQueue()
self._held = BTrees.OOBTree.OOBTree()
self.quotas = Quotas()
self.quotas.__parent__ = self
self._length = BTrees.Length.Length(0)
self.dispatchers = Dispatchers()
self.dispatchers.__parent__ = self
def put(self, item, begin_after=None, begin_by=None,
failure_log_level=None, retry_policy_factory=None):
item = zc.async.interfaces.IJob(item)
if failure_log_level is not None:
item.failure_log_level = failure_log_level
if retry_policy_factory is not None:
item.retry_policy_factory = retry_policy_factory
if item.status != zc.async.interfaces.NEW:
raise ValueError(
'cannot add already-assigned job')
for name in item.quota_names:
if name not in self.quotas:
raise ValueError('unknown quota name', name)
now = datetime.datetime.now(pytz.UTC)
if begin_after is not None:
item.begin_after = begin_after
elif item.begin_after is None:
item.begin_after = now
if begin_by is not None:
item.begin_by = begin_by
        if item.assignerUUID is None: # rescheduled job keeps old UUID
item.assignerUUID = zope.component.getUtility(
zc.async.interfaces.IUUID)
if item._p_jar is None:
# we need to do this if the job will be stored in another
# database as well during this transaction. Also, _held storage
# disambiguates against the database_name and the _p_oid.
conn = ZODB.interfaces.IConnection(self)
conn.add(item)
if now >= item.begin_after:
self._queue.put(item)
else:
self._held[
(item.begin_after,
item._p_jar.db().database_name,
item._p_oid)] = item
item.parent = self
self._length.change(1)
return item
def putBack(self, item):
# an agent has claimed a job, but now the job needs to be returned. the
# only current caller for this is a job's ``handleInterrupt`` method.
# The scenario for this is that the agent's dispatcher died while the
# job was active, interrupting the work; and the job's retry policy
# asks that the job be put back on the queue to be claimed immediately.
# This method puts the job in a special internal queue that ``_iter``
# looks at first. This allows jobs to maintain their order, if needed,
# within a quota.
assert zc.async.interfaces.IJob.providedBy(item)
assert item.status == zc.async.interfaces.NEW, item.status
assert item.begin_after is not None
assert item._p_jar is not None
# to support legacy instances of the queue that were created before
# this functionality and its separate internal data structure were
# part of the code, we instantiate the _putback_queue when we first
# need it, here.
if self._putback_queue is None:
self._putback_queue = zc.queue.CompositeQueue()
self._putback_queue.put(item)
item.parent = self
self._length.change(1)
def _iter(self):
putback_queue = self._putback_queue
if putback_queue: # not None and not empty
dq_pop = putback_queue.pull
for dq_ix, dq_next in enumerate(putback_queue):
yield dq_pop, dq_ix, dq_next
queue = self._queue
tree = self._held
q = enumerate(queue)
t = iter(tree.items())
q_pop = queue.pull
t_pop = tree.pop
def get_next(i):
try:
next = i.next()
except StopIteration:
active = False
next = (None, None)
else:
active = True
return active, next
q_active, (q_index, q_next) = get_next(q)
t_active, (t_index, t_next) = get_next(t)
while q_active and t_active:
if t_next.begin_after <= q_next.begin_after:
yield t_pop, t_index, t_next
t_active, (t_index, t_next) = get_next(t)
else:
yield q_pop, q_index, q_next
q_active, (q_index, q_next) = get_next(q)
if t_active:
yield t_pop, t_index, t_next
for (t_index, t_next) in t:
yield t_pop, t_index, t_next
elif q_active:
yield q_pop, q_index, q_next
for (q_index, q_next) in q:
yield q_pop, q_index, q_next
def pull(self, index=0):
length = len(self)
if index < 0:
index += length
if index < 0:
raise IndexError(index + length)
if index >= length:
raise IndexError(index)
for i, (pop, ix, job) in enumerate(self._iter()):
if i == index:
tmp = pop(ix)
assert tmp is job
self._length.change(-1)
job.assignerUUID = None
job.parent = None
return job
assert False, 'programmer error: the length appears to be incorrect.'
def remove(self, item):
for pop, ix, job in self._iter():
if job is item:
assert pop(ix) is job
self._length.change(-1)
job.assignerUUID = None
job.parent = None
break
else:
raise LookupError('item not in queue', item)
def claim(self, filter=None, default=None):
now = datetime.datetime.now(pytz.UTC)
if not self._length():
return default
uuid = None
quotas_cleaned = set()
for pop, ix, job in self._iter():
if job.begin_after > now:
break
res = None
quotas = []
if (job.begin_by is not None and
(job.begin_after + job.begin_by) < now):
res = zc.async.interfaces.IJob(job.fail)
res.args.append(zc.async.interfaces.TimeoutError())
res.begin_after = now
res.parent = self
if uuid is None:
uuid = zope.component.getUtility(zc.async.interfaces.IUUID)
res.assignerUUID = uuid
else:
for name in job.quota_names:
quota = self.quotas.get(name)
if quota is not None:
if name not in quotas_cleaned:
quota.clean()
quotas_cleaned.add(name)
if quota.filled and job not in quota:
break
quotas.append(quota)
else:
res = job
if res is not None and (filter is None or filter(res)):
tmp = pop(ix)
assert tmp is job
self._length.change(-1)
for quota in quotas:
quota.add(job)
job.parent = None
return res
return default
def __len__(self):
return self._length()
def __iter__(self):
return (next for pop, ix, next in self._iter())
def __nonzero__(self):
return bool(self._length())
def __getitem__(self, index):
length = len(self)
if index < 0:
index += length
if index < 0:
raise IndexError(index + length)
if index >= length:
raise IndexError(index)
for i, (pop, ix, job) in enumerate(self._iter()):
if i == index:
return job
assert False, 'programmer error: the length appears to be incorrect.' | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/queue.py | queue.py |
.. _usage:
=====
Usage
=====
Overview and Basics
===================
The basic usage of zc.async does not depend on a particular configuration
of the back-end mechanism for getting the jobs done. Moreover, on some
teams, one person or group will be responsible for configuring zc.async,
while the service will be available to the code of all team members. Therefore,
we begin our detailed discussion with regular usage, assuming configuration
has already happened. Subsequent sections discuss configuring zc.async
with and without Zope 3.
So, let's assume we have a queue with dispatchers, reactors and agents all
waiting to fulfill jobs placed into the queue. We start with a connection
object, ``conn``, and some convenience functions introduced along the way that
help us simulate time passing and work being done [#usageSetUp]_.
-------------------
Obtaining the queue
-------------------
First, how do we get the queue? Your installation may have some
conveniences. For instance, the Zope 3 configuration described below
makes it possible to get the primary queue with an adaptation call like
``zc.async.interfaces.IQueue(a_persistent_object_with_db_connection)``.
But failing that, queues are always expected to be in a
``zc.async.queue.Queues`` mapping found off the ZODB root under a key defined
by the constant ``zc.async.interfaces.KEY``.
>>> import zc.async.interfaces
>>> zc.async.interfaces.KEY
'zc.async'
>>> root = conn.root()
>>> queues = root[zc.async.interfaces.KEY]
>>> import zc.async.queue
>>> isinstance(queues, zc.async.queue.Queues)
True
As the name implies, ``queues`` is a collection of queues. As discussed later,
it's possible to have multiple queues, as a tool to distribute and control
work. We will assume a convention of a queue being available in the '' (empty
string).
>>> queues.keys()
['']
>>> queue = queues['']
-------------
``queue.put``
-------------
Now we want to actually get some work done. The simplest case is simple
to perform: pass a persistable callable to the queue's ``put`` method and
commit the transaction.
>>> def send_message():
... print "imagine this sent a message to another machine"
>>> job = queue.put(send_message)
>>> import transaction
>>> transaction.commit()
Note that this won't really work in an interactive session: the callable needs
to be picklable, as discussed above, so ``send_message`` would need to be
a module global, for instance.
The ``put`` returned a job. Now we need to wait for the job to be
performed. We would normally do this by really waiting. For our
examples, we will use a helper method on the testing reactor to ``wait_for``
the job to be completed.
>>> reactor.wait_for(job)
imagine this sent a message to another machine
We also could have used the method of a persistent object. Here's another
quick example.
First we define a simple persistent.Persistent subclass and put an instance of
it in the database [#commit_for_multidatabase]_.
>>> import persistent
>>> class Demo(persistent.Persistent):
... counter = 0
... def increase(self, value=1):
... self.counter += value
...
>>> root['demo'] = Demo()
>>> transaction.commit()
Now we can put the ``demo.increase`` method in the queue.
>>> root['demo'].counter
0
>>> job = queue.put(root['demo'].increase)
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> root['demo'].counter
1
The method was called, and the persistent object modified!
To reiterate, only pickleable callables such as global functions and the
methods of persistent objects can be used. This rules out, for instance,
lambdas and other functions created dynamically. As we'll see below, the job
instance can help us out there somewhat by offering closure-like features.
-----------------------------------
``queue.pull`` and ``queue.remove``
-----------------------------------
If you have put a job into a queue and it hasn't been claimed yet, you can
cancel the job: ``pull`` or ``remove`` it from the queue.
The ``pull`` method removes and returns the first job, or accepts an integer
index.
>>> len(queue)
0
>>> job1 = queue.put(send_message)
>>> job2 = queue.put(send_message)
>>> len(queue)
2
>>> job1 is queue.pull()
True
>>> list(queue) == [job2]
True
>>> job1 is queue.put(job1)
True
>>> list(queue) == [job2, job1]
True
>>> job1 is queue.pull(-1)
True
>>> job2 is queue.pull()
True
>>> len(queue)
0
The ``remove`` method removes the specific given job.
>>> job1 = queue.put(send_message)
>>> job2 = queue.put(send_message)
>>> len(queue)
2
>>> queue.remove(job1)
>>> list(queue) == [job2]
True
>>> job1 is queue.put(job1)
True
>>> list(queue) == [job2, job1]
True
>>> queue.remove(job1)
>>> list(queue) == [job2]
True
>>> queue.remove(job2)
>>> len(queue)
0
---------------
Scheduled Calls
---------------
When using ``put``, you can also pass a datetime.datetime to schedule a call. A
datetime without a timezone is considered to be in the UTC timezone.
>>> t = transaction.begin()
>>> import datetime
>>> import pytz
>>> datetime.datetime.now(pytz.UTC)
datetime.datetime(2006, 8, 10, 15, 44, 33, 211, tzinfo=<UTC>)
>>> job = queue.put(
... send_message, begin_after=datetime.datetime(
... 2006, 8, 10, 15, 56, tzinfo=pytz.UTC))
>>> job.begin_after
datetime.datetime(2006, 8, 10, 15, 56, tzinfo=<UTC>)
>>> transaction.commit()
>>> reactor.wait_for(job, attempts=2) # +5 virtual seconds
TIME OUT
>>> reactor.wait_for(job, attempts=2) # +5 virtual seconds
TIME OUT
>>> datetime.datetime.now(pytz.UTC)
datetime.datetime(2006, 8, 10, 15, 44, 43, 211, tzinfo=<UTC>)
>>> zc.async.testing.set_now(datetime.datetime(
... 2006, 8, 10, 15, 56, tzinfo=pytz.UTC))
>>> reactor.wait_for(job)
imagine this sent a message to another machine
>>> datetime.datetime.now(pytz.UTC) >= job.begin_after
True
If you set a time that has already passed, it will be run as if it had
been set to run as soon as possible [#already_passed]_...unless the job
has already timed out, in which case the job fails with an
abort [#already_passed_timed_out]_.
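For instance, a sketch of scheduling a call five minutes from now, with a
one-hour deadline (``begin_by`` is a ``datetime.timedelta`` measured from
``begin_after``, as the footnotes demonstrate)::

    import datetime
    import pytz

    queue.put(send_message,
              begin_after=(datetime.datetime.now(pytz.UTC) +
                           datetime.timedelta(minutes=5)),
              begin_by=datetime.timedelta(hours=1))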
The queue's ``put`` method is the essential API. ``pull`` is used rarely. Other
methods are used to introspect, but are not needed for basic usage.
But what is that result of the ``put`` call in the examples above? A
job? What do you do with that?
Jobs
====
--------
Overview
--------
The result of a call to ``put`` returns an ``IJob``. The job represents the
pending result. This object has a lot of functionality that's explored in other
documents in this package, and demonstrated a bit below, but here's a summary.
- You can introspect, and even modify, the call and its arguments.
- You can specify that the job should be run serially with others of a given
identifier.
- You can specify other calls that should be made on the basis of the result of
this call.
- You can persist a reference to it, and periodically (after syncing your
  connection with the database, which happens whenever you begin or commit a
  transaction) check its ``status`` to see if it is equal to
  ``zc.async.interfaces.COMPLETED``. When it is, the call has run to
  completion, either to success or an exception; a sketch of this polling
  appears after this list.
- You can look at the result of the call (once ``COMPLETED``). It might be the
result you expect, or a ``zc.twist.Failure``, a subclass of
``twisted.python.failure.Failure``, which is a way to safely communicate
exceptions across connections and machines and processes.
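For instance, the status-polling approach mentioned in the list above might
look like this sketch::

    import transaction
    import zc.async.interfaces

    t = transaction.begin()  # sync this connection with the database
    if job.status == zc.async.interfaces.COMPLETED:
        result = job.result  # the value, or a zc.twist.Failure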
-------
Results
-------
So here's a simple story. What if you want to get a result back from a
call? Look at the job.result after the call is ``COMPLETED``.
>>> def imaginaryNetworkCall():
... # let's imagine this makes a network call...
... return "200 OK"
...
>>> job = queue.put(imaginaryNetworkCall)
>>> print job.result
None
>>> job.status == zc.async.interfaces.PENDING
True
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> t = transaction.begin()
>>> job.result
'200 OK'
>>> job.status == zc.async.interfaces.COMPLETED
True
--------
Closures
--------
What's more, you can pass a Job to the ``put`` call. This means that you
aren't constrained to simply having simple non-argument calls performed
asynchronously, but you can pass a job with a call, arguments, and
keyword arguments--effectively, a kind of closure. Here's a quick example.
We'll use the demo object, and its increase method, that we introduced
above, but this time we'll include some arguments [#job]_.
With positional arguments:
>>> t = transaction.begin()
>>> job = queue.put(
... zc.async.job.Job(root['demo'].increase, 5))
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> t = transaction.begin()
>>> root['demo'].counter
6
With keyword arguments (``value``):
>>> job = queue.put(
... zc.async.job.Job(root['demo'].increase, value=10))
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> t = transaction.begin()
>>> root['demo'].counter
16
Note that arguments to these jobs can be any persistable object.
--------
Failures
--------
What happens if a call raises an exception? The job's result becomes a Failure.
>>> def I_am_a_bad_bad_function():
... return foo + bar
...
>>> job = queue.put(I_am_a_bad_bad_function)
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> t = transaction.begin()
>>> job.result
<zc.twist.Failure ...exceptions.NameError...>
Failures can provide useful information such as tracebacks.
>>> print job.result.getTraceback()
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
exceptions.NameError: global name 'foo' is not defined
<BLANKLINE>
---------
Callbacks
---------
You can register callbacks to handle the result of a job, whether a
Failure or another result.
Note that, unlike callbacks on a Twisted deferred, these callbacks do not
change the result of the original job. Since callbacks are jobs, you can chain
results, but generally callbacks for the same job all get the same result as
input.
Also note that, during execution of a callback, there is no guarantee that
the callback will be processed on the same machine as the main call. Also,
some of the ``local`` functions, discussed below, will not work as desired.
Here's a simple example of reacting to a success.
>>> def I_scribble_on_strings(string):
... return string + ": SCRIBBLED"
...
>>> job = queue.put(imaginaryNetworkCall)
>>> callback = job.addCallback(I_scribble_on_strings)
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> job.result
'200 OK'
>>> callback.result
'200 OK: SCRIBBLED'
Here's a more complex example of handling a Failure, and then chaining
a subsequent callback.
>>> def I_handle_NameErrors(failure):
... failure.trap(NameError) # see twisted.python.failure.Failure docs
... return 'I handled a name error'
...
>>> job = queue.put(I_am_a_bad_bad_function)
>>> callback1 = job.addCallbacks(failure=I_handle_NameErrors)
>>> callback2 = callback1.addCallback(I_scribble_on_strings)
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> job.result
<zc.twist.Failure ...exceptions.NameError...>
>>> callback1.result
'I handled a name error'
>>> callback2.result
'I handled a name error: SCRIBBLED'
Advanced Techniques and Tools
=============================
**Important**
The job and its functionality described above are the core zc.async tools.
The following are advanced techniques and tools of various complexities. You
can use zc.async very productively without ever understanding or using them. If
the following do not make sense to you now, please just move on for now.
--------------
zc.async.local
--------------
Jobs always run their callables in a thread, within the context of a
connection to the ZODB. The callables have access to six special
thread-local functions if they need them for special uses. These are
available off of zc.async.local.
``zc.async.local.getJob()``
The ``getJob`` function can be used to examine the job, to get
a connection off of ``_p_jar``, to get the queue into which the job
was put, or other uses.
``zc.async.local.getQueue()``
The ``getQueue`` function can be used to examine the queue, to put another
task into the queue, or other uses. It is sugar for
``zc.async.local.getJob().queue``.
``zc.async.local.setLiveAnnotation(name, value, job=None)``
The ``setLiveAnnotation`` tells the agent to set an annotation on a job,
by default the current job, *in another connection*. This makes it
possible to send messages about progress or for coordination while in the
middle of other work.
As a simple rule, only send immutable objects like strings or
numbers as values [#setLiveAnnotation]_.
``zc.async.local.getLiveAnnotation(name, default=None, timeout=0, poll=1, job=None)``
The ``getLiveAnnotation`` tells the agent to get an annotation for a job,
by default the current job, *from another connection*. This makes it
possible to send messages about progress or for coordination while in the
middle of other work.
As a simple rule, only ask for annotation values that will be
immutable objects like strings or numbers [#getLiveAnnotation]_.
If the ``timeout`` argument is set to a positive float or int, the function
will wait at least that number of seconds until an annotation of the
given name is available. Otherwise, it will return the ``default`` if the
name is not present in the annotations. The ``poll`` argument specifies
approximately how often to poll for the annotation, in seconds (to be more
precise, a subsequent poll will be min(poll, remaining seconds until
timeout) seconds away).
``zc.async.local.getReactor()``
The ``getReactor`` function returns the job's dispatcher's reactor. The
``getLiveAnnotation`` and ``setLiveAnnotation`` functions use this,
along with the zc.twist package, to work their magic; if you are feeling
adventurous, you can do the same.
``zc.async.local.getDispatcher()``
The ``getDispatcher`` function returns the job's dispatcher. This might
be used to analyze its non-persistent poll data structure, for instance
(described later in configuration discussions).
Let's give three of those a whirl. We will write a function that examines the
job's state while it is being called, and sets the state in an annotation, then
waits for our flag to finish.
>>> def annotateStatus():
... zc.async.local.setLiveAnnotation(
... 'zc.async.test.status',
... zc.async.local.getJob().status)
... zc.async.local.getLiveAnnotation(
... 'zc.async.test.flag', timeout=5)
... return 42
...
>>> job = queue.put(annotateStatus)
>>> transaction.commit()
>>> import time
>>> def wait_for_annotation(job, key):
... reactor.time_flies(dispatcher.poll_interval) # starts thread
... for i in range(10):
... while reactor.time_passes():
... pass
... transaction.begin()
... if key in job.annotations:
... break
... time.sleep(0.1)
... else:
... print 'Timed out' + repr(dict(job.annotations))
...
>>> wait_for_annotation(job, 'zc.async.test.status')
>>> job.annotations['zc.async.test.status'] == (
... zc.async.interfaces.ACTIVE)
True
>>> job.status == zc.async.interfaces.ACTIVE
True
[#stats_1]_
>>> job.annotations['zc.async.test.flag'] = True
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> job.result
42
[#stats_2]_ ``getReactor`` and ``getDispatcher`` are for advanced use
cases and are not explored further here.
----------
Job Quotas
----------
One class of asynchronous jobs are ideally serialized. For instance,
you may want to reduce or eliminate the chance of conflict errors when
updating a text index. One way to do this kind of serialization is to
use the ``quota_names`` attribute of the job.
For example, let's first show two non-serialized jobs running at the
same time, and then two serialized jobs created at the same time.
The first part of the example does not use ``quota_names``, to show a contrast.
For our parallel jobs, we'll do something that would create a deadlock
if they were serial. Notice that we are mutating the job arguments after
creation to accomplish this, which is supported.
>>> def waitForParallel(other):
... zc.async.local.setLiveAnnotation(
... 'zc.async.test.flag', True)
... zc.async.local.getLiveAnnotation(
... 'zc.async.test.flag', job=other, timeout=0.4, poll=0)
...
>>> job1 = queue.put(waitForParallel)
>>> job2 = queue.put(waitForParallel)
>>> job1.args.append(job2)
>>> job2.args.append(job1)
>>> transaction.commit()
>>> reactor.wait_for(job1, job2)
>>> job1.status == zc.async.interfaces.COMPLETED
True
>>> job2.status == zc.async.interfaces.COMPLETED
True
>>> job1.result is job2.result is None
True
On the other hand, for our serial jobs, we'll do something that would fail
if it were parallel. We'll rely on ``quota_names``.
Quotas verge on configuration, which is not what this section is about,
because they must be configured on the queue. However, they also affect
usage, so we show them here.
>>> def pause(other):
... zc.async.local.setLiveAnnotation(
... 'zc.async.test.flag', True)
... res = zc.async.local.getLiveAnnotation(
... 'zc.async.test.flag', timeout=0.4, poll=0.1, job=other)
...
>>> job1 = queue.put(pause)
>>> job2 = queue.put(imaginaryNetworkCall)
You can't put a name in ``quota_names`` unless the quota has been created
in the queue.
>>> job1.quota_names = ('test',)
Traceback (most recent call last):
...
ValueError: ('unknown quota name', 'test')
>>> queue.quotas.create('test')
>>> job1.quota_names = ('test',)
>>> job2.quota_names = ('test',)
Now we can see the two jobs being performed serially.
>>> job1.args.append(job2)
>>> transaction.commit()
>>> reactor.time_flies(dispatcher.poll_interval)
1
>>> for i in range(10):
... t = transaction.begin()
... if job1.status == zc.async.interfaces.ACTIVE:
... break
... time.sleep(0.1)
... else:
... print 'TIME OUT'
...
>>> job2.status == zc.async.interfaces.PENDING
True
>>> job2.annotations['zc.async.test.flag'] = False
>>> transaction.commit()
>>> reactor.wait_for(job1)
>>> reactor.wait_for(job2)
>>> print job1.result
None
>>> print job2.result
200 OK
Quotas can be configured with limits greater than one, if desired. This may
be valuable when a needed resource is only available in limited numbers.
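For instance (the quota name is illustrative)::

    queue.quotas.create('network fetch', size=2)  # at most two at once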
Note that, while quotas are valuable tools for doing serialized work such as
updating a text index, other optimization features sometimes useful for this
sort of task, such as collapsing similar jobs, are not provided directly by
this package. This functionality could be trivially built on top of zc.async,
however [#idea_for_collapsing_jobs]_.
--------------
Returning Jobs
--------------
Our examples so far have done work directly. What if the job wants to
orchestrate other work? One way this can be done is to return another
job. The result of the inner job will be the result of the first
job once the inner job is finished. This approach can be used to
break up the work of long-running processes; to be more cooperative with
other jobs; and to make parts of a job that can be parallelized available
to more workers.
Serialized Work
---------------
First, consider a serialized example. This simple pattern is one approach.
>>> def second_job(value):
... # imagine a lot of work goes on...
... return value * 2
...
>>> def first_job():
... # imagine a lot of work goes on...
... intermediate_value = 21
... queue = zc.async.local.getJob().queue
... return queue.put(zc.async.job.Job(
... second_job, intermediate_value))
...
>>> job = queue.put(first_job)
>>> transaction.commit()
>>> reactor.wait_for(job, attempts=3)
TIME OUT
>>> len(agent)
1
>>> reactor.wait_for(job, attempts=3)
>>> job.result
42
The job is now out of the agent.
>>> len(agent)
0
The second_job could also have returned a job, allowing for additional
legs. Once the last job returns a real result, it will cascade through the
past jobs back up to the original one.
A different approach could have used callbacks. Using callbacks can be
somewhat more complicated to follow, but can allow for a cleaner
separation of code: dividing code that does work from code that orchestrates
the jobs. The ``serial`` helper function in the job module uses this pattern.
Here's a quick example of the helper function [#define_longer_wait]_.
>>> def job_zero():
... return 0
...
>>> def job_one():
... return 1
...
>>> def job_two():
... return 2
...
>>> def postprocess(zero, one, two):
... return zero.result, one.result, two.result
...
>>> job = queue.put(zc.async.job.serial(job_zero, job_one, job_two,
... postprocess=postprocess))
>>> transaction.commit()
>>> wait_repeatedly()
... # doctest: +ELLIPSIS
TIME OUT...
>>> job.result
(0, 1, 2)
[#extra_serial_tricks]_
The ``parallel`` example we use below follows a similar pattern.
Parallelized Work
-----------------
Now how can we set up parallel jobs? There are other good ways, but we
can describe one way that avoids potential problems with the
current-as-of-this-writing (ZODB 3.8 and trunk) default optimistic MVCC
serialization behavior in the ZODB. The solution uses callbacks, which
also allows us to cleanly divide the "work" code from the synchronization
code, as described in the previous paragraph.
First, we'll define the jobs that do work. ``job_A``, ``job_B``, and
``job_C`` will be jobs that can be done in parallel, and
``postprocess`` will be a function that assembles the job results for a
final result.
>>> def job_A():
... # imaginary work...
... return 7
...
>>> def job_B():
... # imaginary work...
... return 14
...
>>> def job_C():
... # imaginary work...
... return 21
...
>>> def postprocess(*jobs):
... # this callable represents one that needs to wait for the
... # parallel jobs to be done before it can process them and return
... # the final result
... return sum(job.result for job in jobs)
...
This can be handled by a convenience function, ``parallel``, that will arrange
everything for you.
>>> job = queue.put(zc.async.job.parallel(
... job_A, job_B, job_C, postprocess=postprocess))
>>> transaction.commit()
Now we just wait for the result.
>>> wait_repeatedly()
... # doctest: +ELLIPSIS
TIME OUT...
>>> job.result
42
Ta-da! [#extra_parallel_tricks]_
Now, how did this work? Let's look at a simple implementation directly. We'll
use a slightly different postprocess, that expects results directly rather than
the jobs.
>>> def postprocess(*results):
... # this callable represents one that needs to wait for the
... # parallel jobs to be done before it can process them and return
... # the final result
... return sum(results)
...
This code works with jobs to get everything done. Note, in the callback
function, that mutating the same object we are checking (job.args) is the way
we are enforcing necessary serializability with MVCC turned on.
>>> def callback(job, result):
... job.args.append(result)
... if len(job.args) == 3: # all results are in
... zc.async.local.getJob().queue.put(job)
...
>>> def main_job():
... job = zc.async.job.Job(postprocess)
... queue = zc.async.local.getJob().queue
... for j in (job_A, job_B, job_C):
... queue.put(j).addCallback(
... zc.async.job.Job(callback, job))
... return job
...
That may be a bit mind-blowing at first. The trick to catch here is that,
because the main_job returns a job, the result of that job will become the
result of the main_job once the returned (``postprocess``) job is done.
Now we'll put this in and let it cook.
>>> job = queue.put(main_job)
>>> transaction.commit()
>>> wait_repeatedly()
... # doctest: +ELLIPSIS
TIME OUT...
>>> job.result
42
Once again, ta-da!
For real-world usage, you'd also probably want to deal with the possibility of
one or more of the jobs generating a Failure, among other edge cases. The
``parallel`` function introduced above helps you handle this by returning
jobs, rather than results, so you can analyze what went wrong and try to handle
it.
-------------------
Returning Deferreds
-------------------
What if you want to do work that doesn't require a ZODB connection? You
can also return a Twisted deferred (twisted.internet.defer.Deferred).
When you then ``callback`` the deferred with the eventual result, the
agent will be responsible for setting that value on the original
deferred and calling its callbacks. This can be a useful trick for
making network calls using Twisted or zc.ngi, for instance.
>>> def imaginaryNetworkCall2(deferred):
... # make a network call...
... deferred.callback('200 OK')
...
>>> import twisted.internet.defer
>>> import threading
>>> def delegator():
... deferred = twisted.internet.defer.Deferred()
... t = threading.Thread(
... target=imaginaryNetworkCall2, args=(deferred,))
...     t.start()
... return deferred
...
>>> job = queue.put(delegator)
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> job.result
'200 OK'
Conclusion
==========
This concludes our discussion of zc.async usage. The :ref:`next section
<configuration-without-zope-3>` shows how to configure zc.async without
Zope 3 [#stop_usage_reactor]_.
.. rubric:: Footnotes
.. [#usageSetUp] We set up the configuration for our usage examples here.
You must have two adapter registrations: IConnection to
ITransactionManager, and IPersistent to IConnection. We will also
register IPersistent to ITransactionManager because the adapter is
designed for it.
    We also need to be able to adapt functions and methods to jobs, so that
    callables can be put in queues directly.
The dispatcher will look for a UUID utility, so we also need one of these.
The ``zc.async.configure.base`` function performs all of these
registrations. If you are working with zc.async without ZCML you might want
to use it or ``zc.async.configure.minimal`` as a convenience.
>>> import zc.async.configure
>>> zc.async.configure.base()
Now we'll set up the database, and make some policy decisions. As
the subsequent ``configuration`` sections discuss, some helpers are
available for you to set this up if you'd like, though it's not too
onerous to do it by hand.
We'll use a test reactor that we can control.
>>> import zc.async.testing
>>> reactor = zc.async.testing.Reactor()
>>> reactor.start() # this monkeypatches datetime.datetime.now
We need to instantiate the dispatcher with a reactor and a DB. We
have the reactor, so here is the DB. We use a FileStorage rather
than a MappingStorage variant typical in tests and examples because
we want MVCC.
>>> import ZODB.FileStorage
>>> storage = ZODB.FileStorage.FileStorage(
... 'zc_async.fs', create=True)
>>> from ZODB.DB import DB
>>> db = DB(storage)
>>> conn = db.open()
>>> root = conn.root()
Now let's create the mapping of queues, and a single queue.
>>> import zc.async.queue
>>> import zc.async.interfaces
>>> mapping = root[zc.async.interfaces.KEY] = zc.async.queue.Queues()
>>> queue = mapping[''] = zc.async.queue.Queue()
>>> import transaction
>>> transaction.commit()
Now we can instantiate, activate, and perform some reactor work in order
to let the dispatcher register with the queue.
>>> import zc.async.dispatcher
>>> dispatcher = zc.async.dispatcher.Dispatcher(db, reactor)
>>> dispatcher.activate()
>>> reactor.time_flies(1)
1
The UUID is set on the dispatcher.
>>> import zope.component
>>> import zc.async.interfaces
>>> UUID = zope.component.getUtility(zc.async.interfaces.IUUID)
>>> dispatcher.UUID == UUID
True
    Here's an agent named 'main':
>>> import zc.async.agent
>>> agent = zc.async.agent.Agent()
>>> queue.dispatchers[dispatcher.UUID]['main'] = agent
>>> agent.filter is None
True
>>> agent.size
3
>>> transaction.commit()
.. [#commit_for_multidatabase] We commit before we do the next step as a
good practice, in case the queue is from a different database than
the root. See the configuration sections for a discussion about
why putting the queue in another database might be a good idea.
Rather than committing the transaction,
``root._p_jar.add(root['demo'])`` would also accomplish the same
thing from a multi-database perspective, without a commit. It was
not used in the example because the author judged the
``transaction.commit()`` to be less jarring to the reader. If you
are down here reading this footnote, maybe the author was wrong. :-)
.. [#already_passed]
>>> t = transaction.begin()
>>> job = queue.put(
... send_message, datetime.datetime(2006, 8, 10, 15, tzinfo=pytz.UTC))
>>> transaction.commit()
>>> reactor.wait_for(job)
imagine this sent a message to another machine
It's worth noting that this situation constitutes a small exception
in the handling of scheduled calls. Scheduled calls usually get
preference when jobs are handed out over normal non-scheduled "as soon as
possible" jobs. However, setting the begin_after date to an earlier
time puts the job at the end of the (usually) FIFO queue of non-scheduled
tasks: it is treated exactly as if the date had not been specified.
.. [#already_passed_timed_out]
>>> t = transaction.begin()
>>> job = queue.put(
... send_message, datetime.datetime(2006, 7, 21, 12, tzinfo=pytz.UTC),
... datetime.timedelta(hours=1))
>>> transaction.commit()
>>> reactor.wait_for(job)
>>> job.result
<zc.twist.Failure ...zc.async.interfaces.TimeoutError...>
>>> import sys
>>> job.result.printTraceback(sys.stdout) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
Failure: zc.async.interfaces.TimeoutError:
.. [#job] The Job class can take arguments and keyword arguments
for the wrapped callable at call time as well, similar to Python
2.5's `partial`. This will be important when we use the Job as
a callback. For this use case, though, realize that the job
will be called with no arguments, so you must supply all necessary
arguments for the callable at creation time.
.. [#setLiveAnnotation] Here's the real rule, which is more complex.
*Do not send non-persistent mutables or a persistent.Persistent
object without a connection, unless you do not refer to it again in
the current job.*
.. [#getLiveAnnotation] Here's the real rule. *To prevent surprising
errors, do not request an annotation that might be a persistent
object.*
.. [#stats_1] The dispatcher has a getStatistics method. It also shows the
fact that there is an active task.
>>> import pprint
>>> pprint.pprint(dispatcher.getStatistics()) # doctest: +ELLIPSIS
{'failed': 2,
'longest active': (..., 'unnamed'),
'longest failed': (..., 'unnamed'),
'longest successful': (..., 'unnamed'),
'shortest active': (..., 'unnamed'),
'shortest failed': (..., 'unnamed'),
'shortest successful': (..., 'unnamed'),
'started': 12,
'statistics end': datetime.datetime(2006, 8, 10, 15, 44, 22, 211),
'statistics start': datetime.datetime(2006, 8, 10, 15, 56, 47, 211),
'successful': 9,
'unknown': 0}
We can also see the active job with ``getActiveJobIds``
>>> job_ids = dispatcher.getActiveJobIds()
>>> len(job_ids)
1
>>> info = dispatcher.getJobInfo(*job_ids[0])
>>> pprint.pprint(info) # doctest: +ELLIPSIS
{'agent': 'main',
'call': "<zc.async.job.Job (oid ..., db 'unnamed') ``zc.async.doctest_test.annotateStatus()``>",
'completed': None,
'failed': False,
'poll id': ...,
'queue': '',
'quota names': (),
'reassigned': False,
'result': None,
'started': datetime.datetime(...),
'thread': ...}
>>> info['thread'] is not None
True
>>> info['poll id'] is not None
True
.. [#stats_2] Now the task is done, as the stats reflect.
>>> pprint.pprint(dispatcher.getStatistics()) # doctest: +ELLIPSIS
{'failed': 2,
'longest active': None,
'longest failed': (..., 'unnamed'),
'longest successful': (..., 'unnamed'),
'shortest active': None,
'shortest failed': (..., 'unnamed'),
'shortest successful': (..., 'unnamed'),
'started': 12,
'statistics end': datetime.datetime(2006, 8, 10, 15, 44, 22, 211),
'statistics start': datetime.datetime(2006, 8, 10, 15, 56, 52, 211),
'successful': 10,
'unknown': 0}
    Note that these statistics eventually rotate out. By default, poll info
    rotates out after about 30 minutes (400 polls), and only the most recent
    200 job stats are kept in memory. To look at history
beyond these limits, check your logs.
The ``getActiveJobIds`` list is empty now.
>>> dispatcher.getActiveJobIds()
[]
>>> info = dispatcher.getJobInfo(*job_ids[0])
>>> pprint.pprint(info) # doctest: +ELLIPSIS
{'agent': 'main',
'call': "<zc.async.job.Job (oid ..., db 'unnamed') ``zc.async.doctest_test.annotateStatus()``>",
'completed': datetime.datetime(...),
'failed': False,
'poll id': ...,
'queue': '',
'quota names': (),
'reassigned': False,
'result': '42',
'started': datetime.datetime(...),
'thread': ...}
>>> info['thread'] is not None
True
>>> info['poll id'] is not None
True
.. [#idea_for_collapsing_jobs] For instance, here is one approach. Imagine
you are queueing the job of indexing documents. If a request to index the
same document is already in the queue, the job could simply walk the queue
and remove (``pull``) similar tasks, perhaps aggregating any necessary data;
a sketch follows below. Because a quota keeps these jobs serial, no other
worker should be working on those jobs.
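Here is a hedged sketch of that approach. The ``index_document`` name and
its argument convention are illustrative, and the sketch assumes the queue
supports ``len()``, indexing, and ``pull`` by position::

    def index_document(doc_id):
        queue = zc.async.local.getQueue()
        for i in reversed(range(len(queue))):
            j = queue[i]
            if (j.callable.__name__ == 'index_document'
                and j.args and j.args[0] == doc_id):
                queue.pull(i)  # collapse the duplicate request
        # ... then do the actual (re)indexing of doc_id ...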
Alternatively, you could use a standalone, non-zc.async queue of things to
do, and have the zc.async job just pull from that queue. You might use
zc.queue for this stand-alone queue, or zc.catalogqueue.
.. [#define_longer_wait]
>>> def wait_repeatedly():
... for i in range(10):
... reactor.wait_for(job, attempts=3)
... if job.status == zc.async.interfaces.COMPLETED:
... break
... else:
... assert False, 'never completed'
...
.. [#extra_serial_tricks] The ``serial`` helper can accept a partial (a
callable with preset arguments, such as a ``Job``) for its ``postprocess``
argument.
>>> def postprocess(extra_info, *jobs):
... return extra_info, tuple(j.result for j in jobs)
...
>>> job = queue.put(zc.async.job.serial(
... job_zero, job_one, job_two,
... postprocess=zc.async.job.Job(postprocess, 'foo')))
>>> transaction.commit()
>>> wait_repeatedly()
... # doctest: +ELLIPSIS
TIME OUT...
>>> job.result
('foo', (0, 1, 2))
The list of jobs can be extended by adding them to the args of the job
returned by ``serial`` under these circumstances:
- before the job has started,
- by an inner job while it is running, or
- by any callback added to any inner job *before* that inner job has begun.
Here's an example.
>>> def postprocess(*jobs):
... return [j.result for j in jobs]
...
>>> job = queue.put(zc.async.job.serial(postprocess=postprocess))
>>> def second_job():
... return 'second'
...
>>> def third_job():
... return 'third'
...
>>> def schedule_third(main_job, ignored):
... main_job.args.append(zc.async.job.Job(third_job))
...
>>> def first_job(main_job):
... j = zc.async.job.Job(second_job)
... main_job.args.append(j)
... j.addCallback(zc.async.job.Job(schedule_third, main_job))
... return 'first'
...
>>> job.args.append(zc.async.job.Job(first_job, job))
>>> transaction.commit()
>>> wait_repeatedly()
... # doctest: +ELLIPSIS
TIME OUT...
>>> job.result
['first', 'second', 'third']
Be warned: these sorts of constructs allow infinite loops!
.. [#extra_parallel_tricks] The ``parallel`` helper can accept a partial (a
callable with preset arguments, such as a ``Job``) for its ``postprocess``
argument.
>>> def postprocess(extra_info, *jobs):
... return extra_info, sum(j.result for j in jobs)
...
>>> job = queue.put(zc.async.job.parallel(
... job_A, job_B, job_C,
... postprocess=zc.async.job.Job(postprocess, 'foo')))
>>> transaction.commit()
>>> wait_repeatedly()
... # doctest: +ELLIPSIS
TIME OUT...
>>> job.result
('foo', 42)
The list of jobs can be extended by adding them to the args of the job
returned by ``parallel`` under these circumstances:
- before the job has started,
- by an inner job while it is running, or
- by any callback added to any inner job *before* that inner job has begun.
Here's an example.
>>> def postprocess(*jobs):
... return [j.result for j in jobs]
...
>>> job = queue.put(zc.async.job.parallel(postprocess=postprocess))
>>> def second_job():
... return 'second'
...
>>> def third_job():
... return 'third'
...
>>> def schedule_third(main_job, ignored):
... main_job.args.append(zc.async.job.Job(third_job))
...
>>> def first_job(main_job):
... j = zc.async.job.Job(second_job)
... main_job.args.append(j)
... j.addCallback(zc.async.job.Job(schedule_third, main_job))
... return 'first'
...
>>> job.args.append(zc.async.job.Job(first_job, job))
>>> transaction.commit()
>>> wait_repeatedly()
... # doctest: +ELLIPSIS
TIME OUT...
>>> job.result
['first', 'second', 'third']
As with ``serial``, be warned: these sorts of constructs allow infinite
loops!
.. [#stop_usage_reactor]
>>> threads = []
>>> for queue_pools in dispatcher.queues.values():
... for pool in queue_pools.values():
... threads.extend(pool.threads)
>>> reactor.stop()
>>> zc.async.testing.wait_for_deactivation(dispatcher)
>>> for thread in threads:
... thread.join(3)
...
>>> pprint.pprint(dispatcher.getStatistics()) # doctest: +ELLIPSIS
{'failed': 2,
'longest active': None,
'longest failed': (..., 'unnamed'),
'longest successful': (..., 'unnamed'),
'shortest active': None,
'shortest failed': (..., 'unnamed'),
'shortest successful': (..., 'unnamed'),
'started': 54,
'statistics end': datetime.datetime(2006, 8, 10, 15, 44, 22, 211),
'statistics start': datetime.datetime(2006, 8, 10, 16, ...),
'successful': 52,
'unknown': 0}
| zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/README_1.txt | README_1.txt |
import threading
import signal
import transaction
import twisted.internet.selectreactor
import zope.component
import zope.event
import zc.twist
import zc.async.interfaces
import zc.async.queue
import zc.async.agent
import zc.async.dispatcher
import zc.async.utils
class QueueInstaller(object):
def __init__(self, queues=('',),
factory=lambda *args: zc.async.queue.Queue(),
db_name=None):
# This IDatabaseOpenedEvent will be from zope.app.appsetup if that
# package is around
zope.component.adapter(zc.async.interfaces.IDatabaseOpenedEvent)(self)
self.db_name = db_name
self.factory = factory
self.queues = queues
def __call__(self, ev):
db = ev.database
tm = transaction.TransactionManager()
conn = db.open(transaction_manager=tm)
tm.begin()
try:
try:
root = conn.root()
if zc.async.interfaces.KEY not in root:
if self.db_name is not None:
other = conn.get_connection(self.db_name)
queues = other.root()[
zc.async.interfaces.KEY] = zc.async.queue.Queues()
other.add(queues)
else:
queues = zc.async.queue.Queues()
root[zc.async.interfaces.KEY] = queues
zope.event.notify(zc.async.interfaces.ObjectAdded(
queues, root, zc.async.interfaces.KEY))
tm.commit()
zc.async.utils.log.info('queues collection added')
else:
queues = root[zc.async.interfaces.KEY]
for queue_name in self.queues:
if queue_name not in queues:
queue = self.factory(conn, queue_name)
queues[queue_name] = queue
zope.event.notify(zc.async.interfaces.ObjectAdded(
queue, queues, queue_name))
tm.commit()
zc.async.utils.log.info('queue %r added', queue_name)
except:
tm.abort()
raise
finally:
conn.close()
queue_installer = QueueInstaller()
multidb_queue_installer = QueueInstaller(db_name='async')
signal_handlers = {} # id(dispatcher) -> signal -> (prev handler, curr handler)
def restore_signal_handlers(dispatcher):
key = id(dispatcher)
sighandlers = signal_handlers.get(key)
if sighandlers:
for _signal, handlers in sighandlers.items():
prev, cur = handlers
# The previous signal handler is only restored if the currently
# registered handler is the one we originally installed.
if signal.getsignal(_signal) is cur:
signal.signal(_signal, prev)
del signal_handlers[key]
class ThreadedDispatcherInstaller(object):
def __init__(self,
poll_interval=5,
reactor_factory=twisted.internet.selectreactor.SelectReactor,
uuid=None): # optional uuid is really just for tests; see
# catastrophes.txt, for instance, which runs
# two dispatchers simultaneously.
self.poll_interval = poll_interval
self.reactor_factory = reactor_factory
self.uuid = uuid
# This IDatabaseOpenedEvent will be from zope.app.appsetup if that
# package is around
zope.component.adapter(zc.async.interfaces.IDatabaseOpenedEvent)(self)
def __call__(self, ev):
reactor = self.reactor_factory()
dispatcher = zc.async.dispatcher.Dispatcher(
ev.database, reactor, poll_interval=self.poll_interval,
uuid=self.uuid)
def start():
dispatcher.activate()
reactor.run(installSignalHandlers=0)
# we stash the thread object on the dispatcher so functional tests
# can do the following at the end of a test:
# dispatcher = zc.async.dispatcher.get()
# dispatcher.reactor.callFromThread(dispatcher.reactor.stop)
# dispatcher.thread.join(3)
dispatcher.thread = thread = threading.Thread(target=start)
thread.setDaemon(True)
thread.start()
# The above is really sufficient. This signal registration, below, is
# an optimization. The dispatcher, on its next run, will eventually
# figure out that it is looking at a previous incarnation of itself if
# these handlers don't get to clean up.
# We do this with signal handlers rather than atexit.register because
# we want to clean up before the database is closed, if possible. ZODB
# does not provide an appropriate hook itself as of this writing.
curr_sigint_handler = signal.getsignal(signal.SIGINT)
def sigint_handler(*args):
reactor.callFromThread(reactor.stop)
thread.join(3)
curr_sigint_handler(*args)
def handler(*args):
reactor.callFromThread(reactor.stop)
raise SystemExit()
# We keep a record of the current signal handler and the one we're
# installing so that our handler can later be uninstalled and the old
# one reinstated (for instance, by zc.async.ftesting.tearDown).
key = id(dispatcher)
handlers = signal_handlers[key] = {}
handlers[signal.SIGINT] = (
signal.signal(signal.SIGINT, sigint_handler), sigint_handler,)
handlers[signal.SIGTERM] = (signal.signal(signal.SIGTERM, handler),
handler,)
# Catch Ctrl-Break in windows
if getattr(signal, "SIGBREAK", None) is not None:
handlers[signal.SIGBREAK] = (
signal.signal(signal.SIGBREAK, handler), handler,)
threaded_dispatcher_installer = ThreadedDispatcherInstaller()
class TwistedDispatcherInstaller(object):
def __init__(self, poll_interval=5):
self.poll_interval = poll_interval
# This IDatabaseOpenedEvent will be from zope.app.appsetup if that
# package is around
zope.component.adapter(zc.async.interfaces.IDatabaseOpenedEvent)(self)
def __call__(self, ev):
dispatcher = zc.async.dispatcher.Dispatcher(
ev.database, poll_interval=self.poll_interval)
dispatcher.activate(threaded=True)
twisted_dispatcher_installer = TwistedDispatcherInstaller()
class AgentInstaller(object):
def __init__(self, agent_name, chooser=None, size=3, queue_names=None,
filter=None):
zope.component.adapter(
zc.async.interfaces.IDispatcherActivated)(self)
self.queue_names = queue_names
self.agent_name = agent_name
if filter is not None and chooser is not None:
raise ValueError('cannot set both chooser and filter to non-None')
self.chooser = chooser
self.filter = filter
self.size = size
def __call__(self, ev):
dispatcher = ev.object
if (self.queue_names is None or
dispatcher.parent.name in self.queue_names):
if self.agent_name not in dispatcher:
dispatcher[self.agent_name] = zc.async.agent.Agent(
chooser=self.chooser, filter=self.filter, size=self.size)
zc.async.utils.log.info(
'agent %r added to queue %r',
self.agent_name,
dispatcher.parent.name)
else:
zc.async.utils.log.info(
'agent %r already in queue %r',
self.agent_name,
dispatcher.parent.name)
agent_installer = AgentInstaller('main') | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/subscribers.py | subscribers.py |
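# Typical wiring (a sketch, not executed at import time): applications
# register the installers above as event subscribers in their setup code,
# for example:
#
#     zope.component.provideHandler(queue_installer)
#     zope.component.provideHandler(threaded_dispatcher_installer)
#     zope.component.provideHandler(agent_installer)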
import os
import random
import math
import ZEO.ClientStorage
import ZODB
import transaction
import twisted.internet.reactor
import zc.monitor
import zc.monitor.interfaces
import zope.component
import zc.async.configure
import zc.async.queue
import zc.async.instanceuuid
import zc.async.agent
import zc.async.monitor
import zc.async.monitordb
def generate_sample(size=100000):
    # Monte Carlo sample: count how many random points in the unit square
    # fall inside the unit quarter circle.
    count = 0
    for i in range(size):
        if math.hypot(random.random(), random.random()) < 1:
            count += 1
    return count, size
def process_samples(*sample_jobs):
    # Combine all samples; the hit ratio approximates pi/4, so multiply
    # by 4 to estimate pi.
    count = 0
    size = 0
    for j in sample_jobs:
        count += j.result[0]
        size += j.result[1]
    return 4.0 * count / size
def choose_generate_sample(agent):
return agent.queue.claim(
lambda j: j.callable.__name__ == 'generate_sample')
def choose_another(agent):
return agent.queue.claim(
lambda j: j.callable.__name__ != 'generate_sample')
def install_agent(db):
conn = db.open()
try:
q = zc.async.queue.getDefaultQueue(conn)
try:
dispatcher = q.dispatchers[zc.async.instanceuuid.UUID]
except KeyError:
twisted.internet.reactor.callLater(0.05, install_agent, db)
else:
if 'generate_sample' not in dispatcher:
agent = dispatcher['main']
agent.chooser = choose_another
dispatcher['generate_sample'] = zc.async.agent.Agent(
choose_generate_sample, 1)
transaction.commit()
finally:
transaction.abort()
conn.close()
if __name__ == '__main__':
monitor_port = os.environ.get('MONITOR_PORT')
if monitor_port:
for f in (zc.monitor.interactive, zc.monitor.quit, zc.monitor.help,
zc.async.monitor.async, zc.async.monitordb.asyncdb):
zope.component.provideUtility(
f, zc.monitor.interfaces.IMonitorPlugin, f.__name__)
zc.monitor.start(int(monitor_port))
storage = ZEO.ClientStorage.ClientStorage(
('127.0.0.1', 9999))
db = ZODB.DB(storage)
zc.async.configure.base()
zc.async.configure.start(
db, poll_interval=0.1, twisted=True)
twisted.internet.reactor.callWhenRunning(install_agent, db)
twisted.internet.reactor.run() | zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/examples/pi3.py | pi3.py |
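# A sketch (not executed here) of how client code might submit the work,
# combining the helpers above with zc.async.job.parallel; ``queue`` would
# be obtained from the queues collection in the database:
#
#     queue.put(zc.async.job.parallel(
#         generate_sample, generate_sample, generate_sample,
#         postprocess=process_samples))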
import httplib
import urllib
import md5
import zc.creditcard
import zc.ssl
class TransactionResult(object):
def __init__(self, fields):
        # ``fields`` is the delimited AIM response, split on the delimiter;
        # the positions used below follow the AIM response specification.
        self.response_code = fields[0]
self.response = {'1': 'approved', '2': 'declined', '3': 'error',
'4': 'held for review'}[self.response_code]
self.response_reason_code = fields[2]
self.response_reason = fields[3]
TESTING_PREFIX = '(TESTMODE) '
if self.response_reason.startswith(TESTING_PREFIX):
self.test = True
self.response_reason = self.response_reason[len(TESTING_PREFIX):]
else:
self.test = False
self.approval_code = fields[4]
self.trans_id = fields[6]
self.amount = fields[9]
self.hash = fields[37]
self.card_type = None
    def validateHash(self, login, salt):
        # Authorize.Net computes md5(salt + login + trans_id + amount);
        # compare case-insensitively with the hash from the response.
        value = ''.join([salt, login, self.trans_id, self.amount])
        return self.hash.upper() == md5.new(value).hexdigest().upper()
class AuthorizeNetConnection(object):
def __init__(self, server, login, key, salt=None, timeout=None):
self.server = server
self.login = login
self.salt = salt
self.timeout = timeout
self.delimiter = '|'
self.standard_fields = dict(
x_login = login,
x_tran_key = key,
x_version = '3.1',
x_delim_data = 'TRUE',
x_delim_char = self.delimiter,
x_relay_response = 'FALSE',
x_method = 'CC',
)
def sendTransaction(self, **kws):
# if the card number passed in is the "generate an error" card...
if kws.get('card_num') == '4222222222222':
# ... turn on test mode (that's the only time that card works)
kws['test_request'] = 'TRUE'
body = self.formatRequest(kws)
if self.server.startswith('localhost:'):
server, port = self.server.split(':')
conn = httplib.HTTPConnection(server, port)
else:
conn = zc.ssl.HTTPSConnection(self.server, timeout=self.timeout)
conn.putrequest('POST', '/gateway/transact.dll')
conn.putheader('content-type', 'application/x-www-form-urlencoded')
conn.putheader('content-length', len(body))
conn.endheaders()
conn.send(body)
response = conn.getresponse()
fields = response.read().split(self.delimiter)
result = TransactionResult(fields)
if (self.salt is not None
and not result.validateHash(self.login, self.salt)):
raise ValueError('MD5 hash is not valid (trans_id = %r)'
% result.trans_id)
return result
def formatRequest(self, params):
r"""Encode the argument dict into HTTP request form data.
>>> conn = AuthorizeNetConnection('test.authorize.net',
... 'login','key')
>>> def display(result):
... print '&\\\n'.join(result.split('&'))
>>> display(conn.formatRequest({'card_num': '4007000000027',
... 'exp_date': '0530'}))
x_login=login&\
x_method=CC&\
x_card_num=4007000000027&\
x_tran_key=key&\
x_version=3.1&\
x_delim_char=%7C&\
x_exp_date=0530&\
x_relay_response=FALSE&\
x_delim_data=TRUE
The 'line_items' parameter is handled in a special way. It is
expected to be a sequence of sequences. Each inner sequence
represents a line_item parameter. There can be up to 30 of
them in a single transaction.
>>> display(conn.formatRequest({'line_items': [
... # item# name description qty unitprice taxable
... ('1', 'MD-1000', 'Main device', '1', '99.95', 'Y'),
... ('2', 'AC-100', 'Accessory', '2', '14.95', ''),
... ]}))
x_login=login&\
x_version=3.1&\
x_delim_char=%7C&\
x_method=CC&\
x_relay_response=FALSE&\
x_tran_key=key&\
x_delim_data=TRUE&\
x_line_item=1%3C%7C%3EMD-1000%3C%7C%3EMain+device%3C%7C%3E1%3C%7C%3E99.95%3C%7C%3EY&\
x_line_item=2%3C%7C%3EAC-100%3C%7C%3EAccessory%3C%7C%3E2%3C%7C%3E14.95%3C%7C%3E
'%3C%7C%3E' is '<|>', the delimiter of line_item fields.
"""
line_items = []
if 'line_items' in params:
line_items = params.pop('line_items')
fields = dict(('x_'+key, value) for key, value in params.iteritems())
fields.update(self.standard_fields)
fields_pairs = fields.items()
for item in line_items:
fields_pairs.append(('x_line_item', '<|>'.join(item)))
return urllib.urlencode(fields_pairs)
class CcProcessor(object):
def __init__(self, server, login, key, salt=None, timeout=None):
self.connection = AuthorizeNetConnection(
server, login, key, salt, timeout)
def authorize(self, **kws):
if not isinstance(kws['amount'], basestring):
raise ValueError('amount must be a string')
type = 'AUTH_ONLY'
result = self.connection.sendTransaction(type=type, **kws)
# get the card_type
card_num = kws.get('card_num')
if card_num is not None and len(card_num) >= 4:
card_type = zc.creditcard.identifyCreditCardType(card_num[:4], len(card_num))
result.card_type = card_type
return result
def captureAuthorized(self, **kws):
type = 'PRIOR_AUTH_CAPTURE'
return self.connection.sendTransaction(type=type, **kws)
def credit(self, **kws):
type = 'CREDIT'
return self.connection.sendTransaction(type=type, **kws)
def void(self, **kws):
type = 'VOID'
return self.connection.sendTransaction(type=type, **kws) | zc.authorizedotnet | /zc.authorizedotnet-1.3.1.tar.gz/zc.authorizedotnet-1.3.1/src/zc/authorizedotnet/processing.py | processing.py |
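# Typical usage (a sketch; LOGIN and KEY are placeholders, and README.txt
# in this package shows the full doctests):
#
#     cc = CcProcessor(server='test.authorize.net', login=LOGIN, key=KEY)
#     result = cc.authorize(amount='2.00', card_num='4007000000027',
#                           exp_date='0530')
#     if result.response == 'approved':
#         cc.captureAuthorized(trans_id=result.trans_id)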
Authorize.Net Integration
=========================
Authorize.Net provides credit card (henceforth "CC") processing via a protocol
on top of HTTPS. Authorize.Net's customers are "merchants". The merchant is
the entity accepting a CC as payment. This package provides a simple
interface to Authorize.Net's "Advanced Integration Method" (AIM).
Several terms used in this document:
- authorize: check validity of CC information and for sufficient balance
- capture: the approval of transfer of funds from the CC holder to the
merchant
- settlement: the actual transfer of funds from the CC holder
to the merchant
- credit: issuing a refund from the merchant to the card holder
- voiding: canceling a previous transaction
Settlement is performed in daily batches; the cut-off time is specified in
the merchant's settings, available in the Authorize.Net merchant
interface.
There are many other settings which can be configured via the merchant
interface, but this module attempts to work independently of most of them.
Where specific settings are required they will be marked with the phrase
"required merchant interface setting".
Transaction Keys
----------------
Each AIM transaction must be accompanied by a merchant login and a
"transaction key". This key is obtained from the merchant interface. After
importing the CcProcessor class you must pass it your login and transaction
key:
>>> from zc.authorizedotnet.processing import CcProcessor
>>> from zc.creditcard import (AMEX, DISCOVER, MASTERCARD,
... VISA, UNKNOWN_CARD_TYPE)
>>> cc = CcProcessor(server=SERVER_NAME, login=LOGIN, key=KEY)
Authorizing
-----------
To authorize a charge use the ``authorize`` method. It returns a
``Transaction`` object.
>>> result = cc.authorize(amount='2.00', card_num='4007000000027',
... exp_date='0530')
The result object contains details about the transaction.
>>> result.response
'approved'
>>> result.response_reason
'This transaction has been approved.'
>>> result.approval_code
'123456'
>>> auth_trans_id = result.trans_id
>>> result.trans_id
'123456789'
When the card_num is sent in, the result also contains the type of credit card:
>>> result.card_type == VISA
True
If no credit card number is provided, card_type is None:
>>> result2 = cc.authorize(amount='2.00', exp_date='0530')
>>> result2.card_type == None
True
>>> result2 = cc.authorize(amount='2.00', card_num='', exp_date='0530')
>>> result2.card_type == None
True
Capturing Authorized Transactions
---------------------------------
Now if we want to capture the transaction that was previously authorized, we
can do so.
>>> result = cc.captureAuthorized(trans_id=result.trans_id)
>>> result.response
'approved'
Credit (refund) transactions
----------------------------
A previously captured transaction can be refunded. The amount of the
refund cannot exceed the amount captured. At least the last four
digits of the credit card number must be provided, along with the
transaction id.
Credit will only work once the banks have settled the transaction; if we
try refunding immediately, it will fail:
>>> result = cc.credit(trans_id=auth_trans_id,
... card_num='4007000000027',
... exp_date='0530',
... amount='1.00',
... )
>>> result.response_reason
'The referenced transaction does not meet the criteria for issuing a credit.'
>>> result.response
'error'
Voiding Transactions
--------------------
If we need to stop a transaction that has not yet been completed (like the
crediting of the captured transaction above) we can do so with the ``void``
method.
>>> result = cc.void(trans_id=auth_trans_id)
>>> result.response
'approved'
Transaction Errors
------------------
If something about the transaction is erroneous, the transaction results
indicate so.
>>> result = cc.authorize(amount='2.50', card_num='4007000000027',
... exp_date='0599')
The result object reflects the error.
>>> result.response
'error'
>>> result.response_reason
'The credit card has expired.'
The valid values for the ``response`` attribute are 'approved', 'declined',
and 'error'.
Address Verification System (AVS)
---------------------------------
AVS is used to assert that the billing information provided for a transaction
must match (to some degree or another) the cardholder's actual billing data.
The gateway can be configured to disallow transactions that don't meet certain
AVS criteria.
>>> result = cc.authorize(amount='27.00', card_num='4222222222222',
... exp_date='0530', address='000 Bad Street',
... zip='90210')
>>> result.response
'declined'
>>> result.response_reason
'The transaction resulted in an AVS mismatch...'
Duplicate Window
----------------
The gateway provides a way to detect and reject duplicate transactions within
a certain time window. Any transaction with the same CC information (card
number and expiration date) and amount duplicated within the window will be
rejected.
The first transaction will work.
>>> result = cc.authorize(amount='3.00', card_num='4007000000027',
... exp_date='0530', invoice_num='123')
>>> result.response
'approved'
A duplicate transaction will fail with an appropriate message.
>>> result2 = cc.authorize(amount='3.00', card_num='4007000000027',
... exp_date='0530', invoice_num='123')
>>> result2.response
'error'
>>> result2.response_reason
'A duplicate transaction has been submitted.'
Similar transactions can be distinguished by including a unique
``invoice_num`` field:
>>> result3 = cc.authorize(amount='3.00', card_num='4007000000027',
... exp_date='0530', invoice_num='124')
>>> result3.response
'approved'
The default window size is 120 seconds, but any other value (including 0) can
be provided by passing ``duplicate_window`` to the transaction method.
>>> cc.captureAuthorized(trans_id=result.trans_id).response
'approved'
>>> cc.captureAuthorized(trans_id=result.trans_id).response_reason
'This transaction has already been captured.'
>>> cc.captureAuthorized(trans_id=result.trans_id, duplicate_window=0
... ).response
'approved'
But voiding doesn't report errors if the same transaction is voided inside
the duplicate window.
>>> cc.void(trans_id=result.trans_id).response
'approved'
>>> cc.void(trans_id=result.trans_id).response
'approved'
Line items
----------
An itemized listing of the order can be included in the authorization
data as a sequence of sequences.
>>> result = cc.authorize(amount='2.98', card_num='4007000000027',
... exp_date='0530',
... line_items=[
... # id name description qty unit price tax
... ('1', 'G-1000', 'Gadget', '1', '1.99', 'Y'),
... ('2', 'A-150', 'Accessory','1', '0.99', 'Y'),
... ])
>>> result.response
'approved'
The result will have a ``card_type`` attribute.
>>> result.card_type == VISA
True
The MD5 Hash Security Feature
-----------------------------
Authorize.Net provides for validating transaction responses via an MD5 hash.
The required merchant interface setting to use this feature is under
"Settings and Profile" and then "MD5 Hash". Enter a "salt" value in the
fields provided and submit the form. You may then provide the ``salt``
parameter to the CcProcessor constructor to enable response validation.
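For reference, the validation mirrors ``TransactionResult.validateHash`` in
this package (``response_hash`` here stands for the hash field of the
response)::

    import md5
    expected = md5.new(salt + login + trans_id + amount).hexdigest().upper()
    assert response_hash.upper() == expected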
WARNING: The format of the "amount" field is very important for this feature
to work correctly. The field must be formatted in the "canonical" way for the
currency in use. For the US dollar that means no leading zeros and two (and
only two) decimal places. If the amount is not formatted properly in the
request, the hashes will not match and the transaction will raise an exception.
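For instance, here is a minimal sketch of producing the canonical US dollar
format, with no leading zeros and exactly two decimal places; this helper is
illustrative and not part of the package::

    import decimal

    def format_amount(value):
        return str(decimal.Decimal(value).quantize(decimal.Decimal('0.01')))

    format_amount('9.5')    # -> '9.50'
    format_amount('09.50')  # -> '9.50'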
If you want to enable hash checking, provide a ``salt`` value to the
``CcProcessor`` constructor. If an incorrect salt value is used, or the
hash given in the transaction doesn't match the true hash value, an
exception is raised.
>>> cc = CcProcessor(server=SERVER_NAME, login=LOGIN, key=KEY,
... salt='wrong')
>>> result = cc.authorize(amount='10.00', card_num='4007000000027',
... exp_date='0530')
Traceback (most recent call last):
...
ValueError: MD5 hash is not valid (trans_id = ...)
Error Checking
--------------
If you don't pass a string for the amount when doing an authorization, an
exception will be raised. This is to avoid charging the wrong amount due to
floating point representation issues.
>>> cc.authorize(amount=5.00, number='4007000000027', expiration='0530')
Traceback (most recent call last):
...
ValueError: amount must be a string
| zc.authorizedotnet | /zc.authorizedotnet-1.3.1.tar.gz/zc.authorizedotnet-1.3.1/src/zc/authorizedotnet/README.txt | README.txt |
Using ZConfig to configure Before storages
==========================================
"before" option
---------------
To use before storages from ZConfig configuration files, you need to
import zc.beforestorage and then use a before storage section.
>>> import ZODB.config
>>> storage = ZODB.config.storageFromString("""
...
... %import zc.beforestorage
...
... <before>
... before 2008-01-21
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """)
>>> storage
<Before: my.fs before 2008-01-21 00:00:00.000000>
>>> storage.close()
If we leave off the before option, we'll use the current time:
>>> storage = ZODB.config.storageFromString("""
...
... %import zc.beforestorage
...
... <before>
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """)
>>> storage
<Before: my.fs before 2008-01-21 18:22:49.000000>
>>> storage.close()
We can also give the option 'now' and get the current time.
>>> import ZODB.config
>>> storage = ZODB.config.storageFromString("""
...
... %import zc.beforestorage
...
... <before>
... before now
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """)
>>> storage
<Before: my.fs before 2008-01-21 18:22:53.000000>
>>> storage.close()
We can give the option 'startup' and get the time at startup.
>>> import ZODB.config
>>> storage = ZODB.config.storageFromString("""
...
... %import zc.beforestorage
...
... <before>
... before startup
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """)
>>> storage
<Before: my.fs before 2008-01-21 18:22:43.000000>
>>> import zc.beforestorage
>>> import ZODB.TimeStamp
>>> print(
... str(zc.beforestorage.startup_time_stamp))
2008-01-21 18:22:43.000000
>>> storage.close()
"before-from-file" option
-------------------------
The "before-from-file" option can be used to preserve the changes file between
restarts. Its value is the absolute path to a file. If the file exists, the
"before" time will be read from that file. If the file does not exist,
it will be created and the current UTC time will be written to it.
When used with a Changes file that does NOT have the "create=true"
option set, the database will be preserved between restarts.
>>> import os.path
>>> import tempfile
>>> tempdir = tempfile.mkdtemp()
>>> before_file = os.path.join(tempdir, 'before-file')
Currently the file does not exist, so it will be created and written with the
current time. To make this repeatable, we "monkeypatch" the ``get_utcnow``
function in the module to return a fixed value:
>>> import datetime
>>> import zc.beforestorage
>>> def fake_get_utcnow():
... return datetime.datetime(2008, 1, 1, 15, 0)
>>> orig_get_utcnow = zc.beforestorage.get_utcnow
>>> zc.beforestorage.get_utcnow = fake_get_utcnow
>>> os.path.exists(before_file)
False
>>> storage = ZODB.config.storageFromString("""
...
... %%import zc.beforestorage
...
... <before>
... before-from-file %s
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """ % before_file)
>>> storage
<Before: my.fs before 2008-01-01 15:00:00.000000>
>>> storage.close()
The file will now have been created:
>>> os.path.exists(before_file)
True
>>> f = open(before_file)
>>> f.read() == fake_get_utcnow().replace(microsecond=0).isoformat()
True
If we now write a new value to the file, the storage will be started with that
time.
>>> f = open(before_file, 'w')
>>> _ = f.write('1990-01-01T11:11')
>>> f.close()
>>> storage = ZODB.config.storageFromString("""
...
... %%import zc.beforestorage
...
... <before>
... before-from-file %s
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """ % before_file)
>>> storage
<Before: my.fs before 1990-01-01 11:11:00.000000>
>>> storage.close()
If we restart the storage, the value from the file will be used.
>>> storage = ZODB.config.storageFromString("""
...
... %%import zc.beforestorage
...
... <before>
... before-from-file %s
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """ % before_file)
>>> storage
<Before: my.fs before 1990-01-01 11:11:00.000000>
>>> storage.close()
This will continue to happen until we remove the file. The "before_from_file"
path is stored on the storage itself, so applications that use it have access
to it.
>>> os.remove(storage.before_from_file)
>>> os.path.exists(before_file)
False
If we restart the storage again, a new file will be created.
>>> storage = ZODB.config.storageFromString("""
...
... %%import zc.beforestorage
...
... <before>
... before-from-file %s
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """ % before_file)
>>> storage
<Before: my.fs before 2008-01-01 15:00:00.000000>
>>> storage.close()
Note that unlike the "before" option, the "before-from-file" file cannot
contain special values such as "now" or "startup".
>>> f = open(before_file, 'w')
>>> _ = f.write('now')
>>> f.close()
>>> storage = ZODB.config.storageFromString("""
...
... %%import zc.beforestorage
...
... <before>
... before-from-file %s
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """ % before_file)
Traceback (most recent call last):
...
ValueError: 8-byte array expected
Note that only one of "before" or "before-from-file" options can be specified,
not both:
>>> storage = ZODB.config.storageFromString("""
...
... %%import zc.beforestorage
...
... <before>
... before 2008-01-01
... before-from-file %s
... <filestorage>
... path my.fs
... </filestorage>
... </before>
... """ % before_file)
Traceback (most recent call last):
...
ValueError: Only one of "before" or "before-from-file" options can be specified, not both
Cleanup...
>>> import shutil
>>> shutil.rmtree(tempdir)
>>> zc.beforestorage.get_utcnow = orig_get_utcnow
Demonstration (doctest)
=======================
Note that most people will configure the storage through ZConfig. If
you are one of those people, you may want to stop here. :) The
examples below show you how to use the storage from Python, but they
also exercise lots of details you might not be interested in.
To see how this works at the Python level, we'll create a file
storage, and use a before storage to provide views on it.
>>> import ZODB.FileStorage
>>> fs = ZODB.FileStorage.FileStorage('Data.fs')
>>> from ZODB.DB import DB
>>> db = DB(fs)
>>> conn = db.open()
>>> root = conn.root()
>>> import persistent.mapping
We'll record transaction identifiers, which we'll use to when opening
the before storage.
>>> import transaction
>>> transactions = [root._p_serial]
>>> for i in range(1, 11):
... root[i] = persistent.mapping.PersistentMapping()
... transaction.get().note("trans %s" % i)
... transaction.commit()
... transactions.append(root._p_serial)
We create a before storage by calling the ``Before`` constructor
with an existing storage and a timestamp:
>>> import zc.beforestorage
>>> b5 = zc.beforestorage.Before(fs, transactions[5])
>>> db5 = DB(b5)
>>> conn5 = db5.open()
>>> root5 = conn5.root()
>>> len(root5)
4
Here we see the database as it was before the 5th transaction was
committed. If we try to access a later object, we'll get a
ReadConflictError:
>>> conn5.get(root[5]._p_oid)
Traceback (most recent call last):
...
ZODB.POSException.ReadConflictError: b'\x00\x00\x00\x00\x00\x00\x00\x05'
Similarly, while we can access earlier object revisions, we can't
access revisions at the before time or later:
>>> _ = b5.loadSerial(root._p_oid, transactions[2])
>>> b5.loadSerial(root._p_oid, transactions[5])
Traceback (most recent call last):
...
POSKeyError: 0x00
Let's run through the storage methods:
>>> (b5.getName() ==
... 'Data.fs before %s' % ZODB.TimeStamp.TimeStamp(transactions[5]))
True
>>> b5.getSize() == fs.getSize()
True
>>> for hd in b5.history(root._p_oid, size=3):
... print(hd['description'].decode('utf-8'))
trans 4
trans 3
trans 2
>>> b5.isReadOnly()
True
>>> transactions[4] <= b5.lastTransaction() < transactions[5]
True
>>> len(b5) == len(fs)
True
>>> p, s1, s2 = b5.loadBefore(root._p_oid, transactions[5])
>>> p == fs.loadSerial(root._p_oid, transactions[4])
True
>>> s1 == transactions[4]
True
>>> s2 is None
True
>>> p, s1, s2 = b5.loadBefore(root._p_oid, transactions[4])
>>> p == fs.loadSerial(root._p_oid, transactions[3])
True
>>> s1 == transactions[3]
True
>>> s2 == transactions[4]
True
>>> b5.getTid(root._p_oid) == transactions[4]
True
>>> b5.tpc_transaction()
>>> try:
... b5.new_oid()
... except Exception as e: # Workaround http://bugs.python.org/issue19138
... print(e.__class__.__name__)
ReadOnlyError
>>> from ZODB.TimeStamp import TimeStamp
>>> try:
... b5.pack(TimeStamp(transactions[3]).timeTime(), lambda p: [])
... except Exception as e:
... print(e.__class__.__name__)
ReadOnlyError
>>> b5.registerDB(db5)
>>> b5.sortKey() == fs.sortKey()
True
>>> try:
... b5.tpc_begin(transaction.get())
... except Exception as e:
... print(e.__class__.__name__)
ReadOnlyError
>>> b5.store(root._p_oid, transactions[4], b5.load(root._p_oid)[0], '',
... transaction.get())
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
StorageTransactionError: ...
>>> b5.tpc_vote(transaction.get())
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ZODB.POSException.StorageTransactionError: ...
>>> b5.tpc_finish(transaction)
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ZODB.POSException.StorageTransactionError: ...
>>> b5.tpc_transaction()
>>> b5.tpc_abort(transaction)
Before storages don't support undo:
>>> b5.supportsUndo
Traceback (most recent call last):
...
AttributeError: 'Before' object has no attribute 'supportsUndo'
(Don't even ask about versions. :)
Closing a before storage closes the underlying storage:
>>> b5.close()
>>> fs.load(root._p_oid, '') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...
If we omit a timestamp when creating a before storage, the current
time will be used:
>>> fs = ZODB.FileStorage.FileStorage('Data.fs')
>>> from ZODB.DB import DB
>>> db = DB(fs)
>>> conn = db.open()
>>> root = conn.root()
>>> bnow = zc.beforestorage.Before(fs)
>>> dbnow = DB(bnow)
>>> connnow = dbnow.open()
>>> rootnow = connnow.root()
>>> for i in range(1, 11):
... root[i] = persistent.mapping.PersistentMapping()
... transaction.get().note("trans %s" % i)
... transaction.commit()
... transactions.append(root._p_serial)
>>> len(rootnow)
10
>>> dbnow.close()
The timestamp may be passed directly, or as an ISO time. For
example:
>>> fs = ZODB.FileStorage.FileStorage('Data.fs')
>>> iso = 'T'.join(str(ZODB.TimeStamp.TimeStamp(transactions[5])).split()
... )[:19]
>>> b5 = zc.beforestorage.Before(fs, iso)
>>> db5 = DB(b5)
>>> conn5 = db5.open()
>>> root5 = conn5.root()
>>> len(root5)
4
>>> b5.close()
Blob Support
------------
Before storage supports blobs if the storage it wraps supports blobs,
and, in fact, it simply exposes the underlying storages loadBlob and
temporaryDirectory methods.
>>> fs = ZODB.FileStorage.FileStorage('Data.fs')
>>> import ZODB.blob
>>> bs = ZODB.blob.BlobStorage('blobs', fs)
>>> db = ZODB.DB(bs)
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
>>> _ = conn.root()['blob'].open('w').write(b'data1')
>>> transaction.commit()
>>> bnow = zc.beforestorage.Before(bs)
>>> dbnow = DB(bnow)
>>> connnow = dbnow.open()
>>> rootnow = connnow.root()
>>> _ = conn.root()['blob'].open('w').write(b'data2')
>>> transaction.commit()
>>> print(rootnow['blob'].open().read().decode('utf-8'))
data1
>>> bnow.temporaryDirectory() == bs.temporaryDirectory()
True
>>> import ZODB.interfaces, zope.interface.verify
>>> zope.interface.verify.verifyObject(
... ZODB.interfaces.IBlobStorage, bnow)
True
>>> bnow.close()
| zc.beforestorage | /zc.beforestorage-1.0-py3-none-any.whl/zc/beforestorage/README.rst | README.rst |
import datetime
import os.path
import time
import ZODB.interfaces
import ZODB.POSException
import ZODB.TimeStamp
import ZODB.utils
import zope.interface
def time_stamp():
t = time.time()
g = time.gmtime(t)
before = ZODB.TimeStamp.TimeStamp(*(g[:5] + (g[5] + (t % 1), )))
return before
def get_utcnow():
return datetime.datetime.utcnow()
startup_time_stamp = time_stamp()
class Before:
def __init__(self, storage, before=None):
if before is None:
before = time_stamp().raw()
elif isinstance(before, str):
if len(before) > 8:
if 'T' in before:
d, t = before.split('T')
else:
d, t = before, ''
d = list(map(int, d.split('-')))
if t:
t = t.split(':')
assert len(t) <= 3
d += list(map(int, t[:2])) + list(map(float, t[2:3]))
before = ZODB.TimeStamp.TimeStamp(*d).raw()
else:
# Try converting to a timestamp
if len(before) != 8:
raise ValueError("8-byte array expected")
self.storage = storage
self.before = before
if ZODB.interfaces.IBlobStorage.providedBy(storage):
self.loadBlob = storage.loadBlob
self.temporaryDirectory = storage.temporaryDirectory
if hasattr(storage, 'openCommittedBlobFile'):
self.openCommittedBlobFile = storage.openCommittedBlobFile
zope.interface.alsoProvides(self, ZODB.interfaces.IBlobStorage)
def close(self):
self.storage.close()
def getName(self):
return "{} before {}".format(
self.storage.getName(),
ZODB.TimeStamp.TimeStamp(self.before))
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self.getName())
def getSize(self):
return self.storage.getSize()
def history(self, oid, version='', size=1):
assert version == ''
# This is awkward. We don't know how much history to ask for.
        # We'll have to keep trying until we have enough or until there isn't
        # any more to choose from. :(
s = size
while 1:
base_history = self.storage.history(oid, size=s)
result = [d for d in base_history
if d['tid'] < self.before
]
if ((len(base_history) < s) or (len(result) >= size)):
if len(result) > size:
result = result[:size]
return result
s *= 2
def isReadOnly(self):
return True
def getTid(self, oid):
return self.load(oid)[1]
def lastTransaction(self):
return ZODB.utils.p64(ZODB.utils.u64(self.before) - 1)
def __len__(self):
return len(self.storage)
def load(self, oid, version=''):
assert version == ''
result = self.storage.loadBefore(oid, self.before)
if result:
return result[:2]
raise ZODB.POSException.POSKeyError(oid)
def loadBefore(self, oid, tid):
if self.before < tid:
tid = self.before
r = self.storage.loadBefore(oid, tid)
if r:
p, s1, s2 = r
if (s2 is not None) and (s2 >= self.before):
s2 = None
return p, s1, s2
else:
return None
def loadSerial(self, oid, serial):
if serial >= self.before:
raise ZODB.POSException.POSKeyError(oid)
return self.storage.loadSerial(oid, serial)
def new_oid(self):
raise ZODB.POSException.ReadOnlyError()
def pack(self, pack_time, referencesf):
raise ZODB.POSException.ReadOnlyError()
def registerDB(self, db):
pass
def sortKey(self):
return self.storage.sortKey()
def store(self, oid, serial, data, version, transaction):
raise ZODB.POSException.StorageTransactionError(self, transaction)
def storeBlob(self, oid, oldserial, data, blobfilename, version,
transaction):
raise ZODB.POSException.StorageTransactionError(self, transaction)
def tpc_abort(self, transaction):
pass
def tpc_begin(self, transaction):
raise ZODB.POSException.ReadOnlyError()
def tpc_finish(self, transaction, func=lambda: None):
raise ZODB.POSException.StorageTransactionError(self, transaction)
def tpc_transaction(self):
return None
def tpc_vote(self, transaction):
raise ZODB.POSException.StorageTransactionError(self, transaction)
class ZConfig:
def __init__(self, config):
self.config = config
self.name = config.getSectionName()
def open(self):
before = self.config.before
before_from_file = self.config.before_from_file
if (before and before_from_file):
raise ValueError(
'Only one of "before" or "before-from-file" options '
'can be specified, not both')
base = self.config.base.open()
if before and isinstance(before, str):
if before.lower() == 'now':
self.config.before = None
elif before.lower() == 'startup':
self.config.before = startup_time_stamp.raw()
elif before_from_file:
if os.path.exists(before_from_file):
f = open(before_from_file)
self.config.before = f.read()
else:
f = open(before_from_file, 'w')
self.config.before = get_utcnow().replace(
microsecond=0).isoformat()
f.write(self.config.before)
f.close()
before_storage = Before(base, self.config.before)
before_storage.before_from_file = self.config.before_from_file
return before_storage | zc.beforestorage | /zc.beforestorage-1.0-py3-none-any.whl/zc/beforestorage/__init__.py | __init__.py |
~~~~~~~~
zc.blist
~~~~~~~~
.. contents::
========
Overview
========
The sequence in this package has a list-like API, but stores its values in
individual buckets. This means that, for small changes in large sequences, the
sequence could be a big win. For instance, an ordered BTree-based container
might want to store order in a sequence, so that moves only cause a bucket or
two--around 50 strings or less--to be rewritten in the database, rather than
the entire contents (which might be thousands of strings, for instance).
If the sequence is most often completely rearranged, the complexity of the code
in this package is not desirable. It only makes sense if changes most
frequently are fairly small.
One downside is that reading and writing is more work than with a normal list.
If this were to actually gain traction, perhaps writing some or all of it in C
would be helpful. However, it still seems pretty snappy.
Another downside is the corollary of the bucket advantage listed initially:
with more persistent objects, iterating over it will fill a lot of ZODB's
object cache (which is based on the number of objects cached, rather than the
size). Consider specifying a big object cache if you are using these to store a
lot of data and are frequently iterating or changing.
These sequences return slices as iterators, and add some helpful iteration
methods. It adds a ``copy`` method that provides a cheap copy of the blist
that shares all buckets and indexes until a write happens, at which point it
copies and mutates the affected indexes and buckets.
We'll take a glance at how these differences work, and then describe the
implementation's basic mechanism, and close with a brief discussion of
performance characteristics in the abstract.
==============================
Differences from Python's List
==============================
Slices are Iterators
====================
This doesn't need much discussion. Getting slices of all sorts returns
iterators.
>>> from zc.blist import BList
>>> l = BList(range(1000))
>>> l[345:351] # doctest: +ELLIPSIS
<generator object at ...>
>>> list(l[345:351])
[345, 346, 347, 348, 349, 350]
>>> l[351:345:-1] # doctest: +ELLIPSIS
<generator object at ...>
>>> list(l[351:345:-1])
[351, 350, 349, 348, 347, 346]
>>> l[345:351:2] # doctest: +ELLIPSIS
<generator object at ...>
>>> list(l[345:351:2])
[345, 347, 349]
Additional Iteration Methods
============================
``iterReversed`` lets you iterate over the list in reverse order, efficiently,
with a given start point. It is used for slices that proceed with a step of
-1.
>>> i = l.iterReversed()
>>> i.next()
999
>>> i.next()
998
>>> list(i)[-10:]
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
``iterSlice`` lets you iterate over the list with a slice. It is equivalent to
using a slice with __getitem__.
>>> i = l.iterSlice(345, 351, 2)
>>> i # doctest: +ELLIPSIS
<generator object at ...>
>>> list(i)
[345, 347, 349]
Cheap ``copy``
==============
The ``copy`` method produces a cheap copy of the given blist. All buckets
and indexes are shared until a change is made to either side. Copies can
safely be made of other copies.
>>> c = l.copy()
>>> l == c
True
>>> list(c) == list(l)
True
>>> del c[10:]
>>> list(l) == range(1000)
True
>>> list(c)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> l == c
False
>>> c2 = c.copy()
>>> c2 == c
True
>>> list(c2)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
=========
Mechanism
=========
In its implementation, the sequence is an adapted B+ tree. Positions are
used as keys, but each bucket or branch starts at 0. For instance, a
perfectly-balanced bucket
sequence with 16 items, and a limit of 3 entries in a bucket or branch, would
have "keys" like this. In the diagram, the top three rows are indexes, and the
bottom row consists of buckets::
0 8
0 4 0 4
0 2 0 2 0 2 0 2
01 01 01 01 01 01 01 01
So, for instance, you would get the value at position 5 using this process
(a code sketch follows the list):
- In the top index (the top row, with keys of 0 and 8), find the largest key
that is less than or equal to the desired position, and use the associated
value (index or bucket, which is in this case the index represented by the
first pair of 0 and 4 in the second row) for the next step. In this case, the
top index has keys of 0 and 8, so the largest such key for position 5 is 0.
Subtract this key from the position; the difference will be used as the
position for the next step. In this case, the next position will be (5-0=) 5.
- The next index has keys of 0 and 4. The largest key less than or equal to 5
is 4. Use the child index associated with the 4 key for the next step (the
second pair of 0 and 2 in the third row), and subtract the key (4) from the
position (5) for the position to be used in the next step (=1).
- The next index (the second pair of 0 and 2 in the third row) needs to find
position 1. This will return the third pair of 0 1 in the last row. The new
position will be (1-0=) 1.
- Finally, position 1 in the bottom bucket stores the actual desired value.
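Here is a hedged sketch of that descent in Python. It mirrors the package's
internal ``_get_bucket`` helper; ``mapping`` (which resolves stored
identifiers to index or bucket objects) and the standalone function form are
assumptions for illustration. ``maxKey(pos)`` is the BTree method returning
the largest key less than or equal to ``pos``. ::

    def find(node, position, mapping):
        while not isinstance(node, Bucket):
            key = node.maxKey(position)  # largest key <= position
            node = mapping[node[key]]    # descend into the child node
            position -= key              # re-base position for the child
        return node[position]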
This arrangement minimizes the changes to keys necessary when a new value is
inserted low in the sequence: ignoring balancing the tree, only parents and
their subsequent siblings must be adjusted. For instance, inserting a new value
in the 0 position of the bucketsequence described above (the worst case for the
algorithm, in terms of the number of objects touched) would result in the
following tree::
0 9
0 5 0 4
0 3 0 2 0 2 0 2
012 01 01 01 01 01 01 01
===========================
Performance Characteristics
===========================
The Good
========
- ``__getitem__`` is efficient, not loading unnecessary buckets. It handles
slices pretty well too, not even loading intermediary buckets if the slice
is very large. Slices currently return iterables rather than lists; this
may switch to a view of some sort. All that should be assumed right now is
that you can iterate over the result of a slice.
- ``__setitem__`` and all the write methods do a pretty good job in terms of
efficient loading of buckets, and only writing what they need to. It
supports full Python slice semantics.
- ``copy`` is cheap: it reuses buckets and indexes so that new inner
components are created lazily when they mutate.
- While ``__contains__``, ``__iter__``, ``index`` and other methods are brute
force and written in Python, they might not load all buckets and items, while
with a normal list or tuple, they always will. See also ``iter``,
``iterReversed``, and ``iterSlice``.
The So-So
=========
- ``count``, ``__eq__``, and other methods load all buckets and items, and are
brute force, in Python. In contrast, lists and tuples will always load all
items (worse), but are brute force in C (better, though not algorithmically).
The Bad
=======
- This will create a lot of Persistent objects for one blist, which may cause
cache eviction problems depending on circumstances and usage.
- Did I mention that this was in Python, not C? That's fixable, at least, and
in fact doesn't appear to be too problematic at the moment, at least for the
author's usage.
| zc.blist | /zc.blist-1.0b1.tar.gz/zc.blist-1.0b1/src/zc/blist/README.txt | README.txt |
import sys
import random
import itertools
import ZODB.POSException
import persistent
import persistent.list
import BTrees.OOBTree
import BTrees.Length
import rwproperty
def method(f):
def wrapper(self, *args, **kwargs):
if self.shared:
raise RuntimeError('cannot mutate shared object')
return f(self, *args, **kwargs)
return wrapper
class setproperty(rwproperty.rwproperty):
@staticmethod
def createProperty(func):
return property(None, method(func))
@staticmethod
def enhanceProperty(oldprop, func):
return property(oldprop.fget, method(func), oldprop.fdel)
def supercall(name):
sys._getframe(1).f_locals[name] = method(
lambda self, *args, **kwargs: getattr(
super(self.__class__, self), name)(*args, **kwargs))
def makeProperty(name, default=None):
protected = '_z_%s__' % name
def setprop(self, value):
if self.shared:
raise RuntimeError('cannot mutate shared object')
        if value is not None and not isinstance(value, (int, long)):
raise TypeError(value)
setattr(self, protected, value)
sys._getframe(1).f_locals[name] = property(
lambda self: getattr(self, protected, default), setprop)
# helpers
def shift_sequence(l, count):
res = l[:count]
del l[:count]
return res
class Collections(persistent.Persistent):
# separate persistent object so a change does not necessitate rewriting
# bucket or index
def __init__(self, *collections):
self._collections = collections
def __iter__(self):
return iter(self._collections)
def add(self, collection):
if collection not in self:
self._collections += (collection,)
def remove(self, collection):
res = []
found = 0
for coll in self._collections:
if coll is not collection:
res.append(coll)
else:
assert not found
found += 1
if not found:
raise ValueError('blist programmer error: collection not found')
self._collections = tuple(res)
def __len__(self):
return len(self._collections)
def __getitem__(self, key):
return self._collections.__getitem__(key)
def __contains__(self, val):
for item in self._collections:
if val is item:
return True
return False
def __nonzero__(self):
return bool(self._collections)
# Bucket and Index extend this
class AbstractData(persistent.Persistent):
def __init__(self, collection, identifier=None, previous=None, next=None,
parent=None):
self.collections = Collections(collection)
self.identifier = identifier
self.previous = previous
self.next = next
self.parent = parent
@property
def collection(self):
if len(self.collections) > 1:
raise ValueError('ambiguous')
return self.collections[0]
@property
def shared(self):
return len(self.collections) > 1
makeProperty('identifier')
makeProperty('previous')
makeProperty('next')
makeProperty('parent')
# the other shared "interface" bits are `contained_len`, `clear`, `copy`,
# and `index`
class Bucket(persistent.list.PersistentList, AbstractData):
"""Buckets hold blocks of data from from the collection."""
def __init__(self, collection,
identifier, previous=None, next=None, parent=None, vals=None):
AbstractData.__init__(
self, collection, identifier, previous, next, parent)
persistent.list.PersistentList.__init__(self, vals)
def __getslice__(self, i, j):
return self.data[i:j]
def contained_len(self, collection):
return len(self)
@method
def clear(self):
del self[:]
def copy(self, collection):
assert self.shared, 'only copy shared object'
self.collections.remove(collection)
return Bucket(
collection, self.identifier, self.previous, self.next,
self.parent, self)
@method
def balance(self, right):
len_left = len(self)
len_right = len(right)
move_index = (len_left + len_right) // 2
right = self.collection._mutable(right)
if len_left > len_right:
# move some right
moved = self[move_index:]
right[0:0] = moved
del self[move_index:]
else:
# move some left
move_index -= len_left
moved = right[:move_index]
self.extend(moved)
del right[:move_index]
@method
def rotate(self, right):
if len(self) + len(right) > self.collection.bucket_size:
self.balance(right)
else:
moved = right[:]
self.extend(moved)
right = self.collection._mutable(right)
del right[:]
@method
def rotateRight(self, right):
if len(self) + len(right) > self.collection.bucket_size:
self.balance(right)
else:
moved = self[:]
right = self.collection._mutable(right)
right[0:0] = moved
del self[:]
supercall('__setitem__')
supercall('__delitem__')
supercall('__setslice__')
supercall('__delslice__')
supercall('__iadd__')
supercall('__imul__')
supercall('append')
supercall('insert')
supercall('pop')
supercall('remove')
supercall('reverse')
supercall('sort')
supercall('extend')
class Index(BTrees.family32.II.Bucket, AbstractData):
"""Indexes index buckets and sub-indexes."""
supercall('clear')
supercall('update')
supercall('__setitem__')
supercall('__delitem__')
supercall('setdefault')
supercall('pop')
def __init__(self, collection,
identifier, previous=None, next=None, parent=None):
AbstractData.__init__(
self, collection, identifier, previous, next, parent)
def _p_resolveConflict(self, oldstate, committedstate, newstate):
# disable conflict resolution; thinking about its effect in terms of
# balancing the tree makes my head hurt.
raise ZODB.POSException.ConflictError()
def index(self, other):
for k, v in self.items():
if v == other:
return k
raise ValueError('value not found; likely programmer error')
def contained_len(self, collection):
try:
val = self.maxKey()
except ValueError:
return 0
return val + collection._mapping[self[val]].contained_len(collection)
@method
def balance(self, right):
len_left = len(self)
len_right = len(right)
move_index = (len_left + len_right) // 2
right = self.collection._mutable(right)
if len_left > len_right:
# move some right
items = list(self.items()[move_index:])
zero = items[0][0] # this will be index 0 on the right
offset = (items[-1][0] + # this is offset for current right values
self.collection._mapping[items[-1][1]].contained_len(
self.collection) - zero)
for k, o in reversed(right.items()):
right[offset+k] = o
del right[k]
for k, o in items:
right[k-zero] = o
del self[k]
self.collection._mutable(
self.collection._mapping[o]).parent = right.identifier
else:
# move some left
move_index -= len_left
items = list(right.items()[:move_index])
offset = self.contained_len(self.collection)
for k, o in items:
self[offset+k] = o
del right[k]
self.collection._mutable(
self.collection._mapping[o]).parent = self.identifier
offset = (items[-1][0] +
self.collection._mapping[items[-1][1]].contained_len(
self.collection))
for k, o in list(right.items()):
del right[k]
right[k-offset] = o
@method
def rotate(self, right):
if len(self) + len(right) > self.collection.index_size:
self.balance(right)
else:
offset = self.contained_len(self.collection)
for k, o in list(right.items()):
self[offset+k] = o
self.collection._mutable(
self.collection._mapping[o]).parent = self.identifier
right = self.collection._mutable(right)
right.clear()
@method
def rotateRight(self, right):
if len(self) + len(right) > self.collection.index_size:
self.balance(right)
else:
offset = self.contained_len(self.collection)
right = self.collection._mutable(right)
for k, o in reversed(right.items()):
right[offset+k] = o
del right[k]
for k, o in self.items():
right[k] = o
self.collection._mutable(
self.collection._mapping[o]).parent = right.identifier
self.clear()
def copy(self, collection):
assert self.shared, 'only copy shared object'
self.collections.remove(collection)
res = Index(collection, self.identifier, self.previous, self.next,
self.parent)
res.update(self)
return res
class BList(persistent.Persistent):
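    """A list-like persistent sequence stored as a B+ tree of buckets.

    A minimal usage sketch (the API mirrors Python's list, except that
    slicing returns an iterator rather than a new list)::

        b = BList(range(10))
        b.append(10)
        b[0:2] = ['a', 'b']
        values = list(b[2:5])
    """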
family = BTrees.family32 # don't support 64 yet because need another Index
# class
def __init__(self, vals=None,
bucket_size=30, index_size=10):
self.bucket_size = bucket_size
self.index_size = index_size
self._mapping = self.family.IO.BTree()
self._top_index = 0
self._mapping[self._top_index] = Bucket(self, self._top_index)
if vals is not None:
self.extend(vals)
def copy(self):
res = self.__class__.__new__(self.__class__)
res.bucket_size = self.bucket_size
res.index_size = self.index_size
res.family = self.family
res._mapping = self.family.IO.BTree()
res._top_index = self._top_index
res._mapping.update(self._mapping)
for v in self._mapping.values():
v.collections.add(res)
return res
@property
def data(self):
return self._mapping[self._top_index]
# Read API
def __contains__(self, value):
# this potentially loads all buckets and items from ZODB. Then again,
# standard list or tuple *will* load all items.
for item in self:
if value == item:
return True
return False
def __len__(self):
return self.data.contained_len(self)
def count(self, value):
# whee! Let's load everything!
ct = 0
for item in self:
if value == item:
ct += 1
return ct
def _get_bucket(self, index):
bucket = self.data
ix = index
while not isinstance(bucket, Bucket):
key = bucket.maxKey(ix)
bucket = self._mapping[bucket[key]]
ix -= key
return bucket, ix
def iter(self, start=0):
length = len(self)
if start < 0:
start += length
if start < 0:
raise IndexError('list index out of range')
if length > start:
bucket, ix = self._get_bucket(start)
for v in bucket[ix:]:
yield v
bucket_ix = bucket.next
while bucket_ix is not None:
bucket = self._mapping[bucket_ix]
for v in bucket:
yield v
bucket_ix = bucket.next
def iterReversed(self, start=-1):
length = len(self)
if start < 0:
start += length
if start < 0:
raise IndexError('list index out of range')
if length > start:
bucket, ix = self._get_bucket(start)
for v in reversed(bucket[:ix+1]):
yield v
bucket_ix = bucket.previous
while bucket_ix is not None:
bucket = self._mapping[bucket_ix]
for v in reversed(bucket):
yield v
bucket_ix = bucket.previous
def iterSlice(self, start=0, stop=None, stride=None):
if isinstance(start, slice):
if stop is not None or stride is not None:
raise ValueError(
'cannot pass slice with additional stop or stride')
else:
start = slice(start, stop, stride)
start, stop, stride = start.indices(len(self))
if stride == 1:
ix = start
i = self.iter(start)
while ix < stop:
yield i.next()
ix += 1
elif stride == -1:
ix = start
i = self.iterReversed(start)
while ix > stop:
yield i.next()
ix -= 1
else:
if stride < 0:
condition = lambda begin, end: begin > end
else:
condition = lambda begin, end: begin < end
ix = start
while condition(ix, stop):
bucket, i = self._get_bucket(ix)
yield bucket[i]
ix += stride
def __iter__(self):
return self.iter()
    def index(self, value, start=0, stop=None):
        # normalize negative bounds so the returned index is absolute
        start, stop, _ = slice(start, stop).indices(len(self))
        for ct, item in enumerate(self.iterSlice(start, stop)):
            if item == value:
                return start + ct
        raise ValueError('.index(x): x not in collection')
    def __getitem__(self, index):
        if isinstance(index, slice):
            return self.iterSlice(index) # XXX return view?
        length = len(self)
        if index < 0:
            index += length
        if index < 0 or index >= length:
            raise IndexError('list index out of range')
        bucket, ix = self._get_bucket(index)
        return bucket[ix]
# Write API
# Everything relies on __setitem__ to reduce duplicated logic
def append(self, value):
self[len(self)] = value
def insert(self, index, value):
self[index:index] = (value,)
    def __delitem__(self, index):
        if not isinstance(index, slice):
            length = len(self)
            if index < 0:
                index += length
            if index < 0 or index >= length:
                raise IndexError('list assignment index out of range')
            index = slice(index, index+1)
elif index.step == 1:
index = slice(index.start, index.stop)
elif index.step is not None:
start, stop, stride = index.indices(len(self))
if stride < 0:
ix = range(start, stop, stride)
else:
ix = reversed(range(start, stop, stride))
for i in ix:
self.__setitem__(slice(i, i+1), ())
return
self.__setitem__(index, ())
def extend(self, iterable):
length = len(self)
self[length:length] = iterable
def __iadd__(self, iterable):
self.extend(iterable)
return self
    def pop(self, index=-1):
        res = self[index]
        if index < 0:
            index += len(self)  # normalize so the slice below is non-empty
        self[index:index+1] = ()
        return res
def remove(self, item):
index = self.index(item)
self[index:index+1] = ()
def reverse(self):
self[:] = reversed(self)
def sort(self, cmp=None, key=None, reverse=False):
vals = list(self)
vals.sort(cmp=cmp, key=key, reverse=reverse)
self[:] = vals
# __setitem__ helpers
def _reindex(self, start_bucket, stop_bucket_id, recurse=False):
if start_bucket is None or start_bucket.identifier == self._top_index:
return
orig_parent = parent = self._mapping[
self._mapping[start_bucket.identifier].parent]
stopped = found = False
new = {}
while not stopped:
found = found or parent.minKey() != 0
next = 0
changed = False
for k, v in parent.items():
if not found:
new[k] = v
if v == start_bucket.identifier:
found = True
next = k + self._mapping[v].contained_len(self)
else:
new[next] = v
changed = changed or k != next
next = next + self._mapping[v].contained_len(self)
stopped = stopped or v == stop_bucket_id
if changed:
parent = self._mutable(parent)
parent.clear()
parent.update(new)
if not stopped:
if parent.next is None:
stopped = True
else:
parent = self._mapping[parent.next]
new.clear()
if recurse:
self._reindex(
orig_parent, self._mapping[stop_bucket_id].parent, recurse)
def _mutable(self, bucket):
if bucket.shared:
bucket = bucket.copy(self)
self._mapping[bucket.identifier] = bucket
return bucket
_v_nextid = None
def _generateId(self):
# taken from zope.app.intid code
"""Generate an id which is not yet taken.
This tries to allocate sequential ids so they fall into the
same BTree bucket, and randomizes if it stumbles upon a
used one.
"""
while True:
if self._v_nextid is None:
self._v_nextid = random.randrange(
self.family.minint, self.family.maxint)
uid = self._v_nextid
self._v_nextid += 1
if uid not in self._mapping:
return uid
self._v_nextid = None
# __setitem__ itself: the workhorse
def __setitem__(self, index, value):
length = len(self)
# To reduce the amount of duplicated code, everything is based on
# slices. Either you are replacing specific items (index is an integer
# less than len or a slice with an explicit step) or deleting/inserting
# ranges (index is an integer equal to len or a slice with an implicit
# step of 1). We convert integer requests to slice requests here.
if not isinstance(index, slice):
value = (value,)
if index == length:
index = slice(length, length)
elif index > length:
raise IndexError('list assignment index out of range')
elif index == -1:
index = slice(length-1, length, 1) # we specify a step to use
# the second, "replace values" code path below, rather than
# the first "range" code path.
else:
index = slice(index, index+1, 1) # same reason as above for
# specifying the step.
start, stop, stride = index.indices(length)
if start > stop and stride > 0 or start < stop and stride < 0:
stop = start # that's the way the Python list works.
if index.step is None:
# delete and/or insert range; bucket arrangement may change
value = list(value) # we actually do mutate this, so a list is
# intentional. Even if the original value is a list, we don't want
# to mutate the original.
len_value = len(value)
if start == 0 and stop == length and stride == 1:
# shortcut: clear out everything
for data in self._mapping.values():
if data.shared:
data.collections.remove(self)
self._mapping.clear()
self._top_index = 0
start_bucket = self._mapping[
self._top_index] = Bucket(self, self._top_index)
start_bucket_id = self._top_index
start_ix = 0
elif stop != start:
# we're supposed to delete
bucket, start_ix = self._get_bucket(start)
start_bucket_id = bucket.identifier
_stop_bucket, stop_ix = self._get_bucket(stop)
stop_bucket_id = _stop_bucket.identifier
ix = start_ix
while True:
if bucket.identifier == stop_bucket_id:
removed = bucket[ix:stop_ix]
if removed:
bucket = self._mutable(bucket)
del bucket[ix:stop_ix]
elif (stop_bucket_id != start_bucket_id and
bucket.previous is not None):
stop_bucket_id = bucket.previous
break
removed = bucket[ix:]
if removed:
bucket = self._mutable(bucket)
del bucket[ix:]
bucket_ix = bucket.next
if bucket_ix is None:
break
bucket = self._mapping[bucket_ix]
ix = 0
bucket = self._mapping[start_bucket_id]
ix = start_ix
# populate old buckets with new values, until we are out of
# new values or old buckets
while value:
items = shift_sequence(
value, self.bucket_size - len(bucket))
bucket = self._mutable(bucket)
bucket[ix:ix] = items
if bucket.identifier == stop_bucket_id or not value:
stop_ix = len(items) + ix
break
bucket = self._mapping[bucket.next]
ix = 0
# we've deleted values, and may have replaced some,
# and now we need to see if we need to rearrange
# buckets because they are smaller than the fill ratio
# allows. We do this even if we have more values to
# insert so that the insert code can begin from a sane
# state; this is an obvious possible optimization point,
# therefore (though other optimizations may be better choices).
# The algorithm has us first try to balance across
# siblings, and then clean up the parents. Typically
# B+ tree algorithm descriptions go
# one-item-at-a-time, while we may have swaths of
# changes to which we need to adjust.
# Key adjustments are different than the standard B+
# tree story because this is a sequence, and our keys
            # are indices that we need to adjust to accommodate the
# deletions. This means siblings to all of our
# parents, walking up the tree. The "swaths of
# changes" also makes this a bit tricky.
fill_maximum = self.bucket_size
fill_minimum = fill_maximum // 2
original_stop_bucket_id = stop_bucket_id
check_next = False
while start_bucket_id is not None:
# We'll get the buckets rotated so that any
# bucket that has members will be above the fill ratio
# (unless it is the only bucket).
#
# `bucket` is the last bucket we might have put
# anything in to; we'll want to look at it and the
# `stop_bucket` (if different) to see if we need to
# adjust.
len_bucket = len(bucket)
# Usually we only care about two buckets here: ``bucket``
# and ``stop_bucket``. However, the first thing we do here
# is handle an exceptional case. We might be working with
# an index row in which a lower row had to include three
# buckets in the calculation. See where ``check_next =
# True`` occurs (pretty far down) to understand the
# condition.
next = None
if check_next:
if bucket.next is None:
check_next = False
else:
assert len_bucket >= fill_minimum
next = self._mapping[bucket.next]
if 0 < len(next) < fill_minimum:
bucket = self._mutable(bucket)
next = self._mutable(next)
bucket.rotate(next)
len_bucket = len(bucket) # reset; it has
# changed if next needs to be removed from
# parent, it will be done later (below)
if not next: # (if empty)
# next's parent, if different than
# bucket's, should not have to be
# explicitly balanced in this way, because
# it is either empty (and will be removed)
# or the stop_bucket's parent (and will be
# balanced anyway).
check_next = False
else:
# We are not mutating next bucket, so next's
# parent should not have to be balanced
check_next = False
stop_bucket = self._mapping[stop_bucket_id]
len_stop = len(stop_bucket)
if next:
                    # We're still handling the exceptional case. ``next`` is
# not None and not empty. This means that we are
# working with indexes on this level. It also means
# that bucket and next are above the minimum fill
# level. The only thing we don't know is if the stop
# bucket is not next and the stop bucket is not empty
# and below the fill level. If that's the case, we need
# to balance next and stop.
if len_stop and len_stop < fill_minimum:
assert stop_bucket_id != next.identifier
next = self._mutable(next)
next.rotate(stop_bucket)
len_stop = len(stop_bucket)
# now, bucket is not empty and above the minimum,
# next is not empty and above the minimum, and
# stop_bucket is either empty, or not empty and
# above the minimum.
assert len_bucket >= fill_minimum
assert len(next) >= fill_minimum
assert not len_stop or len_stop >= fill_minimum
# this means we are balanced, and that we have two
# buckets, so neither of them is the top bucket.
else:
# this is the regular code path. We only need to look
# at the start bucket and the stop bucket.
# if bucket and stop_bucket are different and
# stop_bucket is not empty and either are below the
# fill_minimum...
# if the combination is less than the fill_maximum,
# put in bucket and empty stop_bucket
# else redistribute across so both are above
# fill_minimum
if (bucket is not stop_bucket and
len_stop and (
len_bucket < fill_minimum or
len_stop < fill_minimum)):
bucket = self._mutable(bucket)
stop_bucket = self._mutable(stop_bucket)
bucket.rotate(stop_bucket)
len_bucket = len(bucket)
len_stop = len(stop_bucket)
# if (bucket is stop_bucket or stop_bucket is empty)
# and bucket.previous is None and stop_bucket.next is
# None, shortcut: just make sure this is the top
# bucket and break.
if ((bucket is stop_bucket or not len_stop) and
bucket.previous is None and
stop_bucket.next is None):
bucket = self._mutable(bucket)
if bucket.identifier != self._top_index:
# get rid of unnecessary parents
p = bucket
while p.identifier != self._top_index:
p = self._mapping[p.parent]
if p.shared:
p.collections.remove(self)
del self._mapping[p.identifier]
# this is now the top of the tree
self._top_index = bucket.identifier
bucket.parent = None
else:
assert bucket.parent is None
bucket.next = None
stop_bucket = stop_bucket_id = None
break
# now these are the possible states:
# - bucket is stop_bucket and is empty
# - bucket is stop_bucket and is too small
# - bucket is stop_bucket and is ok
# - bucket and stop_bucket are both empty
# - bucket is ok and stop_bucket is empty
# - bucket is too small and stop_bucket is empty
# - bucket is ok and stop_bucket is ok
#
# Therefore,
# - if the stop_bucket is ok or the bucket is empty,
# we're ok with this step, and can move on to
# adjusting the indexes and pointers.
# - otherwise the bucket is too small, and there is
# another bucket to rotate with. Find the bucket
# and adjust so that no non-empty buckets are
# beneath the fill_minimum. Make sure to adjust the
# start_bucket or stop_bucket to include the altered
# bucket.
if len_bucket < fill_minimum:
previous = bucket.previous
next = stop_bucket.next
assert previous is not None or next is not None
assert bucket is stop_bucket or not len_stop
if next is not None:
next = self._mapping[next]
if (next is None or
previous is not None and
len(next) + len_bucket > fill_maximum):
# work with previous
previous = self._mutable(
self._mapping[previous])
bucket = self._mutable(bucket)
previous.rotate(bucket)
if bucket.identifier == start_bucket_id:
if bucket:
# now bucket may have a parent that
# needs to be balanced that is *not*
# the parent of start bucket or of stop
# bucket. We'll check this the next
# time we go through this loop, with
# the parents of this level.
check_next = True
bucket = previous
start_bucket_id = previous.identifier
if not bucket:
bucket = previous
assert bucket
else:
# work with next
bucket = self._mutable(bucket)
next = self._mutable(next)
bucket.rotateRight(next)
stop_bucket_id = next.identifier
# We do this part of the code both in the exceptional
# ``check_next`` case and in the usual case.
# Now we need to adjust pointers and get rid of empty
# buckets. We'll go level-by-level.
# we keep track of the bucket to reindex on separately from
# the start_index because we may have to move it to a
# previous bucket if the start_bucket is deleted to make
# sure that the next bucket is indexed correctly all the
# way up.
reindex_start = self._mapping[start_bucket_id]
# we need to stash these because they may be removed
start_bucket_parent = self._mapping[start_bucket_id].parent
stop_bucket_parent = self._mapping[stop_bucket_id].parent
b = bucket
while b is not None:
next = b.next
if next is not None:
next = self._mapping[next]
if not b: # it is empty
parent = self._mutable(self._mapping[b.parent])
ix = parent.index(b.identifier)
del parent[ix]
previous = b.previous
if previous is not None:
previous = self._mutable(
self._mapping[previous])
previous.next = b.next
if next is not None: # next defined at loop start
next = self._mutable(next)
next.previous = b.previous
if b.identifier == reindex_start.identifier:
if previous is not None:
reindex_start = previous
else:
reindex_start = next
assert not b.shared
del self._mapping[b.identifier]
if b.identifier == stop_bucket_id:
break
b = next
self._reindex(reindex_start, stop_bucket_id)
# now we get ready for the next round...
start_bucket_id = start_bucket_parent
stop_bucket_id = stop_bucket_parent
bucket = bucket.parent
if bucket is not None:
bucket = self._mapping[bucket]
fill_maximum = self.index_size
fill_minimum = fill_maximum // 2
assert stop_bucket_id is None
if not value:
return # we're done; don't fall through to add story
else:
# we've replaced old values with new, but there are
# some more left. we'll set things up so the
# standard insert story should work for the remaining
# values.
start_bucket_id = original_stop_bucket_id
start_ix = stop_ix
# ...now continue with add story
else:
_start_bucket, start_ix = self._get_bucket(start)
start_bucket_id = _start_bucket.identifier
# this is the add story.
# So, we have a start_bucket and a start_ix: we're supposed
# to insert the values in i at start_ix in start_bucket.
if not value:
return
fill_maximum = self.bucket_size
fill_minimum = fill_maximum // 2
# Clean out the ones after start_ix in the start bucket, if
# any.
bucket = self._mapping[start_bucket_id]
moved = bucket[start_ix:]
value.extend(moved)
bucket = self._mutable(bucket)
del bucket[start_ix:]
ix = start_ix
created = []
# Start filling at the ix. Fill until we reached len
# or until i is empty. Make new buckets, remembering them in
# a list, and fill them until i is empty, and then continue
# with the removed ones from the start_bucket. If the last
# bucket is too small, merge or rotate as appropriate.
length = fill_maximum - len(bucket)
while value:
added = shift_sequence(value, length)
bucket.extend(added)
if value:
old_bucket = bucket
identifier = self._generateId()
bucket = self._mapping[identifier] = Bucket(
self, identifier,
previous=old_bucket.identifier, next=old_bucket.next)
old_bucket.next = bucket.identifier
if bucket.next is not None:
self._mutable(self._mapping[bucket.next]).previous = (
bucket.identifier)
created.append(bucket)
length = self.bucket_size
if (bucket.identifier != self._top_index and
len(bucket) < fill_minimum):
# this should only be able to happen when a previous bucket
# is already filled. It's simplest, then, to just split the
# contents of the previous bucket and this one--that way
# there's not any empty bucket to have to handle.
previous = self._mapping[bucket.previous]
assert not previous.shared
assert len(previous) + len(bucket) >= 2 * fill_minimum
previous.balance(bucket)
# Now we have to insert any new buckets in the parents. We
# again fill the parents, creating and remembering as
# necessary, and rotating at the end. We keep on walking up
# until the list of new buckets is empty. If we reach the top,
# we add a level at the top and continue.
if not created:
self._reindex(
self._mapping[start_bucket_id], bucket.identifier,
recurse=True)
return
value = created
fill_maximum = self.index_size
fill_minimum = fill_maximum // 2
start_bucket = self._mutable(self._mapping[start_bucket_id])
while value:
if start_bucket_id == self._top_index: # the top
assert start_bucket.parent is None
self._top_index = identifier = self._generateId()
parent = self._mapping[identifier] = Index(
self, identifier, parent=None)
parent[0] = start_bucket.identifier
start_bucket.parent = parent.identifier
start_ix = 0
bucket = start_bucket = parent
start_bucket_id = identifier
else:
parent = self._mutable(self._mapping[start_bucket.parent])
start_ix = parent.index(start_bucket.identifier)
bucket = start_bucket = parent
start_bucket_id = start_bucket.identifier
value.extend(
self._mapping[i] for i in
start_bucket.values(start_ix, excludemin=True))
for k in tuple(
start_bucket.keys(start_ix, excludemin=True)):
del start_bucket[k]
ix = start_ix + self._mapping[
start_bucket[start_ix]].contained_len(self)
created = []
# Start filling at the ix. Fill until we reached len or
# until i is empty. Make new buckets, remembering them in a
# list, and fill them until i is empty, and then continue
# with the removed ones from the start_bucket. If the last
# bucket is too small, merge or rotate as appropriate.
length = fill_maximum - len(bucket)
while value:
for o in shift_sequence(value, length):
o = self._mutable(o)
bucket[ix] = o.identifier
o.parent = bucket.identifier
ix += o.contained_len(self)
# we don't need to fix parents--we'll get to them above
if value:
identifier = self._generateId()
previous = self._mutable(bucket)
bucket = self._mapping[identifier] = Index(
self, identifier,
previous=previous.identifier, next=previous.next)
previous.next = identifier
if bucket.next is not None:
next = self._mutable(self._mapping[bucket.next])
next.previous = identifier
created.append(bucket)
length = fill_maximum
ix = 0
if (bucket.identifier != self._top_index and
len(bucket) < fill_minimum):
# this should only be able to happen when a previous
# bucket is already filled. It's simplest, then, to
# just split the contents of the previous bucket and
# this one--that way there's not any empty bucket to
# have to handle.
previous = self._mapping[bucket.previous]
assert (len(previous) + len(bucket) >= 2 * fill_minimum)
assert not previous.shared # I *think* this is an
# invariant; otherwise need to doublecheck and then use
# _mutable
previous.balance(bucket)
value = created
if start_bucket.identifier != self._top_index:
# we need to correct the indices of the parents.
self._reindex(start_bucket, bucket.identifier, recurse=True)
else:
# replace one set with a set of equal length
changed = []
index = start
error = None
value_ct = 0
for v in value:
value_ct += 1
if (stride > 0 and index >= stop or
stride < 0 and index <= stop):
error = ValueError(
'attempt to assign sequence of at least size %d '
'to extended slice of size %d' % (
value_ct, (stop - start) / stride))
break
bucket, ix = self._get_bucket(index)
old = bucket[ix]
bucket = self._mutable(bucket)
bucket[ix] = v
changed.append((bucket, ix, old))
index += stride
else:
if value_ct < (stop - start) / stride:
error = ValueError(
'attempt to assign sequence of size %d to '
'extended slice of size %d' % (
value_ct, (stop - start) / stride))
if error:
for bucket, ix, old in changed:
bucket[ix] = old
raise error
# I want eq and ne but don't care much about the rest
def __eq__(self, other):
if self.__class__ is not other.__class__ or len(self) != len(other):
return False
for s, o in itertools.izip(self, other):
if s != o:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
# def __lt__(self, other):
# pass
#
# def __gt__(self, other):
# pass
#
# def __le__(self, other):
# pass
#
# def __ge__(self, other):
# pass
# def __add__(self, other):
# pass
#
# def __mul__(self, other):
# pass
#
# def __rmul__(self, other):
# pass | zc.blist | /zc.blist-1.0b1.tar.gz/zc.blist-1.0b1/src/zc/blist/__init__.py | __init__.py |
===============================
Integration of boto and keyring
===============================
Note: My hope is that this package will become redundant once
something very similar is integrated into boto itself.
Boto is awesome for controlling AWS. Unfortunately, there are times
when specifying credentials is awkward. Typically, one has to either:
- includint the AWS password (aws_secret_access_key) in a clear text
dot file, which is insecure, or
- setting an environment variable with the password, which is
inconvenient and insecure on multi-user systems.
It would be better to use a secure keyring to store the password.
zc.botokeyring provides exactly that capability. To use it, set
aws_access_key_id in the Credentials section of your boto
configuration file as usual, and instead of setting
aws_secret_access_key, set keyring to the name of a keyring containing
the password::
[Credentials]
aws_access_key_id = 1234
keyring = test
.. -> config
>>> with open('.boto', 'w') as f:
... f.write(config)
>>> import boto, boto.provider, boto.pyami.config, zc.botokeyring
>>> _ = reload(boto.pyami.config)
>>> _ = reload(boto)
>>> _ = reload(boto.provider)
>>> zc.botokeyring.setup()
>>> p = boto.provider.Provider('aws')
>>> p.access_key
'1234'
>>> p.secret_key
'test1234pw'
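
Note that the secret key must be stored in the keyring beforehand.
Assuming zc.botokeyring looks the password up by keyring name and
access key id (which is what the example above implies), the
``keyring`` package can store it::

    import keyring
    keyring.set_password('test', '1234', 'test1234pw')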
Changes
=======
0.1.0 (2012-12-02)
------------------

Initial release.
| zc.botokeyring | /zc.botokeyring-0.1.0.tar.gz/zc.botokeyring-0.1.0/README.rst | README.rst |
import sys
if sys.version_info < (2, 7):
    raise SystemError("For Python 2.x, only Python 2.7 is supported.")
if sys.version_info > (3, ) and sys.version_info < (3, 5):
raise SystemError("No support for Python 3.x under 3.5.")
import os, shutil, subprocess, tempfile
os.environ["SETUPTOOLS_USE_DISTUTILS"] = "stdlib"
def main(args):
for d in 'eggs', 'develop-eggs', 'bin', 'parts':
if not os.path.exists(d):
os.mkdir(d)
bin_buildout = os.path.join('bin', 'buildout')
if os.path.isfile(bin_buildout):
os.remove(bin_buildout)
if os.path.isdir('build'):
shutil.rmtree('build')
print("Current directory %s" % os.getcwd())
#######################################################################
def install_pip():
print('')
print('Install pip')
print('')
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tmp = tempfile.mkdtemp(prefix='buildout-dev-')
try:
get_pip = os.path.join(tmp, 'get-pip.py')
if sys.version_info < (3, ):
GET_PIP_URL = 'https://bootstrap.pypa.io/pip/2.7/get-pip.py'
elif (sys.version_info.major, sys.version_info.minor) == (3, 5):
GET_PIP_URL = 'https://bootstrap.pypa.io/pip/3.5/get-pip.py'
elif (sys.version_info.major, sys.version_info.minor) == (3, 6):
GET_PIP_URL = 'https://bootstrap.pypa.io/pip/3.6/get-pip.py'
else:
GET_PIP_URL = 'https://bootstrap.pypa.io/pip/get-pip.py'
with open(get_pip, 'wb') as f:
f.write(urlopen(GET_PIP_URL).read())
sys.stdout.flush()
if subprocess.call([sys.executable, get_pip]):
raise RuntimeError("Failed to install pip.")
finally:
shutil.rmtree(tmp)
print("Restart")
sys.stdout.flush()
return_code = subprocess.call(
[sys.executable] + sys.argv
)
sys.exit(return_code)
try:
import pip
print('')
try:
print(subprocess.check_output(
[sys.executable] + ['-m', 'pip', '--version'],
stderr=subprocess.STDOUT,
).decode('utf8'))
print('is installed.')
except subprocess.CalledProcessError as e:
# some debian/ubuntu based machines
# have broken pip installs
# that cannot import distutils or html5lib
# thus try to install via get-pip
if (b"ImportError" in e.output or
b"ModuleNotFoundError" in e.output):
install_pip()
raise e
except ImportError:
install_pip()
######################################################################
def check_upgrade(package):
print('')
print('Try to upgrade %s' % package)
print('')
try:
sys.stdout.flush()
output = subprocess.check_output(
[sys.executable] + ['-m', 'pip', 'install',
'--disable-pip-version-check', '--upgrade', package],
stderr=subprocess.STDOUT,
)
was_up_to_date = b"up-to-date" in output or b"already satisfied" in output
if not was_up_to_date:
print(output.decode('utf8'))
return not was_up_to_date
except subprocess.CalledProcessError as e:
print(e.output)
raise RuntimeError("Upgrade of %s failed." % package)
def install_pinned_version(package, version):
print('')
print('Try to install version %s of %s' % (version, package))
print('')
try:
sys.stdout.flush()
output = subprocess.check_output(
[sys.executable] + ['-m', 'pip', 'install',
'--disable-pip-version-check', package+'=='+version],
stderr=subprocess.STDOUT,
)
was_up_to_date = b"already satisfied" in output
if not was_up_to_date:
print(output.decode('utf8'))
return not was_up_to_date
except subprocess.CalledProcessError as e:
print(e.output)
raise RuntimeError(
"Install version %s of %s failed." % (version, package)
)
def show(package):
try:
sys.stdout.flush()
output = subprocess.check_output(
[sys.executable, '-m', 'pip', 'show', package],
)
for line in output.splitlines():
if line.startswith(b'Name') or line.startswith(b'Version'):
print(line.decode('utf8'))
except subprocess.CalledProcessError:
raise RuntimeError("Show version of %s failed." % package)
need_restart = False
package = 'pip'
if args.pip_version:
did_upgrade = install_pinned_version(package, args.pip_version)
else:
did_upgrade = check_upgrade(package)
show(package)
need_restart = need_restart or did_upgrade
package = 'setuptools'
if args.setuptools_version:
did_upgrade = install_pinned_version(package, args.setuptools_version)
else:
did_upgrade = check_upgrade(package)
show(package)
need_restart = need_restart or did_upgrade
package = 'wheel'
did_upgrade = check_upgrade(package)
show(package)
need_restart = need_restart or did_upgrade
if need_restart:
print("Restart")
sys.stdout.flush()
return_code = subprocess.call(
[sys.executable] + sys.argv
)
sys.exit(return_code)
######################################################################
print('')
print('Install buildout')
print('')
sys.stdout.flush()
if subprocess.call(
[sys.executable] +
['setup.py', '-q', 'develop', '-m', '-x', '-d', 'develop-eggs'],
):
raise RuntimeError("buildout build failed.")
import pkg_resources
pkg_resources.working_set.add_entry('src')
import zc.buildout.easy_install
zc.buildout.easy_install.scripts(
['zc.buildout'], pkg_resources.working_set, sys.executable, 'bin')
######################################################################
def install_coverage():
print('')
print('Install coverage')
print('')
if subprocess.call(
[sys.executable] +
['-m', 'pip', 'install', 'coverage'],
):
raise RuntimeError("coverage install failed.")
try:
import coverage
except ImportError:
install_coverage()
######################################################################
print('')
print('Run buildout')
print('')
bin_buildout = os.path.join('bin', 'buildout')
if sys.platform.startswith('java'):
# Jython needs the script to be called twice via sys.executable
assert subprocess.Popen([sys.executable, bin_buildout, '-N']).wait() == 0
sys.stdout.flush()
sys.exit(subprocess.Popen(bin_buildout).wait())
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description='Setup buildout development environment')
parser.add_argument('--pip-version', help='version of pip to install',
action='store')
parser.add_argument('--setuptools-version', help='version of setuptools to install',
action='store')
parser.add_argument('--no-clean',
help='not used in the code, find out if still needed in Makefile',
action='store_const', const='NO_CLEAN')
args = parser.parse_args()
return args
if __name__ == '__main__':
main(parse_args()) | zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/dev.py | dev.py |
Change History -- Old changes
*****************************
2.0.0 (2013-02-10)
==================
This is a backward incompatible release of buildout that attempts to
correct mistakes made in buildout 1.
- Buildout no longer tries to provide full or partial isolation from
  system Python installations. If you want isolation, use buildout
  with virtualenv, or use a clean build of Python to begin with.

  Providing isolation was a noble goal, but its implementation
  complicated buildout too much.

- Buildout no longer supports using multiple versions of Python in a
  single buildout. This too was a noble goal, but added too much
  complexity to the implementation.
- Changed the configuration file format:
- Relative indentation in option values is retained if the first
line is blank. (IOW, if the non-blank text is on the continuation
lines.) As in::
[mysection]
tree =
/root
branch
In such cases, internal blank lines are also retained.
- The configuration syntax is more tightly defined, allowing fewer
syntax definitions.
Buildout 1 configuration files were parsed with the Python
ConfigParser module. The ConfigParser module's format is poorly
documented and wildly flexible. For example:
- Any characters other than left square brackets were allowed in
section names.
- Arbitrary text was allowed and ignored after the closing bracket on
section header lines.
- Any characters other than equal signs or colons were allowed in an
option name.
- Configuration options could be spelled as RFC 822 mail headers
(using a colon, rather than an equal sign).
- Comments could begin with "rem".
- Semicolons could be used to start inline comments, but only if
preceded by a whitespace character.
See `Configuration file syntax`_.
- Buildout now prefers final releases by default
  (buildout:prefer-final now defaults to true, rather than false).
  However, if buildout is bootstrapped with a non-final release, it
  won't downgrade itself to a final release.

- Buildout no longer installs zipped eggs. (Distribute may still
  install a zipped egg of itself during the bootstrapping process.)
  The ``buildout:unzip`` option has been removed.

- Buildout no longer supports setuptools. It now uses distribute
  exclusively.
- Integrated the `buildout-versions
<http://packages.python.org/buildout-versions/>`_ extension into buildout
itself. For this, a few options were added to buildout:
- If ``show-picked-versions`` is set to true, all picked versions are
printed at the end of the buildout run. This saves you from running
buildout in verbose mode and extracting the picked versions from the
output.
- If ``update-versions-file`` is set to a filename (relative to the buildout
directory), the ``show-picked-versions`` output is appended to that file.
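
  For instance, a minimal sketch (the file name is just an example)::

    [buildout]
    show-picked-versions = true
    update-versions-file = versions.cfg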
- Buildout options can be given on the command line using the form::
option_name=value
as a short-hand for::
buildout:option_name=value
- The ``versions`` option now defaults to ``versions``, so you no
longer need to include::
versions = versions
in a ``buildout`` section when pinning versions.
A ``versions`` section is added, if necessary, if a ``versions``
option isn't used.
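
  So pinning can be as simple as (a sketch; ``spam`` is a made-up
  project)::

    [versions]
    spam = 1.1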
- Buildout-defined default versions are included in the versions
section, if there is one.
- The ``buildout:zc.buildout-version`` and
``buildout:distribute-version`` options have been removed in favor
of providing version constraints in a versions section.
- Error if install-from-cache and offline are used together, because
offline largely means "don't install".
- Provide better error messages when distributions can't be installed
because buildout is run in offline mode.
- Versions in versions sections can now be simple constraints, like
>=2.0dev in addition to being simple versions.
Buildout 2 leverages this to make sure it uses
zc.recipe.egg>=2.0.0a3, which mainly matters for Python 3.
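
  Expressed in a versions section, that pin looks like::

    [versions]
    zc.recipe.egg = >=2.0.0a3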
- The buildout init command now accepts distribution requirements and
paths to set up a custom interpreter part that has the distributions
or parts in the path. For example::
python bootstrap.py init BeautifulSoup
- Added buildout:socket-timeout option so that socket timeout can be configured
both from command line and from config files. (gotcha)
- Distutils-style scripts are also installed now (for instance pyflakes' and
docutils' scripts). https://bugs.launchpad.net/zc.buildout/+bug/422724
- Avoid sorting the working set and requirements when it won't be
logged. When profiling a simple buildout with 10 parts with
identical and large working sets, this resulted in a decrease of run
time from 93.411 to 15.068 seconds, about a 6 fold improvement. To
see the benefit be sure to run without any increase in verbosity
("-v" option). (rossp)
- Introduce a cache for the expensive `buildout._dir_hash` function.
- Remove duplicate path from script's sys.path setup.
- Make sure to download extended configuration files only once per buildout
run even if they are referenced multiple times (patch by Rafael Monnerat).
- Removed any traces of the implementation of ``extended-by``. Raise a
UserError if the option is encountered instead of ignoring it, though.
Fixed: relative-paths weren't honored when bootstrapping or upgrading
(which is how the buildout script gets generated).
Fixed: initialization code wasn't included in interpreter scripts.
Fixed: macro inheritance bug, https://github.com/buildout/buildout/pull/37
Fixed: in the download module, the handling of directories that
are pointed to by file-system paths and ``file:`` URLs.

Fixed: if a configuration's [buildout] section had an ``extends``
entry pointing to a non-existing URL, the resulting error was not
very user friendly. https://bugs.launchpad.net/zc.buildout/+bug/566167

Fixed: https://bugs.launchpad.net/bugs/697913: buildout didn't honor
the exit code from scripts.
1.4.4 (2010-08-20)
==================
The 1.4.4 release is a release for people who encounter trouble
with the 1.5 line. By switching to `the associated bootstrap script
<https://raw.github.com/buildout/buildout/master/bootstrap/bootstrap.py>`_
you can stay on 1.4.4 until you are ready to migrate.
1.4.3 (2009-12-10)
==================
Bugs fixed:
- Using pre-detected setuptools version for easy_installing tgz files. This
prevents a recursion error when easy_installing an upgraded "distribute"
tgz. Note that setuptools did not have this recursion problem solely
because it was packaged as an ``.egg``, which does not have to go through
the easy_install step.
1.4.2 (2009-11-01)
==================
New Feature:
- Added a --distribute option to the bootstrap script, in order
to use Distribute rather than Setuptools. By default, Setuptools
is used.
Bugs fixed:
- While checking for new versions of setuptools and buildout itself,
compare requirement locations instead of requirement objects.
- Incrementing didn't work properly when extending multiple files.
https://bugs.launchpad.net/zc.buildout/+bug/421022
- The download API computed MD5 checksums of text files wrong on Windows.
1.4.1 (2009-08-27)
==================
New Feature:
- Added a debug built-in recipe to make writing some tests easier.
Bugs fixed:
- (introduced in 1.4.0) option incrementing (+=) and decrementing (-=)
didn't work in the buildout section.
https://bugs.launchpad.net/zc.buildout/+bug/420463
- Option incrementing and decrementing didn't work for options
specified on the command line.
- Scripts generated with relative-paths enabled couldn't be
symbolically linked to other locations and still work.
- Scripts run using generated interpreters didn't have __file__ set correctly.
- The standard Python -m option didn't work for custom interpreters.
1.4.0 (2009-08-26)
==================
- When doing variable substitutions, you can omit the section name to
refer to a variable in the same section (e.g. ${:foo}).
- When doing variable substitution, you can use the special option,
``_buildout_section_name_`` to get the section name. This is most handy
for getting the current section name (e.g. ${:_buildout_section_name_}).
- A new special option, ``<`` allows sections to be used as macros.
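
  A minimal sketch (the section and option names are invented)::

    [base]
    recipe = zc.recipe.egg

    [py]
    <= base
    eggs = mypackage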
- Added annotate command for annotated sections. Displays sections
key-value pairs along with the value origin.
- Added a download API that handles the download cache, offline mode etc and
is meant to be reused by recipes.
- Used the download API to allow caching of base configurations (specified by
the buildout section's 'extends' option).
1.3.1 (2009-08-12)
==================
- Bug fixed: extras were ignored in some cases when versions were specified.
1.3.0 (2009-06-22)
==================
- Better Windows compatibility in test infrastructure.
- Now the bootstrap.py has an optional --version argument,
that can be used to force buildout version to use.
- ``zc.buildout.testing.buildoutSetUp`` installs a new handler in the
python root logging facility. This handler is now removed during
tear down as it might disturb other packages reusing buildout's
testing infrastructure.
- fixed usage of 'relative_paths' keyword parameter on Windows
- Added an unload entry point for extensions.
- Fixed bug: when the relative paths option was used, relative paths
could be inserted into sys.path if a relative path was used to run
the generated script.
1.2.1 (2009-03-18)
==================
- Refactored generation of relative egg paths to generate simpler code.
1.2.0 (2009-03-17)
==================
- Added a relative_paths option to zc.buildout.easy_install.script to
generate egg paths relative to the script they're used in.
1.1.2 (2009-03-16)
==================
- Added Python 2.6 support. Removed Python 2.3 support.
- Fixed remaining deprecation warnings under Python 2.6, both when running
our tests and when using the package.
- Switched from using os.popen* to subprocess.Popen, to avoid a deprecation
warning in Python 2.6. See:
http://docs.python.org/library/subprocess.html#replacing-os-popen-os-popen2-os-popen3
- Made sure the 'redo_pyc' function and the doctest checkers work with Python
executable paths containing spaces.
- Expand shell patterns when processing the list of paths in `develop`, e.g::
[buildout]
develop = ./local-checkouts/*
- Conditionally import and use hashlib.md5 when it's available instead
of md5 module, which is deprecated in Python 2.6.
- Added Jython support for bootstrap, development bootstrap
and buildout support on Jython
- Fixed a bug that would cause buildout to break while computing a
directory hash if it found a broken symlink (Launchpad #250573)
1.1.1 (2008-07-28)
==================
- Fixed a bug that caused buildouts to fail when variable
substitutions are used to name standard directories, as in::
[buildout]
eggs-directory = ${buildout:directory}/develop-eggs
1.1.0 (2008-07-19)
==================
- Added a buildout-level unzip option to change the default policy for
unzipping zip-safe eggs.
- Tracebacks are now printed for internal errors (as opposed to user
errors) even without the -D option.
- pyc and pyo files are regenerated for installed eggs so that the
stored path in code objects matches the install location.
1.0.6 (2008-06-13)
==================
- Manually reverted the changeset for the fix for
  https://bugs.launchpad.net/zc.buildout/+bug/239212 to verify that the test
actually fails with the changeset:
http://svn.zope.org/zc.buildout/trunk/src/zc/buildout/buildout.py?rev=87309&r1=87277&r2=87309
Thanks tarek for pointing this out. (seletz)
- fixed the test for the += -= syntax in buildout.txt as the test
was actually wrong. The original implementation did a split/join
on whitespace, and later on that was corrected to respect the original
EOL setting, the test was not updated, though. (seletz)
- added a test to verify against https://bugs.launchpad.net/zc.buildout/+bug/239212
in allowhosts.txt (seletz)
- further fixes for """AttributeError: Buildout instance has no
attribute '_logger'""" by providing reasonable defaults
within the Buildout constructor (related to the new 'allow-hosts' option)
(patch by Gottfried Ganssauge) (ajung)
1.0.5 (2008-06-10)
==================
- Fixed wrong split when using the += and -= syntax (mustapha)
1.0.4 (2008-06-10)
==================
- Added the `allow-hosts` option (tarek)
- Quote the 'executable' argument when trying to detect the python
version using popen4. (sidnei)
- Quote the 'spec' argument, as in the case of installing an egg from
the buildout-cache, if the filename contains spaces it would fail (sidnei)
- Extended configuration syntax to allow -= and += operators (malthe, mustapha).
1.0.3 (2008-06-01)
==================
- fix for """AttributeError: Buildout instance has no attribute '_logger'"""
by providing reasonable defaults within the Buildout constructor.
(patch by Gottfried Ganssauge) (ajung)
1.0.2 (2008-05-13)
==================
- More fixes for Windows. A quoted sh-bang is now used on Windows to make the
.exe files work with a Python executable in 'program files'.
- Added "-t <timeout_in_seconds>" option for specifying the socket timeout.
(ajung)
1.0.1 (2008-04-02)
==================
- Made easy_install.py's _get_version accept non-final releases of Python,
like 2.4.4c0. (hannosch)
- Applied various patches for Windows (patch by Gottfried Ganssauge). (ajung)
- Applied patch fixing rmtree issues on Windows (patch by
Gottfried Ganssauge). (ajung)
1.0.0 (2008-01-13)
==================
- Added a French translation of the buildout tutorial.
1.0.0b31 (2007-11-01)
=====================
Feature Changes
---------------
- Added a configuration option that allows buildouts to ignore
dependency_links metadata specified in setup. By default
dependency_links in setup are used in addition to buildout specified
find-links. This can make it hard to control where eggs come
from. Here's how to tell buildout to ignore URLs in
dependency_links::
[buildout]
use-dependency-links = false
By default use-dependency-links is true, which matches the behavior
of previous versions of buildout.
- Added a configuration option that causes buildout to error if a
version is picked. This is a nice safety belt when fixing all
versions is intended, especially when creating releases.
Bugs Fixed
----------
- 151820: Develop failed if the setup.py script imported modules in
the distribution directory.
- Verbose logging of the develop command was omitting detailed
output.
- The setup command wasn't documented.
- The setup command failed if run in a directory without specifying a
configuration file.
- The setup command raised a stupid exception if run without arguments.
- When using a local find links or index, distributions weren't copied
to the download cache.
- When installing from source releases, a version specification (via a
buildout versions section) for setuptools was ignored when deciding
which setuptools to use to build an egg from the source release.
1.0.0b30 (2007-08-20)
=====================
Feature Changes
---------------
- Changed the default policy back to what it was to avoid breakage in
existing buildouts. Use::
[buildout]
prefer-final = true
to get the new policy. The new policy will go into effect in
buildout 2.
1.0.0b29 (2007-08-20)
=====================
Feature Changes
---------------
- Now, final distributions are preferred over non-final versions. If
both final and non-final versions satisfy a requirement, then the
final version will be used even if it is older. The normal way to
override this for specific packages is to specifically require a
non-final version, either specifically or via a lower bound.
- There is a buildout prefer-final version that can be used with a
value of "false"::
prefer-final = false
To prefer newer versions, regardless of whether or not they are
final, buildout-wide.
- The new simple Python index, http://cheeseshop.python.org/simple, is
used as the default index. This will provide better performance
than the human package index interface,
http://pypi.python.org/pypi. More importantly, it lists hidden
distributions, so buildouts with fixed distribution versions will be
able to find old distributions even if the distributions have been
hidden in the human PyPI interface.
Bugs Fixed
----------
- 126441: Look for default.cfg in the right place on Windows.
1.0.0b28 (2007-07-05)
=====================
Bugs Fixed
----------
- When requiring a specific version, buildout looked for new versions
even if that single version was already installed.
1.0.0b27 (2007-06-20)
=====================
Bugs Fixed
----------
- Scripts were generated incorrectly on Windows. This included the
buildout script itself, making buildout completely unusable.
1.0.0b26 (2007-06-19)
=====================
Feature Changes
---------------
- Thanks to recent fixes in setuptools, I was able to change buildout
  to use find-links and index information when searching extensions.
  Sadly, this work, especially the timing, was motivated by the need
  to use alternate indexes due to performance problems in the cheese
  shop (http://www.python.org/pypi/). I really hope we can address
  these performance problems soon.
1.0.0b25 (2007-05-31)
=====================
Feature Changes
---------------
- buildout now changes to the buildout directory before running recipe
install and update methods.
- Added a new init command for creating a new buildout. This creates
an empty configuration file and then bootstraps.
- Except when using the new init command, it is now an error to run
buildout without a configuration file.
- In verbose mode, when adding distributions to fulfil requirements of
already-added distributions, we now show why the new distributions
are being added.
- Changed the logging format to exclude the logger name for the
buildout logger. This reduces noise in the output.
- Cleaned up lots of messages, adding missing periods and adding quotes
  around requirement strings and file paths.
Bugs Fixed
----------
- 114614: Buildouts could take a very long time if there were
dependency problems in large sets of pathologically interdependent
packages.
- 59270: Buggy recipes can cause failures in later recipes via chdir
- 61890: file:// urls don't seem to work in find-links
setuptools requires that file urls that point to directories must
end in a "/". Added a workaround.
- 75607: buildout should not run if it creates an empty buildout.cfg
1.0.0b24 (2007-05-09)
=====================
Feature Changes
---------------
- Improved error reporting by showing which packages require other
packages that can't be found or that cause version conflicts.
- Added an API for use by recipe writers to clean up created files
when recipe errors occur.
- Log installed scripts.
Bugs Fixed
----------
- 92891: bootstrap crashes with recipe option in buildout section.
- 113085: Buildout exited with a zero exit status when internal errors
occurred.
1.0.0b23 (2007-03-19)
=====================
Feature Changes
---------------
- Added support for download caches. A buildout can specify a cache
for distribution downloads. The cache can be shared among buildouts
to reduce network access and to support creating source
distributions for applications allowing install without network
access.
- Log scripts created, as suggested in:
https://bugs.launchpad.net/zc.buildout/+bug/71353
Bugs Fixed
----------
- It wasn't possible to give options on the command line for sections
not defined in a configuration file.
1.0.0b22 (2007-03-15)
=====================
Feature Changes
---------------
- Improved error reporting and debugging support:
- Added "logical tracebacks" that show functionally what the buildout
was doing when an error occurs. Don't show a Python traceback
unless the -D option is used.
- Added a -D option that causes the buildout to print a traceback and
start the pdb post-mortem debugger when an error occurs.
- Warnings are printed for unused options in the buildout section and
installed-part sections. This should make it easier to catch option
misspellings.
- Changed the way the installed database (.installed.cfg) is handled
to avoid database corruption when a user breaks out of a buildout
with control-c.
- Don't save an installed database if there are no installed parts or
develop egg links.
1.0.0b21 (2007-03-06)
=====================
Feature Changes
---------------
- Added support for repeatable buildouts by allowing egg versions to
be specified in a versions section.
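
  For example (a sketch; ``spam`` is a made-up project, and in this
  release the buildout section still points at the versions section
  explicitly)::

    [buildout]
    versions = versions

    [versions]
    spam = 1.1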
- The easy_install module install and build functions now accept a
  versions argument that supplies a mapping from project names to
version numbers. This can be used to fix version numbers for
required distributions and their dependencies.
When a version isn't fixed, using either a versions option or using
a fixed version number in a requirement, then a debug log message is
emitted indicating the version picked. This is useful for setting
versions options.
A default_versions function can be used to set a default value for
this option.
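
  For example, a sketch (assuming the function accepts a mapping from
  project names to versions)::

    import zc.buildout.easy_install
    zc.buildout.easy_install.default_versions({'spam': '1.1'})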
- Adjusted the output for verbosity levels. Using a single -v option
no longer causes voluminous setuptools output. Using -vv and -vvv
now triggers extra setuptools output.
- Added a remove testing helper function that removes files or directories.
1.0.0b20 (2007-02-08)
=====================
Feature Changes
---------------
- Added a buildout newest option, to control whether the newest
distributions should be sought to meet requirements. This might
also provide a hint to recipes that don't deal with
distributions. For example, a recipe that manages subversion
checkouts might not update a checkout if newest is set to "false".
- Added a *newest* keyword parameter to the
zc.buildout.easy_install.install and zc.buildout.easy_install.build
functions to control whether the newest distributions that need
given requirements should be sought. If a false value is provided
for this parameter and already installed eggs meet the given
requirements, then no attempt will be made to search for newer
distributions.
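
  A sketch of the *newest* parameter (the requirement and destination
  directory are invented)::

    import zc.buildout.easy_install
    ws = zc.buildout.easy_install.install(
        ['spam'], 'eggs',
        newest=False)  # don't search for newer distributions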
- The recipe-testing support setUp function now adds the name
*buildout* to the test namespace with a value that is the path to
the buildout script in the sample buildout. This allows tests to
use
>>> print system(buildout),
rather than:
>>> print system(join('bin', 'buildout')),
Bugs Fixed
----------
- Paths returned from update methods replaced lists of installed files
rather than augmenting them.
1.0.0b19 (2007-01-24)
=====================
Bugs Fixed
----------
- Explicitly specifying a Python executable failed if the output of
running Python with the -V option included a 2-digit (rather than a
3-digit) version number.
1.0.0b18 (2007-01-22)
=====================
Feature Changes
---------------
- Added documentation for some previously undocumented features of the
easy_install APIs.
- By popular demand, added a -o command-line option that is a short
hand for the assignment buildout:offline=true.
Bugs Fixed
----------
- When deciding whether recipe develop eggs had changed, buildout
incorrectly considered files in .svn and CVS directories.
1.0.0b17 (2006-12-07)
=====================
Feature Changes
---------------
- Configuration files can now be loaded from URLs.
Bugs Fixed
----------
- https://bugs.launchpad.net/products/zc.buildout/+bug/71246
Buildout extensions installed as eggs couldn't be loaded in offline
mode.
1.0.0b16 (2006-12-07)
=====================
Feature Changes
---------------
- A new command-line argument, -U, suppresses reading user defaults.
- You can now suppress use of an installed-part database
(e.g. .installed.cfg) by specifying an empty value for the buildout
installed option.
Bugs Fixed
----------
- When the install command is used with a list of parts, only
those parts are supposed to be installed, but the buildout was also
building parts that those parts depended on.
1.0.0b15 (2006-12-06)
=====================
Bugs Fixed
----------
- Uninstall recipes weren't loaded correctly in cases where
no parts in the (new) configuration used the recipe egg.
1.0.0b14 (2006-12-05)
=====================
Feature Changes
---------------
- Added uninstall recipes for dealing with complex uninstallation
scenarios.
Bugs Fixed
----------
- Automatic upgrades weren't performed on Windows due to a bug that
caused buildout to incorrectly determine that it wasn't running
locally in a buildout.
- Fixed some spurious test failures on Windows.
1.0.0b13 (2006-12-04)
=====================
Feature Changes
---------------
- Variable substitutions now reflect option data written by recipes.
- A part referenced by a part in a parts list is now added to the parts
list before the referencing part. This means that you can omit
parts from the parts list if they are referenced by other parts.
- Added a develop function to the easy_install module to aid in
creating develop eggs with custom build_ext options.
- The build and develop functions in the easy_install module now
return the path of the egg or egg link created.
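
  A sketch of both points (the paths are invented, and build_ext is
  assumed to take a mapping of distutils build_ext options)::

    import zc.buildout.easy_install
    egg_link = zc.buildout.easy_install.develop(
        'src/spam',      # directory containing a setup script
        'develop-eggs',  # where to create the egg link
        build_ext={'include-dirs': 'include'})
    print egg_link       # the path of the created egg link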
- Removed the limitation that parts named in the install command can
only name configured parts.
- Removed support ConfigParser-style variable substitutions
(e.g. %(foo)s). Only the string-template style of variable
(e.g. ${section:option}) substitutions will be supported.
Supporting both violates "there's only one way to do it".
- Deprecated the buildout-section extendedBy option.
Bugs Fixed
----------
- We treat setuptools as a dependency of any distribution that
(declares that it) uses namespace packages, whether it declares
setuptools as a dependency or not. This wasn't working for eggs
installed by virtue of being dependencies.
1.0.0b12 (2006-10-24)
=====================
Feature Changes
---------------
- Added an initialization argument to the
zc.buildout.easy_install.scripts function to include initialization
code in generated scripts.
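
  A sketch (the requirement name is invented; the call style matches
  buildout's own bootstrap code)::

    import sys
    import pkg_resources
    import zc.buildout.easy_install

    zc.buildout.easy_install.scripts(
        ['spam'], pkg_resources.working_set, sys.executable, 'bin',
        initialization='import logging; logging.basicConfig()')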
1.0.0b11 (2006-10-24)
=====================
Bugs Fixed
----------
`67737 <https://launchpad.net/products/zc.buildout/+bug/67737>`_
  Verbose and quiet output options caused errors when the
develop buildout option was used to create develop eggs.
`67871 <https://launchpad.net/products/zc.buildout/+bug/67871>`_
Installation failed if the source was a (local) unzipped
egg.
`67873 <https://launchpad.net/products/zc.buildout/+bug/67873>`_
There was an error in producing an error message when part names
passed to the install command weren't included in the
configuration.
1.0.0b10 (2006-10-16)
=====================
Feature Changes
---------------
- Renamed the runsetup command to setup. (The old name still works.)
- Added a recipe update method. Now install is only called when a part
is installed for the first time, or after an uninstall. Otherwise,
update is called. For backward compatibility, recipes that don't
define update methods are still supported.
- If a distribution defines namespace packages but fails to declare
setuptools as one of its dependencies, we now treat setuptools as an
implicit dependency. We generate a warning if the distribution
is a develop egg.
- You can now create develop eggs for setup scripts that don't use setuptools.
Bugs Fixed
----------
- Egg links weren't removed when corresponding entries were removed
from develop sections.
- Running a non-local buildout command (one not installed in the
  buildout) led to a hang if new versions of buildout or setuptools
  were available.  Now we issue a warning and don't upgrade.
- When installing zip-safe eggs from local directories, the eggs were
moved, rather than copied, removing them from the source directory.
1.0.0b9 (2006-10-02)
====================
Bugs Fixed
----------
Non-zip-safe eggs were not unzipped when they were installed.
1.0.0b8 (2006-10-01)
====================
Bugs Fixed
----------
- Installing source distributions failed when using alternate Python
  versions (depending on the versions of Python used).
- Installing eggs wasn't handled as efficiently as possible due to a
bug in egg URL parsing.
- Fixed a bug in runsetup that caused setup scripts that introspected
__file__ to fail.
1.0.0b7
=======
Added a documented testing framework for use by recipes. Refactored
the buildout tests to use it.
Added a runsetup command to run a setup script.  This is handy if, like
me, you don't install setuptools in your system Python.
1.0.0b6
=======
Fixed https://launchpad.net/products/zc.buildout/+bug/60582
Use of extension options caused bootstrapping to fail if the eggs
directory didn't already exist. We no longer use extensions for
bootstrapping. There really isn't any reason to anyway.
1.0.0b5
=======
Refactored to do more work in buildout and less work in easy_install.
This makes things go a little faster, makes errors a little easier to
handle, and allows extensions (like the sftp extension) to influence
more of the process. This was done to fix a problem in using the sftp
support.
1.0.0b4
=======
- Added an **experimental** extensions mechanism, mainly to support
adding sftp support to buildouts that need it.
- Fixed buildout self-updating on Windows.
1.0.0b3
=======
- Added a help option (-h, --help).
- Increased the default level of verbosity.
- Buildouts now automatically update themselves to new versions of
buildout and setuptools.
- Added Windows support.
- Added a recipe API for generating user errors.
- No longer generate a py_zc.buildout script.
- Fixed some bugs in variable substitutions.
  The characters "-", ".", and " " weren't allowed in section or
  option names.
Substitutions with invalid names were ignored, which caused
misleading failures downstream.
- Improved error handling. No longer show tracebacks for user errors.
- Now require a recipe option (and therefore a section) for every part.
- Expanded the easy_install module API to:
- Allow extra paths to be provided
- Specify explicit entry points
- Specify entry-point arguments
1.0.0b2
=======
Added support for specifying some build_ext options when installing eggs
from source distributions.
1.0.0b1
=======
- Changed the bootstrapping code to only install setuptools and
  buildout. The bootstrap code no longer runs the buildout itself.
This was to fix a bug that caused parts to be recreated
unnecessarily because the recipe signature in the initial buildout
reflected temporary locations for setuptools and buildout.
- Now create a minimal setup.py if it doesn't exist and issue a
warning that it is being created.
- Fixed a bug in saving installed configuration data. %'s and extra
spaces weren't quoted.
1.0.0a1
=======
Initial public version
| zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/HISTORY.rst | HISTORY.rst |
Change History
**************
.. You should *NOT* be adding new change log entries to this file.
You should create a file in the news directory instead.
For helpful instructions, please see:
https://github.com/buildout/buildout/blob/master/doc/ADD-A-NEWS-ITEM.rst
.. towncrier release notes start
3.0.1 (2022-11-08)
------------------
Bug fixes:
- Fixed import of packaging.markers. [maurits] (#621)
3.0.0 (2022-11-07)
------------------
New features:
- Add support for PEP 508 markers in section condition expressions.
For example: ``[versions:python_version <= "3.9"]``.
[maurits] (#621)
Bug fixes:
- Command-line 'extends' now works with dirs in file names
[gotcha] (cli-extends)
- Add support for python311-315 in conditional section expressions. (#311)
- Make compatible with pip 22.2+, restoring Requires-Python functionality there.
Fixes `issue 613 <https://github.com/buildout/buildout/issues/613>`_.
[maurits] (#613)
3.0.0rc3 (2022-04-07)
---------------------
Bug fixes:
- Fix `TypeError: dist must be a Distribution instance` due to an issue
  between `setuptools` and `pip`. (#600)
3.0.0rc2 (2022-03-04)
---------------------
New features:
- add support for PEP496 environment markers (pep496)
Bug fixes:
- Fix TypeError for missing required `use_deprecated_html5lib` with pip 22.
Keep compatible with earlier pip versions. (#598)
3.0.0rc1 (2021-12-16)
---------------------
Bug fixes:
- Call pip via `python -m pip`. (#569)
3.0.0b5 (2021-11-29)
--------------------
Bug fixes:
- Fix the case where a C extension implements namespace packages without the corresponding directories. (#589)
- Honor command-line buildout:extends (#592)
3.0.0b4 (2021-11-25)
--------------------
New features:
- Allow running buildout in FIPS-enabled environments. (#570)
- Proper error message if extends-cache tries to expand ${section:variable} (#585)
Bug fixes:
- Forward verbose option to pip (#576)
- Check that file top_level.txt exists before opening.
Add check for other files as well. (#582)
- Return code of pip install subprocess is now properly returned to buildout. (#586)
3.0.0b3 (2021-10-08)
--------------------
New features:
- Improve warning message when a section contains unused options. (#483)
Bug fixes:
- Fix support of ``pip>=21.1`` (#567)
- Fix confusion when using multiple Python versions and
installing packages with C extensions
without proper binary wheel available. (#574)
Development:
- Avoid broken jobs on Travis because of security on PRs (travis-pr)
3.0.0b2 (2021-03-09)
--------------------
New features:
- Improve error message when a package version is not pinned and `allow-picked-versions = false`. (#481)
Bug fixes:
- Fix FileNotFoundError when installing eggs with top-level directory without code (like doc). (#556)
Development:
- Login to docker hub to avoid pull limits (travis)
- Initialize towncrier (#519)
3.0.0b1 (2021-03-07)
====================
- Fix issue with combination of `>` specs and `extras` and recent `setuptools`.
- Fix issue with incrementing options from `.buildout/default.cfg`.
- Support python37, python38 and python39 in conditional section expressions.
- Fix bootstrapping for python27 and python35.
3.0.0a2 (2020-05-25)
====================
- Ignore `.git` when computing signature of a recipe develop egg
- Warn when the name passed to `zc.recipe.egg:scripts`
is not defined in egg entry points.
- Show pip warning about Python version only once.
- Better patch for ``pkg_resources.Distribution.hashcmp`` performance.
3.0.0a1 (2020-05-17)
====================
- Scripts: ensure eggs are inserted before ``site-packages`` in ``sys.path``.
- Fix forever loop when changing ``zc.buildout`` version via ``buildout``.
- Add support for ``Requires-Python`` metadata.
Fragile monkeypatch that relies on ``pip._internal``.
Emits a warning when support is disabled due to changes in ``pip``.
- Use ``pip install`` instead of deprecated ``setuptools.easy_install``.
- Patch ``pkg_resources.Distribution`` to make install of unpinned versions quicker.
Most obvious with ``setuptools``.
2.13.3 (2020-02-11)
===================
- Fix DeprecationWarning about MutableMapping.
(`#484 <https://github.com/buildout/buildout/issues/484>`_)
2.13.2 (2019-07-03)
===================
- Fixed DeprecationWarning on python 3.7: "'U' mode is deprecated".
2.13.1 (2019-01-29)
===================
- Documentation update for the new ``buildout query`` command.
2.13.0 (2019-01-17)
===================
- Get information about the configuration with new command ``buildout query``.
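  For example, assuming a standard buildout layout::
    bin/buildout query buildout:eggs-directory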
2.12.2 (2018-09-04)
===================
- Upon an error, buildout exits with a non-zero exit code. This now also works
when running with ``-D``.
- Fixed most 'Deprecation' and 'Resource' warnings.
2.12.1 (2018-07-02)
===================
- zc.buildout now explicitly requests zc.recipe.egg >=2.0.6.
2.12.0 (2018-07-02)
===================
- Add a new buildout option ``allow-unknown-extras`` to enable
installing requirements that specify extras that do not exist. This
needs a corresponding update to zc.recipe.egg. See `issue 457
<https://github.com/buildout/buildout/issues/457>`_.
zc.recipe.egg has been updated to 2.0.6 for this change.
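  A minimal illustration (hypothetical snippet)::
    [buildout]
    allow-unknown-extras = true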
2.11.5 (2018-06-19)
===================
- Fix for `issue 295 <https://github.com/buildout/buildout/issues/295>`_. On
windows, deletion of temporary egg files is more robust now.
2.11.4 (2018-05-14)
===================
- Fix for `issue 451 <https://github.com/buildout/buildout/issues/451>`_:
distributions with a version number that normalizes to a shorter version
number (3.3.0 to 3.3, for instance) can be installed now.
2.11.3 (2018-04-13)
===================
- Update to use the new PyPI at https://pypi.org/.
2.11.2 (2018-03-19)
===================
- Fix for the #442 issue: AttributeError on
``pkg_resources.SetuptoolsVersion``.
2.11.1 (2018-03-01)
===================
- Made upgrade check more robust. When using extensions, the improvement
introduced in 2.11 could prevent buildout from restarting itself when it
upgraded setuptools.
2.11.0 (2018-01-21)
===================
- Installed packages are added to the working set immediately. This helps in
some corner cases that occur when system packages have versions that
conflict with our specified versions.
2.10.0 (2017-12-04)
===================
- Setuptools 38.2.0 started supporting wheels. Through setuptools, buildout
now also supports wheels! You need at least version 38.2.3 to get proper
namespace support.
This setuptools change interfered with buildout's recent support for
`buildout.wheel <https://github.com/buildout/buildout.wheel>`_, resulting in
a sudden "Wheels are not supported" error message (see `issue 435
  <https://github.com/buildout/buildout/issues/435>`_). Fixed by making
setuptools the default, though you can still use the buildout.wheel if you
want.
2.9.6 (2017-12-01)
==================
- Fixed: could not install eggs when sdist file name and package name had different
case.
2.9.5 (2017-09-22)
==================
- Use HTTPS for PyPI's index. PyPI redirects HTTP to HTTPS by default
now so using HTTPS directly avoids the potential for that redirect
being modified in flight.
2.9.4 (2017-06-20)
==================
- Sort the distributions used to compute ``__buildout_signature__`` to
  ensure reproducibility under Python 3, or under Python 2 when ``-R``
  is used or ``PYTHONHASHSEED`` is set to ``random``. Fixes `issue 392
<https://github.com/buildout/buildout/issues/392>`_.
**NOTE**: This may cause existing ``.installed.cfg`` to be
considered outdated and lead to parts being reinstalled spuriously
under Python 2.
- Add support code for doctests to be able to easily measure code
coverage. See `issue 397 <https://github.com/buildout/buildout/issues/397>`_.
2.9.3 (2017-03-30)
==================
- Add more verbosity to ``annotate`` results with ``-v``
- Select one or more sections with arguments after ``buildout annotate``.
2.9.2 (2017-03-06)
==================
- Fixed: We unnecessarily used a function from newer versions of
setuptools that caused problems when older setuptools or pkg_resources
installs were present (as in travis.ci).
2.9.1 (2017-03-06)
==================
- Fixed a minor packaging bug that broke the PyPI page.
2.9.0 (2017-03-06)
==================
- Added new syntax to explicitly declare that a part depends on another part.
See http://docs.buildout.org/en/latest/topics/implicit-parts.html
- Internal refactoring to work with `buildout.wheel
<https://github.com/buildout/buildout.wheel>`_.
- Fixed bugs in ``zc.buildout.testing.Buildout``: it was loading
  user-default configuration, and it didn't support calling the
  ``created`` method on its sections.
- Fixed a bug (Windows, Python 3.4):
  when processing metadata on "old-style" distutils scripts, .exe stubs
  appeared in ``metadata_listdir``, and reading those failed with
  ``UnicodeDecodeError``. We now skip .exe stubs.
2.8.0 (2017-02-13)
==================
- Added a hook to enable a soon-to-be-released buildout extension to
provide wheel support.
2.7.1 (2017-01-31)
==================
- Fixed a bug introduced in 2.6.0:
  zc.buildout and its dependencies were reported as picked even when
their versions were fixed in a ``versions`` section. Worse, when the
``update-versions-file`` option was used, the ``versions`` section was
updated needlessly on every run.
2.7.0 (2017-01-30)
==================
- Added a buildout option, ``abi-tag-eggs`` that, when true, causes
the `ABI tag <https://www.python.org/dev/peps/pep-0425/#abi-tag>`_
for the buildout environment to be added to the eggs directory name.
This is useful when switching Python implementations (e.g. CPython
  vs PyPy or debug builds vs regular builds), especially when
environment differences aren't reflected in egg names. It also has
the side benefit of making eggs directories smaller, because eggs
for different Python versions are in different directories.
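  A minimal illustration (hypothetical snippet)::
    [buildout]
    abi-tag-eggs = true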
2.6.0 (2017-01-29)
==================
- Updated to work with the latest setuptools.
- Added (verified) Python 3.6 support.
2.5.3 (2016-09-05)
==================
- After a dist is fetched and put into its final place, compile its
python files. No longer wait with compiling until all dists are in
place. This is related to the change below about not removing an
existing egg. [maurits]
- Do not remove an existing egg. When installing an egg to a location
that already exists, keep the current location (directory or file).
This can only happen when the location at first did not exist and
this changed during the buildout run. We used to remove the
previous location, but this could cause problems when running two
buildouts at the same time, when they try to install the same new
egg. Fixes #307. [maurits]
- In ``zc.buildout.testing.system``, set ``TERM=dumb`` in the environment.
This avoids invisible control characters popping up in some terminals,
like ``xterm``. Note that this may affect tests by buildout recipes.
[maurits]
- Removed Python 2.6 and 3.2 support.
[do3cc]
2.5.2 (2016-06-07)
==================
- Fixed ``-=`` and ``+=`` when extending sections. See #161.
[puittenbroek]
2.5.1 (2016-04-06)
==================
- Fix python 2 for downloading external config files with basic auth in the
URL. Fixes #257.
2.5.0 (2015-11-16)
==================
- Added more elaborate version and requirement information when there's a
version conflict. Previously, you could get a report of a version conflict
  without information about which dependency requested the conflicting
requirement.
Now all this information is logged and displayed in case of an error.
[reinout]
- Dropped 3.2 support (at least in the automatic tests) as setuptools will
soon stop supporting it. Added python 3.5 to the automatic tests.
[reinout]
2.4.7 (2015-10-29)
==================
- Fix for #279. Distutils script detection previously broke on windows with
python 3 because it errored on ``.exe`` files.
[reinout]
2.4.6 (2015-10-28)
==================
- Relative paths are now also correctly generated for the current directory
("develop = .").
[youngking]
2.4.5 (2015-10-14)
==================
- More complete fix for #24. Distutils scripts are now also generated for
develop eggs.
[reinout]
2.4.4 (2015-10-02)
==================
- zc.buildout is now also released as a wheel. (Note: buildout itself doesn't
support installing wheels yet.)
[graingert]
2.4.3 (2015-09-03)
==================
- Added nested directory creation support
[guyzmo]
2.4.2 (2015-08-26)
==================
- If a downloaded config file in the "extends-cache" gets corrupted, buildout
now tells you the filename in the cache. Handy for troubleshooting.
[reinout]
2.4.1 (2015-08-08)
==================
- Check the ``use-dependency-links`` option earlier. This can give
a small speed increase.
[maurits]
- When using python 2, urllib2 is used to work around Python issue 24599, which
affects downloading from behind a proxy.
[stefano-m]
2.4.0 (2015-07-01)
==================
- Buildout no longer breaks on packages that contain a file with a non-ascii
filename. Fixes #89 and #148.
[reinout]
- Undo breakage on Windows machines where ``sys.prefix`` can also be a
``site-packages`` directory: don't remove it from ``sys.path``. See
https://github.com/buildout/buildout/issues/217 .
- Remove assumption that ``pkg_resources`` is a module (untrue since
  release of ``setuptools 8.3``). See
https://github.com/buildout/buildout/issues/227 .
- Fix for #212. For certain kinds of conflict errors you'd get an
  UnpackError when rendering the error message, instead of a nicely
  formatted version conflict message.
[reinout]
- Making sure we use the correct easy_install when setuptools is installed
globally. See https://github.com/buildout/buildout/pull/232 and
https://github.com/buildout/buildout/pull/222 .
[lrowe]
- Updated buildout's `travis-ci <https://travis-ci.org/buildout/buildout>`_
  configuration so that tests run much quicker, making buildout easier
  and quicker to develop.
  [reinout]
- Note: zc.recipe.egg has also been updated to 2.0.2 together with this
zc.buildout release. Fixed: In ``zc.recipe.egg#custom`` recipe's ``rpath``
support, don't assume path elements are buildout-relative if they start with
one of the "special" tokens (e.g., ``$ORIGIN``). See:
https://github.com/buildout/buildout/issues/225.
[tseaver]
- ``download-cache``, ``eggs-directory`` and ``extends-cache`` are now
automatically created if their parent directory exists. Also they can be
relative directories (relative to the location of the buildout config file
that defines them). Also they can now be in the form ``~/subdir``, with the
usual convention that the ``~`` char means the home directory of the user
running buildout.
[lelit]
- A new bootstrap.py file is released (version 2015-07-01).
- When bootstrapping, the ``develop-eggs/`` directory is first removed. This
prevents old left-over ``.egg-link`` files from breaking buildout's careful
package collection mechanism.
[reinout]
- The bootstrap script now accepts ``--to-dir``. Setuptools is installed
there. If already available there, it is reused. This can be used to
bootstrap buildout without internet access. Similarly, a local
``ez_setup.py`` is used when available instead of it being downloaded. You
need setuptools 14.0 or higher for this functionality.
[lrowe]
- The bootstrap script now uses ``--buildout-version`` instead of
``--version`` to pick a specific buildout version.
[reinout]
- The bootstrap script now accepts ``--version`` which prints the bootstrap
version. This version is the date the bootstrap.py was last changed. A date
is handier or less confusing than either tracking zc.buildout's version or
having a separate bootstrap version number.
[reinout]
2.3.1 (2014-12-16)
==================
- Fixed: Buildout merged single-version requirements with
version-range requirements in a way that caused it to think there
  wasn't a single-version requirement. IOW, buildout thought that
versions were being picked when they weren't.
- Suppress spurious (and possibly non-spurious) version-parsing warnings.
2.3.0 (2014-12-14)
==================
- Buildout is now compatible with (and requires) setuptools 8.
2.2.5 (2014-11-04)
==================
- Improved fix for #198: when bootstrapping with an extension, buildout was
too strict on itself, resulting in an inability to upgrade or downgrade its
own version.
[reinout]
- Setuptools must be at 3.3 or higher now. If you use the latest bootstrap
from http://downloads.buildout.org/2/bootstrap.py you're all set.
[reinout]
- Installing *recipes* that themselves have dependencies used to fail with a
VersionConflict if such a dependency was installed globally with a lower
version. Buildout now ignores the version conflict in those cases and simply
installs the correct version.
[reinout]
2.2.4 (2014-11-01)
==================
- Fix for #198: buildout 2.2.3 caused a version conflict when bootstrapping a
buildout with a version pinned to an earlier one. Same version conflict
could occur with system-wide installed packages that were newer than the
pinned version.
[reinout]
2.2.3 (2014-10-30)
==================
- Fix #197, Python 3 regression
[aclark4life]
2.2.2 (2014-10-30)
==================
- Open files for ``exec()`` in universal newlines mode. See
https://github.com/buildout/buildout/issues/130
- Add ``BUILDOUT_HOME`` as an alternate way to control how the user default
configuration is found.
- Close various files when finished writing to them. This avoids
ResourceWarnings on Python 3, and better supports doctests under PyPy.
- Introduce improved easy_install Install.install function. This is present
  in 1.5.X and 1.7.X but was never merged into 2.X somehow.
2.2.1 (2013-09-05)
==================
- ``distutils`` scripts: correct order of operations on ``from ... import``
lines (see https://github.com/buildout/buildout/issues/134).
- Add an ``--allow-site-packages`` option to ``bootstrap.py``, defaulting
to False. If the value is false, strip any "site packages" (as defined by
the ``site`` module) from ``sys.path`` before attempting to import
``setuptools`` / ``pkg_resources``.
- Updated the URL used to fetch ``ez_setup.py`` to the official, non-version-
pinned version.
2.2.0 (2013-07-05)
==================
- Handle both addition and subtraction of elements (+= and -=) on the
  same key in the same section (see the example after this list).
  Forward-ported from buildout 1.6.
- Suppress the useless ``Link to <URL> ***BLOCKED*** by --allow-hosts``
error message being emitted by distribute / setuptools.
- Extend distutils script generation to support module docstrings and
__future__ imports.
- Refactored picked versions logic to make it easier to use for plugins.
- Use ``get_win_launcher`` API to find Windows launcher (falling back to
``resource_string`` for ``cli.exe``).
- Remove ``data_files`` from ``setup.py``: it was installing ``README.txt``
in current directory during installation (merged from 1.x branch).
- Switch dependency from ``distribute 0.6.x`` to ``setuptools 0.7.x``.
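A minimal illustration (hypothetical snippets; assume ``base.cfg``
defines ``parts = a b c``)::
  [buildout]
  extends = base.cfg
  parts += d
  parts -= b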
2.1.0 (2013-03-23)
==================
- Meta-recipe support
- Conditional sections
- Buildout now accepts a ``--version`` command-line option to print
its version.
- Fixed: Buildout didn't exit with a non-zero exit status if there was a
  failure in combination with an upgrade.
- Fixed: We now fail with an informative error when an old bootstrap
  script causes buildout 2 to be used with setuptools.
- Fixed: An error incorrectly suggested that buildout 2 implemented all
  of the functionality of dumppickedversions.
- Fixed: Buildout generated bad scripts when no eggs needed to be added
  to ``sys.path``.
- Fixed: Buildout didn't honour Unix umask when generating scripts.
  https://bugs.launchpad.net/zc.buildout/+bug/180705
- Fixed: ``update-versions-file`` didn't work unless
  ``show-picked-versions`` was also set.
  https://github.com/buildout/buildout/issues/71
2.0.1 (2013-02-16)
==================
- Fixed: buildout didn't honor umask settings when creating scripts.
- Fix for distutils scripts installation on Python 3, related to
``__pycache__`` directories.
- Fixed: encoding data in non-entry-point-based scripts was lost.
| zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/CHANGES.rst | CHANGES.rst |
********
Buildout
********
.. image:: https://github.com/buildout/buildout/actions/workflows/run-tests.yml/badge.svg
:alt: GHA tests report
:target: https://github.com/buildout/buildout/actions/workflows/run-tests.yml
Buildout is a project designed to solve 2 problems:
1. Application-centric assembly and deployment
*Assembly* runs the gamut from stitching together libraries to
create a running program, to production deployment configuration of
applications, and associated systems and tools (e.g. run-control
scripts, cron jobs, logs, service registration, etc.).
Buildout might be confused with build tools like make or ant, but
it is a little higher level and might invoke systems like make or
ant to get its work done.
Buildout might be confused with systems like puppet or chef, but it
is more application focused. Systems like puppet or chef might
use buildout to get their work done.
Buildout is also somewhat Python-centric, even though it can be
used to assemble and deploy non-python applications. It has some
special features for assembling Python programs. It's scripted with
   Python, unlike, say, puppet or chef, which are scripted with Ruby.
2. Repeatable assembly of programs from Python software distributions
Buildout puts great effort toward making program assembly a highly
repeatable process, whether in a very open-ended development mode,
where dependency versions aren't locked down, or in a deployment
environment where dependency versions are fully specified. You
should be able to check buildout into a VCS and later check it out.
Two checkouts built at the same time in the same environment should
always give the same result, regardless of their history. Among
other things, after a buildout, all dependencies should be at the
most recent version consistent with any version specifications
expressed in the buildout.
Buildout supports applications consisting of multiple programs,
with different programs in an application free to use different
versions of Python distributions. This is in contrast with a
   Python installation (real or virtual), where, for any given
   distribution, only one version can be installed.
To learn more about buildout, including how to use it, see
http://docs.buildout.org/.
| zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/README.rst | README.rst |
How To Contribute
*****************
Thank you for considering contributing to ``buildout``!
Workflow
========
- No contribution is too small!
- Please make sure to create one pull request for one change.
- Please try to add tests for your code.
- Make sure your changes pass **continuous integration**.
When CI fails, please try to fix it or ask for help.
Developing buildout itself and running the test suite
=====================================================
When you're developing buildout itself, you need to know two things:
- Use a clean Python *without* setuptools installed. Otherwise many tests
will find your already-installed setuptools, leading to test differences
when setuptools' presence is explicitly tested.
- Also the presence of ``~/.buildout/default.cfg`` may interfere with the
tests so you may want to temporarily rename it so that it does not get in
the way.
- Bootstrap with ``python dev.py``.
- Run buildout with ``-U``, to ignore user (default) settings, which can
  interfere with using the development version.
For your convenience we provide a Makefile to build various Python versions
in subdirectories of the buildout checkout. To use these and run the tests
with them do::
make PYTHON_VER=2.7 build
make PYTHON_VER=2.7 test
make PYTHON_VER=3.9 build
make PYTHON_VER=3.9 test
The actual Python compilation is only done once and then re-used. So on
subsequent builds, only the development buildout itself needs to be redone.
Releases: zc.buildout, zc.recipe.egg and bootstrap.py
=====================================================
Buildout consists of two Python packages that are released separately:
zc.buildout and zc.recipe.egg. zc.recipe.egg is changed much less often than
zc.buildout.
zc.buildout's setup.py and changelog are in the same directory as this
``DEVELOPERS.txt`` and the code is in ``src/zc/buildout``.
zc.recipe.egg, including setup.py and a separate changelog, is in the
``zc.recipe.egg_`` subdirectory.
When releasing, make sure you also build a (universal) wheel in addition to
the regular .tar.gz::
$ python setup.py sdist bdist_wheel upload
You can also use zest.releaser to release it. If you've installed it as
``zest.releaser[recommended]`` it builds the wheel for you and uploads it via
https (via twine).
Roadmap
=======
Currently, there are two active branches:
- master (development branch for the upcoming version 3)
- 2.x (development branch for the current version 2)
Active feature development and bug fixes only happen on the **master** branch.
Supported Python Versions
=========================
We align the support of Python versions with
`Zope <https://www.zope.org/developer/roadmap.html>`_ and
`Plone <https://plone.org/download/release-schedule>`_ development.
This means, currently there are no plans to drop Python 2.7 support.
Licensing
=========
This project is licensed under the Zope Public License.
Unlike contributing to the Zope and Plone projects,
you do not need to sign a contributor agreement to contribute to **buildout**.
| zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/CONTRIBUTING.rst | CONTRIBUTING.rst |
import copy
import distutils.errors
import errno
import glob
import logging
import os
import pkg_resources
from pkg_resources import packaging
import py_compile
import re
import setuptools.archive_util
import setuptools.command.easy_install
import setuptools.command.setopt
import setuptools.package_index
import shutil
import subprocess
import sys
import tempfile
import zc.buildout
import zc.buildout.rmtree
from zc.buildout import WINDOWS
from zc.buildout import PY3
import warnings
import csv
try:
from setuptools.wheel import Wheel # This is the important import
from setuptools import __version__ as setuptools_version
# Now we need to check if we have at least 38.2.3 for namespace support.
SETUPTOOLS_SUPPORTS_WHEELS = (
pkg_resources.parse_version(setuptools_version) >=
pkg_resources.parse_version('38.2.3'))
except ImportError:
SETUPTOOLS_SUPPORTS_WHEELS = False
BIN_SCRIPTS = 'Scripts' if WINDOWS else 'bin'
warnings.filterwarnings(
'ignore', '.+is being parsed as a legacy, non PEP 440, version')
_oprp = getattr(os.path, 'realpath', lambda path: path)
def realpath(path):
return os.path.normcase(os.path.abspath(_oprp(path)))
default_index_url = os.environ.get(
'buildout_testing_index_url',
'https://pypi.org/simple',
)
logger = logging.getLogger('zc.buildout.easy_install')
url_match = re.compile('[a-z0-9+.-]+://').match
is_source_encoding_line = re.compile(r'coding[:=]\s*([-\w.]+)').search
# Source encoding regex from http://www.python.org/dev/peps/pep-0263/
is_win32 = sys.platform == 'win32'
is_jython = sys.platform.startswith('java')
if is_jython:
import java.lang.System
jython_os_name = (java.lang.System.getProperties()['os.name']).lower()
# Make sure we're not being run with an older bootstrap.py that gives us
# distribute instead of setuptools.
has_distribute = pkg_resources.working_set.find(
pkg_resources.Requirement.parse('distribute')) is not None
has_setuptools = pkg_resources.working_set.find(
pkg_resources.Requirement.parse('setuptools')) is not None
if has_distribute and not has_setuptools:
    sys.exit("zc.buildout 3 needs setuptools, not distribute. "
             "Did you properly install with pip in a virtualenv?")
# Include buildout and setuptools eggs in paths. We get this
# initially from the entire working set. Later, we'll use the install
# function to narrow to just the buildout and setuptools paths.
buildout_and_setuptools_path = sorted({d.location for d in pkg_resources.working_set})
setuptools_path = buildout_and_setuptools_path
pip_path = buildout_and_setuptools_path
logger.debug('before restricting versions: pip_path %r', pip_path)
FILE_SCHEME = re.compile('file://', re.I).match
DUNDER_FILE_PATTERN = re.compile(r"__file__ = '(?P<filename>.+)'$")
class _Monkey(object):
def __init__(self, module, **kw):
mdict = self._mdict = module.__dict__
self._before = mdict.copy()
self._overrides = kw
def __enter__(self):
self._mdict.update(self._overrides)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._mdict.clear()
self._mdict.update(self._before)
class _NoWarn(object):
def warn(self, *args, **kw):
pass
_no_warn = _NoWarn()
class AllowHostsPackageIndex(setuptools.package_index.PackageIndex):
"""Will allow urls that are local to the system.
No matter what is allow_hosts.
"""
def url_ok(self, url, fatal=False):
if FILE_SCHEME(url):
return True
# distutils has its own logging, which can't be hooked / suppressed,
# so we monkey-patch the 'log' submodule to suppress the stupid
# "Link to <URL> ***BLOCKED*** by --allow-hosts" message.
with _Monkey(setuptools.package_index, log=_no_warn):
return setuptools.package_index.PackageIndex.url_ok(
self, url, False)
_indexes = {}
def _get_index(index_url, find_links, allow_hosts=('*',)):
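    # Cache indexes by (index_url, find_links): repeated Installer
    # construction then reuses one PackageIndex and its scan state.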
key = index_url, tuple(find_links)
index = _indexes.get(key)
if index is not None:
return index
if index_url is None:
index_url = default_index_url
if index_url.startswith('file://'):
index_url = index_url[7:]
index = AllowHostsPackageIndex(index_url, hosts=allow_hosts)
if find_links:
index.add_find_links(find_links)
_indexes[key] = index
return index
clear_index_cache = _indexes.clear
if is_win32:
# work around spawn lamosity on windows
    # XXX need safe quoting (see subprocess.list2cmdline) and test
def _safe_arg(arg):
return '"%s"' % arg
else:
_safe_arg = str
def call_subprocess(args, **kw):
if subprocess.call(args, **kw) != 0:
raise Exception(
"Failed to run command:\n%s"
% repr(args)[1:-1])
def _execute_permission():
current_umask = os.umask(0o022)
# os.umask only returns the current umask if you also give it one, so we
# have to give it a dummy one and immediately set it back to the real
# value... Distribute does the same.
os.umask(current_umask)
return 0o777 - current_umask
def get_namespace_package_paths(dist):
"""
Generator of the expected pathname of each __init__.py file of the
namespaces of a distribution.
"""
base = [dist.location]
init = ['__init__.py']
for namespace in dist.get_metadata_lines('namespace_packages.txt'):
yield os.path.join(*(base + namespace.split('.') + init))
def namespace_packages_need_pkg_resources(dist):
if os.path.isfile(dist.location):
# Zipped egg, with namespaces, surely needs setuptools
return True
# If they have `__init__.py` files that use pkg_resources and don't
# fallback to using `pkgutil`, then they need setuptools/pkg_resources:
for path in get_namespace_package_paths(dist):
if os.path.isfile(path):
with open(path, 'rb') as f:
source = f.read()
if (source and
b'pkg_resources' in source and
not b'pkgutil' in source):
return True
return False
def dist_needs_pkg_resources(dist):
"""
A distribution needs setuptools/pkg_resources added as requirement if:
* It has namespace packages declared with:
- `pkg_resources.declare_namespace()`
* Those namespace packages don't fall back to `pkgutil`
* It doesn't have `setuptools/pkg_resources` as requirement already
"""
return (
dist.has_metadata('namespace_packages.txt') and
# This will need to change when `pkg_resources` gets its own
# project:
'setuptools' not in {r.project_name for r in dist.requires()} and
namespace_packages_need_pkg_resources(dist)
)
class Installer(object):
_versions = {}
_required_by = {}
_picked_versions = {}
_download_cache = None
_install_from_cache = False
_prefer_final = True
_use_dependency_links = True
_allow_picked_versions = True
_store_required_by = False
_allow_unknown_extras = False
def __init__(self,
dest=None,
links=(),
index=None,
executable=sys.executable,
always_unzip=None, # Backward compat :/
path=None,
newest=True,
versions=None,
use_dependency_links=None,
allow_hosts=('*',),
check_picked=True,
allow_unknown_extras=False,
):
assert executable == sys.executable, (executable, sys.executable)
self._dest = dest if dest is None else pkg_resources.normalize_path(dest)
self._allow_hosts = allow_hosts
self._allow_unknown_extras = allow_unknown_extras
if self._install_from_cache:
if not self._download_cache:
raise ValueError("install_from_cache set to true with no"
" download cache")
links = ()
index = 'file://' + self._download_cache
if use_dependency_links is not None:
self._use_dependency_links = use_dependency_links
self._links = links = list(self._fix_file_links(links))
if self._download_cache and (self._download_cache not in links):
links.insert(0, self._download_cache)
self._index_url = index
path = (path and path[:] or []) + buildout_and_setuptools_path
self._path = path
if self._dest is None:
newest = False
self._newest = newest
self._env = self._make_env()
self._index = _get_index(index, links, self._allow_hosts)
self._requirements_and_constraints = []
self._check_picked = check_picked
if versions is not None:
self._versions = normalize_versions(versions)
def _make_env(self):
full_path = self._get_dest_dist_paths() + self._path
env = pkg_resources.Environment(full_path)
# this needs to be called whenever self._env is modified (or we could
# make an Environment subclass):
self._eggify_env_dest_dists(env, self._dest)
return env
def _env_rescan_dest(self):
self._env.scan(self._get_dest_dist_paths())
self._eggify_env_dest_dists(self._env, self._dest)
def _get_dest_dist_paths(self):
dest = self._dest
if dest is None:
return []
eggs = glob.glob(os.path.join(dest, '*.egg'))
dists = [os.path.dirname(dist_info) for dist_info in
glob.glob(os.path.join(dest, '*', '*.dist-info'))]
return list(set(eggs + dists))
@staticmethod
def _eggify_env_dest_dists(env, dest):
"""
Make sure everything found under `dest` is seen as an egg, even if it's
some other kind of dist.
"""
for project_name in env:
for dist in env[project_name]:
if os.path.dirname(dist.location) == dest:
dist.precedence = pkg_resources.EGG_DIST
def _version_conflict_information(self, name):
"""Return textual requirements/constraint information for debug purposes
We do a very simple textual search, as that filters out most
extraneous information without missing anything.
"""
output = [
"Version and requirements information containing %s:" % name]
version_constraint = self._versions.get(name)
if version_constraint:
output.append(
"[versions] constraint on %s: %s" % (name, version_constraint))
output += [line for line in self._requirements_and_constraints
if name.lower() in line.lower()]
return '\n '.join(output)
def _satisfied(self, req, source=None):
dists = [dist for dist in self._env[req.project_name] if dist in req]
if not dists:
logger.debug('We have no distributions for %s that satisfies %r.',
req.project_name, str(req))
return None, self._obtain(req, source)
# Note that dists are sorted from best to worst, as promised by
# env.__getitem__
for dist in dists:
if (dist.precedence == pkg_resources.DEVELOP_DIST):
logger.debug('We have a develop egg: %s', dist)
return dist, None
# Special common case, we have a specification for a single version:
specs = req.specs
if len(specs) == 1 and specs[0][0] == '==':
logger.debug('We have the distribution that satisfies %r.',
str(req))
return dists[0], None
if self._prefer_final:
fdists = [dist for dist in dists
if self._final_version(dist.parsed_version)
]
if fdists:
# There are final dists, so only use those
dists = fdists
if not self._newest:
# We don't need the newest, so we'll use the newest one we
# find, which is the first returned by
# Environment.__getitem__.
return dists[0], None
best_we_have = dists[0] # Because dists are sorted from best to worst
# We have some installed distros. There might, theoretically, be
# newer ones. Let's find out which ones are available and see if
# any are newer. We only do this if we're willing to install
# something, which is only true if dest is not None:
best_available = self._obtain(req, source)
if best_available is None:
# That's a bit odd. There aren't any distros available.
# We should use the best one we have that meets the requirement.
logger.debug(
'There are no distros available that meet %r.\n'
'Using our best, %s.',
str(req), best_we_have)
return best_we_have, None
if self._prefer_final:
if self._final_version(best_available.parsed_version):
if self._final_version(best_we_have.parsed_version):
if (best_we_have.parsed_version
<
best_available.parsed_version
):
return None, best_available
else:
return None, best_available
else:
if (not self._final_version(best_we_have.parsed_version)
and
(best_we_have.parsed_version
<
best_available.parsed_version
)
):
return None, best_available
else:
if (best_we_have.parsed_version
<
best_available.parsed_version
):
return None, best_available
logger.debug(
'We have the best distribution that satisfies %r.',
str(req))
return best_we_have, None
def _call_pip_install(self, spec, dest, dist):
tmp = tempfile.mkdtemp(dir=dest)
try:
paths = call_pip_install(spec, tmp)
dists = []
env = pkg_resources.Environment(paths)
for project in env:
dists.extend(env[project])
if not dists:
raise zc.buildout.UserError("Couldn't install: %s" % dist)
if len(dists) > 1:
logger.warn("Installing %s\n"
"caused multiple distributions to be installed:\n"
"%s\n",
dist, '\n'.join(map(str, dists)))
else:
d = dists[0]
if d.project_name != dist.project_name:
logger.warn("Installing %s\n"
"Caused installation of a distribution:\n"
"%s\n"
"with a different project name.",
dist, d)
if d.version != dist.version:
logger.warn("Installing %s\n"
"Caused installation of a distribution:\n"
"%s\n"
"with a different version.",
dist, d)
result = []
for d in dists:
result.append(_move_to_eggs_dir_and_compile(d, dest))
return result
finally:
zc.buildout.rmtree.rmtree(tmp)
def _obtain(self, requirement, source=None):
        # initialize our index for this project:
index = self._index
if index.obtain(requirement) is None:
# Nothing is available.
return None
# Filter the available dists for the requirement and source flag
dists = [dist for dist in index[requirement.project_name]
if ((dist in requirement)
and
((not source) or
(dist.precedence == pkg_resources.SOURCE_DIST)
)
)
]
# If we prefer final dists, filter for final and use the
# result if it is non empty.
if self._prefer_final:
fdists = [dist for dist in dists
if self._final_version(dist.parsed_version)
]
if fdists:
# There are final dists, so only use those
dists = fdists
# Now find the best one:
best = []
bestv = None
for dist in dists:
distv = dist.parsed_version
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if not best:
return None
if len(best) == 1:
return best[0]
if self._download_cache:
for dist in best:
if (realpath(os.path.dirname(dist.location))
==
self._download_cache
):
return dist
best.sort()
return best[-1]
def _fetch(self, dist, tmp, download_cache):
if (download_cache
and (realpath(os.path.dirname(dist.location)) == download_cache)
):
logger.debug("Download cache has %s at: %s", dist, dist.location)
return dist
logger.debug("Fetching %s from: %s", dist, dist.location)
new_location = self._index.download(dist.location, tmp)
if (download_cache
and (realpath(new_location) == realpath(dist.location))
and os.path.isfile(new_location)
):
# setuptools avoids making extra copies, but we want to copy
# to the download cache
shutil.copy2(new_location, tmp)
new_location = os.path.join(tmp, os.path.basename(new_location))
return dist.clone(location=new_location)
def _get_dist(self, requirement, ws):
__doing__ = 'Getting distribution for %r.', str(requirement)
# Maybe an existing dist is already the best dist that satisfies the
# requirement
dist, avail = self._satisfied(requirement)
if dist is None:
if self._dest is None:
raise zc.buildout.UserError(
"We don't have a distribution for %s\n"
"and can't install one in offline (no-install) mode.\n"
% requirement)
logger.info(*__doing__)
# Retrieve the dist:
if avail is None:
self._index.obtain(requirement)
raise MissingDistribution(requirement, ws)
# We may overwrite distributions, so clear importer
# cache.
sys.path_importer_cache.clear()
tmp = self._download_cache
if tmp is None:
tmp = tempfile.mkdtemp('get_dist')
try:
dist = self._fetch(avail, tmp, self._download_cache)
if dist is None:
raise zc.buildout.UserError(
"Couldn't download distribution %s." % avail)
dists = [_move_to_eggs_dir_and_compile(dist, self._dest)]
for _d in dists:
if _d not in ws:
ws.add(_d, replace=True)
finally:
if tmp != self._download_cache:
zc.buildout.rmtree.rmtree(tmp)
self._env_rescan_dest()
dist = self._env.best_match(requirement, ws)
logger.info("Got %s.", dist)
else:
dists = [dist]
if dist not in ws:
ws.add(dist)
if not self._install_from_cache and self._use_dependency_links:
self._add_dependency_links_from_dists(dists)
if self._check_picked:
self._check_picked_requirement_versions(requirement, dists)
return dists
def _add_dependency_links_from_dists(self, dists):
reindex = False
links = self._links
for dist in dists:
if dist.has_metadata('dependency_links.txt'):
for link in dist.get_metadata_lines('dependency_links.txt'):
link = link.strip()
if link not in links:
logger.debug('Adding find link %r from %s',
link, dist)
links.append(link)
reindex = True
if reindex:
self._index = _get_index(self._index_url, links, self._allow_hosts)
def _check_picked_requirement_versions(self, requirement, dists):
""" Check whether we picked a version and, if we did, report it """
for dist in dists:
if not (dist.precedence == pkg_resources.DEVELOP_DIST
or
(len(requirement.specs) == 1
and
requirement.specs[0][0] == '==')
):
logger.debug('Picked: %s = %s',
dist.project_name, dist.version)
self._picked_versions[dist.project_name] = dist.version
if not self._allow_picked_versions:
msg = NOT_PICKED_AND_NOT_ALLOWED.format(
name=dist.project_name,
version=dist.version
)
raise zc.buildout.UserError(msg)
def _maybe_add_setuptools(self, ws, dist):
if dist_needs_pkg_resources(dist):
# We have a namespace package but no requirement for setuptools
if dist.precedence == pkg_resources.DEVELOP_DIST:
logger.warning(
"Develop distribution: %s\n"
"uses namespace packages but the distribution "
"does not require setuptools.",
dist)
requirement = self._constrain(
pkg_resources.Requirement.parse('setuptools')
)
if ws.find(requirement) is None:
self._get_dist(requirement, ws)
def _constrain(self, requirement):
"""Return requirement with optional [versions] constraint added."""
constraint = self._versions.get(requirement.project_name.lower())
if constraint:
try:
requirement = _constrained_requirement(constraint,
requirement)
except IncompatibleConstraintError:
logger.info(self._version_conflict_information(
requirement.project_name.lower()))
raise
return requirement
def install(self, specs, working_set=None):
logger.debug('Installing %s.', repr(specs)[1:-1])
self._requirements_and_constraints.append(
"Base installation request: %s" % repr(specs)[1:-1])
for_buildout_run = bool(working_set)
requirements = [pkg_resources.Requirement.parse(spec)
for spec in specs]
requirements = [
self._constrain(requirement)
for requirement in requirements
if not requirement.marker or requirement.marker.evaluate()
]
if working_set is None:
ws = pkg_resources.WorkingSet([])
else:
ws = working_set
for requirement in requirements:
for dist in self._get_dist(requirement, ws):
self._maybe_add_setuptools(ws, dist)
# OK, we have the requested distributions and they're in the working
# set, but they may have unmet requirements. We'll resolve these
# requirements. This is code modified from
# pkg_resources.WorkingSet.resolve. We can't reuse that code directly
# because we have to constrain our requirements (see
# versions_section_ignored_for_dependency_in_favor_of_site_packages in
# zc.buildout.tests).
requirements.reverse() # Set up the stack.
processed = {} # This is a set of processed requirements.
best = {} # This is a mapping of package name -> dist.
# Note that we don't use the existing environment, because we want
# to look for new eggs unless what we have is the best that
# matches the requirement.
env = pkg_resources.Environment(ws.entries)
while requirements:
# Process dependencies breadth-first.
current_requirement = requirements.pop(0)
req = self._constrain(current_requirement)
if req in processed:
# Ignore cyclic or redundant dependencies.
continue
dist = best.get(req.key)
if dist is None:
try:
dist = env.best_match(req, ws)
except pkg_resources.VersionConflict as err:
logger.debug(
"Version conflict while processing requirement %s "
"(constrained to %s)",
current_requirement, req)
# Installing buildout itself and its extensions and
# recipes requires the global
# ``pkg_resources.working_set`` to be active, which also
# includes all system packages. So there might be
# conflicts, which are fine to ignore. We'll grab the
# correct version a few lines down.
if not for_buildout_run:
raise VersionConflict(err, ws)
if dist is None:
if self._dest:
logger.debug('Getting required %r', str(req))
else:
logger.debug('Adding required %r', str(req))
self._log_requirement(ws, req)
for dist in self._get_dist(req, ws):
self._maybe_add_setuptools(ws, dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency.
logger.info(self._version_conflict_information(req.key))
raise VersionConflict(
pkg_resources.VersionConflict(dist, req), ws)
best[req.key] = dist
missing_requested = sorted(
set(req.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
if missing_requested:
if not self._allow_unknown_extras:
raise zc.buildout.UserError(
"Couldn't find the required extra. "
"This means the requirement is incorrect. "
"If the requirement is itself from software you "
"requested, then there might be a bug in "
"requested software. You can ignore this by "
"using 'allow-unknown-extras=true', however "
"that may simply cause needed software to be omitted."
)
extra_requirements = sorted(
set(dist.extras) & set(req.extras)
)
else:
extra_requirements = dist.requires(req.extras)[::-1]
for extra_requirement in extra_requirements:
self._requirements_and_constraints.append(
"Requirement of %s: %s" % (
current_requirement, extra_requirement))
requirements.extend(extra_requirements)
processed[req] = True
return ws
def build(self, spec, build_ext):
requirement = self._constrain(pkg_resources.Requirement.parse(spec))
dist, avail = self._satisfied(requirement, 1)
if dist is not None:
return [dist.location]
# Retrieve the dist:
if avail is None:
raise zc.buildout.UserError(
"Couldn't find a source distribution for %r."
% str(requirement))
if self._dest is None:
raise zc.buildout.UserError(
"We don't have a distribution for %s\n"
"and can't build one in offline (no-install) mode.\n"
% requirement
)
logger.debug('Building %r', spec)
tmp = self._download_cache
if tmp is None:
tmp = tempfile.mkdtemp('get_dist')
try:
dist = self._fetch(avail, tmp, self._download_cache)
build_tmp = tempfile.mkdtemp('build')
try:
setuptools.archive_util.unpack_archive(dist.location,
build_tmp)
if os.path.exists(os.path.join(build_tmp, 'setup.py')):
base = build_tmp
else:
setups = glob.glob(
os.path.join(build_tmp, '*', 'setup.py'))
if not setups:
raise distutils.errors.DistutilsError(
"Couldn't find a setup script in %s"
% os.path.basename(dist.location)
)
if len(setups) > 1:
raise distutils.errors.DistutilsError(
"Multiple setup scripts in %s"
% os.path.basename(dist.location)
)
base = os.path.dirname(setups[0])
setup_cfg = os.path.join(base, 'setup.cfg')
if not os.path.exists(setup_cfg):
f = open(setup_cfg, 'w')
f.close()
setuptools.command.setopt.edit_config(
setup_cfg, dict(build_ext=build_ext))
dists = self._call_pip_install(base, self._dest, dist)
return [dist.location for dist in dists]
finally:
zc.buildout.rmtree.rmtree(build_tmp)
finally:
if tmp != self._download_cache:
zc.buildout.rmtree.rmtree(tmp)
def _fix_file_links(self, links):
for link in links:
if link.startswith('file://') and link[-1] != '/':
if os.path.isdir(link[7:]):
# work around excessive restriction in setuptools:
link += '/'
yield link
def _log_requirement(self, ws, req):
if (not logger.isEnabledFor(logging.DEBUG) and
not Installer._store_required_by):
            # Sorting the working set and iterating over its requirements
# is expensive, so short circuit the work if it won't even be
# logged. When profiling a simple buildout with 10 parts with
# identical and large working sets, this resulted in a
# decrease of run time from 93.411 to 15.068 seconds, about a
# 6 fold improvement.
return
ws = list(ws)
ws.sort()
for dist in ws:
if req in dist.requires():
logger.debug(" required by %s." % dist)
req_ = str(req)
if req_ not in Installer._required_by:
Installer._required_by[req_] = set()
Installer._required_by[req_].add(str(dist.as_requirement()))
def _final_version(self, parsed_version):
return not parsed_version.is_prerelease
def normalize_versions(versions):
"""Return version dict with keys normalized to lowercase.
PyPI is case-insensitive and not all distributions are consistent in
their own naming.
"""
return dict([(k.lower(), v) for (k, v) in versions.items()])
def default_versions(versions=None):
old = Installer._versions
if versions is not None:
Installer._versions = normalize_versions(versions)
return old
def download_cache(path=-1):
old = Installer._download_cache
if path != -1:
if path:
path = realpath(path)
Installer._download_cache = path
return old
def install_from_cache(setting=None):
old = Installer._install_from_cache
if setting is not None:
Installer._install_from_cache = bool(setting)
return old
def prefer_final(setting=None):
old = Installer._prefer_final
if setting is not None:
Installer._prefer_final = bool(setting)
return old
def use_dependency_links(setting=None):
old = Installer._use_dependency_links
if setting is not None:
Installer._use_dependency_links = bool(setting)
return old
def allow_picked_versions(setting=None):
old = Installer._allow_picked_versions
if setting is not None:
Installer._allow_picked_versions = bool(setting)
return old
def store_required_by(setting=None):
old = Installer._store_required_by
if setting is not None:
Installer._store_required_by = bool(setting)
return old
def get_picked_versions():
picked_versions = sorted(Installer._picked_versions.items())
required_by = Installer._required_by
return (picked_versions, required_by)
def install(specs, dest,
links=(), index=None,
executable=sys.executable,
always_unzip=None, # Backward compat :/
path=None, working_set=None, newest=True, versions=None,
use_dependency_links=None, allow_hosts=('*',),
include_site_packages=None,
allowed_eggs_from_site_packages=None,
check_picked=True,
allow_unknown_extras=False,
):
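    # Module-level convenience wrapper: build a one-off Installer from
    # the given options and delegate to its install() method.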
assert executable == sys.executable, (executable, sys.executable)
assert include_site_packages is None
assert allowed_eggs_from_site_packages is None
installer = Installer(dest, links, index, sys.executable,
always_unzip, path,
newest, versions, use_dependency_links,
allow_hosts=allow_hosts,
check_picked=check_picked,
allow_unknown_extras=allow_unknown_extras)
return installer.install(specs, working_set)
buildout_and_setuptools_dists = list(install(['zc.buildout'], None,
check_picked=False))
buildout_and_setuptools_path = sorted({d.location
for d in buildout_and_setuptools_dists})
pip_dists = [d for d in buildout_and_setuptools_dists if d.project_name != 'zc.buildout']
pip_path = sorted({d.location for d in pip_dists})
logger.debug('after restricting versions: pip_path %r', pip_path)
pip_pythonpath = os.pathsep.join(pip_path)
setuptools_path = pip_path
setuptools_pythonpath = pip_pythonpath
def build(spec, dest, build_ext,
links=(), index=None,
executable=sys.executable,
path=None, newest=True, versions=None, allow_hosts=('*',)):
assert executable == sys.executable, (executable, sys.executable)
installer = Installer(dest, links, index, executable,
True, path, newest,
versions, allow_hosts=allow_hosts)
return installer.build(spec, build_ext)
def _rm(*paths):
for path in paths:
if os.path.isdir(path):
zc.buildout.rmtree.rmtree(path)
elif os.path.exists(path):
os.remove(path)
def _copyeggs(src, dest, suffix, undo):
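    # Move the single file ending in `suffix` (e.g. '.egg-link') from
    # src to dest; an undo callback is registered while we work and
    # dropped again once the move has succeeded.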
result = []
undo.append(lambda : _rm(*result))
for name in os.listdir(src):
if name.endswith(suffix):
new = os.path.join(dest, name)
_rm(new)
os.rename(os.path.join(src, name), new)
result.append(new)
assert len(result) == 1, str(result)
undo.pop()
return result[0]
_develop_distutils_scripts = {}
def _detect_distutils_scripts(directory):
"""Record detected distutils scripts from develop eggs
``setup.py develop`` doesn't generate metadata on distutils scripts, in
contrast to ``setup.py install``. So we have to store the information for
later.
"""
dir_contents = os.listdir(directory)
egginfo_filenames = [filename for filename in dir_contents
if filename.endswith('.egg-link')]
if not egginfo_filenames:
return
egg_name = egginfo_filenames[0].replace('.egg-link', '')
marker = 'EASY-INSTALL-DEV-SCRIPT'
scripts_found = []
for filename in dir_contents:
if filename.endswith('.exe'):
continue
filepath = os.path.join(directory, filename)
if not os.path.isfile(filepath):
continue
with open(filepath) as fp:
dev_script_content = fp.read()
if marker in dev_script_content:
# The distutils bin script points at the actual file we need.
for line in dev_script_content.splitlines():
match = DUNDER_FILE_PATTERN.search(line)
if match:
# The ``__file__ =`` line in the generated script points
# at the actual distutils script we need.
actual_script_filename = match.group('filename')
with open(actual_script_filename) as fp:
actual_script_content = fp.read()
scripts_found.append([filename, actual_script_content])
if scripts_found:
logger.debug(
"Distutils scripts found for develop egg %s: %s",
egg_name, scripts_found)
_develop_distutils_scripts[egg_name] = scripts_found
def develop(setup, dest,
build_ext=None,
executable=sys.executable):
assert executable == sys.executable, (executable, sys.executable)
if os.path.isdir(setup):
directory = setup
setup = os.path.join(directory, 'setup.py')
else:
directory = os.path.dirname(setup)
undo = []
try:
if build_ext:
setup_cfg = os.path.join(directory, 'setup.cfg')
if os.path.exists(setup_cfg):
os.rename(setup_cfg, setup_cfg+'-develop-aside')
def restore_old_setup():
if os.path.exists(setup_cfg):
os.remove(setup_cfg)
os.rename(setup_cfg+'-develop-aside', setup_cfg)
undo.append(restore_old_setup)
else:
f = open(setup_cfg, 'w')
f.close()
undo.append(lambda: os.remove(setup_cfg))
setuptools.command.setopt.edit_config(
setup_cfg, dict(build_ext=build_ext))
fd, tsetup = tempfile.mkstemp()
undo.append(lambda: os.remove(tsetup))
undo.append(lambda: os.close(fd))
os.write(fd, (runsetup_template % dict(
setupdir=directory,
setup=setup,
__file__ = setup,
)).encode())
tmp3 = tempfile.mkdtemp('build', dir=dest)
undo.append(lambda : zc.buildout.rmtree.rmtree(tmp3))
args = [executable, tsetup, '-q', 'develop', '-mN', '-d', tmp3]
log_level = logger.getEffectiveLevel()
if log_level <= 0:
if log_level == 0:
del args[2]
else:
                args[2] = '-v'
if log_level < logging.DEBUG:
logger.debug("in: %r\n%s", directory, ' '.join(args))
call_subprocess(args)
_detect_distutils_scripts(tmp3)
return _copyeggs(tmp3, dest, '.egg-link', undo)
finally:
undo.reverse()
[f() for f in undo]
def working_set(specs, executable, path=None,
include_site_packages=None,
allowed_eggs_from_site_packages=None):
# Backward compat:
if path is None:
path = executable
else:
assert executable == sys.executable, (executable, sys.executable)
assert include_site_packages is None
assert allowed_eggs_from_site_packages is None
return install(specs, None, path=path)
def scripts(reqs, working_set, executable, dest=None,
scripts=None,
extra_paths=(),
arguments='',
interpreter=None,
initialization='',
relative_paths=False,
):
assert executable == sys.executable, (executable, sys.executable)
path = [dist.location for dist in working_set]
path.extend(extra_paths)
# order preserving unique
unique_path = []
for p in path:
if p not in unique_path:
unique_path.append(p)
path = [realpath(p) for p in unique_path]
generated = []
if isinstance(reqs, str):
raise TypeError('Expected iterable of requirements or entry points,'
' got string.')
if initialization:
initialization = '\n'+initialization+'\n'
entry_points = []
distutils_scripts = []
for req in reqs:
if isinstance(req, str):
req = pkg_resources.Requirement.parse(req)
if req.marker and not req.marker.evaluate():
continue
dist = working_set.find(req)
# regular console_scripts entry points
for name in pkg_resources.get_entry_map(dist, 'console_scripts'):
entry_point = dist.get_entry_info('console_scripts', name)
entry_points.append(
(name, entry_point.module_name,
'.'.join(entry_point.attrs))
)
# The metadata on "old-style" distutils scripts is not retained by
# distutils/setuptools, except by placing the original scripts in
# /EGG-INFO/scripts/.
if dist.metadata_isdir('scripts'):
# egg-info metadata from installed egg.
for name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + name):
# Probably Python 3 __pycache__ directory.
continue
if name.lower().endswith('.exe'):
                    # On Windows, scripts are implemented with two files;
                    # the .exe also shows up in metadata_listdir and
                    # get_metadata chokes on the binary.
continue
contents = dist.get_metadata('scripts/' + name)
distutils_scripts.append((name, contents))
elif dist.key in _develop_distutils_scripts:
# Development eggs don't have metadata about scripts, so we
            # collected it ourselves in develop() and
# _detect_distutils_scripts().
for name, contents in _develop_distutils_scripts[dist.key]:
distutils_scripts.append((name, contents))
else:
entry_points.append(req)
entry_points_names = []
for name, module_name, attrs in entry_points:
entry_points_names.append(name)
if scripts is not None:
sname = scripts.get(name)
if sname is None:
continue
else:
sname = name
sname = os.path.join(dest, sname)
spath, rpsetup = _relative_path_and_setup(sname, path, relative_paths)
generated.extend(
_script(module_name, attrs, spath, sname, arguments,
initialization, rpsetup)
)
# warn when a script name passed in 'scripts' argument
# is not defined in an entry point.
if scripts is not None:
for name, target in scripts.items():
if name not in entry_points_names:
if name == target:
logger.warning("Could not generate script '%s' as it is not "
"defined in the egg entry points.", name)
else:
logger.warning("Could not generate script '%s' as script "
"'%s' is not defined in the egg entry points.", name, target)
for name, contents in distutils_scripts:
if scripts is not None:
sname = scripts.get(name)
if sname is None:
continue
else:
sname = name
sname = os.path.join(dest, sname)
spath, rpsetup = _relative_path_and_setup(sname, path, relative_paths)
generated.extend(
_distutils_script(spath, sname, contents, initialization, rpsetup)
)
if interpreter:
sname = os.path.join(dest, interpreter)
spath, rpsetup = _relative_path_and_setup(sname, path, relative_paths)
generated.extend(_pyscript(spath, sname, rpsetup, initialization))
return generated
def _relative_path_and_setup(sname, path, relative_paths):
if relative_paths:
relative_paths = os.path.normcase(relative_paths)
sname = os.path.normcase(os.path.abspath(sname))
spath = ',\n '.join(
[_relativitize(os.path.normcase(path_item), sname, relative_paths)
for path_item in path]
)
rpsetup = relative_paths_setup
for i in range(_relative_depth(relative_paths, sname)):
rpsetup += "base = os.path.dirname(base)\n"
else:
spath = repr(path)[1:-1].replace(', ', ',\n ')
rpsetup = ''
return spath, rpsetup
def _relative_depth(common, path):
n = 0
while 1:
dirname = os.path.dirname(path)
if dirname == path:
raise AssertionError("dirname of %s is the same" % dirname)
if dirname == common:
break
n += 1
path = dirname
return n
def _relative_path(common, path):
r = []
while 1:
dirname, basename = os.path.split(path)
r.append(basename)
if dirname == common:
break
if dirname == path:
raise AssertionError("dirname of %s is the same" % dirname)
path = dirname
r.reverse()
return os.path.join(*r)
def _relativitize(path, script, relative_paths):
if path == script:
raise AssertionError("path == script")
if path == relative_paths:
return "base"
common = os.path.dirname(os.path.commonprefix([path, script]))
if (common == relative_paths or
common.startswith(os.path.join(relative_paths, ''))
):
return "join(base, %r)" % _relative_path(common, path)
else:
return repr(path)
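# Illustrative sketch (not part of the original module; POSIX paths assumed):
# how the helpers above rewrite sys.path entries for relative scripts.
#
#   _relativitize('/app/eggs/demo.egg', '/app/bin/foo', '/app')
#   # -> "join(base, 'eggs/demo.egg')"
#   _relativitize('/usr/lib/python3', '/app/bin/foo', '/app')
#   # -> "'/usr/lib/python3'" (outside the buildout, left absolute)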
relative_paths_setup = """
import os
join = os.path.join
base = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
"""
def _script(module_name, attrs, path, dest, arguments, initialization, rsetup):
if is_win32:
dest += '-script.py'
python = _safe_arg(sys.executable)
contents = script_template % dict(
python = python,
path = path,
module_name = module_name,
attrs = attrs,
arguments = arguments,
initialization = initialization,
relative_paths_setup = rsetup,
)
return _create_script(contents, dest)
def _distutils_script(path, dest, script_content, initialization, rsetup):
if is_win32:
dest += '-script.py'
lines = script_content.splitlines(True)
    if not ('#!' in lines[0] and 'python' in lines[0]):
        # The script doesn't follow distutils' rules. Ignore it.
return []
lines = lines[1:] # Strip off the first hashbang line.
line_with_first_import = len(lines)
for line_number, line in enumerate(lines):
        if 'import' not in line:
continue
if not (line.startswith('import') or line.startswith('from')):
continue
if '__future__' in line:
continue
line_with_first_import = line_number
break
before = ''.join(lines[:line_with_first_import])
after = ''.join(lines[line_with_first_import:])
python = _safe_arg(sys.executable)
contents = distutils_script_template % dict(
python = python,
path = path,
initialization = initialization,
relative_paths_setup = rsetup,
before = before,
after = after
)
return _create_script(contents, dest)
def _file_changed(filename, old_contents, mode='r'):
try:
with open(filename, mode) as f:
return f.read() != old_contents
except EnvironmentError as e:
if e.errno == errno.ENOENT:
return True
else:
raise
def _create_script(contents, dest):
generated = []
script = dest
changed = _file_changed(dest, contents)
if is_win32:
# generate exe file and give the script a magic name:
win32_exe = os.path.splitext(dest)[0] # remove ".py"
if win32_exe.endswith('-script'):
win32_exe = win32_exe[:-7] # remove "-script"
win32_exe = win32_exe + '.exe' # add ".exe"
try:
new_data = setuptools.command.easy_install.get_win_launcher('cli')
except AttributeError:
# fall back for compatibility with older Distribute versions
new_data = pkg_resources.resource_string('setuptools', 'cli.exe')
if _file_changed(win32_exe, new_data, 'rb'):
# Only write it if it's different.
with open(win32_exe, 'wb') as f:
f.write(new_data)
generated.append(win32_exe)
if changed:
with open(dest, 'w') as f:
f.write(contents)
logger.info(
"Generated script %r.",
# Normalize for windows
script.endswith('-script.py') and script[:-10] or script)
try:
os.chmod(dest, _execute_permission())
except (AttributeError, os.error):
pass
generated.append(dest)
return generated
if is_jython and jython_os_name == 'linux':
script_header = '#!/usr/bin/env %(python)s'
else:
script_header = '#!%(python)s'
script_template = script_header + '''\
%(relative_paths_setup)s
import sys
sys.path[0:0] = [
%(path)s,
]
%(initialization)s
import %(module_name)s
if __name__ == '__main__':
sys.exit(%(module_name)s.%(attrs)s(%(arguments)s))
'''
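# Illustrative sketch (not part of the original module): with empty
# relative_paths_setup and initialization, the template above renders
# roughly like this for a hypothetical 'demo = demo.main:run' entry point:
#
#   #!/usr/bin/python3
#
#   import sys
#   sys.path[0:0] = [
#     '/sample-buildout/eggs/demo-1.0-py3.9.egg',
#     ]
#
#   import demo.main
#   if __name__ == '__main__':
#       sys.exit(demo.main.run())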
distutils_script_template = script_header + '''
%(before)s
%(relative_paths_setup)s
import sys
sys.path[0:0] = [
%(path)s,
]
%(initialization)s
%(after)s'''
def _pyscript(path, dest, rsetup, initialization=''):
generated = []
script = dest
if is_win32:
dest += '-script.py'
python = _safe_arg(sys.executable)
if path:
path += ',' # Courtesy comma at the end of the list.
contents = py_script_template % dict(
python = python,
path = path,
relative_paths_setup = rsetup,
initialization=initialization,
)
changed = _file_changed(dest, contents)
if is_win32:
# generate exe file and give the script a magic name:
exe = script + '.exe'
with open(exe, 'wb') as f:
f.write(
pkg_resources.resource_string('setuptools', 'cli.exe')
)
generated.append(exe)
if changed:
with open(dest, 'w') as f:
f.write(contents)
try:
os.chmod(dest, _execute_permission())
except (AttributeError, os.error):
pass
logger.info("Generated interpreter %r.", script)
generated.append(dest)
return generated
if sys.version_info[0] < 3:
universal_newline_option = ", 'U'"
else:
universal_newline_option = ''
py_script_template = script_header + '''\
%%(relative_paths_setup)s
import sys
sys.path[0:0] = [
%%(path)s
]
%%(initialization)s
_interactive = True
if len(sys.argv) > 1:
_options, _args = __import__("getopt").getopt(sys.argv[1:], 'ic:m:')
_interactive = False
for (_opt, _val) in _options:
if _opt == '-i':
_interactive = True
elif _opt == '-c':
exec(_val)
elif _opt == '-m':
sys.argv[1:] = _args
_args = []
__import__("runpy").run_module(
_val, {}, "__main__", alter_sys=True)
if _args:
sys.argv[:] = _args
__file__ = _args[0]
del _options, _args
with open(__file__%s) as __file__f:
exec(compile(__file__f.read(), __file__, "exec"))
if _interactive:
del _interactive
__import__("code").interact(banner="", local=globals())
''' % universal_newline_option
runsetup_template = """
import sys
sys.path.insert(0, %%(setupdir)r)
sys.path[0:0] = %r
import os, setuptools
__file__ = %%(__file__)r
os.chdir(%%(setupdir)r)
sys.argv[0] = %%(setup)r
with open(%%(setup)r%s) as f:
exec(compile(f.read(), %%(setup)r, 'exec'))
""" % (setuptools_path, universal_newline_option)
class VersionConflict(zc.buildout.UserError):
def __init__(self, err, ws):
ws = list(ws)
ws.sort()
self.err, self.ws = err, ws
def __str__(self):
result = ["There is a version conflict."]
if len(self.err.args) == 2:
existing_dist, req = self.err.args
result.append("We already have: %s" % existing_dist)
for dist in self.ws:
if req in dist.requires():
result.append("but %s requires %r." % (dist, str(req)))
else:
# The error argument is already a nice error string.
result.append(self.err.args[0])
return '\n'.join(result)
class MissingDistribution(zc.buildout.UserError):
def __init__(self, req, ws):
ws = list(ws)
ws.sort()
self.data = req, ws
def __str__(self):
req, ws = self.data
return "Couldn't find a distribution for %r." % str(req)
def redo_pyc(egg):
if not os.path.isdir(egg):
return
for dirpath, dirnames, filenames in os.walk(egg):
for filename in filenames:
if not filename.endswith('.py'):
continue
filepath = os.path.join(dirpath, filename)
if not (os.path.exists(filepath+'c')
or os.path.exists(filepath+'o')):
# If it wasn't compiled, it may not be compilable
continue
# OK, it looks like we should try to compile.
# Remove old files.
for suffix in 'co':
if os.path.exists(filepath+suffix):
os.remove(filepath+suffix)
# Compile under current optimization
try:
py_compile.compile(filepath)
except py_compile.PyCompileError:
logger.warning("Couldn't compile %s", filepath)
else:
# Recompile under other optimization. :)
args = [sys.executable]
if __debug__:
args.append('-O')
args.extend(['-m', 'py_compile', filepath])
call_subprocess(args)
def _constrained_requirement(constraint, requirement):
assert isinstance(requirement, pkg_resources.Requirement)
if constraint[0] not in '<>':
if constraint.startswith('='):
assert constraint.startswith('==')
version = constraint[2:]
else:
version = constraint
constraint = '==' + constraint
if version not in requirement:
msg = ("The requirement (%r) is not allowed by your [versions] "
"constraint (%s)" % (str(requirement), version))
raise IncompatibleConstraintError(msg)
specifier = packaging.specifiers.SpecifierSet(constraint)
else:
specifier = requirement.specifier & constraint
constrained = copy.deepcopy(requirement)
constrained.specifier = specifier
return pkg_resources.Requirement.parse(str(constrained))
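# Illustrative sketch (not part of the original module): applying a
# [versions] constraint to a requirement.
#
#   req = pkg_resources.Requirement.parse('foo>=1.0')
#   _constrained_requirement('2.3', req)
#   # -> Requirement for 'foo==2.3'
#   _constrained_requirement('>=1.5', req)
#   # -> Requirement combining both specifiers, i.e. 'foo>=1.0,>=1.5'
#   _constrained_requirement('0.5', req)   # 0.5 not allowed by foo>=1.0
#   # -> raises IncompatibleConstraintError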
class IncompatibleConstraintError(zc.buildout.UserError):
"""A specified version is incompatible with a given requirement.
"""
IncompatibleVersionError = IncompatibleConstraintError # Backward compatibility
def call_pip_install(spec, dest):
"""
Call `pip install` from a subprocess to install a
distribution specified by `spec` into `dest`.
Returns all the paths inside `dest` created by the above.
"""
args = [sys.executable, '-m', 'pip', 'install', '--no-deps', '-t', dest]
level = logger.getEffectiveLevel()
if level >= logging.INFO:
args.append('-q')
else:
args.append('-v')
args.append(spec)
try:
from pip._internal.cli.cmdoptions import no_python_version_warning
HAS_WARNING_OPTION = True
except ImportError:
HAS_WARNING_OPTION = False
if HAS_WARNING_OPTION:
if not hasattr(call_pip_install, 'displayed'):
call_pip_install.displayed = True
else:
args.append('--no-python-version-warning')
env = copy.copy(os.environ)
python_path = copy.copy(pip_path)
python_path.append(env.get('PYTHONPATH', ''))
env['PYTHONPATH'] = os.pathsep.join(python_path)
if level <= logging.DEBUG:
logger.debug('Running pip install:\n"%s"\npath=%s\n',
'" "'.join(args), pip_path)
sys.stdout.flush() # We want any pending output first
exit_code = subprocess.call(list(args), env=env)
if exit_code:
logger.error(
"An error occurred when trying to install %s. "
"Look above this message for any errors that "
"were output by pip install.",
spec)
sys.exit(1)
split_entries = [os.path.splitext(entry) for entry in os.listdir(dest)]
try:
distinfo_dir = [
base + ext for base, ext in split_entries if ext == ".dist-info"
][0]
except IndexError:
logger.error(
"No .dist-info directory after successful pip install of %s",
spec)
raise
return make_egg_after_pip_install(dest, distinfo_dir)
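# Illustrative sketch (not part of the original module): at the default
# INFO log level, the subprocess invoked above is roughly
#
#   [sys.executable, '-m', 'pip', 'install', '--no-deps', '-t', dest,
#    '-q', spec]
#
# run with PYTHONPATH extended with pip_path, so the subprocess sees the
# same pip that buildout itself uses.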
def make_egg_after_pip_install(dest, distinfo_dir):
"""build properly named egg directory"""
# `pip install` does not build the namespace aware __init__.py files
# but they are needed in egg directories.
# Add them before moving files setup by pip
namespace_packages_file = os.path.join(
dest, distinfo_dir,
'namespace_packages.txt'
)
if os.path.isfile(namespace_packages_file):
with open(namespace_packages_file) as f:
namespace_packages = [
line.strip().replace('.', os.path.sep)
for line in f.readlines()
]
for namespace_package in namespace_packages:
namespace_package_dir = os.path.join(dest, namespace_package)
if os.path.isdir(namespace_package_dir):
init_py_file = os.path.join(
namespace_package_dir, '__init__.py')
with open(init_py_file, 'w') as f:
f.write(
"__import__('pkg_resources')."
"declare_namespace(__name__)"
)
# Remove `bin` directory if needed
# as there is no way to avoid script installation
# when running `pip install`
entry_points_file = os.path.join(dest, distinfo_dir, 'entry_points.txt')
if os.path.isfile(entry_points_file):
with open(entry_points_file) as f:
content = f.read()
if "console_scripts" in content or "gui_scripts" in content:
bin_dir = os.path.join(dest, BIN_SCRIPTS)
if os.path.exists(bin_dir):
shutil.rmtree(bin_dir)
# Make properly named new egg dir
distro = list(pkg_resources.find_distributions(dest))[0]
base = "{}-{}".format(
distro.egg_name(), pkg_resources.get_supported_platform()
)
egg_name = base + '.egg'
new_distinfo_dir = base + '.dist-info'
egg_dir = os.path.join(dest, egg_name)
os.mkdir(egg_dir)
# Move ".dist-info" dir into new egg dir
os.rename(
os.path.join(dest, distinfo_dir),
os.path.join(egg_dir, new_distinfo_dir)
)
top_level_file = os.path.join(egg_dir, new_distinfo_dir, 'top_level.txt')
if os.path.isfile(top_level_file):
with open(top_level_file) as f:
top_levels = filter(
(lambda x: len(x) != 0),
[line.strip() for line in f.readlines()]
)
else:
top_levels = ()
# Move all top_level modules or packages
for top_level in top_levels:
# as package
top_level_dir = os.path.join(dest, top_level)
if os.path.exists(top_level_dir):
shutil.move(top_level_dir, egg_dir)
continue
# as module
top_level_py = top_level_dir + '.py'
if os.path.exists(top_level_py):
shutil.move(top_level_py, egg_dir)
top_level_pyc = top_level_dir + '.pyc'
if os.path.exists(top_level_pyc):
shutil.move(top_level_pyc, egg_dir)
continue
record_file = os.path.join(egg_dir, new_distinfo_dir, 'RECORD')
if os.path.isfile(record_file):
if PY3:
with open(record_file, newline='') as f:
all_files = [row[0] for row in csv.reader(f)]
else:
with open(record_file, 'rb') as f:
all_files = [row[0] for row in csv.reader(f)]
    # There might be some C extensions left over.
for entry in all_files:
if entry.endswith(('.pyc', '.pyo')):
continue
dest_entry = os.path.join(dest, entry)
        # Work around a pip install -t bug that leaves entries in RECORD
        # that start with '../../'.
if not os.path.abspath(dest_entry).startswith(dest):
continue
egg_entry = os.path.join(egg_dir, entry)
if os.path.exists(dest_entry) and not os.path.exists(egg_entry):
egg_entry_dir = os.path.dirname(egg_entry)
if not os.path.exists(egg_entry_dir):
os.makedirs(egg_entry_dir)
os.rename(dest_entry, egg_entry)
return [egg_dir]
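# Illustrative sketch (not part of the original module): for a hypothetical
# project 'demo 1.0' installed by pip into ``dest``, the function above
# roughly performs this reshuffle (egg/platform names vary by interpreter):
#
#   dest/demo/...                ->  dest/demo-1.0-py3.9-linux-x86_64.egg/demo/...
#   dest/demo-1.0.dist-info/...  ->  dest/demo-1.0-py3.9-linux-x86_64.egg/
#                                        demo-1.0-py3.9-linux-x86_64.dist-info/...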
def unpack_egg(location, dest):
# Buildout 2 no longer installs zipped eggs,
# so we always want to unpack it.
dest = os.path.join(dest, os.path.basename(location))
setuptools.archive_util.unpack_archive(location, dest)
WHEEL_WARNING = """
*.whl file detected (%s), you'll need setuptools >= 38.2.3 for that
or an extension like buildout.wheel > 0.2.0.
"""
def unpack_wheel(location, dest):
if SETUPTOOLS_SUPPORTS_WHEELS:
wheel = Wheel(location)
wheel.install_as_egg(os.path.join(dest, wheel.egg_name()))
else:
raise zc.buildout.UserError(WHEEL_WARNING % location)
UNPACKERS = {
'.egg': unpack_egg,
'.whl': unpack_wheel,
}
def _get_matching_dist_in_location(dist, location):
"""
    Check if `location` contains only the one intended dist.
Return the dist with metadata in the new location.
"""
# Getting the dist from the environment causes the distribution
# meta data to be read. Cloning isn't good enough. We must compare
# dist.parsed_version, not dist.version, because one or the other
# may be normalized (e.g., 3.3 becomes 3.3.0 when downloaded from
# PyPI.)
env = pkg_resources.Environment([location])
dists = [ d for project_name in env for d in env[project_name] ]
dist_infos = [ (d.project_name.lower(), d.parsed_version) for d in dists ]
if dist_infos == [(dist.project_name.lower(), dist.parsed_version)]:
return dists.pop()
def _move_to_eggs_dir_and_compile(dist, dest):
"""Move distribution to the eggs destination directory.
And compile the py files, if we have actually moved the dist.
    The new location is expected not to exist yet; if it did, the egg
    would already be in place and this function would not be called. But
    the new location might appear at this point if another buildout is
    running in parallel, so we copy to a temporary directory first.
See discussion at https://github.com/buildout/buildout/issues/307
We return the new distribution with properly loaded metadata.
"""
# First make sure the destination directory exists. This could suffer from
# the same kind of race condition as the rest: if we check that it does not
# exist, and we then create it, it will fail when a second buildout is
# doing the same thing.
try:
os.makedirs(dest)
except OSError:
if not os.path.isdir(dest):
# Unknown reason. Reraise original error.
raise
tmp_dest = tempfile.mkdtemp(dir=dest)
try:
installed_with_pip = False
if (os.path.isdir(dist.location) and
dist.precedence >= pkg_resources.BINARY_DIST):
# We got a pre-built directory. It must have been obtained locally.
# Just copy it.
tmp_loc = os.path.join(tmp_dest, os.path.basename(dist.location))
shutil.copytree(dist.location, tmp_loc)
else:
# It is an archive of some sort.
# Figure out how to unpack it, or fall back to easy_install.
_, ext = os.path.splitext(dist.location)
if ext in UNPACKERS:
unpacker = UNPACKERS[ext]
unpacker(dist.location, tmp_dest)
[tmp_loc] = glob.glob(os.path.join(tmp_dest, '*'))
else:
[tmp_loc] = call_pip_install(dist.location, tmp_dest)
installed_with_pip = True
# We have installed the dist. Now try to rename/move it.
newloc = os.path.join(dest, os.path.basename(tmp_loc))
try:
os.rename(tmp_loc, newloc)
except OSError:
# Might be for various reasons. If it is because newloc already
# exists, we can investigate.
if not os.path.exists(newloc):
# No, it is a different reason. Give up.
raise
# Try to use it as environment and check if our project is in it.
newdist = _get_matching_dist_in_location(dist, newloc)
if newdist is None:
# Path exists, but is not our package. We could
# try something, but it seems safer to bail out
# with the original error.
raise
# newloc looks okay to use. Do print a warning.
            logger.warning(
"Path %s unexpectedly already exists.\n"
"Maybe a buildout running in parallel has added it. "
"We will accept it.\n"
"If this contains a wrong package, please remove it yourself.",
newloc)
else:
# There were no problems during the rename.
# Do the compile step.
redo_pyc(newloc)
newdist = _get_matching_dist_in_location(dist, newloc)
assert newdist is not None # newloc above is missing our dist?!
finally:
# Remember that temporary directories must be removed
zc.buildout.rmtree.rmtree(tmp_dest)
if installed_with_pip:
newdist.precedence = pkg_resources.EGG_DIST
return newdist
def sort_working_set(ws, eggs_dir, develop_eggs_dir):
develop_paths = set()
pattern = os.path.join(develop_eggs_dir, '*.egg-link')
for egg_link in glob.glob(pattern):
with open(egg_link, 'rt') as f:
path = f.readline().strip()
if path:
develop_paths.add(path)
sorted_paths = []
egg_paths = []
other_paths = []
for dist in ws:
path = dist.location
if path in develop_paths:
sorted_paths.append(path)
elif os.path.commonprefix([path, eggs_dir]) == eggs_dir:
egg_paths.append(path)
else:
other_paths.append(path)
sorted_paths.extend(egg_paths)
sorted_paths.extend(other_paths)
return pkg_resources.WorkingSet(sorted_paths)
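# Illustrative sketch (not part of the original module): assuming
# '/src/devpkg' is referenced by a develop-eggs/*.egg-link file, develop
# eggs sort first, then eggs from the eggs directory, then the rest.
#
#   ws = pkg_resources.WorkingSet(
#       ['/site/other', '/bo/eggs/a.egg', '/src/devpkg'])
#   sort_working_set(ws, '/bo/eggs', '/bo/develop-eggs')
#   # entries order: ['/src/devpkg', '/bo/eggs/a.egg', '/site/other']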
NOT_PICKED_AND_NOT_ALLOWED = """\
Picked: {name} = {version}
The `{name}` egg does not have a version pin and `allow-picked-versions = false`.
To resolve this, add
{name} = {version}
to the [versions] section,
OR set `allow-picked-versions = true`.""" | zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/src/zc/buildout/easy_install.py | easy_install.py |
import sys
import warnings
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
def get_config_var(var):
try:
return sysconfig.get_config_var(var)
except IOError as e: # pip Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
return None
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
impl_ver = get_config_var("py_version_nodot")
if not impl_ver or get_abbr_impl() == 'pp':
impl_ver = ''.join(map(str, get_impl_version_info()))
return impl_ver
def get_impl_version_info():
"""Return sys.version_info-like tuple for use in decrementing the minor
version."""
if get_abbr_impl() == 'pp':
# as per https://github.com/pypa/pip/issues/2882
return (sys.version_info[0], sys.pypy_version_info.major,
sys.pypy_version_info.minor)
else:
return sys.version_info[0], sys.version_info[1]
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback method for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
"be incorrect".format(var), RuntimeWarning, 2)
return fallback()
return val == expected
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = get_abbr_impl()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
lambda: hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
lambda: impl == 'cp',
warn=(impl == 'cp')):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
lambda: sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
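# Illustrative sketch (not part of the original module): typical results.
#
#   # CPython 3.9 with SOABI 'cpython-39-x86_64-linux-gnu':
#   get_abi_tag()   # -> 'cp39'
#   # CPython 2.7, UCS-4 build, with pymalloc and no SOABI:
#   get_abi_tag()   # -> 'cp27mu'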
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
if result == "linux_x86_64" and sys.maxsize == 2147483647:
# pip pull request #3497
result = "linux_i686"
return result
def get_supported(versions=None, supplied_platform=None):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
version_info = get_impl_version_info()
major = version_info[:-1]
# Support all previous minor Python versions.
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = get_abbr_impl()
abis = []
abi = get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
platforms = []
if supplied_platform:
platforms.append(supplied_platform)
platforms.append(get_platform())
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in platforms:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
    # Major Python version + platform; e.g. binaries not using the Python API.
    # Note: ``arch`` here reuses the last platform from the loop above.
    supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported | zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/src/zc/buildout/pep425tags.py | pep425tags.py |
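# Illustrative sketch (not part of the original module): on CPython 3.9
# under linux_x86_64 the supported tags start roughly like
#
#   [('cp39', 'cp39', 'linux_x86_64'),
#    ('cp39', 'abi3', 'linux_x86_64'),
#    ('cp39', 'none', 'linux_x86_64'),
#    ('cp39', 'none', 'any'),
#    ('cp3', 'none', 'any'),
#    ('cp38', 'none', 'any'),
#    ...]
#
# followed by the generic 'py3*' tags.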
from zc.buildout.rmtree import rmtree
import zc.buildout.easy_install
from functools import partial
try:
from hashlib import md5 as md5_original
except ImportError:
from md5 import md5 as md5_original
try:
from collections.abc import MutableMapping as DictMixin
except ImportError:
from UserDict import DictMixin
import zc.buildout.configparser
import copy
import datetime
import distutils.errors
import glob
import importlib
import inspect
import itertools
import logging
import os
import pkg_resources
import re
import shutil
import subprocess
import sys
import tempfile
import zc.buildout
import zc.buildout.download
PY3 = sys.version_info[0] == 3
if PY3:
text_type = str
else:
text_type = unicode
try:
hashed = md5_original(b'test')
md5 = md5_original
except ValueError:
md5 = partial(md5_original, usedforsecurity=False)
def command(method):
method.buildout_command = True
return method
def commands(cls):
for name, method in cls.__dict__.items():
if hasattr(method, "buildout_command"):
cls.COMMANDS.add(name)
return cls
def _print_options(sep=' ', end='\n', file=None):
return sep, end, file
def print_(*args, **kw):
sep, end, file = _print_options(**kw)
if file is None:
file = sys.stdout
file.write(sep.join(map(str, args))+end)
realpath = zc.buildout.easy_install.realpath
_isurl = re.compile('([a-zA-Z0-9+.-]+)://').match
class MissingOption(zc.buildout.UserError, KeyError):
"""A required option was missing.
"""
class MissingSection(zc.buildout.UserError, KeyError):
"""A required section is missing.
"""
def __str__(self):
return "The referenced section, %r, was not defined." % self.args[0]
def _annotate_section(section, source):
for key in section:
section[key] = SectionKey(section[key], source)
return section
class SectionKey(object):
def __init__(self, value, source):
self.history = []
self.value = value
self.addToHistory("SET", value, source)
@property
def source(self):
return self.history[-1].source
def overrideValue(self, sectionkey):
self.value = sectionkey.value
if sectionkey.history[-1].operation not in ['ADD', 'REMOVE']:
self.addToHistory("OVERRIDE", sectionkey.value, sectionkey.source)
else:
self.history = copy.deepcopy(sectionkey.history)
def setDirectory(self, value):
self.value = value
self.addToHistory("DIRECTORY", value, self.source)
def addToValue(self, added, source):
subvalues = self.value.split('\n') + added.split('\n')
self.value = "\n".join(subvalues)
self.addToHistory("ADD", added, source)
def removeFromValue(self, removed, source):
subvalues = [
v
for v in self.value.split('\n')
if v not in removed.split('\n')
]
self.value = "\n".join(subvalues)
self.addToHistory("REMOVE", removed, source)
def addToHistory(self, operation, value, source):
item = HistoryItem(operation, value, source)
self.history.append(item)
def printAll(self, key, basedir, verbose):
self.printKeyAndValue(key)
if verbose:
self.printVerbose(basedir)
else:
self.printTerse(basedir)
def printKeyAndValue(self, key):
lines = self.value.splitlines()
if len(lines) <= 1:
args = [key, "="]
if self.value:
args.append(" ")
args.append(self.value)
print_(*args, sep='')
else:
print_(key, "= ", lines[0], sep='')
for line in lines[1:]:
print_(line)
def printVerbose(self, basedir):
print_()
for item in reversed(self.history):
item.printAll(basedir)
print_()
def printTerse(self, basedir):
toprint = []
history = copy.deepcopy(self.history)
        while history:
            item = history.pop()
            item.printShort(toprint, basedir)
            if item.operation not in ["ADD", "REMOVE"]:
                break
for line in reversed(toprint):
if line.strip():
print_(line)
def __repr__(self):
return "<SectionKey value=%s source=%s>" % (
" ".join(self.value.split('\n')), self.source)
class HistoryItem(object):
def __init__(self, operation, value, source):
self.operation = operation
self.value = value
self.source = source
def printShort(self, toprint, basedir):
source = self.source_for_human(basedir)
if self.operation in ["OVERRIDE", "SET", "DIRECTORY"]:
toprint.append(" " + source)
elif self.operation == "ADD":
toprint.append("+= " + source)
elif self.operation == "REMOVE":
toprint.append("-= " + source)
def printOperation(self):
lines = self.value.splitlines()
if len(lines) <= 1:
print_(" ", self.operation, "VALUE =", self.value)
else:
print_(" ", self.operation, "VALUE =")
for line in lines:
print_(" ", " ", line)
def printSource(self, basedir):
if self.source in (
'DEFAULT_VALUE', 'COMPUTED_VALUE', 'COMMAND_LINE_VALUE'
):
prefix = "AS"
else:
prefix = "IN"
print_(" ", prefix, self.source_for_human(basedir))
def source_for_human(self, basedir):
if self.source.startswith(basedir):
return os.path.relpath(self.source, basedir)
else:
return self.source
def printAll(self, basedir):
self.printSource(basedir)
self.printOperation()
def __repr__(self):
return "<HistoryItem operation=%s value=%s source=%s>" % (
self.operation, " ".join(self.value.split('\n')), self.source)
def _annotate(data, note):
for key in data:
data[key] = _annotate_section(data[key], note)
return data
def _print_annotate(data, verbose, chosen_sections, basedir):
sections = list(data.keys())
sections.sort()
print_()
print_("Annotated sections")
print_("="*len("Annotated sections"))
for section in sections:
if (not chosen_sections) or (section in chosen_sections):
print_()
print_('[%s]' % section)
keys = list(data[section].keys())
keys.sort()
for key in keys:
sectionkey = data[section][key]
sectionkey.printAll(key, basedir, verbose)
def _unannotate_section(section):
return {key: entry.value for key, entry in section.items()}
def _unannotate(data):
return {key: _unannotate_section(section) for key, section in data.items()}
def _format_picked_versions(picked_versions, required_by):
output = ['[versions]']
required_output = []
for dist_, version in picked_versions:
if dist_ in required_by:
required_output.append('')
required_output.append('# Required by:')
for req_ in sorted(required_by[dist_]):
required_output.append('# '+req_)
target = required_output
else:
target = output
target.append("%s = %s" % (dist_, version))
output.extend(required_output)
return output
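# Illustrative sketch (not part of the original module): rendering picked
# versions, with "Required by" comments for constrained picks.
#
#   _format_picked_versions(
#       [('bar', '2.0'), ('foo', '1.0')], {'bar': set(['foo==1.0'])})
#   # -> ['[versions]',
#   #     'foo = 1.0',
#   #     '',
#   #     '# Required by:',
#   #     '# foo==1.0',
#   #     'bar = 2.0']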
_buildout_default_options = _annotate_section({
'allow-hosts': '*',
'allow-picked-versions': 'true',
'bin-directory': 'bin',
'develop-eggs-directory': 'develop-eggs',
'eggs-directory': 'eggs',
'executable': sys.executable,
'find-links': '',
'install-from-cache': 'false',
'installed': '.installed.cfg',
'log-format': '',
'log-level': 'INFO',
'newest': 'true',
'offline': 'false',
'parts-directory': 'parts',
'prefer-final': 'true',
'python': 'buildout',
'show-picked-versions': 'false',
'socket-timeout': '',
'update-versions-file': '',
'use-dependency-links': 'true',
'allow-unknown-extras': 'false',
}, 'DEFAULT_VALUE')
def _get_user_config():
buildout_home = os.path.join(os.path.expanduser('~'), '.buildout')
buildout_home = os.environ.get('BUILDOUT_HOME', buildout_home)
return os.path.join(buildout_home, 'default.cfg')
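# Illustrative sketch (not part of the original module): the user default
# config lives under ~/.buildout unless BUILDOUT_HOME overrides it.
#
#   _get_user_config()              # -> e.g. '/home/user/.buildout/default.cfg'
#   os.environ['BUILDOUT_HOME'] = '/opt/bo'
#   _get_user_config()              # -> '/opt/bo/default.cfg'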
@commands
class Buildout(DictMixin):
COMMANDS = set()
def __init__(self, config_file, cloptions,
use_user_defaults=True,
command=None, args=()):
__doing__ = 'Initializing.'
# default options
_buildout_default_options_copy = copy.deepcopy(
_buildout_default_options)
data = dict(buildout=_buildout_default_options_copy)
self._buildout_dir = os.getcwd()
if config_file and not _isurl(config_file):
config_file = os.path.abspath(config_file)
if not os.path.exists(config_file):
if command == 'init':
self._init_config(config_file, args)
elif command == 'setup':
# Sigh. This model of a buildout instance
# with methods is breaking down. :(
config_file = None
data['buildout']['directory'] = SectionKey(
'.', 'COMPUTED_VALUE')
else:
raise zc.buildout.UserError(
"Couldn't open %s" % config_file)
elif command == 'init':
raise zc.buildout.UserError(
"%r already exists." % config_file)
if config_file:
data['buildout']['directory'] = SectionKey(
os.path.dirname(config_file), 'COMPUTED_VALUE')
cloptions = dict(
(section, dict((option, SectionKey(value, 'COMMAND_LINE_VALUE'))
for (_, option, value) in v))
for (section, v) in itertools.groupby(sorted(cloptions),
lambda v: v[0])
)
override = copy.deepcopy(cloptions.get('buildout', {}))
# load user defaults, which override defaults
user_config = _get_user_config()
if use_user_defaults and os.path.exists(user_config):
download_options = data['buildout']
user_defaults, _ = _open(
os.path.dirname(user_config),
user_config, [], download_options,
override, set(), {}
)
for_download_options = _update(data, user_defaults)
else:
user_defaults = {}
for_download_options = copy.deepcopy(data)
# load configuration files
if config_file:
download_options = for_download_options['buildout']
cfg_data, _ = _open(
os.path.dirname(config_file),
config_file, [], download_options,
override, set(), user_defaults
)
data = _update(data, cfg_data)
# extends from command-line
if 'buildout' in cloptions:
cl_extends = cloptions['buildout'].pop('extends', None)
if cl_extends:
for extends in cl_extends.value.split():
download_options = for_download_options['buildout']
cfg_data, _ = _open(
os.path.dirname(extends),
os.path.basename(extends),
[], download_options,
override, set(), user_defaults
)
data = _update(data, cfg_data)
# apply command-line options
data = _update(data, cloptions)
# Set up versions section, if necessary
if 'versions' not in data['buildout']:
data['buildout']['versions'] = SectionKey(
'versions', 'DEFAULT_VALUE')
if 'versions' not in data:
data['versions'] = {}
# Default versions:
versions_section_name = data['buildout']['versions'].value
if versions_section_name:
versions = data[versions_section_name]
else:
versions = {}
versions.update(
dict((k, SectionKey(v, 'DEFAULT_VALUE'))
for (k, v) in (
# Prevent downgrading due to prefer-final:
('zc.buildout',
'>='+pkg_resources.working_set.find(
pkg_resources.Requirement.parse('zc.buildout')
).version),
# Use 2, even though not final
('zc.recipe.egg', '>=2.0.6'),
)
if k not in versions
))
# Absolutize some particular directory, handling also the ~/foo form,
# and considering the location of the configuration file that generated
# the setting as the base path, falling back to the main configuration
# file location
for name in ('download-cache', 'eggs-directory', 'extends-cache'):
if name in data['buildout']:
sectionkey = data['buildout'][name]
origdir = sectionkey.value
src = sectionkey.source
if '${' in origdir:
continue
if not os.path.isabs(origdir):
if src in ('DEFAULT_VALUE',
'COMPUTED_VALUE',
'COMMAND_LINE_VALUE'):
if 'directory' in data['buildout']:
basedir = data['buildout']['directory'].value
else:
basedir = self._buildout_dir
else:
if _isurl(src):
raise zc.buildout.UserError(
'Setting "%s" to a non absolute location ("%s") '
'within a\n'
'remote configuration file ("%s") is ambiguous.' % (
name, origdir, src))
basedir = os.path.dirname(src)
absdir = os.path.expanduser(origdir)
if not os.path.isabs(absdir):
absdir = os.path.join(basedir, absdir)
absdir = os.path.abspath(absdir)
sectionkey.setDirectory(absdir)
self._annotated = copy.deepcopy(data)
self._raw = _unannotate(data)
self._data = {}
self._parts = []
# provide some defaults before options are parsed
# because while parsing options those attributes might be
# used already (Gottfried Ganssauge)
buildout_section = self._raw['buildout']
# Try to make sure we have absolute paths for standard
# directories. We do this before doing substitutions, in case
        # one of these gets read by another section. If any
# variable references are used though, we leave it as is in
# _buildout_path.
if 'directory' in buildout_section:
self._buildout_dir = buildout_section['directory']
for name in ('bin', 'parts', 'eggs', 'develop-eggs'):
d = self._buildout_path(buildout_section[name+'-directory'])
buildout_section[name+'-directory'] = d
# Attributes on this buildout object shouldn't be used by
# recipes in their __init__. It can cause bugs, because the
# recipes will be instantiated below (``options = self['buildout']``)
# before this has completed initializing. These attributes are
# left behind for legacy support but recipe authors should
# beware of using them. A better practice is for a recipe to
# use the buildout['buildout'] options.
links = buildout_section['find-links']
self._links = links and links.split() or ()
allow_hosts = buildout_section['allow-hosts'].split('\n')
self._allow_hosts = tuple([host.strip() for host in allow_hosts
if host.strip() != ''])
self._logger = logging.getLogger('zc.buildout')
self.offline = bool_option(buildout_section, 'offline')
self.newest = ((not self.offline) and
bool_option(buildout_section, 'newest')
)
##################################################################
## WARNING!!!
## ALL ATTRIBUTES MUST HAVE REASONABLE DEFAULTS AT THIS POINT
## OTHERWISE ATTRIBUTEERRORS MIGHT HAPPEN ANY TIME FROM RECIPES.
## RECIPES SHOULD GENERALLY USE buildout['buildout'] OPTIONS, NOT
## BUILDOUT ATTRIBUTES.
##################################################################
# initialize some attrs and buildout directories.
options = self['buildout']
# now reinitialize
links = options.get('find-links', '')
self._links = links and links.split() or ()
allow_hosts = options['allow-hosts'].split('\n')
self._allow_hosts = tuple([host.strip() for host in allow_hosts
if host.strip() != ''])
self._buildout_dir = options['directory']
# Make sure we have absolute paths for standard directories. We do this
# a second time here in case someone overrode these in their configs.
for name in ('bin', 'parts', 'eggs', 'develop-eggs'):
d = self._buildout_path(options[name+'-directory'])
options[name+'-directory'] = d
if options['installed']:
options['installed'] = os.path.join(options['directory'],
options['installed'])
self._setup_logging()
self._setup_socket_timeout()
        # finish with versions
if versions_section_name:
# refetching section name just to avoid a warning
versions = self[versions_section_name]
else:
# remove annotations
versions = dict((k, v.value) for (k, v) in versions.items())
        options['versions']  # mark the 'versions' option as used to avoid a warning
self.versions = versions
zc.buildout.easy_install.default_versions(versions)
zc.buildout.easy_install.prefer_final(
bool_option(options, 'prefer-final'))
zc.buildout.easy_install.use_dependency_links(
bool_option(options, 'use-dependency-links'))
zc.buildout.easy_install.allow_picked_versions(
bool_option(options, 'allow-picked-versions'))
self.show_picked_versions = bool_option(options,
'show-picked-versions')
self.update_versions_file = options['update-versions-file']
zc.buildout.easy_install.store_required_by(self.show_picked_versions or
self.update_versions_file)
download_cache = options.get('download-cache')
extends_cache = options.get('extends-cache')
if bool_option(options, 'abi-tag-eggs', 'false'):
from zc.buildout.pep425tags import get_abi_tag
options['eggs-directory'] = os.path.join(
options['eggs-directory'], get_abi_tag())
eggs_cache = options.get('eggs-directory')
for cache in [download_cache, extends_cache, eggs_cache]:
if cache:
cache = os.path.join(options['directory'], cache)
if not os.path.exists(cache):
self._logger.info('Creating directory %r.', cache)
os.makedirs(cache)
if download_cache:
# Actually, we want to use a subdirectory in there called 'dist'.
download_cache = os.path.join(download_cache, 'dist')
if not os.path.exists(download_cache):
os.mkdir(download_cache)
zc.buildout.easy_install.download_cache(download_cache)
if bool_option(options, 'install-from-cache'):
if self.offline:
raise zc.buildout.UserError(
"install-from-cache can't be used with offline mode.\n"
"Nothing is installed, even from cache, in offline\n"
"mode, which might better be called 'no-install mode'.\n"
)
zc.buildout.easy_install.install_from_cache(True)
# "Use" each of the defaults so they aren't reported as unused options.
for name in _buildout_default_options:
options[name]
os.chdir(options['directory'])
def _buildout_path(self, name):
if '${' in name:
return name
return os.path.join(self._buildout_dir, name)
@command
def bootstrap(self, args):
__doing__ = 'Bootstrapping.'
if os.path.exists(self['buildout']['develop-eggs-directory']):
if os.path.isdir(self['buildout']['develop-eggs-directory']):
rmtree(self['buildout']['develop-eggs-directory'])
self._logger.debug(
"Removed existing develop-eggs directory")
self._setup_directories()
# Now copy buildout and setuptools eggs, and record destination eggs:
entries = []
for dist in zc.buildout.easy_install.buildout_and_setuptools_dists:
if dist.precedence == pkg_resources.DEVELOP_DIST:
dest = os.path.join(self['buildout']['develop-eggs-directory'],
dist.key + '.egg-link')
with open(dest, 'w') as fh:
fh.write(dist.location)
entries.append(dist.location)
else:
dest = os.path.join(self['buildout']['eggs-directory'],
os.path.basename(dist.location))
entries.append(dest)
if not os.path.exists(dest):
if os.path.isdir(dist.location):
shutil.copytree(dist.location, dest)
else:
shutil.copy2(dist.location, dest)
# Create buildout script
ws = pkg_resources.WorkingSet(entries)
ws.require('zc.buildout')
options = self['buildout']
eggs_dir = options['eggs-directory']
develop_eggs_dir = options['develop-eggs-directory']
ws = zc.buildout.easy_install.sort_working_set(
ws,
eggs_dir=eggs_dir,
develop_eggs_dir=develop_eggs_dir
)
zc.buildout.easy_install.scripts(
['zc.buildout'], ws, sys.executable,
options['bin-directory'],
relative_paths = (
bool_option(options, 'relative-paths', False)
and options['directory']
or ''),
)
def _init_config(self, config_file, args):
print_('Creating %r.' % config_file)
f = open(config_file, 'w')
sep = re.compile(r'[\\/]')
if args:
eggs = '\n '.join(a for a in args if not sep.search(a))
sepsub = os.path.sep == '/' and '/' or re.escape(os.path.sep)
paths = '\n '.join(
sep.sub(sepsub, a)
for a in args if sep.search(a))
f.write('[buildout]\n'
'parts = py\n'
'\n'
'[py]\n'
'recipe = zc.recipe.egg\n'
'interpreter = py\n'
'eggs =\n'
)
if eggs:
f.write(' %s\n' % eggs)
if paths:
f.write('extra-paths =\n %s\n' % paths)
for p in [a for a in args if sep.search(a)]:
if not os.path.exists(p):
os.mkdir(p)
else:
f.write('[buildout]\nparts =\n')
f.close()
@command
def init(self, args):
self.bootstrap(())
if args:
self.install(())
@command
def install(self, install_args):
__doing__ = 'Installing.'
self._load_extensions()
self._setup_directories()
# Add develop-eggs directory to path so that it gets searched
# for eggs:
sys.path.insert(0, self['buildout']['develop-eggs-directory'])
# Check for updates. This could cause the process to be restarted
self._maybe_upgrade()
# load installed data
(installed_part_options, installed_exists
)= self._read_installed_part_options()
# Remove old develop eggs
self._uninstall(
installed_part_options['buildout'].get(
'installed_develop_eggs', '')
)
# Build develop eggs
installed_develop_eggs = self._develop()
installed_part_options['buildout']['installed_develop_eggs'
] = installed_develop_eggs
if installed_exists:
self._update_installed(
installed_develop_eggs=installed_develop_eggs)
# get configured and installed part lists
conf_parts = self['buildout']['parts']
conf_parts = conf_parts and conf_parts.split() or []
installed_parts = installed_part_options['buildout']['parts']
installed_parts = installed_parts and installed_parts.split() or []
if install_args:
install_parts = install_args
uninstall_missing = False
else:
install_parts = conf_parts
uninstall_missing = True
# load and initialize recipes
[self[part]['recipe'] for part in install_parts]
if not install_args:
install_parts = self._parts
if self._log_level < logging.DEBUG:
sections = list(self)
sections.sort()
print_()
print_('Configuration data:')
for section in sorted(self._data):
_save_options(section, self[section], sys.stdout)
print_()
# compute new part recipe signatures
self._compute_part_signatures(install_parts)
        # uninstall parts that are no longer used or whose configs
        # have changed
for part in reversed(installed_parts):
if part in install_parts:
old_options = installed_part_options[part].copy()
installed_files = old_options.pop('__buildout_installed__')
new_options = self.get(part)
if old_options == new_options:
# The options are the same, but are all of the
# installed files still there? If not, we should
# reinstall.
if not installed_files:
continue
for f in installed_files.split('\n'):
if not os.path.exists(self._buildout_path(f)):
break
else:
continue
# output debugging info
if self._logger.getEffectiveLevel() < logging.DEBUG:
for k in old_options:
if k not in new_options:
self._logger.debug("Part %s, dropped option %s.",
part, k)
elif old_options[k] != new_options[k]:
self._logger.debug(
"Part %s, option %s changed:\n%r != %r",
part, k, new_options[k], old_options[k],
)
for k in new_options:
if k not in old_options:
self._logger.debug("Part %s, new option %s.",
part, k)
elif not uninstall_missing:
continue
self._uninstall_part(part, installed_part_options)
installed_parts = [p for p in installed_parts if p != part]
if installed_exists:
self._update_installed(parts=' '.join(installed_parts))
# Check for unused buildout options:
_check_for_unused_options_in_section(self, 'buildout')
# install new parts
for part in install_parts:
signature = self[part].pop('__buildout_signature__')
saved_options = self[part].copy()
recipe = self[part].recipe
if part in installed_parts: # update
need_to_save_installed = False
__doing__ = 'Updating %s.', part
self._logger.info(*__doing__)
old_options = installed_part_options[part]
old_installed_files = old_options['__buildout_installed__']
try:
update = recipe.update
except AttributeError:
update = recipe.install
self._logger.warning(
"The recipe for %s doesn't define an update "
"method. Using its install method.",
part)
try:
installed_files = self[part]._call(update)
except:
installed_parts.remove(part)
self._uninstall(old_installed_files)
if installed_exists:
self._update_installed(
parts=' '.join(installed_parts))
raise
old_installed_files = old_installed_files.split('\n')
if installed_files is None:
installed_files = old_installed_files
else:
if isinstance(installed_files, str):
installed_files = [installed_files]
else:
installed_files = list(installed_files)
need_to_save_installed = [
p for p in installed_files
if p not in old_installed_files]
if need_to_save_installed:
installed_files = (old_installed_files
+ need_to_save_installed)
else: # install
need_to_save_installed = True
__doing__ = 'Installing %s.', part
self._logger.info(*__doing__)
installed_files = self[part]._call(recipe.install)
if installed_files is None:
self._logger.warning(
"The %s install returned None. A path or "
"iterable os paths should be returned.",
part)
installed_files = ()
elif isinstance(installed_files, str):
installed_files = [installed_files]
else:
installed_files = list(installed_files)
installed_part_options[part] = saved_options
saved_options['__buildout_installed__'
] = '\n'.join(installed_files)
saved_options['__buildout_signature__'] = signature
installed_parts = [p for p in installed_parts if p != part]
installed_parts.append(part)
_check_for_unused_options_in_section(self, part)
if need_to_save_installed:
installed_part_options['buildout']['parts'] = (
' '.join(installed_parts))
self._save_installed_options(installed_part_options)
installed_exists = True
else:
assert installed_exists
self._update_installed(parts=' '.join(installed_parts))
if installed_develop_eggs:
if not installed_exists:
self._save_installed_options(installed_part_options)
elif (not installed_parts) and installed_exists:
os.remove(self['buildout']['installed'])
if self.show_picked_versions or self.update_versions_file:
self._print_picked_versions()
self._unload_extensions()
def _update_installed(self, **buildout_options):
installed = self['buildout']['installed']
f = open(installed, 'a')
f.write('\n[buildout]\n')
for option, value in list(buildout_options.items()):
_save_option(option, value, f)
f.close()
def _uninstall_part(self, part, installed_part_options):
# uninstall part
__doing__ = 'Uninstalling %s.', part
self._logger.info(*__doing__)
# run uninstall recipe
recipe, entry = _recipe(installed_part_options[part])
try:
uninstaller = _install_and_load(
recipe, 'zc.buildout.uninstall', entry, self)
self._logger.info('Running uninstall recipe.')
uninstaller(part, installed_part_options[part])
except (ImportError, pkg_resources.DistributionNotFound):
pass
# remove created files and directories
self._uninstall(
installed_part_options[part]['__buildout_installed__'])
def _setup_directories(self):
__doing__ = 'Setting up buildout directories'
# Create buildout directories
for name in ('bin', 'parts', 'develop-eggs'):
d = self['buildout'][name+'-directory']
if not os.path.exists(d):
self._logger.info('Creating directory %r.', d)
os.mkdir(d)
def _develop(self):
"""Install sources by running setup.py develop on them
"""
__doing__ = 'Processing directories listed in the develop option'
develop = self['buildout'].get('develop')
if not develop:
return ''
dest = self['buildout']['develop-eggs-directory']
old_files = os.listdir(dest)
env = dict(os.environ,
PYTHONPATH=zc.buildout.easy_install.setuptools_pythonpath)
here = os.getcwd()
try:
try:
for setup in develop.split():
setup = self._buildout_path(setup)
files = glob.glob(setup)
if not files:
self._logger.warning("Couldn't develop %r (not found)",
setup)
else:
files.sort()
for setup in files:
self._logger.info("Develop: %r", setup)
__doing__ = 'Processing develop directory %r.', setup
zc.buildout.easy_install.develop(setup, dest)
except:
# if we had an error, we need to roll back changes, by
# removing any files we created.
self._sanity_check_develop_eggs_files(dest, old_files)
self._uninstall('\n'.join(
[os.path.join(dest, f)
for f in os.listdir(dest)
if f not in old_files
]))
raise
else:
self._sanity_check_develop_eggs_files(dest, old_files)
return '\n'.join([os.path.join(dest, f)
for f in os.listdir(dest)
if f not in old_files
])
finally:
os.chdir(here)
def _sanity_check_develop_eggs_files(self, dest, old_files):
for f in os.listdir(dest):
if f in old_files:
continue
if not (os.path.isfile(os.path.join(dest, f))
and f.endswith('.egg-link')):
self._logger.warning(
"Unexpected entry, %r, in develop-eggs directory.", f)
def _compute_part_signatures(self, parts):
# Compute recipe signature and add to options
for part in parts:
options = self.get(part)
if options is None:
options = self[part] = {}
recipe, entry = _recipe(options)
req = pkg_resources.Requirement.parse(recipe)
sig = _dists_sig(pkg_resources.working_set.resolve([req]))
options['__buildout_signature__'] = ' '.join(sig)
def _read_installed_part_options(self):
old = self['buildout']['installed']
if old and os.path.isfile(old):
fp = open(old)
sections = zc.buildout.configparser.parse(fp, old)
fp.close()
result = {}
for section, options in sections.items():
for option, value in options.items():
if '%(' in value:
for k, v in _spacey_defaults:
value = value.replace(k, v)
options[option] = value
result[section] = self.Options(self, section, options)
return result, True
else:
return ({'buildout': self.Options(self, 'buildout', {'parts': ''})},
False,
)
def _uninstall(self, installed):
for f in installed.split('\n'):
if not f:
continue
f = self._buildout_path(f)
if os.path.isdir(f):
rmtree(f)
elif os.path.isfile(f):
try:
os.remove(f)
except OSError:
if not (
sys.platform == 'win32' and
(realpath(os.path.join(os.path.dirname(sys.argv[0]),
'buildout.exe'))
==
realpath(f)
)
# Sigh. This is the executable used to run the buildout
# and, of course, it's in use. Leave it.
):
raise
def _install(self, part):
options = self[part]
recipe, entry = _recipe(options)
recipe_class = pkg_resources.load_entry_point(
recipe, 'zc.buildout', entry)
installed = recipe_class(self, part, options).install()
if installed is None:
installed = []
elif isinstance(installed, str):
installed = [installed]
base = self._buildout_path('')
installed = [d.startswith(base) and d[len(base):] or d
for d in installed]
return ' '.join(installed)
def _save_installed_options(self, installed_options):
installed = self['buildout']['installed']
if not installed:
return
f = open(installed, 'w')
_save_options('buildout', installed_options['buildout'], f)
for part in installed_options['buildout']['parts'].split():
print_(file=f)
_save_options(part, installed_options[part], f)
f.close()
def _error(self, message, *args):
raise zc.buildout.UserError(message % args)
def _setup_socket_timeout(self):
timeout = self['buildout']['socket-timeout']
if timeout != '':
try:
timeout = int(timeout)
import socket
self._logger.info(
'Setting socket time out to %d seconds.', timeout)
socket.setdefaulttimeout(timeout)
except ValueError:
self._logger.warning("Default socket timeout is used !\n"
"Value in configuration is not numeric: [%s].\n",
timeout)
def _setup_logging(self):
root_logger = logging.getLogger()
self._logger = logging.getLogger('zc.buildout')
handler = logging.StreamHandler(sys.stdout)
log_format = self['buildout']['log-format']
if not log_format:
# No format specified. Use different formatter for buildout
# and other modules, showing logger name except for buildout
log_format = '%(name)s: %(message)s'
buildout_handler = logging.StreamHandler(sys.stdout)
buildout_handler.setFormatter(logging.Formatter('%(message)s'))
self._logger.propagate = False
self._logger.addHandler(buildout_handler)
handler.setFormatter(logging.Formatter(log_format))
root_logger.addHandler(handler)
level = self['buildout']['log-level']
if level in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
level = getattr(logging, level)
else:
try:
level = int(level)
except ValueError:
self._error("Invalid logging level %s", level)
verbosity = self['buildout'].get('verbosity', 0)
try:
verbosity = int(verbosity)
except ValueError:
self._error("Invalid verbosity %s", verbosity)
level -= verbosity
root_logger.setLevel(level)
self._log_level = level
def _maybe_upgrade(self):
# See if buildout or setuptools need to be upgraded.
# If they do, do the upgrade and restart the buildout process.
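        # BUILDOUT_RESTART_AFTER_UPGRADE is set just before the re-exec at
        # the end of this method, so the restarted process skips this check
        # and cannot enter a restart loop.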
__doing__ = 'Checking for upgrades.'
if 'BUILDOUT_RESTART_AFTER_UPGRADE' in os.environ:
return
if not self.newest:
return
ws = zc.buildout.easy_install.install(
('zc.buildout', 'setuptools', 'pip', 'wheel'),
self['buildout']['eggs-directory'],
links = self['buildout'].get('find-links', '').split(),
index = self['buildout'].get('index'),
path = [self['buildout']['develop-eggs-directory']],
allow_hosts = self._allow_hosts
)
upgraded = []
for project in 'zc.buildout', 'setuptools', 'pip', 'wheel':
req = pkg_resources.Requirement.parse(project)
dist = ws.find(req)
importlib.import_module(project)
if not inspect.getfile(sys.modules[project]).startswith(dist.location):
upgraded.append(dist)
if not upgraded:
return
__doing__ = 'Upgrading.'
should_run = realpath(
os.path.join(os.path.abspath(self['buildout']['bin-directory']),
'buildout')
)
if sys.platform == 'win32':
should_run += '-script.py'
if (realpath(os.path.abspath(sys.argv[0])) != should_run):
self._logger.debug("Running %r.", realpath(sys.argv[0]))
self._logger.debug("Local buildout is %r.", should_run)
self._logger.warning("Not upgrading because not running a local "
"buildout command.")
return
self._logger.info("Upgraded:\n %s;\nRestarting.",
",\n ".join([("%s version %s"
% (dist.project_name, dist.version)
)
for dist in upgraded
]
),
)
# the new dist is different, so we've upgraded.
# Update the scripts and return True
options = self['buildout']
eggs_dir = options['eggs-directory']
develop_eggs_dir = options['develop-eggs-directory']
ws = zc.buildout.easy_install.sort_working_set(
ws,
eggs_dir=eggs_dir,
develop_eggs_dir=develop_eggs_dir
)
zc.buildout.easy_install.scripts(
['zc.buildout'], ws, sys.executable,
options['bin-directory'],
relative_paths = (
bool_option(options, 'relative-paths', False)
and options['directory']
or ''),
)
# Restart
args = sys.argv[:]
if not __debug__:
args.insert(0, '-O')
args.insert(0, sys.executable)
env=dict(os.environ, BUILDOUT_RESTART_AFTER_UPGRADE='1')
sys.exit(subprocess.call(args, env=env))
def _load_extensions(self):
__doing__ = 'Loading extensions.'
specs = self['buildout'].get('extensions', '').split()
        for superseded_extension in ['buildout-versions',
                                     'buildout.dumppickedversions']:
            if superseded_extension in specs:
msg = ("Buildout now includes 'buildout-versions' (and part "
"of the older 'buildout.dumppickedversions').\n"
"Remove the extension from your configuration and "
"look at the 'show-picked-versions' option in "
"buildout's documentation.")
raise zc.buildout.UserError(msg)
if specs:
path = [self['buildout']['develop-eggs-directory']]
if self.offline:
dest = None
path.append(self['buildout']['eggs-directory'])
else:
dest = self['buildout']['eggs-directory']
zc.buildout.easy_install.install(
specs, dest, path=path,
working_set=pkg_resources.working_set,
links = self['buildout'].get('find-links', '').split(),
index = self['buildout'].get('index'),
newest=self.newest, allow_hosts=self._allow_hosts)
# Clear cache because extensions might now let us read pages we
# couldn't read before.
zc.buildout.easy_install.clear_index_cache()
for ep in pkg_resources.iter_entry_points('zc.buildout.extension'):
ep.load()(self)
def _unload_extensions(self):
__doing__ = 'Unloading extensions.'
specs = self['buildout'].get('extensions', '').split()
if specs:
for ep in pkg_resources.iter_entry_points(
'zc.buildout.unloadextension'):
ep.load()(self)
def _print_picked_versions(self):
picked_versions, required_by = (zc.buildout.easy_install
.get_picked_versions())
if not picked_versions:
# Don't print empty output.
return
output = _format_picked_versions(picked_versions, required_by)
if self.show_picked_versions:
print_("Versions had to be automatically picked.")
print_("The following part definition lists the versions picked:")
print_('\n'.join(output))
if self.update_versions_file:
# Write to the versions file.
if os.path.exists(self.update_versions_file):
output[:1] = [
'',
'# Added by buildout at %s' % datetime.datetime.now()
]
output.append('')
f = open(self.update_versions_file, 'a')
f.write(('\n'.join(output)))
f.close()
print_("Picked versions have been written to " +
self.update_versions_file)
@command
def setup(self, args):
if not args:
raise zc.buildout.UserError(
"The setup command requires the path to a setup script or \n"
"directory containing a setup script, and its arguments."
)
setup = args.pop(0)
if os.path.isdir(setup):
setup = os.path.join(setup, 'setup.py')
self._logger.info("Running setup script %r.", setup)
setup = os.path.abspath(setup)
fd, tsetup = tempfile.mkstemp()
try:
os.write(fd, (zc.buildout.easy_install.runsetup_template % dict(
setupdir=os.path.dirname(setup),
setup=setup,
__file__ = setup,
)).encode())
args = [sys.executable, tsetup] + args
zc.buildout.easy_install.call_subprocess(args)
finally:
os.close(fd)
os.remove(tsetup)
@command
def runsetup(self, args):
self.setup(args)
@command
def query(self, args=None):
if args is None or len(args) != 1:
_error('The query command requires a single argument.')
option = args[0]
option = option.split(':')
if len(option) == 1:
option = 'buildout', option[0]
elif len(option) != 2:
_error('Invalid option:', args[0])
section, option = option
verbose = self['buildout'].get('verbosity', 0) != 0
if verbose:
print_('${%s:%s}' % (section, option))
try:
print_(self._raw[section][option])
except KeyError:
if section in self._raw:
_error('Key not found:', option)
else:
_error('Section not found:', section)
@command
def annotate(self, args=None):
verbose = self['buildout'].get('verbosity', 0) != 0
section = None
if args is None:
sections = []
else:
sections = args
_print_annotate(self._annotated, verbose, sections, self._buildout_dir)
def print_options(self, base_path=None):
for section in sorted(self._data):
if section == 'buildout' or section == self['buildout']['versions']:
continue
print_('['+section+']')
for k, v in sorted(self._data[section].items()):
if '\n' in v:
v = '\n ' + v.replace('\n', '\n ')
else:
v = ' '+v
if base_path:
v = v.replace(os.getcwd(), base_path)
print_("%s =%s" % (k, v))
def __getitem__(self, section):
__doing__ = 'Getting section %s.', section
try:
return self._data[section]
except KeyError:
pass
try:
data = self._raw[section]
except KeyError:
raise MissingSection(section)
options = self.Options(self, section, data)
self._data[section] = options
options._initialize()
return options
def __setitem__(self, name, data):
if name in self._raw:
raise KeyError("Section already exists", name)
self._raw[name] = dict((k, str(v)) for (k, v) in data.items())
self[name] # Add to parts
def parse(self, data):
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import textwrap
sections = zc.buildout.configparser.parse(
StringIO(textwrap.dedent(data)), '', _default_globals)
for name in sections:
if name in self._raw:
raise KeyError("Section already exists", name)
self._raw[name] = dict((k, str(v))
for (k, v) in sections[name].items())
for name in sections:
self[name] # Add to parts
def __delitem__(self, key):
raise NotImplementedError('__delitem__')
def keys(self):
return list(self._raw.keys())
def __iter__(self):
return iter(self._raw)
def __len__(self):
return len(self._raw)
def _install_and_load(spec, group, entry, buildout):
__doing__ = 'Loading recipe %r.', spec
try:
req = pkg_resources.Requirement.parse(spec)
buildout_options = buildout['buildout']
if pkg_resources.working_set.find(req) is None:
__doing__ = 'Installing recipe %s.', spec
if buildout.offline:
dest = None
path = [buildout_options['develop-eggs-directory'],
buildout_options['eggs-directory'],
]
else:
dest = buildout_options['eggs-directory']
path = [buildout_options['develop-eggs-directory']]
zc.buildout.easy_install.install(
[spec], dest,
links=buildout._links,
index=buildout_options.get('index'),
path=path,
working_set=pkg_resources.working_set,
newest=buildout.newest,
allow_hosts=buildout._allow_hosts
)
__doing__ = 'Loading %s recipe entry %s:%s.', group, spec, entry
return pkg_resources.load_entry_point(
req.project_name, group, entry)
except Exception:
v = sys.exc_info()[1]
buildout._logger.log(
1,
"Couldn't load %s entry point %s\nfrom %s:\n%s.",
group, entry, spec, v)
raise
class Options(DictMixin):
def __init__(self, buildout, section, data):
self.buildout = buildout
self.name = section
self._raw = data
self._cooked = {}
self._data = {}
def _initialize(self):
name = self.name
__doing__ = 'Initializing section %s.', name
if '<' in self._raw:
self._raw = self._do_extend_raw(name, self._raw, [])
# force substitutions
for k, v in sorted(self._raw.items()):
if '${' in v:
self._dosub(k, v)
if name == 'buildout':
return # buildout section can never be a part
for dname in self.get('<part-dependencies>', '').split():
# force use of dependencies in buildout:
self.buildout[dname]
if self.get('recipe'):
self.initialize()
self.buildout._parts.append(name)
def initialize(self):
reqs, entry = _recipe(self._data)
buildout = self.buildout
recipe_class = _install_and_load(reqs, 'zc.buildout', entry, buildout)
name = self.name
self.recipe = recipe_class(buildout, name, self)
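    # The ``<`` option implements section "macros": the sections it names are
    # merged recursively into this one, with local values winning; ``doing``
    # tracks the extension chain so loops can be reported instead of
    # recursing forever.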
def _do_extend_raw(self, name, data, doing):
if name == 'buildout':
return data
if name in doing:
raise zc.buildout.UserError("Infinite extending loop %r" % name)
doing.append(name)
try:
to_do = data.get('<', None)
if to_do is None:
return data
__doing__ = 'Loading input sections for %r', name
result = {}
for iname in to_do.split('\n'):
iname = iname.strip()
if not iname:
continue
raw = self.buildout._raw.get(iname)
if raw is None:
raise zc.buildout.UserError("No section named %r" % iname)
result.update(self._do_extend_raw(iname, raw, doing))
result = _annotate_section(result, "")
data = _annotate_section(copy.deepcopy(data), "")
result = _update_section(result, data)
result = _unannotate_section(result)
result.pop('<', None)
return result
finally:
assert doing.pop() == name
def _dosub(self, option, v):
__doing__ = 'Getting option %s:%s.', self.name, option
seen = [(self.name, option)]
v = '$$'.join([self._sub(s, seen) for s in v.split('$$')])
self._cooked[option] = v
def get(self, option, default=None, seen=None):
try:
return self._data[option]
except KeyError:
pass
v = self._cooked.get(option)
if v is None:
v = self._raw.get(option)
if v is None:
return default
__doing__ = 'Getting option %s:%s.', self.name, option
if '${' in v:
key = self.name, option
if seen is None:
seen = [key]
elif key in seen:
raise zc.buildout.UserError(
"Circular reference in substitutions.\n"
)
else:
seen.append(key)
v = '$$'.join([self._sub(s, seen) for s in v.split('$$')])
seen.pop()
self._data[option] = v
return v
_template_split = re.compile('([$]{[^}]*})').split
_simple = re.compile('[-a-zA-Z0-9 ._]+$').match
_valid = re.compile(r'\${[-a-zA-Z0-9 ._]*:[-a-zA-Z0-9 ._]+}$').match
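    # Substitution syntax (illustrative): ``${section:option}`` pulls a value
    # from another section, while ``${:option}`` refers to an option of the
    # current section; see _sub() below for validation of both names.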
def _sub(self, template, seen):
value = self._template_split(template)
subs = []
for ref in value[1::2]:
s = tuple(ref[2:-1].split(':'))
if not self._valid(ref):
if len(s) < 2:
raise zc.buildout.UserError("The substitution, %s,\n"
"doesn't contain a colon."
% ref)
if len(s) > 2:
raise zc.buildout.UserError("The substitution, %s,\n"
"has too many colons."
% ref)
if not self._simple(s[0]):
raise zc.buildout.UserError(
"The section name in substitution, %s,\n"
"has invalid characters."
% ref)
if not self._simple(s[1]):
raise zc.buildout.UserError(
"The option name in substitution, %s,\n"
"has invalid characters."
% ref)
section, option = s
if not section:
section = self.name
v = self.buildout[section].get(option, None, seen)
if v is None:
if option == '_buildout_section_name_':
v = self.name
else:
raise MissingOption("Referenced option does not exist:",
section, option)
subs.append(v)
subs.append('')
return ''.join([''.join(v) for v in zip(value[::2], subs)])
def __getitem__(self, key):
try:
return self._data[key]
except KeyError:
pass
v = self.get(key)
if v is None:
raise MissingOption("Missing option: %s:%s" % (self.name, key))
return v
def __setitem__(self, option, value):
if not isinstance(value, str):
raise TypeError('Option values must be strings', value)
self._data[option] = value
def __delitem__(self, key):
if key in self._raw:
del self._raw[key]
if key in self._data:
del self._data[key]
if key in self._cooked:
del self._cooked[key]
elif key in self._data:
del self._data[key]
else:
raise KeyError(key)
def keys(self):
raw = self._raw
return list(self._raw) + [k for k in self._data if k not in raw]
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
def copy(self):
result = copy.deepcopy(self._raw)
result.update(self._cooked)
result.update(self._data)
return result
def _call(self, f):
buildout_directory = self.buildout['buildout']['directory']
self._created = []
try:
try:
os.chdir(buildout_directory)
return f()
except:
for p in self._created:
if os.path.isdir(p):
rmtree(p)
elif os.path.isfile(p):
os.remove(p)
else:
self.buildout._logger.warning("Couldn't clean up %r.", p)
raise
finally:
self._created = None
os.chdir(buildout_directory)
def created(self, *paths):
try:
self._created.extend(paths)
except AttributeError:
raise TypeError(
"Attempt to register a created path while not installing",
self.name)
return self._created
def __repr__(self):
return repr(dict(self))
Buildout.Options = Options
_spacey_nl = re.compile('[ \t\r\f\v]*\n[ \t\r\f\v\n]*'
'|'
'^[ \t\r\f\v]+'
'|'
'[ \t\r\f\v]+$'
)
_spacey_defaults = [
('%(__buildout_space__)s', ' '),
('%(__buildout_space_n__)s', '\n'),
('%(__buildout_space_r__)s', '\r'),
('%(__buildout_space_f__)s', '\f'),
('%(__buildout_space_v__)s', '\v'),
]
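# These replacement pairs let whitespace-significant values survive a round
# trip through the .installed.cfg file: _save_option() escapes spacey
# newlines on write, and _read_installed_part_options() substitutes the
# escapes back on read.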
def _quote_spacey_nl(match):
match = match.group(0).split('\n', 1)
result = '\n\t'.join(
[(s
.replace(' ', '%(__buildout_space__)s')
.replace('\r', '%(__buildout_space_r__)s')
.replace('\f', '%(__buildout_space_f__)s')
.replace('\v', '%(__buildout_space_v__)s')
.replace('\n', '%(__buildout_space_n__)s')
)
for s in match]
)
return result
def _save_option(option, value, f):
value = _spacey_nl.sub(_quote_spacey_nl, value)
if value.startswith('\n\t'):
value = '%(__buildout_space_n__)s' + value[2:]
if value.endswith('\n\t'):
value = value[:-2] + '%(__buildout_space_n__)s'
print_(option, '=', value, file=f)
def _save_options(section, options, f):
print_('[%s]' % section, file=f)
items = list(options.items())
items.sort()
for option, value in items:
_save_option(option, value, f)
def _default_globals():
"""Return a mapping of default and precomputed expressions.
    These default expressions are convenience defaults available when
    evaluating section header expressions.
    NB: this is wrapped in a function so that computing these expressions is
    lazy and done only if needed (i.e. if there is at least one section with
    an expression), because computing some of them can be expensive.
"""
# partially derived or inspired from its.py
# Copyright (c) 2012, Kenneth Reitz All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the
# distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# default available modules, explicitly re-imported locally here on purpose
import sys
import os
import platform
import re
globals_defs = {'sys': sys, 'os': os, 'platform': platform, 're': re,}
    # major python versions, as python2 and python3
major_python_versions = tuple(map(str, platform.python_version_tuple()))
globals_defs.update({'python2': major_python_versions[0] == '2',
'python3': major_python_versions[0] == '3'})
    # minor python versions, as python24, python25 ... python315
minor_python_versions = ('24', '25', '26', '27',
'30', '31', '32', '33', '34', '35', '36', '37', '38', '39',
'310', '311', '312', '313', '314', '315')
for v in minor_python_versions:
globals_defs['python' + v] = ''.join(major_python_versions[:2]) == v
# interpreter type
sys_version = sys.version.lower()
pypy = 'pypy' in sys_version
jython = 'java' in sys_version
    ironpython = 'iron' in sys_version
# assume CPython, if nothing else.
cpython = not any((pypy, jython, ironpython,))
globals_defs.update({'cpython': cpython,
'pypy': pypy,
'jython': jython,
'ironpython': ironpython})
# operating system
sys_platform = str(sys.platform).lower()
globals_defs.update({'linux': 'linux' in sys_platform,
'windows': 'win32' in sys_platform,
'cygwin': 'cygwin' in sys_platform,
'solaris': 'sunos' in sys_platform,
'macosx': 'darwin' in sys_platform,
'posix': 'posix' in os.name.lower()})
    # bits and endianness
import struct
void_ptr_size = struct.calcsize('P') * 8
globals_defs.update({'bits32': void_ptr_size == 32,
'bits64': void_ptr_size == 64,
'little_endian': sys.byteorder == 'little',
'big_endian': sys.byteorder == 'big'})
return globals_defs
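# With these globals, a section header such as ``[tools: python3 and linux]``
# (illustrative) is kept only on Python 3 Linux hosts; see the expression
# handling in zc.buildout.configparser.parse.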
variable_template_split = re.compile('([$]{[^}]*})').split
def _open(
base, filename, seen, download_options,
override, downloaded, user_defaults
):
"""Open a configuration file and return the result as a dictionary,
Recursively open other files based on buildout options found.
"""
download_options = _update_section(download_options, override)
raw_download_options = _unannotate_section(download_options)
newest = bool_option(raw_download_options, 'newest', 'false')
    fallback = newest and filename not in downloaded
extends_cache = raw_download_options.get('extends-cache')
if extends_cache and variable_template_split(extends_cache)[1::2]:
raise ValueError(
"extends-cache '%s' may not contain ${section:variable} to expand."
% extends_cache
)
download = zc.buildout.download.Download(
raw_download_options, cache=extends_cache,
fallback=fallback, hash_name=True)
is_temp = False
downloaded_filename = None
if _isurl(filename):
downloaded_filename, is_temp = download(filename)
fp = open(downloaded_filename)
base = filename[:filename.rfind('/')]
elif _isurl(base):
if os.path.isabs(filename):
fp = open(filename)
base = os.path.dirname(filename)
else:
filename = base + '/' + filename
downloaded_filename, is_temp = download(filename)
fp = open(downloaded_filename)
base = filename[:filename.rfind('/')]
else:
filename = os.path.join(base, filename)
fp = open(filename)
base = os.path.dirname(filename)
downloaded.add(filename)
if filename in seen:
if is_temp:
fp.close()
os.remove(downloaded_filename)
raise zc.buildout.UserError("Recursive file include", seen, filename)
root_config_file = not seen
seen.append(filename)
filename_for_logging = filename
if downloaded_filename:
filename_for_logging = '%s (downloaded as %s)' % (
filename, downloaded_filename)
result = zc.buildout.configparser.parse(
fp, filename_for_logging, _default_globals)
fp.close()
if is_temp:
os.remove(downloaded_filename)
options = result.get('buildout', {})
extends = options.pop('extends', None)
if 'extended-by' in options:
raise zc.buildout.UserError(
            'No longer supported "extended-by" option found in %s.' %
filename)
result = _annotate(result, filename)
if root_config_file and 'buildout' in result:
download_options = _update_section(
download_options, result['buildout']
)
if extends:
extends = extends.split()
eresult, user_defaults = _open(
base, extends.pop(0), seen, download_options, override,
downloaded, user_defaults
)
for fname in extends:
next_extend, user_defaults = _open(
base, fname, seen, download_options, override,
downloaded, user_defaults
)
eresult = _update(eresult, next_extend)
result = _update(eresult, result)
else:
if user_defaults:
result = _update(user_defaults, result)
user_defaults = {}
seen.pop()
return result, user_defaults
ignore_directories = '.svn', 'CVS', '__pycache__', '.git'
_dir_hashes = {}
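# _dir_hash computes the stable fingerprint used in the
# ``__buildout_signature__`` of develop eggs: it hashes directory and file
# names plus file contents, skipping VCS directories and compiled files,
# and memoizes the result per directory for the lifetime of the process.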
def _dir_hash(dir):
dir_hash = _dir_hashes.get(dir, None)
if dir_hash is not None:
return dir_hash
hash = md5()
for (dirpath, dirnames, filenames) in os.walk(dir):
dirnames[:] = sorted(n for n in dirnames if n not in ignore_directories)
filenames[:] = sorted(f for f in filenames
if (not (f.endswith('pyc') or f.endswith('pyo'))
and os.path.exists(os.path.join(dirpath, f)))
)
for_hash = ' '.join(dirnames + filenames)
if isinstance(for_hash, text_type):
for_hash = for_hash.encode()
hash.update(for_hash)
for name in filenames:
path = os.path.join(dirpath, name)
if name == 'entry_points.txt':
f = open(path)
# Entry points aren't written in stable order. :(
try:
sections = zc.buildout.configparser.parse(f, path)
data = repr([(sname, sorted(sections[sname].items()))
for sname in sorted(sections)]).encode('utf-8')
except Exception:
f.close()
f = open(path, 'rb')
data = f.read()
else:
f = open(path, 'rb')
data = f.read()
f.close()
hash.update(data)
_dir_hashes[dir] = dir_hash = hash.hexdigest()
return dir_hash
def _dists_sig(dists):
seen = set()
result = []
for dist in sorted(dists):
if dist in seen:
continue
seen.add(dist)
location = dist.location
if dist.precedence == pkg_resources.DEVELOP_DIST:
result.append(dist.project_name + '-' + _dir_hash(location))
else:
result.append(os.path.basename(location))
return result
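# Merge example (illustrative, shown with plain values): with a base section
# {'eggs': 'a\nb'} and an extending section {'eggs +': 'c'}, the merged value
# is 'a\nb\nc'; an 'eggs -' key removes the listed lines instead of
# appending them.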
def _update_section(in1, s2):
s1 = copy.deepcopy(in1)
# Base section 2 on section 1; section 1 is copied, with key-value pairs
# in section 2 overriding those in section 1. If there are += or -=
# operators in section 2, process these to add or subtract items (delimited
# by newlines) from the preexisting values.
s2 = copy.deepcopy(s2) # avoid mutating the second argument, which is unexpected
# Sort on key, then on the addition or subtraction operator (+ comes first)
for k, v in sorted(s2.items(), key=lambda x: (x[0].rstrip(' +'), x[0][-1])):
if k.endswith('+'):
key = k.rstrip(' +')
implicit_value = SectionKey("", "IMPLICIT_VALUE")
# Find v1 in s2 first; it may have been defined locally too.
section_key = s2.get(key, s1.get(key, implicit_value))
section_key = copy.deepcopy(section_key)
section_key.addToValue(v.value, v.source)
s2[key] = section_key
del s2[k]
elif k.endswith('-'):
key = k.rstrip(' -')
implicit_value = SectionKey("", "IMPLICIT_VALUE")
# Find v1 in s2 first; it may have been set by a += operation first
section_key = s2.get(key, s1.get(key, implicit_value))
section_key = copy.deepcopy(section_key)
section_key.removeFromValue(v.value, v.source)
s2[key] = section_key
del s2[k]
_update_verbose(s1, s2)
return s1
def _update_verbose(s1, s2):
for key, v2 in s2.items():
if key in s1:
v1 = s1[key]
v1.overrideValue(v2)
else:
s1[key] = copy.deepcopy(v2)
def _update(in1, d2):
d1 = copy.deepcopy(in1)
for section in d2:
if section in d1:
d1[section] = _update_section(d1[section], d2[section])
else:
d1[section] = copy.deepcopy(d2[section])
return d1
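# A recipe option such as "zc.recipe.egg:scripts" (illustrative) splits into
# ("zc.recipe.egg", "scripts"); without a colon the entry point name
# defaults to "default".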
def _recipe(options):
recipe = options['recipe']
if ':' in recipe:
recipe, entry = recipe.split(':')
else:
entry = 'default'
return recipe, entry
def _doing():
_, v, tb = sys.exc_info()
message = str(v)
doing = []
while tb is not None:
d = tb.tb_frame.f_locals.get('__doing__')
if d:
doing.append(d)
tb = tb.tb_next
if doing:
sys.stderr.write('While:\n')
for d in doing:
if not isinstance(d, str):
d = d[0] % d[1:]
sys.stderr.write(' %s\n' % d)
def _error(*message):
sys.stderr.write('Error: ' + ' '.join(message) +'\n')
sys.exit(1)
_internal_error_template = """
An internal error occurred due to a bug in either zc.buildout or in a
recipe being used:
"""
def _check_for_unused_options_in_section(buildout, section):
options = buildout[section]
unused = [option for option in sorted(options._raw)
if option not in options._data]
if unused:
buildout._logger.warning(
"Section `%s` contains unused option(s): %s.\n"
"This may be an indication for either a typo in the option's name "
"or a bug in the used recipe." %
(section, ' '.join(map(repr, unused)))
)
_usage = """\
Usage: buildout [options] [assignments] [command [command arguments]]
Options:
-c config_file
Specify the path to the buildout configuration file to be used.
This defaults to the file named "buildout.cfg" in the current
working directory.
-D
Debug errors. If an error occurs, then the post-mortem debugger
will be started. This is especially useful for debugging recipe
problems.
-h, --help
Print this message and exit.
-N
Run in non-newest mode. This is equivalent to the assignment
buildout:newest=false. With this setting, buildout will not seek
    new distributions if installed distributions satisfy its
requirements.
-q
Decrease the level of verbosity. This option can be used multiple times.
-t socket_timeout
Specify the socket timeout in seconds.
-U
Don't read user defaults.
-v
Increase the level of verbosity. This option can be used multiple times.
--version
Print buildout version number and exit.
Assignments are of the form: section:option=value and are used to
provide configuration options that override those given in the
configuration file. For example, to run the buildout in offline mode,
use buildout:offline=true.
Options and assignments can be interspersed.
Commands:
install
Install the parts specified in the buildout configuration. This is
the default command if no command is specified.
bootstrap
Create a new buildout in the current working directory, copying
    the buildout and setuptools eggs and creating a basic directory
structure and a buildout-local buildout script.
init [requirements]
Initialize a buildout, creating a minimal buildout.cfg file if it doesn't
exist and then performing the same actions as for the bootstrap
command.
If requirements are supplied, then the generated configuration
will include an interpreter script that requires them. This
provides an easy way to quickly set up a buildout to experiment
with some packages.
setup script [setup command and options]
Run a given setup script arranging that setuptools is in the
    script's path and that it has been imported so that
setuptools-provided commands (like bdist_egg) can be used even if
the setup script doesn't import setuptools.
The script can be given either as a script path or a path to a
directory containing a setup.py script.
annotate
Display annotated sections. All sections are displayed, sorted
alphabetically. For each section, all key-value pairs are displayed,
sorted alphabetically, along with the origin of the value (file name or
COMPUTED_VALUE, DEFAULT_VALUE, COMMAND_LINE_VALUE).
query section:key
Display value of given section key pair.
"""
def _help():
print_(_usage)
sys.exit(0)
def _version():
version = pkg_resources.working_set.find(
pkg_resources.Requirement.parse('zc.buildout')).version
print_("buildout version %s" % version)
sys.exit(0)
def main(args=None):
if args is None:
args = sys.argv[1:]
config_file = 'buildout.cfg'
verbosity = 0
options = []
use_user_defaults = True
debug = False
while args:
if args[0][0] == '-':
op = orig_op = args.pop(0)
op = op[1:]
while op and op[0] in 'vqhWUoOnNDA':
if op[0] == 'v':
verbosity += 10
elif op[0] == 'q':
verbosity -= 10
elif op[0] == 'U':
use_user_defaults = False
elif op[0] == 'o':
options.append(('buildout', 'offline', 'true'))
elif op[0] == 'O':
options.append(('buildout', 'offline', 'false'))
elif op[0] == 'n':
options.append(('buildout', 'newest', 'true'))
elif op[0] == 'N':
options.append(('buildout', 'newest', 'false'))
elif op[0] == 'D':
debug = True
else:
_help()
op = op[1:]
if op[:1] in ('c', 't'):
op_ = op[:1]
op = op[1:]
if op_ == 'c':
if op:
config_file = op
else:
if args:
config_file = args.pop(0)
else:
_error("No file name specified for option", orig_op)
elif op_ == 't':
try:
timeout_string = args.pop(0)
timeout = int(timeout_string)
options.append(
('buildout', 'socket-timeout', timeout_string))
except IndexError:
_error("No timeout value specified for option", orig_op)
except ValueError:
_error("Timeout value must be numeric", orig_op)
elif op:
if orig_op == '--help':
_help()
elif orig_op == '--version':
_version()
else:
_error("Invalid option", '-'+op[0])
elif '=' in args[0]:
option, value = args.pop(0).split('=', 1)
option = option.split(':')
if len(option) == 1:
option = 'buildout', option[0]
elif len(option) != 2:
                _error('Invalid option:', ':'.join(option))
section, option = option
options.append((section.strip(), option.strip(), value.strip()))
else:
# We've run out of command-line options and option assignments
# The rest should be commands, so we'll stop here
break
if verbosity:
options.append(('buildout', 'verbosity', str(verbosity)))
if args:
command = args.pop(0)
if command not in Buildout.COMMANDS:
_error('invalid command:', command)
else:
command = 'install'
try:
try:
buildout = Buildout(config_file, options,
use_user_defaults, command, args)
getattr(buildout, command)(args)
except SystemExit:
logging.shutdown()
# Make sure we properly propagate an exit code from a restarted
# buildout process.
raise
except Exception:
v = sys.exc_info()[1]
_doing()
exc_info = sys.exc_info()
import pdb, traceback
if debug:
traceback.print_exception(*exc_info)
sys.stderr.write('\nStarting pdb:\n')
pdb.post_mortem(exc_info[2])
else:
if isinstance(v, (zc.buildout.UserError,
distutils.errors.DistutilsError
)
):
_error(str(v))
else:
sys.stderr.write(_internal_error_template)
traceback.print_exception(*exc_info)
sys.exit(1)
finally:
logging.shutdown()
if sys.version_info[:2] < (2, 4):
def reversed(iterable):
        result = list(iterable)
result.reverse()
return result
_bool_names = {'true': True, 'false': False, True: True, False: False}
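# Example (illustrative): bool_option({'newest': 'true'}, 'newest') returns
# True, while any value outside _bool_names raises zc.buildout.UserError.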
def bool_option(options, name, default=None):
value = options.get(name, default)
if value is None:
raise KeyError(name)
try:
return _bool_names[value]
except KeyError:
raise zc.buildout.UserError(
'Invalid value for %r option: %r' % (name, value)) | zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/src/zc/buildout/buildout.py | buildout.py |
"""Buildout download infrastructure"""
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
try:
# Python 3
from urllib.request import urlretrieve
from urllib.parse import urlparse
except ImportError:
# Python 2
import base64
from urlparse import urlparse
from urlparse import urlunparse
import urllib2
def urlretrieve(url, tmp_path):
"""Work around Python issue 24599 including basic auth support
"""
scheme, netloc, path, params, query, frag = urlparse(url)
auth, host = urllib2.splituser(netloc)
if auth:
url = urlunparse((scheme, host, path, params, query, frag))
req = urllib2.Request(url)
base64string = base64.encodestring(auth)[:-1]
basic = "Basic " + base64string
req.add_header("Authorization", basic)
else:
req = urllib2.Request(url)
url_obj = urllib2.urlopen(req)
with open(tmp_path, 'wb') as fp:
fp.write(url_obj.read())
return tmp_path, url_obj.info()
from zc.buildout.easy_install import realpath
import logging
import os
import os.path
import re
import shutil
import sys
import tempfile
import zc.buildout
class ChecksumError(zc.buildout.UserError):
pass
class Download(object):
"""Configurable download utility.
Handles the download cache and offline mode.
    Download(options=None, cache=-1, namespace=None,
             offline=-1, fallback=False, hash_name=False, logger=None)
options: mapping of buildout options (e.g. a ``buildout`` config section)
cache: path to the download cache (excluding namespaces)
namespace: namespace directory to use inside the cache
offline: whether to operate in offline mode
fallback: whether to use the cache as a fallback (try downloading first)
hash_name: whether to use a hash of the URL as cache file name
logger: an optional logger to receive download-related log messages
"""
def __init__(self, options=None, cache=-1, namespace=None,
offline=-1, fallback=False, hash_name=False, logger=None):
if options is None:
options = {}
self.directory = options.get('directory', '')
self.cache = cache
if cache == -1:
self.cache = options.get('download-cache')
self.namespace = namespace
self.offline = offline
if offline == -1:
self.offline = (options.get('offline') == 'true'
or options.get('install-from-cache') == 'true')
self.fallback = fallback
self.hash_name = hash_name
self.logger = logger or logging.getLogger('zc.buildout')
@property
def download_cache(self):
if self.cache is not None:
return realpath(os.path.join(self.directory, self.cache))
@property
def cache_dir(self):
if self.download_cache is not None:
return os.path.join(self.download_cache, self.namespace or '')
def __call__(self, url, md5sum=None, path=None):
"""Download a file according to the utility's configuration.
url: URL to download
md5sum: MD5 checksum to match
path: where to place the downloaded file
        Returns a (path, is_temp) tuple; is_temp is True when the caller
        gets a temporary file that it is responsible for removing.
"""
if self.cache:
local_path, is_temp = self.download_cached(url, md5sum)
else:
local_path, is_temp = self.download(url, md5sum, path)
return locate_at(local_path, path), is_temp
def download_cached(self, url, md5sum=None):
"""Download a file from a URL using the cache.
        This method assumes that the cache has been configured. It raises a
        ChecksumError if a cached copy of the file has an MD5 mismatch, but
        will not remove the copy in that case.
"""
if not os.path.exists(self.download_cache):
raise zc.buildout.UserError(
'The directory:\n'
'%r\n'
"to be used as a download cache doesn't exist.\n"
% self.download_cache)
cache_dir = self.cache_dir
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
cache_key = self.filename(url)
cached_path = os.path.join(cache_dir, cache_key)
self.logger.debug('Searching cache at %s' % cache_dir)
if os.path.exists(cached_path):
is_temp = False
if self.fallback:
try:
_, is_temp = self.download(url, md5sum, cached_path)
except ChecksumError:
raise
except Exception:
pass
if not check_md5sum(cached_path, md5sum):
raise ChecksumError(
'MD5 checksum mismatch for cached download '
'from %r at %r' % (url, cached_path))
self.logger.debug('Using cache file %s' % cached_path)
else:
self.logger.debug('Cache miss; will cache %s as %s' %
(url, cached_path))
_, is_temp = self.download(url, md5sum, cached_path)
return cached_path, is_temp
def download(self, url, md5sum=None, path=None):
"""Download a file from a URL to a given or temporary path.
An online resource is always downloaded to a temporary file and moved
to the specified path only after the download is complete and the
checksum (if given) matches. If path is None, the temporary file is
returned and the client code is responsible for cleaning it up.
"""
# Make sure the drive letter in windows-style file paths isn't
# interpreted as a URL scheme.
if re.match(r"^[A-Za-z]:\\", url):
url = 'file:' + url
parsed_url = urlparse(url, 'file')
url_scheme, _, url_path = parsed_url[:3]
if url_scheme == 'file':
self.logger.debug('Using local resource %s' % url)
if not check_md5sum(url_path, md5sum):
raise ChecksumError(
'MD5 checksum mismatch for local resource at %r.' %
url_path)
return locate_at(url_path, path), False
if self.offline:
raise zc.buildout.UserError(
"Couldn't download %r in offline mode." % url)
self.logger.info('Downloading %s' % url)
handle, tmp_path = tempfile.mkstemp(prefix='buildout-')
os.close(handle)
try:
tmp_path, headers = urlretrieve(url, tmp_path)
if not check_md5sum(tmp_path, md5sum):
raise ChecksumError(
'MD5 checksum mismatch downloading %r' % url)
except IOError:
e = sys.exc_info()[1]
os.remove(tmp_path)
raise zc.buildout.UserError("Error downloading extends for URL "
"%s: %s" % (url, e))
except Exception:
os.remove(tmp_path)
raise
if path:
shutil.move(tmp_path, path)
return path, False
else:
return tmp_path, True
def filename(self, url):
"""Determine a file name from a URL according to the configuration.
"""
if self.hash_name:
return md5(url.encode()).hexdigest()
else:
if re.match(r"^[A-Za-z]:\\", url):
url = 'file:' + url
parsed = urlparse(url, 'file')
url_path = parsed[2]
if parsed[0] == 'file':
while True:
url_path, name = os.path.split(url_path)
if name:
return name
if not url_path:
break
else:
for name in reversed(url_path.split('/')):
if name:
return name
url_host, url_port = parsed[-2:]
return '%s:%s' % (url_host, url_port)
def check_md5sum(path, md5sum):
"""Tell whether the MD5 checksum of the file at path matches.
No checksum being given is considered a match.
"""
if md5sum is None:
return True
f = open(path, 'rb')
checksum = md5()
try:
chunk = f.read(2**16)
while chunk:
checksum.update(chunk)
chunk = f.read(2**16)
return checksum.hexdigest() == md5sum
finally:
f.close()
def remove(path):
if os.path.exists(path):
os.remove(path)
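# locate_at places ``source`` at ``dest``: directories are copied as trees,
# while files are hard-linked when the platform allows it and copied
# otherwise.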
def locate_at(source, dest):
if dest is None or realpath(dest) == realpath(source):
return source
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
try:
os.link(source, dest)
except (AttributeError, OSError):
shutil.copyfile(source, dest)
return dest | zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/src/zc/buildout/download.py | download.py |
def patch_Distribution():
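    # Memoize Distribution.hashcmp on the instance: buildout compares and
    # sorts many distributions, and the stock property rebuilds its tuple on
    # every access.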
try:
from pkg_resources import Distribution
def hashcmp(self):
if hasattr(self, '_hashcmp'):
return self._hashcmp
else:
self._hashcmp = result = (
self.parsed_version,
self.precedence,
self.key,
self.location,
self.py_version or '',
self.platform or '',
)
return result
setattr(Distribution, 'hashcmp', property(hashcmp))
except ImportError:
return
patch_Distribution()
def patch_PackageIndex():
"""Patch the package index from setuptools.
Main goal: check the package urls on an index page to see if they are
compatible with the Python version.
"""
try:
import logging
logging.getLogger('pip._internal.index.collector').setLevel(logging.ERROR)
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.package_index import distros_for_url
try:
# pip 22.2+
from pip._internal.index.collector import IndexContent
except ImportError:
# pip 22.1-
from pip._internal.index.collector import HTMLPage as IndexContent
from pip._internal.index.collector import parse_links
from pip._internal.index.package_finder import _check_link_requires_python
from pip._internal.models.target_python import TargetPython
from pip._vendor import six
from pip._vendor.six.moves import urllib
PY_VERSION_INFO = TargetPython().py_version_info
# method copied over from setuptools 46.1.3
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
tmpl = "Download error on %s: %%s -- Some packages may not be found!"
f = self.open_url(url, tmpl % url)
if f is None:
return
if isinstance(f, urllib.error.HTTPError) and f.code == 401:
self.info("Authentication error: %s" % f.msg)
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
# --- LOCAL CHANGES MADE HERE: ---
if isinstance(page, six.text_type):
page = page.encode('utf8')
charset = 'utf8'
else:
if isinstance(f, urllib.error.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
try:
charset = f.headers.get_param('charset') or 'latin-1'
except AttributeError:
# Python 2
charset = f.headers.getparam('charset') or 'latin-1'
try:
content_type = f.getheader('content-type')
except AttributeError:
# On at least Python 2.7:
# addinfourl instance has no attribute 'getheader'
content_type = "text/html"
try:
# pip 22.2+
html_page = IndexContent(
page,
content_type=content_type,
encoding=charset,
url=base,
cache_link_parsing=False,
)
except TypeError:
try:
# pip 20.1-22.1
html_page = IndexContent(page, charset, base, cache_link_parsing=False)
except TypeError:
# pip 20.0 or older
html_page = IndexContent(page, charset, base)
# https://github.com/buildout/buildout/issues/598
# use_deprecated_html5lib is a required addition in pip 22.0/22.1
# and it is gone already in 22.2
try:
plinks = parse_links(html_page, use_deprecated_html5lib=False)
except TypeError:
plinks = parse_links(html_page)
plinks = list(plinks)
# --- END OF LOCAL CHANGES ---
if not isinstance(page, str):
                # On Python 3 we got bytes but want str.
page = page.decode(charset, "ignore")
f.close()
# --- LOCAL CHANGES MADE HERE: ---
for link in plinks:
if _check_link_requires_python(link, PY_VERSION_INFO):
self.process_url(link.url)
# --- END OF LOCAL CHANGES ---
if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
page = self.process_index(url, page)
setattr(PackageIndex, 'process_url', process_url)
except ImportError:
import logging
logger = logging.getLogger('zc.buildout.patches')
logger.warning(
'Requires-Python support missing and could not be patched into '
'zc.buildout. \n\n',
exc_info=True
)
return
patch_PackageIndex() | zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/src/zc/buildout/patches.py | patches.py |
# The following copied from Python 2 config parser because:
# - The py3 configparser isn't backward compatible
# - Both strip option values in undesirable ways
# - dict of dicts is a much simpler api
import re
import textwrap
import logging
try:
from packaging import markers
except ImportError:
try:
from pip._vendor.packaging import markers
except ImportError:
from pkg_resources.packaging import markers
logger = logging.getLogger('zc.buildout')
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
# This regex captures section headers with an optional trailing comment
# separated by a semicolon or a hash. Section headers can have an optional
# expression. Expressions and comments can contain brackets but no verbatim '#'
# and ';' : these need to be escaped.
# A title line with an expression has the general form:
# [section_name: some Python expression] #; some comment
# This regex leverages the fact that the following is a valid Python expression:
# [some Python expression] # some comment
# and that section headers are delimited by [brackets] that are also [list]
# delimiters.
# So instead of doing complex parsing to balance brackets in an expression, we
# capture just enough from a header line to collect and then remove the
# section_name and the colon separator, keeping only a list-enclosed expression
# and optional comments. The parsing and validation of this Python expression
# can be entirely delegated to Python's eval; the evaluated result is a
# single-item list whose only element is the value of the original expression.
section_header = re.compile(
r'(?P<head>\[)'
r'\s*'
r'(?P<name>[^\s#[\]:;{}]+)'
r'\s*'
r'(:(?P<expression>[^#;]*))?'
r'\s*'
r'(?P<tail>]'
r'\s*'
r'([#;].*)?$)'
).match
option_start = re.compile(
r'(?P<name>[^\s{}[\]=:]+\s*[-+]?)'
r'='
r'(?P<value>.*)$').match
leading_blank_lines = re.compile(r"^(\s*\n)+")
def parse(fp, fpname, exp_globals=dict):
"""Parse a sectioned setup file.
The sections in setup files contain a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
    options lines, indicated by `name = value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
The title line is in the form [name] followed by an optional trailing
comment separated by a semicolon `;' or a hash `#' character.
Optionally the title line can have the form `[name:expression]' where
expression is an arbitrary Python expression. Sections with an expression
    that evaluates to False are ignored. Semicolon `;' and hash `#' characters
must be string-escaped in expression literals.
exp_globals is a callable returning a mapping of defaults used as globals
during the evaluation of a section conditional expression.
"""
sections = {}
# the current section condition, possibly updated from a section expression
section_condition = True
context = None
cursect = None # None, or a dictionary
blockmode = None
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break # EOF
lineno = lineno + 1
if line[0] in '#;':
continue # comment
if line[0].isspace() and cursect is not None and optname:
if not section_condition:
                # skip section based on its expression condition
continue
# continuation line
if blockmode:
line = line.rstrip()
else:
line = line.strip()
if not line:
continue
cursect[optname] = "%s\n%s" % (cursect[optname], line)
else:
header = section_header(line)
if header:
# reset to True when starting a new section
section_condition = True
sectname = header.group('name')
head = header.group('head') # the starting [
expression = header.group('expression')
                tail = header.group('tail') # closing ] and comment
if expression:
# normalize tail comments to Python style
tail = tail.replace(';', '#') if tail else ''
# un-escape literal # and ; . Do not use a
# string-escape decode
expr = expression.replace(r'\x23','#').replace(r'\x3b', ';')
try:
# new-style markers as used in pip constraints, e.g.:
# 'python_version < "3.11" and platform_system == "Windows"'
marker = markers.Marker(expr)
section_condition = marker.evaluate()
except markers.InvalidMarker:
# old style buildout expression
# rebuild a valid Python expression wrapped in a list
expr = head + expr + tail
# lazily populate context only expression
if not context:
context = exp_globals()
# evaluated expression is in list: get first element
section_condition = eval(expr, context)[0]
# finally, ignore section when an expression
# evaluates to false
if not section_condition:
logger.debug(
'Ignoring section %(sectname)r with [expression]:'
' %(expression)r' % locals())
continue
if sectname in sections:
cursect = sections[sectname]
else:
sections[sectname] = cursect = {}
# So sections can't start with a continuation line
optname = None
elif cursect is None:
if not line.strip():
continue
# no section header in the file?
raise MissingSectionHeaderError(fpname, lineno, line)
else:
if line[:2] == '=>':
line = '<part-dependencies> = ' + line[2:]
mo = option_start(line)
if mo:
if not section_condition:
# filter out options of conditionally ignored section
continue
# option start line
optname, optval = mo.group('name', 'value')
optname = optname.rstrip()
optval = optval.strip()
cursect[optname] = optval
blockmode = not optval
elif not (optname or line.strip()):
# blank line after section start
continue
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
for sectname in sections:
section = sections[sectname]
for name in section:
value = section[name]
if value[:1].isspace():
section[name] = leading_blank_lines.sub(
'', textwrap.dedent(value.rstrip()))
return sections | zc.buildout | /zc.buildout-3.0.1.tar.gz/zc.buildout-3.0.1/src/zc/buildout/configparser.py | configparser.py |
import enum
import re
from typing import List
# https://jinja.palletsprojects.com/en/2.11.x/templates/#list-of-control-structures
# we ignore block assignments for simplicity
class JinjaStatement(str, enum.Enum):
For = 'for'
If = 'if'
Macro = 'macro'
Call = 'call'
Filter = 'filter'
jinja_statements = set(JinjaStatement)
end_block_statement = {
'endfor': JinjaStatement.For,
'endif': JinjaStatement.If,
'endmacro': JinjaStatement.Macro,
'endcall': JinjaStatement.Call,
'endfilter': JinjaStatement.Filter,
}
statement_re = re.compile(r'.*\{%[\-\+\s]*(?P<statement>[\w]+).*%\}')
expression_re = re.compile(r'\{\{.*?\}\}')
multiline_expression_start_re = re.compile(r'^\s*\{\{')
multiline_expression_end_re = re.compile(r'\}\}')
multiline_statement_start_re = re.compile(r'^\s*\{%')
multiline_statement_end_re = re.compile(r'%\}')
class JinjaParser:
"""A very simple jinja parser which allow skipping lines containing jinja blocks.
"""
# a replacement for jinja expressions, so that we
# can still parse them as buildout
jinja_value = "JINJA_EXPRESSION"
def __init__(self) -> None:
self.has_expression = False
self.is_in_comment = False
self.is_error = False
self._stack: List[JinjaStatement] = []
self._current_line_was_in_jinja = False
self._in_comment = False
self._in_multiline_expression = False
self._in_multiline_statement = False
self.line = ""
def feed(self, line: str) -> None:
"""Feeds a line and update the state.
"""
self._current_line_was_in_jinja = False
    expression_re_match = expression_re.search(line)
    self.has_expression = bool(expression_re_match)
if expression_re_match:
if expression_re_match.start() == 0 \
and expression_re_match.end() == len(line.strip()):
line = f'{self.jinja_value} = {self.jinja_value}'
else:
line = expression_re.sub(self.jinja_value, line)
self.line = line
self.is_error = False
if '{#' in line or self._in_comment:
self._current_line_was_in_jinja = True
self._in_comment = '#}' not in line
statement_match = statement_re.match(line)
if statement_match:
self._current_line_was_in_jinja = True
statement = statement_match.group('statement')
if statement in jinja_statements:
self._stack.append(JinjaStatement(statement))
elif statement in end_block_statement:
self.is_error = True
if self._stack:
popped = self._stack.pop()
self.is_error = end_block_statement[statement] != popped
if multiline_expression_start_re.match(
line) or self._in_multiline_expression:
self._current_line_was_in_jinja = True
self._in_multiline_expression = multiline_expression_end_re.search(
line) is None
if multiline_statement_start_re.match(
line) or self._in_multiline_statement:
self._current_line_was_in_jinja = True
self._in_multiline_statement = multiline_statement_end_re.search(
line) is None
@property
def is_in_jinja(self) -> bool:
return (bool(self._stack) or self._current_line_was_in_jinja) | zc.buildout.languageserver | /zc.buildout.languageserver-0.9.2-py3-none-any.whl/buildoutls/jinja.py | jinja.py |
import logging
from typing import List, Optional, Union
from lsprotocol.converters import get_converter
from lsprotocol.types import (
CodeAction,
CodeActionKind,
CodeActionParams,
Command,
TextEdit,
WorkspaceEdit,
)
from pygls.server import LanguageServer
from .commands import COMMAND_OPEN_PYPI_PAGE, COMMAND_UPDATE_MD5SUM
from .types import (
OpenPypiPageCommandParams,
PyPIPackageInfo,
UpdateMD5SumCommandParams,
)
logger = logging.getLogger(__name__)
converter = get_converter()
from . import buildout, pypi
pypi_client = pypi.PyPIClient()
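# getCodeActions offers three kinds of actions: opening the PyPI page of a
# pinned version, updating an md5sum option, and quick fixes that rewrite an
# outdated version pin using the diagnostic's attached PyPIPackageInfo.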
async def getCodeActions(
ls: LanguageServer,
params: CodeActionParams,
) -> Optional[List[Union[Command, CodeAction]]]:
current_line = params.range.start.line
code_actions: List[Union[Command, CodeAction]] = []
parsed = await buildout.open(ls, params.text_document.uri)
if not isinstance(parsed, buildout.BuildoutProfile):
return None
symbol = await parsed.getSymbolAtPosition(params.range.end)
if not symbol:
return None
if symbol.current_option_name is None or symbol.current_section_name is None:
return None
try:
value = parsed.resolve_value(
symbol.current_section_name,
symbol.current_option_name,
)
except KeyError:
return None
logger.debug(
"getting code actions resolved value=%s symbol=%s",
value,
symbol,
)
if symbol.current_section_name == 'versions' \
and symbol.current_option_name \
and symbol.current_option is not None:
url = pypi_client.get_home_page_url(
symbol.current_option_name,
symbol.current_option.value,
)
code_actions.append(
CodeAction(
title=f"View on pypi {url}",
command=Command(
title="View on pypi",
command=COMMAND_OPEN_PYPI_PAGE,
arguments=[OpenPypiPageCommandParams(url=url)],
),
), )
elif symbol.current_option_name in ("url", "md5sum") \
and "url" in symbol.current_section:
return [
CodeAction(
title="Update md5sum",
kind=CodeActionKind.QuickFix,
command=Command(
title="Update md5sum",
command=COMMAND_UPDATE_MD5SUM,
arguments=[
UpdateMD5SumCommandParams(
document_uri=params.text_document.uri,
section_name=symbol.current_section_name,
)
],
),
)
]
for diagnostic in params.context.diagnostics:
if diagnostic.data and diagnostic.range.start.line == current_line:
try:
package_info = converter.structure(diagnostic.data, PyPIPackageInfo)
except Exception:
        logger.debug(
"Unable to convert diagnostic data %s",
diagnostic.data,
exc_info=True,
)
return None
if package_info.latest_version:
edit = WorkspaceEdit(
changes={
params.text_document.uri: [
TextEdit(
range=diagnostic.range,
new_text=' ' + package_info.latest_version,
),
]
})
code_actions.insert(
0,
CodeAction(
title=f"Use version {package_info.latest_version}",
kind=CodeActionKind.QuickFix,
edit=edit,
is_preferred=True,
),
)
return code_actions | zc.buildout.languageserver | /zc.buildout.languageserver-0.9.2-py3-none-any.whl/buildoutls/code_actions.py | code_actions.py |
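
# Wiring sketch (illustrative; the actual feature registration lives in the
# server module, and the `server` name here is an assumption):
#
#   from lsprotocol.types import TEXT_DOCUMENT_CODE_ACTION
#
#   @server.feature(TEXT_DOCUMENT_CODE_ACTION)
#   async def code_action(ls: LanguageServer, params: CodeActionParams):
#     return await getCodeActions(ls, params)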
import hashlib
import time
import uuid
from lsprotocol.types import (
Location,
MessageType,
Position,
Range,
TextEdit,
WorkDoneProgressBegin,
WorkDoneProgressEnd,
WorkDoneProgressReport,
WorkspaceEdit,
)
from pygls.server import LanguageServer
from . import aiohttp_session, buildout
from .types import UpdateMD5SumCommandParams
async def update_md5sum(
ls: LanguageServer,
params: UpdateMD5SumCommandParams,
) -> None:
profile = await buildout.open(ls, params['document_uri'])
assert isinstance(profile, buildout.BuildoutProfile)
section = profile[params['section_name']]
url = profile.resolve_value(params['section_name'], "url")
token = str(uuid.uuid4())
await ls.progress.create_async(token)
ls.progress.begin(
token,
WorkDoneProgressBegin(
cancellable=True, # TODO actually support cancellation
title=f"Updating md5sum for {url}",
))
start = time.time()
m = hashlib.md5()
async with aiohttp_session.get_session().get(url) as resp:
if not resp.ok:
ls.show_message(
f"Could not update md5sum: {url} had status code {resp.status}",
MessageType.Error,
)
ls.progress.end(token, WorkDoneProgressEnd(kind='end'))
return
    # content-length may be absent; -1 is used as a sentinel, and the reported
    # progress percentage is clamped to 0 below in that case.
    download_total_size = int(resp.headers.get('content-length', '-1'))
downloaded_size = 0
async for chunk in resp.content.iter_chunked(2 << 14):
m.update(chunk)
downloaded_size += len(chunk)
elapsed_time = time.time() - start
percentage = (downloaded_size / download_total_size * 100)
ls.progress.report(
token,
WorkDoneProgressReport(
message=f"{percentage:0.2f}% in {elapsed_time:0.2f}s",
percentage=max(0, int(percentage)),
))
hexdigest = m.hexdigest()
if 'md5sum' in section:
md5sum_location = section['md5sum'].location
new_text = " " + hexdigest
else:
# if no md5sum option in profile, insert a line just below url
url_location = section['url'].location
md5sum_location = Location(
uri=url_location.uri,
range=Range(
start=Position(
line=url_location.range.start.line + 1,
character=0,
),
end=Position(
line=url_location.range.start.line + 1,
character=0,
),
),
)
new_text = f"md5sum = {hexdigest}\n"
ls.progress.end(token, WorkDoneProgressEnd())
ls.apply_edit(
WorkspaceEdit(
changes={
md5sum_location.uri:
[TextEdit(
range=md5sum_location.range,
new_text=new_text,
)]
})) | zc.buildout.languageserver | /zc.buildout.languageserver-0.9.2-py3-none-any.whl/buildoutls/md5sum.py | md5sum.py |
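
# Typical invocation sketch (illustrative), e.g. from a workspace/executeCommand
# handler for COMMAND_UPDATE_MD5SUM; the uri and section name are hypothetical:
#
#   await update_md5sum(
#       ls,
#       UpdateMD5SumCommandParams(
#           document_uri='file:///srv/project/software.cfg',
#           section_name='template',
#       ))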
import collections
import copy
import enum
import io
import logging
import os
import pathlib
import re
import textwrap
import urllib.parse
from typing import (
TYPE_CHECKING,
AsyncIterator,
Dict,
Iterator,
List,
Match,
Optional,
Set,
TextIO,
Tuple,
Union,
cast,
)
import aiohttp.client_exceptions
from lsprotocol.types import Location, Position, Range
from pygls.server import LanguageServer
from pygls.workspace import Document
from typing_extensions import TypeAlias
from zc.buildout.buildout import _buildout_default_options
from zc.buildout.configparser import (
MissingSectionHeaderError,
ParsingError,
leading_blank_lines,
option_start,
section_header,
)
from . import aiohttp_session, jinja, recipes
logger = logging.getLogger(__name__)
# Matches a reference like ${section:option}
# We also tolerate ${section: without option or the ending } to generate completions.
option_reference_re = re.compile(
r'\${(?P<section>[-a-zA-Z0-9 ._]*):(?P<option>[-a-zA-Z0-9 ._]*)')
# In this version, we don't tolerate the missing }
option_reference_strict_re = re.compile(
r'\${(?P<section>[-a-zA-Z0-9 ._]*):(?P<option>[-a-zA-Z0-9 ._]*)}')
# Matches an unterminated ${section:
section_reference_re = re.compile(r'.*\$\{(?P<section>[-a-zA-Z0-9 ._]*)[^:]*$')
# Matches an option definition, ie option = value in:
# [section]
# option = value
option_definition_re = re.compile(
r'^(?P<option>[^=]*)\s*=\s*(?P<option_value>.*)$')
# Matches a comment
comment_re = re.compile(r'.*[#;].*')
# Filenames of slapos instance profiles, which might be buildout profiles used as buildout templates
slapos_instance_profile_filename_re = re.compile(
r'.*\/instance[^\/]*\.cfg[^\/]*')
### type definitions ###
URI: TypeAlias = str
class ResolveError(Exception):
"""Error when resolving buildout
"""
class RecursiveIncludeError(ResolveError):
"""Loop in profile extensions.
"""
class RecursiveMacroError(ResolveError):
"""Loop in macros, like in ::
```
[a]
<=b
[b]
<=a
```
"""
class MissingExtendedSection(ResolveError):
"""Extending a non existing section::
```
[a]
<= not_exists
```
"""
class BuildoutOptionDefinition:
"""Option definition
Keep track of the current value as `value` and of the
`locations` where this value was defined. `values` is
a list of intermediate values for each of the locations.
`default_value` are default values that are not defined
in profiles, but are implicit, such as buildout default
values or sections added by slapos instance.
"""
def __init__(
self,
value: str,
location: Location,
default_value: bool = False,
):
self.locations: Tuple[Location, ...] = (location, )
self.values: Tuple[str, ...] = (value, )
self.default_values: Tuple[bool, ...] = (default_value, )
@property
def value(self) -> str:
return self.values[-1]
@property
def location(self) -> Location:
return self.locations[-1]
@property
def default_value(self) -> bool:
return self.default_values[-1]
def __repr__(self) -> str:
locations = ' '.join(
['{} {}'.format(l.uri, l.range) for l in self.locations])
return '{} ({})'.format(self.value, locations)
def overrideValue(self, value: str, location: Location) -> None:
"""Add a value to the list of values."""
self.values = self.values + (value, )
self.locations = self.locations + (location, )
self.default_values = self.default_values + (False, )
def updateValue(
self,
value: str,
location: Optional[Location] = None,
) -> None:
"""Replace the current value, used internally to clean up extra whitespaces."""
self.values = self.values[:-1] + (value, )
self.default_values = self.default_values[:-1] + (False, )
if location is not None:
self.locations = self.locations[:-1] + (location, )
def copy(self) -> 'BuildoutOptionDefinition':
copied = BuildoutOptionDefinition(self.value, self.locations[0])
copied.locations = self.locations
copied.values = self.values
copied.default_values = self.default_values
return copied
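
# Illustrative sketch of how a definition accumulates history when a profile
# overrides an option (the Location values loc_base/loc_software are
# hypothetical):
#
#   option = BuildoutOptionDefinition('1.0', loc_base)
#   option.overrideValue('2.0', loc_software)
#   option.value      # '2.0'  (the last value wins)
#   option.values     # ('1.0', '2.0')
#   option.locations  # (loc_base, loc_software)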
class _BuildoutSection(Dict[str, BuildoutOptionDefinition]):
"""Section of a buildout.
"""
def getRecipe(self) -> Optional[recipes.Recipe]:
recipe_option = self.get('recipe')
if recipe_option is not None:
return recipes.registry.get(recipe_option.value)
return None
if TYPE_CHECKING:
def copy(self) -> '_BuildoutSection':
...
# Inherit from OrderedDict so that we can instantiate BuildoutSection.
# Only do this at runtime, so that during typecheck we have proper types on dict
# methods.
if TYPE_CHECKING:
BuildoutSection = _BuildoutSection
else:
class BuildoutSection(
collections.OrderedDict,
_BuildoutSection,
):
pass
class SymbolKind(enum.Enum):
"""Types of symbols.
One of:
* ``SectionDefinition``: section in::
[section]
* ``BuildoutOptionKey``: option in::
[section]
option = value
* ``BuildoutOptionValue``: value in::
[section]
option = value
* ``SectionReference``: a specialised version of ``BuildoutOptionValue`` where
the position is on ``section`` from ``${section:option}``.
* ``OptionReference``: a specialised version of ``BuildoutOptionValue`` where
the position is on ``option`` from ``${section:option}``.
* ``Comment``: when inside a comment
"""
SectionDefinition = 0
BuildoutOptionKey = 1
BuildoutOptionValue = 2
SectionReference = 3
OptionReference = 4
Comment = 5
class Symbol:
"""A buildout symbol, can be of any SymbolKind
"""
def __init__(
self,
buildout: 'BuildoutProfile',
kind: SymbolKind,
value: str,
current_section_name: Optional[str] = None,
current_option_name: Optional[str] = None,
referenced_section_name: Optional[str] = None,
referenced_option_name: Optional[str] = None,
is_same_section_reference: bool = False,
):
self._buildout = buildout
self.kind = kind
self.value = value
self.current_section_name = current_section_name
self.current_option_name = current_option_name
self.referenced_section_name = referenced_section_name
self.referenced_option_name = referenced_option_name
self.is_same_section_reference = is_same_section_reference
def __repr__(self) -> str:
referenced = ""
if self.referenced_section_name:
referenced = f"referenced=${{{self.referenced_section_name}:{self.referenced_option_name}}}"
return (
f"<Symbol kind={self.kind} "
f"buildout={self._buildout.uri!r} "
f"value={self.value!r} "
f"current=${{{self.current_section_name}:{self.current_option_name}}} "
f"{referenced}>")
@property
def current_section(self) -> BuildoutSection:
assert self.current_section_name
return self._buildout[self.current_section_name]
@property
def current_option(self) -> Optional[BuildoutOptionDefinition]:
if self.current_option_name:
return self.current_section.get(self.current_option_name)
return None
@property
def current_section_recipe(self) -> Optional[recipes.Recipe]:
return self.current_section.getRecipe() if self.current_section else None
@property
def referenced_section(self) -> Optional[BuildoutSection]:
if self.referenced_section_name:
return self._buildout.get(self.referenced_section_name)
return None
@property
def referenced_section_recipe_name(self) -> Optional[str]:
if self.referenced_section:
recipe = self.referenced_section.get('recipe')
if recipe:
return (recipe.value)
return None
@property
def referenced_section_recipe(self) -> Optional[recipes.Recipe]:
referenced_section_recipe_name = self.referenced_section_recipe_name
if referenced_section_recipe_name:
return recipes.registry.get(referenced_section_recipe_name)
return None
@property
def referenced_option(self) -> Optional[BuildoutOptionDefinition]:
referenced_section = self.referenced_section
if referenced_section and self.referenced_option_name:
return referenced_section.get(self.referenced_option_name)
return None
class OptionReferenceSymbolWithPosition(Symbol):
"""An Symbol of kind OptionReference with ranges already calculated.
"""
section_range: Range
option_range: Range
class BuildoutTemplate:
"""A text document where ${}-style values can be substitued.
This also supports $${}-style substitutions.
"""
def __init__(
self,
uri: URI,
source: str,
buildout: 'BuildoutProfile',
second_level_buildout: Optional['BuildoutProfile'] = None,
):
self.uri = uri
self.source = source
# where the ${ substitution values are read
self.buildout = buildout
# where the $${ substitution values are read
self.second_level_buildout = second_level_buildout
def copy(self) -> 'BuildoutTemplate':
return self.__class__(
self.uri,
self.source,
self.buildout,
self.second_level_buildout,
)
def _getSymbolAtPosition(
self,
position: Position,
current_section_name: Optional[str] = None,
current_option_name: Optional[str] = None,
) -> Optional[Symbol]:
lines = self.source.splitlines()
# extract line for the position.
line = ''
if position.line < len(lines):
line = lines[position.line]
if comment_re.match(line[:position.character]):
return Symbol(kind=SymbolKind.Comment, buildout=self.buildout, value='')
line_offset = 0
remaining_line = line
while remaining_line:
remaining_line = line[line_offset:]
option_reference_match = option_reference_re.match(remaining_line)
section_reference_match = section_reference_re.match(remaining_line)
if option_reference_match:
logger.debug("got an option_reference_match %s",
option_reference_match)
referenced_buildout = self.buildout
if (option_reference_match.start() + line_offset > 0
and line[option_reference_match.start() + line_offset - 1] == '$'):
if self.second_level_buildout:
referenced_buildout = self.second_level_buildout
else:
return None
if (option_reference_match.start() <=
(position.character - line_offset) <=
option_reference_match.end()):
          # the position is in ${section:option}, find out whether it is in section or option
position_on_option = (line_offset + option_reference_match.start() +
len('${') +
len(option_reference_match.group('section'))
) < position.character
referenced_section_name = option_reference_match.group('section')
referenced_option_name = option_reference_match.group('option')
return Symbol(
kind=SymbolKind.OptionReference
if position_on_option else SymbolKind.SectionReference,
buildout=referenced_buildout,
value=referenced_option_name
if position_on_option else referenced_section_name,
current_section_name=current_section_name,
current_option_name=current_option_name,
referenced_section_name=referenced_section_name
or current_section_name,
is_same_section_reference=referenced_section_name == '',
referenced_option_name=referenced_option_name,
)
else:
logger.debug("option_reference_match was not in range, advancing")
line_offset += option_reference_match.start()
if section_reference_match:
logger.debug("got a section_reference_match %s",
section_reference_match)
referenced_buildout = self.buildout
if section_reference_match.span('section')[0] > 3 and remaining_line[
section_reference_match.span('section')[0] - 3] == '$':
if self.second_level_buildout:
referenced_buildout = self.second_level_buildout
else:
return None
if (section_reference_match.start('section') <=
(position.character - line_offset) <=
section_reference_match.end('section')):
referenced_section_name = section_reference_match.group('section')
return Symbol(
kind=SymbolKind.SectionReference,
buildout=referenced_buildout,
value=referenced_section_name,
current_section_name=current_section_name,
current_option_name=current_option_name,
referenced_section_name=referenced_section_name
or current_section_name,
is_same_section_reference=referenced_section_name == '',
)
else:
logger.debug("section_reference_match was not in range, advancing")
line_offset += section_reference_match.start()
line_offset += 1
return None
async def getSymbolAtPosition(self, position: Position) -> Optional[Symbol]:
"""Return the symbol at given position.
"""
return self._getSymbolAtPosition(position)
async def getAllOptionReferenceSymbols(
self) -> AsyncIterator[OptionReferenceSymbolWithPosition]:
"""Return all symbols of kind OptionReference in this profile.
"""
for lineno, line in enumerate(self.source.splitlines()):
if line and line[0] in '#;':
continue
for match in option_reference_re.finditer(line):
referenced_buildout = self.buildout
if match.start() > 0 and line[match.start() - 1] == '$':
if self.second_level_buildout:
referenced_buildout = self.second_level_buildout
else:
continue
symbol = OptionReferenceSymbolWithPosition(
buildout=referenced_buildout,
kind=SymbolKind.OptionReference,
value=match.string[slice(*match.span())],
referenced_section_name=match.group('section'),
referenced_option_name=match.group('option'),
is_same_section_reference=match.group('section') == '',
)
symbol.section_range = Range(
start=Position(
line=lineno,
character=match.start() + 2, # the ${ was captured
),
end=Position(
line=lineno,
character=match.end() - len(match.group('option')) - 1,
),
)
symbol.option_range = Range(
start=Position(
line=lineno,
character=match.end() - len(match.group('option')),
),
end=Position(
line=lineno,
character=match.end(),
),
)
yield symbol
class BuildoutProfile(Dict[str, BuildoutSection], BuildoutTemplate):
"""A parsed buildout file, without extends.
"""
def copy(self) -> 'BuildoutProfile':
copied = self.__class__(self.uri, self.source)
copied.section_header_locations = self.section_header_locations.copy()
copied.has_dynamic_extends = self.has_dynamic_extends
copied.has_jinja = self.has_jinja
for k, v in self.items():
copied[k] = v.copy()
return copied
def __init__(self, uri: URI, source: str):
BuildoutTemplate.__init__(
self,
uri=uri,
source=source,
buildout=self,
)
self.section_header_locations: Dict[str,
Location] = collections.OrderedDict()
"""The locations for each section, keyed by section names.
"""
self.has_dynamic_extends = False
"""Flag true if this resolved buildout had extends defined dynamically.
This only happens with SlapOS instance buildout which are templates of profiles.
"""
self.has_jinja = False
"""Flag true if this resolved buildout is a jinja template.
This only happens with SlapOS instance buildout which are templates of profiles.
"""
async def getTemplate(
self,
ls: LanguageServer,
uri: URI,
) -> Optional[BuildoutTemplate]:
"""Returns the template from this uri, if it is a template for this profile.
One exception is for profiles names software.cfg or buildout.cfg - we just assume
that the template is valid for these profiles. For other profiles, we check if
the profile really uses this template.
"""
# uri can be passed as relative or absolute. Let's build a set of absolute
# and relative uris.
uris = set((uri, ))
if not _isurl(uri):
base = self.uri[:self.uri.rfind('/')] + '/'
uri = urllib.parse.urljoin(base, uri)
uris.add(uri)
else:
assert uri.startswith('file://')
assert self.uri.startswith('file://')
uri_path = pathlib.Path(uri[len('file://'):])
uris.add(
str(
uri_path.relative_to(
pathlib.Path(self.uri[len('file://'):]).parent)))
document = ls.workspace.get_document(uri)
if not os.path.exists(document.path):
return None
for section_name, section_value in self.items():
recipe = section_value.getRecipe()
if recipe is not None:
for template_option_name in recipe.template_options:
template_option_value = section_value.get(template_option_name)
if template_option_value is not None:
template_option_value_uri = self.resolve_value(
section_name, template_option_name)
            # Normalize URI path, in case it contains double slashes, ./ or ..
template_option_value_parsed = urllib.parse.urlparse(
template_option_value_uri)
template_option_value_uri = urllib.parse.urlunparse(
template_option_value_parsed._replace(
path=os.path.normpath(template_option_value_parsed.path)))
if template_option_value_uri in uris:
if slapos_instance_profile_filename_re.match(uri):
# a slapos "buildout profile as a template"
slapos_instance_profile = await open(
ls,
uri,
allow_errors=True,
force_open_as_buildout_profile=True,
)
assert isinstance(slapos_instance_profile, BuildoutProfile)
slapos_instance_profile.second_level_buildout = slapos_instance_profile
slapos_instance_profile.buildout = self
return slapos_instance_profile
return BuildoutTemplate(
uri=uri,
source=document.source,
buildout=self,
)
return None
async def getSymbolAtPosition(self, position: Position) -> Optional[Symbol]:
"""Return the symbol at given position.
"""
lines = self.source.splitlines()
# parse until this line to find out what is the current section.
buildout_for_current_section = await _parse(
uri=self.uri,
fp=io.StringIO('\n'.join(lines[:position.line + 1])),
allow_errors=True,
)
current_section_name, current_section_value = \
buildout_for_current_section.popitem()
# find current option in current_section
current_option_name = None
for k, v in current_section_value.items():
for l in v.locations:
if (l.range.start.line <= position.line <= l.range.end.line):
current_option_name = k
break
logger.debug("current_section_name: %s current_option_name: %s",
current_section_name, current_option_name)
symbol = self._getSymbolAtPosition(
position,
current_section_name=current_section_name,
current_option_name=current_option_name,
)
if symbol is not None:
return symbol
# extract line for the position.
line = ''
if position.line < len(lines):
line = lines[position.line]
line_offset = 0
remaining_line = line
while remaining_line:
remaining_line = line[line_offset:]
logger.debug("trying line from %s >%s<", line_offset, remaining_line)
option_value_definition_match = option_definition_re.search(
remaining_line)
if line_offset == 0:
# we can be in the following cases (> denotes beginning of lines)
# - a section header
# >[section]
# - a single line option and value:
# >option = value
# - an option without value:
# >option
        # an empty option is also a valid case, but it is handled outside of
        # the `if remaining_line`
# - a value only, like in a multi line option. In this case we should
# have a leading space.
# > value
section_header_match = section_header(line) # reuse buildout's regexp
if section_header_match:
return Symbol(
kind=SymbolKind.SectionDefinition,
buildout=self,
value=section_header_match.group('name'),
current_section_name=section_header_match.group('name'),
)
if option_value_definition_match:
# Single line option and value. The position might be on option
# or value
logger.debug("got a option_definition_match %s",
option_value_definition_match)
if (option_value_definition_match.start() <=
(position.character - line_offset) <=
option_value_definition_match.end()):
option = option_value_definition_match.group('option')
option_value = option_value_definition_match.group('option_value')
# is the position on option or option value ?
position_on_option = position.character < (
line_offset + option_value_definition_match.start() +
len(option_value_definition_match.group('option')))
logger.debug(
"option_value_definition_match, position on option %s",
position_on_option)
return Symbol(
kind=SymbolKind.BuildoutOptionKey
if position_on_option else SymbolKind.BuildoutOptionValue,
buildout=self,
value=option.strip()
if position_on_option else option_value.strip(),
current_section_name=current_section_name,
current_option_name=current_option_name,
)
elif not (line.startswith(' ') or line.startswith('\t')):
# Option without value
if not line.startswith('['):
return Symbol(
kind=SymbolKind.BuildoutOptionKey,
buildout=self,
value=line.strip(),
current_section_name=current_section_name,
current_option_name=line.strip(),
)
else:
# Value only, like in a multi line option.
return Symbol(
kind=SymbolKind.BuildoutOptionValue,
buildout=self,
value=line.strip(),
current_section_name=current_section_name,
current_option_name=current_option_name,
)
line_offset += 1
if line_offset == 0:
# an empty line is also an option without value
return Symbol(
kind=SymbolKind.BuildoutOptionKey,
buildout=self,
value=line.strip(),
current_section_name=current_section_name,
current_option_name=line.strip(),
)
return None
async def getAllOptionReferenceSymbols(
self) -> AsyncIterator[OptionReferenceSymbolWithPosition]:
"""Return all symbols of kind OptionReference in this profile.
In a buildout profile, we also resolve the current section name
in ${:option}.
"""
async for symbol in super().getAllOptionReferenceSymbols():
if not symbol.referenced_section_name:
sap = await symbol._buildout.getSymbolAtPosition(
symbol.section_range.start)
assert sap is not None
symbol.referenced_section_name = sap.current_section_name
yield symbol
def getOptionValues(
self,
section_name: str,
option_name: str,
) -> Iterator[Tuple[str, Range]]:
"""Iterate on all values of an option
When we have:
```
[section]
value =
a
b
c
```
    the iterator yields `"a"`, `"b"`, `"c"` and the range of each value.
    With a single line option like:
    ```
    [section]
    value = a b c
    ```
    the iterator also yields `"a"`, `"b"`, `"c"`, each with the range of the
    corresponding token on the line.
    """
option: BuildoutOptionDefinition
option = self[section_name][option_name]
location = option.locations[-1]
if location.uri == self.uri:
start_line = location.range.start.line
lines = self.source.splitlines()[start_line:location.range.end.line + 1]
is_multi_line_option = len(lines) > 1
for line_offset, option_value_text in enumerate(lines):
if option_value_text and option_value_text[0] not in '#;':
start_character = 0
if option_value_text.startswith(option_name):
option_name_text, option_value_text = option_value_text.split(
'=', 1)
start_character += len(option_name_text) + 1
start_character += len(option_value_text) - len(
option_value_text.lstrip())
option_value_text = option_value_text.strip()
if option_value_text:
if is_multi_line_option:
yield (
option_value_text,
Range(
start=Position(
line=start_line + line_offset,
character=start_character,
),
end=Position(
line=start_line + line_offset,
character=start_character + len(option_value_text),
),
),
)
else:
for match in re.finditer(r'([^\s]+)', option_value_text):
yield (match.group(),
Range(
start=Position(
line=start_line + line_offset,
character=start_character + match.start(),
),
end=Position(
line=start_line + line_offset,
character=start_character + match.start() +
len(match.group()),
),
))
@staticmethod
def looksLikeBuildoutProfile(uri: URI) -> bool:
"""Check if this URI looks like a buildout profile URI.
"""
return (uri.endswith('.cfg') or uri.endswith('.cfg.in')
or uri.endswith('.cfg.j2') or uri.endswith('.cfg.jinja2'))
def resolve_value(self, section_name: str, option_name: str) -> str:
"""Get the value of an option, after substituting references.
If substitution is not possible, the original value is returned.
"""
def _get_section(section_name: str) -> BuildoutSection:
section = self[section_name]
if '<' in section:
macro = copy.copy(self[section['<'].value])
macro.update(**section)
return macro
return section
def _resolve_value(
section_name: str,
option_name: str,
value: str,
seen: Set[Tuple[str, str]],
) -> str:
if (section_name, option_name) in seen:
return value
seen.add((section_name, option_name))
def _sub(match: Match[str]) -> str:
referenced_section_name = match.group('section') or section_name
if referenced_section_name in self:
referenced_section = _get_section(referenced_section_name)
referenced_option = match.group('option')
if referenced_option in referenced_section:
return _resolve_value(
referenced_section_name,
referenced_option,
referenced_section[referenced_option].value,
seen,
)
return value
return option_reference_strict_re.sub(_sub, value)
return _resolve_value(
section_name,
option_name,
_get_section(section_name)[option_name].value,
set(),
)
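
  # Illustrative example (hypothetical profile):
  #
  #   [server]
  #   host = example.com
  #   url = https://${server:host}/
  #
  #   profile.resolve_value('server', 'url')  # -> 'https://example.com/'
  #
  # When a reference cannot be resolved (unknown section or option, or a
  # reference cycle), the value is returned unsubstituted.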
class ResolvedBuildout(BuildoutProfile):
"""A buildout where extends and section macros <= have been extended.
"""
if TYPE_CHECKING:
def copy(self) -> 'ResolvedBuildout':
...
### cache ###
# a cache of un-resolved buildouts by uri
_parse_cache: Dict[URI, BuildoutProfile] = {}
# a cache of resolved buildouts by uri. This is the cache that will be used for most operations
# such as completions, code actions etc.
_resolved_buildout_cache: Dict[URI, ResolvedBuildout] = {}
# a cache of resolved buildouts by tuple of uris. This is an intermediate cache used to quickly
# rebuild the cache from _resolved_buildout_cache, because the cache from _resolved_buildout_cache
# needs to be flushed each time the document at `uri` is modified. This cache is only flushed if
# ${buildout:extends} is modified.
_resolved_extends_cache: Dict[Tuple[URI, ...], BuildoutProfile] = {}
# a mapping of dependencies between extends, so that we can clear caches when
# a profile is modified.
_extends_dependency_graph: Dict[URI, Set[URI]] = collections.defaultdict(set)
def clearCache(uri: URI) -> None:
"""Clear all caches for uri.
This is to be called when the document is modified.
"""
logger.debug("Clearing cache for %s", uri)
_parse_cache.pop(uri, None)
_clearExtendCache(uri, set())
def _clearExtendCache(uri: URI, done: Set[URI]) -> None:
"""Clear the `extends` cache for URI.
This is to be called for all URIs extended by `uri`.
"""
if uri in done:
return
done.add(uri)
_resolved_buildout_cache.pop(uri, None)
for uris in list(_resolved_extends_cache):
if uri in uris:
_resolved_extends_cache.pop(uris, None)
logger.debug(
"Clearing extends cache for %s Dependencies: %s",
uri,
_extends_dependency_graph[uri],
)
for dependend_uri in _extends_dependency_graph[uri]:
_resolved_buildout_cache.pop(dependend_uri, None)
for dependend_uris in list(_resolved_extends_cache):
if dependend_uri in dependend_uris:
_resolved_extends_cache.pop(dependend_uris, None)
_clearExtendCache(dependend_uri, done)
_extends_dependency_graph[uri].clear()
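
# Invalidation sketch (illustrative, uris shortened): if a.cfg contains
# `extends = b.cfg`, then _extends_dependency_graph['b.cfg'] == {'a.cfg'} and:
#
#   clearCache('b.cfg')
#   # drops _parse_cache['b.cfg'] and the resolved caches of both 'b.cfg'
#   # and 'a.cfg'; the raw _parse_cache entry for 'a.cfg' is kept.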
### buildout copied & modified functions ###
_isurl = re.compile('([a-zA-Z0-9+.-]+)://').match
async def parse(
ls: LanguageServer,
uri: URI,
allow_errors: bool = True,
) -> BuildoutProfile:
"""
Parse a sectioned setup file and return a non-resolved buildout.
This is a wrapper over _parse which uses language server's workspace to access documents.
  Unlike upstream buildout's parser, the returned value is a BuildoutProfile instance.
"""
try:
return _parse_cache[uri].copy()
except KeyError:
pass
parsed_uri = urllib.parse.urlparse(uri)
if parsed_uri.scheme in (
'http',
'https',
):
try:
async with aiohttp_session.get_session().get(uri) as resp:
resp.raise_for_status()
fp = io.StringIO(await resp.text())
except aiohttp.client_exceptions.ClientError:
logger.warning('Error parsing from uri %s', uri, exc_info=True)
fp = io.StringIO('')
else:
document = ls.workspace.get_document(uri)
try:
fp = io.StringIO(document.source)
except IOError:
if not allow_errors:
raise
fp = io.StringIO('')
parsed = await _parse(
fp,
uri,
allow_errors,
)
_parse_cache[uri] = parsed
return parsed.copy()
async def _parse(
fp: TextIO,
uri: URI,
allow_errors: bool,
) -> BuildoutProfile:
"""Parse a sectioned setup file and return a non-resolved buildout.
  This is equivalent to buildout's zc.buildout.configparser.parse, patched here
  with the following differences:
  - allow parsing with errors
  - keep track of options overloaded in the same file
  - record the line numbers
  - don't execute section conditions
  - return ordered dicts in the same order as the input text
  - optionally resolve extends directly here
  - ignore jinja contexts
  The returned value is a BuildoutProfile instance.
"""
sections = BuildoutProfile(uri, fp.read())
fp.seek(0)
# buildout default values
sections['buildout'] = BuildoutSection()
for k, v in _buildout_default_options.items():
if isinstance(v, tuple):
value = v[0] # buildout < 2.9.3
else:
value = v.value
sections['buildout'][k] = BuildoutOptionDefinition(
value=value,
location=Location(uri=uri,
range=Range(start=Position(line=0, character=0),
end=Position(line=0, character=0))),
default_value=True,
)
sections['buildout']['directory'] = BuildoutOptionDefinition(
value='.',
location=Location(uri=uri,
range=Range(start=Position(line=0, character=0),
end=Position(line=0, character=0))),
default_value=True,
)
sections.section_header_locations['buildout'] = Location(
uri="",
range=Range(
start=Position(line=0, character=0),
end=Position(line=0, character=0),
),
)
if slapos_instance_profile_filename_re.match(uri):
# Add slapos instance generated sections.
sections.section_header_locations.setdefault(
'slap-connection',
Location(uri='',
range=Range(start=Position(line=0, character=0),
end=Position(line=0, character=0))))
slap_connection = BuildoutSection()
for k in (
'computer-id',
'partition-id',
'server-url',
'key-file',
'cert-file',
'software-release-url',
):
slap_connection[k] = BuildoutOptionDefinition(
value='',
location=Location(uri=uri,
range=Range(start=Position(line=0, character=0),
end=Position(line=0, character=0))),
default_value=True,
)
sections.setdefault('slap-connection', slap_connection)
sections.section_header_locations.setdefault(
'slap-network-information',
Location(uri='',
range=Range(start=Position(line=0, character=0),
end=Position(line=0, character=0))))
slap_network_information = BuildoutSection()
for k in (
'local-ipv4',
'global-ipv6',
'network-interface',
'tap-ipv4',
'tap-gateway',
'tap-netmask',
'tap-network',
'global-ipv4-network',
):
slap_network_information[k] = BuildoutOptionDefinition(
value='',
location=Location(uri=uri,
range=Range(start=Position(line=0, character=0),
end=Position(line=0, character=0))),
default_value=True,
)
sections.setdefault('slap-network-information', slap_network_information)
jinja_parser = jinja.JinjaParser()
cursect: Optional[Dict[str, BuildoutOptionDefinition]] = None
blockmode = False
optname: Optional[str] = None
lineno = -1
e: Optional[ParsingError] = None
while True:
line = fp.readline()
if not line:
break # EOF
lineno = lineno + 1
jinja_parser.feed(line)
if jinja_parser.is_in_jinja:
sections.has_jinja = True
continue
line = jinja_parser.line
if line[0] in '#;':
continue # comment
if line[0].isspace() and (cursect is not None) and optname:
_line = line
# continuation line
if blockmode:
line = line.rstrip()
else:
line = line.strip()
if not line:
continue
assert cursect is not None
assert optname is not None
option_def = cursect[optname]
# update current option in case of multi line option
option_def.updateValue(
value=("%s\n%s" % (option_def.value, line)),
location=Location(
uri=option_def.location.uri,
range=Range(
start=option_def.location.range.start,
end=Position(line=lineno, character=len(_line) - 1),
),
),
)
cursect[optname] = option_def
else:
header = section_header(line)
if header:
sectname = header.group('name')
sections.section_header_locations[sectname] = Location(
uri=uri,
range=Range(
start=Position(line=lineno, character=0),
end=Position(line=lineno + 1, character=0),
))
if sectname in sections:
cursect = sections[sectname]
else:
sections[sectname] = cursect = BuildoutSection()
# initialize buildout default options
cursect['_buildout_section_name_'] = BuildoutOptionDefinition(
location=Location(uri=uri,
range=Range(start=Position(line=0,
character=0),
end=Position(line=0,
character=0))),
value=sectname,
default_value=True,
)
# _profile_base_location_ is a slapos.buildout extension
base_location = '.'
if '/' in uri:
base_location = uri[:uri.rfind('/')] + '/'
cursect['_profile_base_location_'] = BuildoutOptionDefinition(
location=Location(uri=uri,
range=Range(start=Position(line=0,
character=0),
end=Position(line=0,
character=0))),
value=base_location,
default_value=True,
)
# So sections can't start with a continuation line
optname = None
elif cursect is None:
if not line.strip():
continue
# no section header in the file?
if allow_errors:
continue
raise MissingSectionHeaderError(uri, lineno, line)
else:
if line[:2] == '=>':
line = '<part-dependencies> = ' + line[2:]
mo = option_start(line)
if mo:
# option start line
optname, optval = mo.group('name', 'value')
assert optname
optname = optname.rstrip()
optval = optval.strip()
optlocation = Location(
uri=uri,
range=Range(
start=Position(
line=lineno,
character=len(mo.groups()[0]) + 1,
),
end=Position(
line=lineno,
character=len(line) - 1,
),
),
)
if optname in cursect:
option_def = cursect[optname]
option_def.overrideValue(optval, optlocation)
else:
option_def = BuildoutOptionDefinition(value=optval,
location=optlocation)
cursect[optname] = option_def
blockmode = not optval
elif not (optname or line.strip()):
# blank line after section start
continue
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(uri)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e and not allow_errors:
raise e
# normalize spaces
for section in sections.values():
for name in section:
value = section[name].value
if value[:1].isspace():
section[name].updateValue(
leading_blank_lines.sub('', textwrap.dedent(value.rstrip())))
return sections
async def getProfileForTemplate(
ls: LanguageServer,
document: Document,
) -> Optional[URI]:
"""Find the profile for template.
For example when there's a buildout.cfg containing:
[template]
recipe = collective.recipe.template
input = template.in
output = template
when called with `uri` template.in, this function would return `buildout.cfg`.
"""
uri = document.uri
def getCandidateBuildoutProfiles() -> Iterator[pathlib.Path]:
path = pathlib.Path(document.path).parent
for _ in range(3): # look for buildouts up to 3 levels
# we sort just to have stable behavior
for profile in sorted(path.glob('*.cfg')):
yield profile
path = path.parent
if slapos_instance_profile_filename_re.match(
uri) or not uri.endswith('.cfg'):
for buildout_path in getCandidateBuildoutProfiles():
resolved_path = str(buildout_path.resolve())
# For paths in workspace, we don't use buildout_path.resolve().as_uri(),
# because we have fake uri -> path mapping in tests
if resolved_path.startswith(ls.workspace.root_path):
buildout_uri = resolved_path.replace(
ls.workspace.root_path,
ls.workspace.root_uri,
1,
)
else:
# but we still need to support the case where the path is outside the workspace
buildout_uri = buildout_path.resolve().as_uri()
logger.debug("Trying to find templates's buildout with %s -> %s",
buildout_path, buildout_uri)
buildout = await _open(
ls,
'',
buildout_uri,
[],
allow_errors=True,
)
assert isinstance(buildout, BuildoutProfile)
template = await buildout.getTemplate(ls, uri)
if template is not None:
return buildout_uri
return None
async def open(
ls: LanguageServer,
uri: URI,
allow_errors: bool = True,
force_open_as_buildout_profile: bool = False,
) -> Optional[Union[BuildoutTemplate, ResolvedBuildout]]:
"""Open an URI and returnes either a buildout or a profile connected to buildout.
In the case of slapos buildout templates (instance.cfg.in), it is both. This is
not true for slapos buildout templates as jinja templates, which have their own
namespace as ${} and not as $${}.
force_open_as_buildout_profile is used to force assuming that this file is a
buildout profile (and not a buildout template).
  For buildout profiles, it is a wrapper over _open which uses the language
  server's workspace to access documents.
"""
document = ls.workspace.get_document(uri)
logger.debug("open %s", uri)
if not force_open_as_buildout_profile:
# First, try to read as a template, because buildout profiles can be templates.
buildout_uri = await getProfileForTemplate(ls, document)
if buildout_uri is not None:
buildout = await _open(
ls,
'',
buildout_uri,
[],
allow_errors=allow_errors,
)
return await buildout.getTemplate(ls, uri)
if BuildoutProfile.looksLikeBuildoutProfile(
uri) or force_open_as_buildout_profile:
return await _open(ls, '', uri, [], allow_errors=allow_errors)
return None
async def _open(
ls: LanguageServer,
base: str,
uri: URI,
seen: List[str],
allow_errors: bool,
) -> ResolvedBuildout:
"""Open a configuration file and return the result as a dictionary,
Recursively open other files based on buildout options found.
This is equivalent of zc.buildout.buildout._open
"""
logger.debug("_open %r %r", base, uri)
if not _isurl(uri):
assert base
uri = urllib.parse.urljoin(base, uri)
try:
return _resolved_buildout_cache[uri].copy()
except KeyError:
pass
base = uri[:uri.rfind('/')] + '/'
if uri in seen:
if allow_errors:
return ResolvedBuildout(uri, '')
raise RecursiveIncludeError("Recursive file include", seen, uri)
seen.append(uri)
profile = await parse(ls, uri, allow_errors=allow_errors)
extends_option = profile['buildout'].pop(
'extends', None) if 'buildout' in profile else None
result = profile
has_dynamic_extends = False
has_jinja = profile.has_jinja
if extends_option:
extends = extends_option.value.split()
has_dynamic_extends = (jinja.JinjaParser.jinja_value in extends) or any(
option_reference_re.match(extended_profile)
for extended_profile in extends)
if extends:
# buildout:extends, as absolute URI that we can use as cache key
absolute_extends: Tuple[URI, ...] = tuple(
urllib.parse.urljoin(base, x) for x in extends)
if absolute_extends in _resolved_extends_cache:
logger.debug("_open %r was in cache", absolute_extends)
eresult = _resolved_extends_cache[absolute_extends]
else:
eresult = await _open(ls, base, extends.pop(0), seen, allow_errors)
for fname in extends:
has_dynamic_extends = has_dynamic_extends or eresult.has_dynamic_extends
has_jinja = has_jinja or eresult.has_jinja
eresult = _update(eresult, await _open(ls, base, fname, seen,
allow_errors))
for absolute_extend in absolute_extends:
_extends_dependency_graph[absolute_extend].add(uri)
if not has_dynamic_extends:
_resolved_extends_cache[absolute_extends] = eresult
result = _update(eresult, profile)
seen.pop()
for section_name, options in result.items():
if '<' in options:
try:
result[section_name] = _do_extend_raw(
section_name,
options,
result,
[],
)
except ResolveError:
# this happens with non top-level buildout
pass
result.has_dynamic_extends = has_dynamic_extends
result.has_jinja = has_jinja
resolved = cast(ResolvedBuildout, result)
_resolved_buildout_cache[uri] = resolved
return resolved.copy()
def _update_section(
s1: BuildoutSection,
s2: BuildoutSection,
) -> BuildoutSection:
"""Update s1 with values from s2.
"""
s1 = s1.copy()
for k, v in s2.items():
if k == '_profile_base_location_':
continue
if k.endswith('-'):
k = k.rstrip(' -')
      # Look up the option in s2 first; it may have been set by a += operation earlier
option_def = s2.get(k, s1.get(k, v))
new_option_def = option_def.copy()
new_option_def.overrideValue(
          # same logic as SectionKey.removeFromValue
value='\n'.join(new_v for new_v in option_def.value.split('\n')
if new_v not in v.value.split('\n')),
location=v.locations[-1])
s1[k] = new_option_def
elif k.endswith('+'):
k = k.rstrip(' +')
# Find v1 in s2 first; it may have been defined locally too.
option_def = s2.get(k, s1.get(k, v))
option_values = [] if option_def.default_value else option_def.value.split(
'\n')
new_option_def = option_def.copy()
new_option_def.overrideValue(
          # same logic as SectionKey.addToValue
value='\n'.join(option_values + v.value.split('\n')),
location=v.location)
s1[k] = new_option_def
else:
if k in s1 and (v.location != s1[k].location):
if not v.default_value:
new_option_def = s1[k].copy()
new_option_def.overrideValue(v.value, v.location)
s1[k] = new_option_def
else:
s1[k] = v
return s1
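
# Merge semantics sketch (illustrative): with s1 from a base profile and s2
# from an extending profile:
#
#   s1           s2
#   opt = a      opt += b   ->  opt == 'a\nb'
#   opt = a      opt -= a   ->  opt == ''
#   opt = a      opt = b    ->  opt == 'b' (both values kept in the history)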
def _update(d1: BuildoutProfile, d2: BuildoutProfile) -> BuildoutProfile:
"""update d1 with values from d2
"""
d1 = d1.copy()
d1.uri = d2.uri
d1.source = d2.source
for section in d2:
d1.section_header_locations[section] = d2.section_header_locations[section]
if section in d1:
d1[section] = _update_section(d1[section], d2[section])
else:
d1[section] = d2[section]
return d1
def _do_extend_raw(
name: str,
section: BuildoutSection,
buildout: BuildoutProfile,
doing: List[str],
) -> BuildoutSection:
"""Extends macros:
[macro]
[user]
<= macro
this is zc.buildout.buildout.Option._do_extend_raw
"""
if name == 'buildout':
return section
if name in doing:
raise RecursiveMacroError("Infinite extending loop %r" % name)
doing.append(name)
try:
to_do = section.get('<', None)
if to_do is None:
return section
result = BuildoutSection()
for iname in to_do.value.split('\n'):
iname = iname.strip()
if not iname:
continue
raw = buildout.get(iname)
if raw is None:
raise MissingExtendedSection("No section named %r" % iname)
result.update({
k: v.copy()
for (k, v) in _do_extend_raw(iname, raw, buildout, doing).items()
})
result = _update_section(result, section)
result.pop('<', None)
return result
finally:
assert doing.pop() == name | zc.buildout.languageserver | /zc.buildout.languageserver-0.9.2-py3-none-any.whl/buildoutls/buildout.py | buildout.py |
import asyncio
import logging
import re
import urllib.parse
from typing import AsyncIterable, Awaitable, List, Optional, Set, Tuple
import packaging
from lsprotocol.types import (
Diagnostic,
DiagnosticRelatedInformation,
DiagnosticSeverity,
DiagnosticTag,
Position,
Range,
)
from pygls.server import LanguageServer
from zc.buildout.configparser import MissingSectionHeaderError, ParsingError
from . import buildout, jinja, pypi, types
logger = logging.getLogger(__name__)
_profile_base_location_re = re.compile(
r'\$\{([-a-zA-Z0-9 ._]*):_profile_base_location_\}')
# imported as a module-level name so that it can be patched in unit tests
from os.path import exists as os_path_exists
pypi_client = pypi.PyPIClient()
async def getDiagnostics(
ls: LanguageServer,
uri: str,
) -> AsyncIterable[Diagnostic]:
parsed = None
if buildout.BuildoutProfile.looksLikeBuildoutProfile(uri):
# parse errors
try:
parsed = await buildout.parse(
ls=ls,
uri=uri,
allow_errors=False,
)
except ParsingError as e:
if e.filename != uri:
logger.debug("skipping error in external file %s", e.filename)
elif isinstance(e, MissingSectionHeaderError):
yield Diagnostic(
message=e.message,
range=Range(
start=Position(line=e.lineno, character=0),
end=Position(line=e.lineno + 1, character=0),
),
source="buildout",
severity=DiagnosticSeverity.Error,
)
else:
for (lineno, _), msg in zip(e.errors, e.message.splitlines()[1:]):
msg = msg.split(":", 1)[1].strip()
yield Diagnostic(
message=f"ParseError: {msg}",
range=Range(
start=Position(line=lineno, character=0),
end=Position(line=lineno + 1, character=0),
),
source="buildout",
severity=DiagnosticSeverity.Error,
)
resolved_buildout = await buildout.open(
ls=ls,
uri=uri,
)
assert resolved_buildout is not None
  # all these checks cannot be performed on a buildout profile with dynamic
  # extends, since we don't know what is in the dynamic profile.
has_dynamic_extends = (isinstance(resolved_buildout,
buildout.BuildoutProfile)
and resolved_buildout.has_dynamic_extends)
has_jinja = (isinstance(resolved_buildout, buildout.BuildoutProfile)
and resolved_buildout.has_jinja)
if not has_dynamic_extends and not has_jinja:
installed_parts: Set[str] = set([])
if isinstance(resolved_buildout, buildout.BuildoutProfile):
if "parts" in resolved_buildout["buildout"]:
installed_parts = set(
(v[0]
for v in resolved_buildout.getOptionValues("buildout", "parts")))
async for symbol in resolved_buildout.getAllOptionReferenceSymbols():
if symbol.referenced_section is None:
yield Diagnostic(
message=
f"Section `{symbol.referenced_section_name}` does not exist.",
range=symbol.section_range,
source="buildout",
severity=DiagnosticSeverity.Error,
)
elif symbol.referenced_option is None:
# if we have a recipe, either it's a known recipe where we know
# all options that this recipe can generate, or it's an unknown
# recipe and in this case we assume it's OK.
if (symbol.referenced_section_recipe_name is not None
and symbol.referenced_section_recipe is None) or (
symbol.referenced_section_recipe is not None and
(symbol.referenced_section_recipe.any_options
or symbol.referenced_option_name
in symbol.referenced_section_recipe.generated_options)):
continue
# if a section is a macro, it's OK to self reference ${:missing}
if (symbol.is_same_section_reference
and symbol.current_section_name not in installed_parts):
continue
yield Diagnostic(
message=
f"Option `{symbol.referenced_option_name}` does not exist in `{symbol.referenced_section_name}`.",
range=symbol.option_range,
source="buildout",
severity=DiagnosticSeverity.Warning,
)
if isinstance(resolved_buildout, buildout.BuildoutProfile):
for section_name, section in resolved_buildout.items():
if (section_name in installed_parts
and resolved_buildout.section_header_locations[section_name].uri
== uri):
# check for required options
recipe = section.getRecipe()
if recipe:
missing_required_options = recipe.required_options.difference(
section.keys())
if missing_required_options:
missing_required_options_text = ", ".join(
["`{}`".format(o) for o in missing_required_options])
yield Diagnostic(
message=
f"Missing required options for `{recipe.name}`: {missing_required_options_text}",
range=resolved_buildout.
section_header_locations[section_name].range,
source="buildout",
severity=DiagnosticSeverity.Error,
)
        # Hint when options are redefined, using "Information" severity when an
        # option is redefined with the same value.
for option_name, option in section.items():
if option.locations[-1].uri != uri:
continue
if jinja.JinjaParser.jinja_value in (option_name, option.value):
continue
          # Expand ${:_profile_base_location_} before comparing, because this
          # option is dynamic per profile: redefining an option from another
          # profile with the same literal ${:_profile_base_location_} reference
          # should not be considered as redefining to the same value.
if len(option.locations) > 1:
is_same_value = (_profile_base_location_re.sub(
option.locations[-1].uri,
option.values[-1],
) == _profile_base_location_re.sub(
option.locations[-2].uri,
option.values[-2],
))
related_information = []
reported_related_location = set()
overriding_default_value = False
for other_location, other_value, other_is_default_value in zip(
option.locations,
option.values,
option.default_values,
):
if other_is_default_value:
overriding_default_value = True
hashable_location = (
other_location.uri,
other_location.range.start.line,
)
if hashable_location in reported_related_location:
continue
reported_related_location.add(hashable_location)
related_information.append(
DiagnosticRelatedInformation(
location=other_location,
message=f"default value: `{other_value}`"
if other_is_default_value else f"value: `{other_value}`",
))
if is_same_value:
yield Diagnostic(
message=
f"`{option_name}` already has value `{option.value}`.",
range=option.locations[-1].range,
source="buildout",
severity=DiagnosticSeverity.Information,
related_information=related_information,
tags=[DiagnosticTag.Unnecessary])
elif not overriding_default_value:
yield Diagnostic(
message=f"`{option_name}` overrides an existing value.",
range=option.locations[-1].range,
source="buildout",
severity=DiagnosticSeverity.Hint,
related_information=related_information,
)
jinja_parser = jinja.JinjaParser()
if parsed is not None and "extends" in parsed["buildout"]:
for extend_filename, extend_range in parsed.getOptionValues(
"buildout", "extends"):
if extend_filename.startswith("${"):
continue # assume substitutions are OK
jinja_parser.feed(extend_filename)
if jinja_parser.is_in_jinja:
continue # ignore anything in jinja context
if buildout._isurl(extend_filename):
continue
base = uri[:uri.rfind('/')] + '/'
if not os_path_exists(
urllib.parse.urlparse(urllib.parse.urljoin(
base, extend_filename)).path):
yield Diagnostic(
message=f"Extended profile `{extend_filename}` does not exist.",
range=extend_range,
source="buildout",
severity=DiagnosticSeverity.Error,
)
if "parts" in resolved_buildout["buildout"]:
for part_name, part_range in resolved_buildout.getOptionValues(
"buildout", "parts"):
if part_name:
if part_name.startswith("${"):
continue # assume substitutions are OK
jinja_parser.feed(part_name)
if jinja_parser.is_in_jinja:
continue # ignore anything in jinja context
if part_name not in resolved_buildout:
if not resolved_buildout.has_dynamic_extends:
yield Diagnostic(
message=f"Section `{part_name}` does not exist.",
range=part_range,
source="buildout",
severity=DiagnosticSeverity.Error,
)
elif "recipe" not in resolved_buildout[part_name]:
yield Diagnostic(
message=f"Section `{part_name}` has no recipe.",
range=part_range,
source="buildout",
severity=DiagnosticSeverity.Error,
)
if resolved_buildout.get('versions'):
sem = asyncio.Semaphore(4)
package_version_options: List[Tuple[
str, str, buildout.BuildoutOptionDefinition]] = []
known_vulnerabilities_coros: List[Awaitable[Tuple[
types.KnownVulnerability, ...]]] = []
latest_version_coros: List[Awaitable[Optional[
packaging.version.Version]]] = []
for package_name, option in resolved_buildout['versions'].items():
if option.location.uri != uri:
continue
if package_name in (
'_buildout_section_name_',
'_profile_base_location_',
):
continue
package_version = option.value
# handle some slapos markers in versions
if package_version.endswith(':whl'):
package_version = package_version[:-4]
if "+slapos" in package_version.lower():
continue
logger.debug(
'Found package %s at version %s @ %s',
package_name,
package_version,
option.location,
)
package_version_options.append(
(package_name, package_version, option))
known_vulnerabilities_coros.append(
pypi_client.get_known_vulnerabilities(
package_name,
package_version,
sem,
))
latest_version_coros.append(
pypi_client.get_latest_version(
package_name,
package_version,
sem,
))
logger.debug('gathering %d known vulnerabilities',
len(known_vulnerabilities_coros))
known_vulnerabilities_results = await asyncio.gather(
*known_vulnerabilities_coros, return_exceptions=True)
logger.debug('gathered %s', known_vulnerabilities_results)
logger.debug('gathering %d latest versions', len(latest_version_coros))
latest_version_results = await asyncio.gather(*latest_version_coros,
return_exceptions=True)
logger.debug('gathered %s', latest_version_results)
for (
(package_name, package_version, option),
known_vulnerabilities,
latest_version,
) in zip(
package_version_options,
known_vulnerabilities_results,
latest_version_results,
):
if isinstance(latest_version, types.ProjectNotFound):
yield Diagnostic(
message=f'Project {package_name} does not exist',
range=option.location.range,
source="buildout",
severity=DiagnosticSeverity.Warning,
data=types.PyPIPackageInfo(
latest_version='',
url=pypi_client.get_home_page_url(
package_name,
package_version,
),
known_vulnerabilities=[],
),
)
continue
elif isinstance(known_vulnerabilities, types.VersionNotFound):
yield Diagnostic(
message=
f'Version {package_version} does not exist for {package_name}',
range=option.location.range,
source="buildout",
severity=DiagnosticSeverity.Warning,
data=types.PyPIPackageInfo(
latest_version=str(latest_version),
url=pypi_client.get_home_page_url(
package_name,
package_version,
),
known_vulnerabilities=[],
),
)
continue
elif isinstance(known_vulnerabilities, BaseException) or isinstance(
latest_version, BaseException):
logger.error(
'error with %s %s: %s / %s',
package_name,
package_version,
known_vulnerabilities,
latest_version,
)
continue
if latest_version:
severity = DiagnosticSeverity.Hint
message = f"Newer version available ({latest_version})"
if known_vulnerabilities:
message = f'{package_name} {package_version} has some known vulnerabilities:\n' + '\n\n'.join(
f"{v.id}\n{v.details}\n{v.link}"
for v in known_vulnerabilities)
severity = DiagnosticSeverity.Warning
yield Diagnostic(
message=message,
range=option.location.range,
source="buildout",
severity=severity,
data=types.PyPIPackageInfo(
latest_version=str(latest_version),
url=pypi_client.get_home_page_url(
package_name,
package_version,
),
known_vulnerabilities=known_vulnerabilities,
),
) | zc.buildout.languageserver | /zc.buildout.languageserver-0.9.2-py3-none-any.whl/buildoutls/diagnostic.py | diagnostic.py |
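
# Publishing sketch (illustrative; the actual wiring lives in the server
# module):
#
#   diagnostics = [d async for d in getDiagnostics(ls, uri)]
#   ls.publish_diagnostics(uri, diagnostics)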
from typing import Dict, Optional, Sequence, Set
class RecipeOption:
"""A Recipe option.
"""
def __init__(
self,
documentation: str = "",
valid_values: Sequence[str] = (),
deprecated: Optional[str] = "",
):
self.documentation = documentation
self.valid_values = valid_values
"""Possible values. If this is empty, it means no constraint on values.
"""
self.deprecated = deprecated
"""Reason for the option to be deprected, if it is deprecated.
"""
class Recipe:
"""Information about a recipe.
"""
def __init__(
self,
name: str = "",
description: str = "",
url: str = "",
options: Optional[Dict[str, RecipeOption]] = None,
generated_options: Optional[Dict[str, RecipeOption]] = None,
required_options: Sequence[str] = (),
template_options: Sequence[str] = (),
any_options: bool = False,
):
self.name = name
self.description = description
self.url = url
self.options: Dict[str, RecipeOption] = options or {}
self.generated_options = generated_options or {}
self.required_options: Set[str] = set(required_options)
# Template options are filenames which are using buildout substitution.
self.template_options: Set[str] = set(template_options)
# Flag for recipe which can generates arbitrary options. If true, we can
# not know if referenced options exist or not during diagnostics.
self.any_options = any_options
registry[self.name] = self
@property
def documentation(self) -> str:
"""Documentation of the recipe
"""
return '## `{}`\n\n---\n{}'.format(self.name, self.description)
registry: Dict[str, Recipe] = {}
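
# Recipes register themselves in `registry` at instantiation time (see
# Recipe.__init__); lookup is then a plain dict access, e.g. (illustrative):
#
#   recipe = registry.get('slapos.recipe.template')
#   if recipe is not None:
#     print(recipe.documentation)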
Recipe(
name='slapos.recipe.template',
description='Template recipe which supports remote resource.',
url='https://pypi.org/project/slapos.recipe.template/',
options={
'url':
RecipeOption('Url or path of the input template', ),
'inline':
RecipeOption('Inline input template', ),
'output':
RecipeOption('Path of the output', ),
'md5sum':
RecipeOption('Check the integrity of the input file.', ),
'mode':
RecipeOption(
'Specify the filesystem permissions in octal notation.', ),
},
required_options=('url', 'output'),
template_options=('url', ),
)
Recipe(
name='slapos.recipe.template:jinja2',
description=
'Template recipe which supports remote resource and templating with [jinja2](https://jinja.palletsprojects.com/en/2.10.x/)',
url='https://pypi.org/project/slapos.recipe.template/',
required_options=('url', 'output'),
options={
'url':
RecipeOption('Url or path of the input template', ),
'inline':
RecipeOption('Inline input template', ),
'output':
RecipeOption('Path of the output', ),
'template':
RecipeOption(
'Template url/path, as accepted by `zc.buildout.download.Download.__call__`. For very short template, it can make sense to put it directly into buildout.cfg: the value is the template itself, prefixed by the string `inline:` + an optional newline.',
deprecated="Use `url` or `inline` options instead"),
'rendered':
RecipeOption('Where rendered template should be stored.',
deprecated="Use `output` option instead"),
'context':
RecipeOption(
"""
Jinja2 context specification, one variable per line, with 3 whitespace-separated parts:
`type` `name` `expression`
Available types are described below. name is the variable name to declare. Expression semantic varies depending on the type.
Available types:
* `raw`: Immediate literal string.
* `key`: Indirect literal string.
* `import`: Import a python module.
* `section`: Make a whole buildout section available to template, as a dictionary.
Indirection targets are specified as `[section]:key`. It is possible to use buildout’s built-in variable replacement instead of the `key` type, but keep in mind that different lines are different variables for this recipe. It might be what you want (factorising context chunk declarations); otherwise you should use indirect types.
""", ),
'md5sum':
RecipeOption(
"Template’s MD5, for file integrity checking. By default, no integrity check is done.",
),
'mode':
RecipeOption(
"Mode, in octal representation (no need for 0-prefix) to set output file to. This is applied before storing anything in output file.",
),
'once':
RecipeOption(
"Path of a marker file to prevents rendering altogether.", ),
'extensions':
RecipeOption(
"Jinja2 extensions to enable when rendering the template, whitespace-separated. By default, none is loaded.",
),
'import-delimiter':
RecipeOption(
"Delimiter character for in-template imports. Defaults to `/`. See also: `import-list`",
),
"import-list":
RecipeOption(
"""Declares a list of import paths. Format is similar to context. `name` becomes import’s base name.
Available types:
* `rawfile`: Literal path of a file.
* `file`: Indirect path of a file.
* `rawfolder`: Literal path of a folder. Any file in such folder can be imported.
* `folder`: Indirect path of a folder. Any file in such folder can be imported.
* `encoding`: Encoding for input template and output file. Defaults to `utf-8`.
""", ),
},
)
Recipe(
name='slapos.recipe.build:gitclone',
url='https://pypi.org/project/slapos.recipe.build/#id59',
description=
'Checkout a git repository and its submodules by default. Supports `slapos.libnetworkcache` if present, and if boolean `use-cache` option is true.',
required_options=('repository', ),
options={
'repository':
RecipeOption('URL of the git repository', ),
'branch':
RecipeOption('Branch in the remote repository to check out', ),
'revision':
RecipeOption(
'Revision in the remote repository to check out. `revision` has priority over `branch`',
),
'develop':
RecipeOption(
"Don't let buildout modify/delete this directory. By default, the checkout is managed by buildout, which means buildout will delete the working copy when option changes, if you don't want this, you can set `develop` to a true value. In that case, changes to buildout configuration will not be applied to working copy after intial checkout",
valid_values=('true', 'false', 'yes', 'no'),
),
'ignore-cloning-submodules':
RecipeOption(
'By default, cloning the repository will clone its submodules also. You can force git to ignore cloning submodules by defining `ignore-cloning-submodules` boolean option to true',
valid_values=('true', 'false', 'yes', 'no'),
),
'ignore-ssl-certificate':
RecipeOption(
'Ignore server certificate. By default, when remote server use SSL protocol git checks if the SSL certificate of the remote server is valid before executing commands. You can force git to ignore this check using ignore-ssl-certificate boolean option.',
valid_values=('true', 'false', 'yes', 'no'),
),
'git-command':
RecipeOption('Full path to git command', ),
'shared':
RecipeOption(
"Clone with `--shared` option if true. See `git-clone` command.",
valid_values=('true', 'false', 'yes', 'no'),
),
'sparse-checkout':
RecipeOption(
"The value of the sparse-checkout option is written to the `$GITDIR/info/sparse-checkout` file, which is used to populate the working directory sparsely. See the *SPARSE CHECKOUT* section of `git-read-tree` command. This feature is disabled if the value is empty or unset."
),
},
generated_options={
'location':
RecipeOption(
'Path where to clone the repository, default to parts/${:_buildout_section_name_}',
),
})
Recipe(
'plone.recipe.command',
url='https://pypi.org/project/plone.recipe.command/',
description=
'The `plone.recipe.command` buildout recipe allows you to run a command when a buildout part is installed or updated.',
required_options=('command', ),
options={
'command':
RecipeOption('Command to run when the buildout part is installed.', ),
'update-command':
RecipeOption(
'Command to run when the buildout part is updated. This happens when buildout is run but the configuration for this buildout part has not changed.',
),
'location':
RecipeOption(
'''A list of filesystem paths that buildout should consider as being managed by this buildout part.
These will be removed when buildout (re)installs or removes this part.''', ),
'stop-on-error':
RecipeOption(
            'When `yes`, `on` or `true`, buildout will stop if the command ends with a non-zero exit code.',
valid_values=('true', 'yes'),
),
},
)
Recipe(
name='slapos.recipe.build:download',
description='''Download a file
''',
url='https://pypi.org/project/slapos.recipe.build/',
options={
'url':
RecipeOption('URL to download from', ),
'md5sum':
RecipeOption('Checksum of the download', ),
'offline':
RecipeOption(
'Override buildout global ``offline`` setting for the context of this section',
valid_values=('true', 'false')),
'filename':
RecipeOption('', ),
},
generated_options={
'location': RecipeOption('', ),
'target': RecipeOption('', ),
},
required_options=(
'url',
'md5sum',
))
Recipe(
name='slapos.recipe.build:download-unpacked',
description='''Download an archive and unpack it
''',
url='https://pypi.org/project/slapos.recipe.build/',
options={
'url':
RecipeOption('URL to download from', ),
'md5sum':
RecipeOption('Checksum of the download', ),
'offline':
RecipeOption(
'Override buildout global ``offline`` setting for the context of this section',
valid_values=('true', 'false')),
'filename':
RecipeOption('', ),
'strip-top-level-dir':
RecipeOption('', valid_values=('true', 'false')),
},
generated_options={
'location': RecipeOption('', ),
'target': RecipeOption('', ),
},
required_options=(
'url',
'md5sum',
))
Recipe(
name='slapos.recipe.build',
description=
    '''Generally deprecated in favor of slapos.recipe.cmmi, which supports shared parts,
but useful for corner cases as it allows inline python code.
''',
url='https://pypi.org/project/slapos.recipe.build/',
options={
'init': RecipeOption('python code executed at initialization step', ),
'install': RecipeOption('python code executed at install step', ),
'update': RecipeOption('python code executed when updating', ),
},
generated_options={
'location': RecipeOption('', ),
},
any_options=True,
)
Recipe(
name='slapos.recipe.cmmi',
description=
'The recipe provides the means to compile and install source distributions using configure and make and other similar tools.',
url='https://pypi.org/project/slapos.recipe.cmmi/',
options={
'url':
RecipeOption(
'''URL to the package that will be downloaded and extracted. The
supported package formats are `.tar.gz`, `.tar.bz2`, and `.zip`. The value must be a full URL,
e.g. http://python.org/ftp/python/2.4.4/Python-2.4.4.tgz. The `path` option cannot be used at the same time as `url`.'''
),
'path':
RecipeOption(
'''Path to a local directory containing the source code to be built
and installed. The directory must contain the `configure` script. The `url` option cannot be used at the same time as `path`.'''
),
'prefix':
RecipeOption(
'''Custom installation prefix passed to the `--prefix` option of the configure script. Defaults to the location of the part.
Note that this is a convenience shortcut which assumes that the default configure command is used to configure the package.
If the `configure-command` option is used to define a custom configure command no automatic `--prefix` injection takes place.
You can also set the `--prefix` parameter explicitly in `configure-options`.'''
),
'shared':
RecipeOption(
'''Specify the path in which this package is shared by many other packages.
`shared-part-list` should be defined in `[buildout]` section
The `shared` option is either true or false.
The package will be installed in `path/name/<hash of options>`.
''',
valid_values=['true', 'false'],
),
'md5sum':
RecipeOption('''MD5 checksum for the package file.
If available the MD5 checksum of the downloaded package will be compared to this value and if the values do not match the execution of the recipe will fail.'''
),
'make-binary':
RecipeOption(
'''Path to the make program. Defaults to `make` which should work on any system that has the make program available in the system `PATH`.'''
),
'make-options':
RecipeOption(
'''Extra `KEY=VALUE` options included in the invocation of the make program.
Multiple options can be given on separate lines to increase readability.'''),
'make-targets':
RecipeOption(
'''Targets for the `make` command. Defaults to `install` which will be enough to install most software packages.
You only need to use this if you want to build alternate targets. Each target must be given on a separate line.'''
),
'configure-command':
RecipeOption(
'''Name of the configure command that will be run to generate the Makefile.
This defaults to `./configure` which is fine for packages that come with a configure script.
You may wish to change this when compiling packages with a different set up.
See the *Compiling a Perl package* section for an example.'''),
'configure-options':
RecipeOption('''Extra options to be given to the configure script.
By default only the `--prefix` option is passed which is set to the part directory.
Each option must be given on a separate line.
'''),
'patch-binary':
RecipeOption('''Path to the `patch` program.
Defaults to `patch` which should work on any system that has the patch program available in the system `PATH`.'''
),
'patch-options':
RecipeOption(
'''Options passed to the `patch` program. Defaults to `-p0`.'''),
'patches':
RecipeOption(
'''List of patch files to the applied to the extracted source.
Each file should be given on a separate line.'''),
'pre-configure-hook':
RecipeOption(
'''Custom python script that will be executed before running the configure script.
The format of the options is:
```
/path/to/the/module.py:name_of_callable
url:name_of_callable
url#md5sum:name_of_callable
````
where the first part is a filesystem path or url to the python
module and the second part is the name of the callable in the
module that will be called. The callable will be passed three
parameters in the following order:
1. The options dictionary from the recipe.
2. The global buildout dictionary.
3. A dictionary containing the current os.environ augmented with the part specific overrides.
The callable is not expected to return anything.
*Note:*
The `os.environ` is not modified so if the hook script is
interested in the environment variable overrides defined for the
part it needs to read them from the dictionary that is passed in
as the third parameter instead of accessing os.environ
directly.
'''),
'pre-make-hook':
RecipeOption(
'''Custom python script that will be executed before running `make`.
The format and semantics are the same as with the `pre-configure-hook option`.'''
),
'post-make-hook':
RecipeOption(
'''Custom python script that will be executed after running `make`.
The format and semantics are the same as with the `pre-configure-hook` option.'''
),
'pre-configure':
RecipeOption(
'''Shell command that will be executed before running `configure` script.
It takes the same effect as `pre-configure-hook` option except it's shell command.'''
),
'pre-build':
RecipeOption(
'''Shell command that will be executed before running `make`.
It takes the same effect as `pre-make-hook` option except it's shell command.'''
),
'pre-install':
RecipeOption(
'''Shell command that will be executed before running `make` install.'''
),
'post-install':
RecipeOption(
'''Shell command that will be executed after running `make` install.
It takes the same effect as `post-make-hook` option except it's shell command.'''
),
'keep-compile-dir':
RecipeOption(
'''Switch to optionally keep the temporary directory where the package was compiled.
This is mostly useful for other recipes that use this recipe to compile a software but wish to do some additional steps not handled by this recipe.
The location of the compile directory is stored in `options['compile-directory']`.
Accepted values are true or false, defaults to false.''',
valid_values=['true', 'false'],
),
'promises':
RecipeOption(
            '''List the paths and files that should exist after the part is installed.
Each file or path must be an absolute path, one item per line.
If any item doesn't exist, the recipe shows a warning message.
The default value is empty.'''),
'dependencies':
RecipeOption('''List all the depended parts:
```
dependencies = part1 part2 ...
```
All the dependent parts will be installed before this part; in addition, changes in any dependent part will trigger reinstallation of the current part.
'''),
'environment-section':
RecipeOption(
'''Name of a section that provides environment variables that will be used to
augment the variables read from `os.environ` before executing the
recipe.
This recipe does not modify `os.environ` directly. External commands
run as part of the recipe (e.g. `make`, `configure`, etc.) get an augmented
environment when they are forked. Python hook scripts are passed the
augmented environment as a parameter.
The values of the environment variables may contain references to other
existing environment variables (including themselves) in the form of
Python string interpolation variables using the dictionary notation. These
references will be expanded using values from `os.environ`. This can be
used, for example, to append to the `PATH` variable, e.g.:
```
[component]
recipe = slapos.recipe.cmmi
environment-section =
environment
[environment]
PATH = %(PATH)s:${buildout:directory}/bin
```
'''),
'environment':
RecipeOption(
'''A sequence of `KEY=VALUE` pairs separated by newlines that define
additional environment variables used to update `os.environ` before
executing the recipe.
The semantics of this option are the same as `environment-section`. If
both `environment-section` and `environment` are provided the values from
the former will be overridden by the latter allowing per-part customization.
'''),
},
generated_options={
'location':
RecipeOption(
'''Location where the package is installed.
Defaults to `${buildout:parts-directory}/${:_buildout_section_name_}`,
or to ${buildout:shared-part-list[-1]}/${:_buildout_section_name_}/${option_hash} if `shared` was set to a true value.
This option is only available after part is installed, but to help resolve bootstrap
issues, the magic string `@@LOCATION@@` is also understood by this recipe as an alias
to the `location` option.
''', ),
},
)
Recipe(
name='zc.recipe.egg',
description=
    'The `zc.recipe.egg` recipe can be used to install various types of distutils distributions as eggs.',
url='https://pypi.org/project/zc.recipe.egg/',
options={
'eggs':
RecipeOption(
'''A list of eggs to install given as one or more setuptools requirement strings.
Each string must be given on a separate line.'''),
'find-links':
RecipeOption(
'''A list of URLs, files, or directories to search for distributions.'''
),
'index':
RecipeOption(
'''The URL of an index server, or almost any other valid URL. :)
If not specified, the Python Package Index, https://pypi.org/simple, is used.
You can specify an alternate index with this option.
If you use the links option and if the links point to the needed distributions, then the index can be anything and will be largely ignored.
'''),
})
Recipe(
name='zc.recipe.egg:eggs',
description=
    'The `zc.recipe.egg:eggs` recipe can be used to install various types of distutils distributions as eggs.',
url='https://pypi.org/project/zc.recipe.egg/',
options={
'eggs':
RecipeOption(
'''A list of eggs to install given as one or more setuptools requirement strings.
Each string must be given on a separate line.'''),
'find-links':
RecipeOption(
'''A list of URLs, files, or directories to search for distributions.'''
),
'index':
RecipeOption(
'''The URL of an index server, or almost any other valid URL. :)
If not specified, the Python Package Index, https://pypi.org/simple, is used.
You can specify an alternate index with this option.
If you use the links option and if the links point to the needed distributions, then the index can be anything and will be largely ignored.
'''),
})
for name in (
'zc.recipe.egg',
'zc.recipe.egg:script',
'zc.recipe.egg:scripts',
):
Recipe(
name=name,
      description=f'The `{name}` recipe installs Python distributions as eggs',
url='https://pypi.org/project/zc.recipe.egg/',
options={
'entry-points':
RecipeOption('''A list of entry-point identifiers of the form:
```
name=module:attrs
```
where `name` is a script name, `module` is a dotted name resolving to a module name, and `attrs` is a dotted name resolving to a callable object within a module.
This option is useful when working with distributions that don’t declare entry points, such as distributions not written to work with setuptools.'''
),
'scripts':
RecipeOption('''Control which scripts are generated.
The value should be a list of zero or more tokens.
Each token is either a name, or a name followed by an ‘=’ and a new name. Only the named scripts are generated.
If no tokens are given, then script generation is disabled.
If the option isn’t given at all, then all scripts defined by the named eggs will be generated.'''
),
'dependent-scripts':
RecipeOption(
'''If set to the string “true”, scripts will be generated for all required eggs in addition to the eggs specifically named.''',
valid_values=['true', 'false']),
'interpreter':
RecipeOption(
'''The name of a script to generate that allows access to a Python interpreter that has the path set based on the eggs installed.'''
),
'extra-paths':
RecipeOption('''Extra paths to include in a generated script.'''),
'initialization':
RecipeOption('''Specify some Python initialization code.
This is very limited.
In particular, be aware that leading whitespace is stripped from the code given.'''
),
'arguments':
RecipeOption(
'''Specify some arguments to be passed to entry points as Python source.'''
),
'relative-paths':
RecipeOption(
'''If set to true, then egg paths will be generated relative to the script path.
This allows a buildout to be moved without breaking egg paths.
This option can be set in either the script section or in the buildout section.
''',
valid_values=['true', 'false']),
'egg':
RecipeOption(
              '''A specification for the egg to be created, given as a setuptools requirement string.
This defaults to the part name.'''),
'eggs':
RecipeOption(
'''A list of eggs to install given as one or more setuptools requirement strings.
Each string must be given on a separate line.'''),
'find-links':
RecipeOption(
'''A list of URLs, files, or directories to search for distributions.'''
),
'index':
RecipeOption(
'''The URL of an index server, or almost any other valid URL. :)
If not specified, the Python Package Index, https://pypi.org/simple, is used.
You can specify an alternate index with this option.
If you use the links option and if the links point to the needed distributions, then the index can be anything and will be largely ignored.
'''),
})
Recipe(
name='zc.recipe.egg:custom',
description=
'The `zc.recipe.egg:custom` recipe can be used to install an egg with custom build parameters.',
url='https://pypi.org/project/zc.recipe.egg/',
options={
'include-dirs':
RecipeOption(
'''A new-line separated list of directories to search for include files.'''
),
'library-dirs':
RecipeOption(
'''A new-line separated list of directories to search for libraries to link with.'''
),
'rpath':
RecipeOption(
'''A new-line separated list of directories to search for dynamic libraries at run time.'''
),
'define':
RecipeOption(
'''A comma-separated list of names of C preprocessor variables to define.'''
),
'undef':
RecipeOption(
'''A comma-separated list of names of C preprocessor variables to undefine.'''
),
'libraries':
RecipeOption('''The name of an additional library to link with.
Due to limitations in distutils and despite the option name, only a single library can be specified.'''
),
'link-objects':
        RecipeOption('''The name of a link object to link against.
Due to limitations in distutils and despite the option name, only a single link object can be specified.'''
),
'debug':
RecipeOption('''Compile/link with debugging information'''),
'force':
RecipeOption('''Forcibly build everything (ignore file timestamps)'''),
'compiler':
RecipeOption('''Specify the compiler type'''),
'swig':
RecipeOption('''The path to the swig executable'''),
'swig-cpp':
RecipeOption('''Make SWIG create C++ files (default is C)'''),
'swig-opts':
RecipeOption('''List of SWIG command line options'''),
'egg':
RecipeOption(
            '''A specification for the egg to be created, given as a setuptools requirement string.
This defaults to the part name.'''),
'find-links':
RecipeOption(
'''A list of URLs, files, or directories to search for distributions.'''
),
'index':
RecipeOption(
'''The URL of an index server, or almost any other valid URL. :)
If not specified, the Python Package Index, https://pypi.org/simple, is used.
You can specify an alternate index with this option.
If you use the links option and if the links point to the needed distributions, then the index can be anything and will be largely ignored.'''
),
'environment':
RecipeOption(
'''The name of a section with additional environment variables.
The environment variables are set before the egg is built.'''),
})
Recipe(
name='zc.recipe.egg:develop',
description=
'''The `zc.recipe.egg:develop` recipe can be used to make a path containing source available as an installation candidate.
It does not install the egg, another `zc.recipe.egg` section will be needed for this.''',
url='https://pypi.org/project/zc.recipe.egg/',
options={
'setup':
RecipeOption(
            'The path to a setup script or a directory containing one. This is required.'
)
})
Recipe(
name='slapos.cookbook:wrapper',
    description='''Recipe to create a script from a given command and options.
''',
url='https://lab.nexedi.com/nexedi/slapos/',
options={
'command-line':
RecipeOption('shell command which launches the intended process'),
'wrapper-path':
RecipeOption("absolute path to file's destination"),
'wait-for-files':
RecipeOption('list of files to wait for'),
'hash-files':
RecipeOption('list of buildout-generated files to be checked by hash'),
'hash-existing-files':
RecipeOption('list of existing files to be checked by hash'),
'pidfile':
        RecipeOption('path to a pidfile to ensure exclusivity for the process'),
'private-tmpfs':
RecipeOption(
'list of "<size> <path>" private tmpfs, using user namespaces'),
'reserve-cpu':
RecipeOption('Command will ask for an exclusive CPU core', ),
},
required_options=('command-line', 'wrapper-path'),
) | zc.buildout.languageserver | /zc.buildout.languageserver-0.9.2-py3-none-any.whl/buildoutls/recipes.py | recipes.py |
import asyncio
import datetime
import logging
from typing import AsyncIterable, Dict, Optional, Tuple, cast
import aiohttp
import cachetools
import packaging.version
import pkg_resources
from . import aiohttp_session
from .types import KnownVulnerability, VersionNotFound, ProjectNotFound
import cattrs
converter = cattrs.Converter()
logger = logging.getLogger(__name__)
# type aliases
Project = str
VersionStr = str
ProjectAndVersionStr = Tuple[Project, VersionStr]
OptionalVersion = Optional[packaging.version.Version]
class PyPIClient:
def __init__(self, package_index_url: str = 'https://pypi.org'):
self._package_index_url = package_index_url
self.__get_latest_version_cache = cast(
Dict[ProjectAndVersionStr, OptionalVersion],
cachetools.TTLCache(
maxsize=2 << 10,
ttl=datetime.timedelta(hours=2).total_seconds(),
))
self.__get_known_vulnerabilities_cache = cast(
Dict[ProjectAndVersionStr, Tuple[KnownVulnerability, ...]],
cachetools.TTLCache(
maxsize=2 << 10,
ttl=datetime.timedelta(hours=2).total_seconds(),
))
async def get_latest_version(
self,
project: str,
version: str,
semaphore: asyncio.Semaphore,
) -> OptionalVersion:
try:
return self.__get_latest_version_cache[project, version]
except KeyError:
pass
async with semaphore:
latest_version = await self.__get_latest_version(project, version)
try:
self.__get_latest_version_cache[project, version] = latest_version
except ValueError:
pass
return latest_version
async def __get_latest_version(
self,
project: str,
version: str,
) -> OptionalVersion:
try:
# https://warehouse.pypa.io/api-reference/json.html#project
async with aiohttp_session.get_session().get(
f'{self._package_index_url}/pypi/{project}/json') as resp:
project_data = await resp.json()
except (aiohttp.ClientError, ValueError, asyncio.TimeoutError):
logger.warning(
'Error fetching latest version for %s',
project,
exc_info=True,
)
return None
if 'info' not in project_data:
raise ProjectNotFound(project)
if version not in project_data['releases']:
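      # Unknown version: fall back to '0', which compares lower than any real
      # release, so the newest release is still reported as an update.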
version = '0'
current = pkg_resources.parse_version(version)
latest = pkg_resources.parse_version(project_data['info']['version'])
if latest > current:
return cast(packaging.version.Version, latest)
return None
async def get_known_vulnerabilities(
self,
project: str,
version: str,
semaphore: asyncio.Semaphore,
) -> Tuple[KnownVulnerability, ...]:
try:
return self.__get_known_vulnerabilities_cache[project, version]
except KeyError:
pass
vulnerabilities = tuple([
v async for v in self.__get_known_vulnerabilities(
project, version, semaphore)
])
try:
self.__get_known_vulnerabilities_cache[project,
version] = vulnerabilities
except ValueError:
pass
return vulnerabilities
async def __get_known_vulnerabilities(
self,
project: str,
version: str,
semaphore: asyncio.Semaphore,
) -> AsyncIterable[KnownVulnerability]:
try:
# https://warehouse.pypa.io/api-reference/json.html#release
async with semaphore:
async with aiohttp_session.get_session().get(
f'{self._package_index_url}/pypi/{project}/{version}/json',
) as resp:
project_data = await resp.json()
except (aiohttp.ClientError, ValueError, asyncio.TimeoutError):
logger.warning(
'Error fetching project release %s %s',
project,
version,
exc_info=True,
)
else:
if 'info' not in project_data:
raise VersionNotFound((project, version))
parsed_version = pkg_resources.parse_version(version)
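      # A vulnerability applies to this version when at least one of its
      # "fixed_in" versions is newer than the version in use.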
for vulnerability in (converter.structure(v, KnownVulnerability)
for v in project_data.get('vulnerabilities', ())):
for fixed_in in (pkg_resources.parse_version(f)
for f in vulnerability.fixed_in):
if fixed_in > parsed_version:
yield vulnerability
break
def get_home_page_url(self, project: str, version: str) -> str:
return f'{self._package_index_url}/project/{project}/{version}/' | zc.buildout.languageserver | /zc.buildout.languageserver-0.9.2-py3-none-any.whl/buildoutls/pypi.py | pypi.py |
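# Minimal usage sketch (illustrative, not part of the original module); it
# assumes an aiohttp session was set up through `aiohttp_session` and that we
# are running inside an event loop:
#
#   async def check(client: PyPIClient) -> None:
#       sem = asyncio.Semaphore(4)
#       latest = await client.get_latest_version('zc.buildout', '2.13.0', sem)
#       vulns = await client.get_known_vulnerabilities('zc.buildout', '2.13.0', sem)
#       print(latest, vulns)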
import itertools
import logging
import os
import pathlib
import re
import urllib.parse
from typing import Iterable, List, Optional, Tuple, Union
from lsprotocol.types import (
TEXT_DOCUMENT_CODE_ACTION,
TEXT_DOCUMENT_COMPLETION,
TEXT_DOCUMENT_DEFINITION,
TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_OPEN,
TEXT_DOCUMENT_DOCUMENT_LINK,
TEXT_DOCUMENT_DOCUMENT_SYMBOL,
TEXT_DOCUMENT_HOVER,
TEXT_DOCUMENT_REFERENCES,
WORKSPACE_DID_CHANGE_WATCHED_FILES,
CodeAction,
CodeActionKind,
CodeActionOptions,
CodeActionParams,
Command,
CompletionItem,
CompletionItemKind,
CompletionItemTag,
CompletionOptions,
CompletionParams,
DidChangeTextDocumentParams,
DidChangeWatchedFilesParams,
DidOpenTextDocumentParams,
DocumentLink,
DocumentLinkParams,
DocumentSymbol,
DocumentSymbolParams,
Hover,
Location,
MarkupContent,
MarkupKind,
Position,
Range,
ShowDocumentParams,
SymbolKind,
TextDocumentPositionParams,
TextEdit,
)
from pygls.server import LanguageServer
from pygls.workspace import Document
from . import (
buildout,
code_actions,
commands,
diagnostic,
md5sum,
profiling,
recipes,
types,
)
server = LanguageServer(name="zc.buildout.languageserver", version="0.9.0")
server.command(commands.COMMAND_START_PROFILING)(profiling.start_profiling)
server.command(commands.COMMAND_STOP_PROFILING)(profiling.stop_profiling)
reference_start = '${'
reference_re = re.compile(
r'\${(?P<section>[-a-zA-Z0-9 ._]*):(?P<option>[-a-zA-Z0-9 ._]+)}')
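# For example, reference_re matches '${buildout:directory}' with
# group('section') == 'buildout' and group('option') == 'directory'.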
logger = logging.getLogger(__name__)
def getOptionValue(
option: Union[buildout.BuildoutOptionDefinition, str]) -> str:
# Options read with our patch remember the position and have their values in
# .value but options added by buildout for default values does not.
# We normalize this here.
if isinstance(option, str):
return option
return option.value
async def parseAndSendDiagnostics(
ls: LanguageServer,
uri: str,
) -> None:
diagnostics = []
async for diag in diagnostic.getDiagnostics(ls, uri):
diagnostics.append(diag)
ls.publish_diagnostics(uri, diagnostics)
@server.command(commands.COMMAND_OPEN_PYPI_PAGE)
async def command_open_pypi_page(
ls: LanguageServer,
args: List[types.OpenPypiPageCommandParams],
) -> None:
await ls.show_document_async(
ShowDocumentParams(
uri=args[0]['url'],
external=True,
))
@server.command(commands.COMMAND_UPDATE_MD5SUM)
async def command_update_md5sum(
ls: LanguageServer,
args: List[types.UpdateMD5SumCommandParams],
) -> None:
await md5sum.update_md5sum(ls, args[0])
@server.feature(
TEXT_DOCUMENT_CODE_ACTION,
CodeActionOptions(resolve_provider=False,
code_action_kinds=[
CodeActionKind.QuickFix,
]),
)
async def lsp_code_action(
ls: LanguageServer,
params: CodeActionParams) -> Optional[List[Union[Command, CodeAction]]]:
return await code_actions.getCodeActions(ls, params)
@server.feature(TEXT_DOCUMENT_DID_OPEN)
async def did_open(
ls: LanguageServer,
params: DidOpenTextDocumentParams,
) -> None:
await parseAndSendDiagnostics(ls, params.text_document.uri)
@server.feature(TEXT_DOCUMENT_DID_CHANGE)
async def did_change(
ls: LanguageServer,
params: DidChangeTextDocumentParams,
) -> None:
buildout.clearCache(params.text_document.uri)
await parseAndSendDiagnostics(ls, params.text_document.uri)
@server.feature(WORKSPACE_DID_CHANGE_WATCHED_FILES)
async def did_change_watched_file(
ls: LanguageServer,
params: DidChangeWatchedFilesParams,
) -> None:
for change in params.changes:
buildout.clearCache(change.uri)
@server.feature(TEXT_DOCUMENT_DOCUMENT_SYMBOL)
async def lsp_symbols(
ls: LanguageServer,
params: DocumentSymbolParams,
) -> List[DocumentSymbol]:
symbols: List[DocumentSymbol] = []
parsed = await buildout.parse(
ls=ls,
uri=params.text_document.uri,
allow_errors=True,
)
for section_name, section_value in parsed.items():
section_header_location = parsed.section_header_locations[section_name]
# don't include implicit sections such as [buildout] unless defined in this profile.
if section_header_location.uri != params.text_document.uri:
continue
children: List[DocumentSymbol] = []
for option_name, option_value in section_value.items():
if option_value.default_value:
continue
option_range = Range(
start=Position(line=min(r.range.start.line
for r in option_value.locations),
character=0),
end=Position(line=max(r.range.end.line
for r in option_value.locations),
character=0))
detail = getOptionValue(option_value)
if len(detail.splitlines()) > 1:
# vscode does not like too long multi-lines detail
detail = '{} ...'.format(detail.splitlines()[0])
children.append(
DocumentSymbol(name=option_name,
kind=SymbolKind.Field,
range=option_range,
selection_range=option_range,
detail=detail,
children=[]))
section_range = Range(
start=section_header_location.range.start,
end=Position(
line=max(s.range.end.line for s in children)
if children else section_header_location.range.end.line,
character=0,
),
)
symbols.append(
DocumentSymbol(
name=section_name,
kind=SymbolKind.Class,
range=section_range,
selection_range=section_range,
detail=getOptionValue(section_value.get('recipe', '')),
children=children,
))
return symbols
@server.feature(TEXT_DOCUMENT_COMPLETION,
CompletionOptions(trigger_characters=["{", ":"]))
async def lsp_completion(
ls: LanguageServer,
params: CompletionParams,
) -> Optional[List[CompletionItem]]:
items: List[CompletionItem] = []
doc = ls.workspace.get_document(params.text_document.uri)
def getSectionReferenceCompletionTextEdit(
doc: Document,
pos: Position,
new_text: str,
) -> TextEdit:
"""Calculate the edition to insert ${section: in ${section:option}
"""
words_split = re.compile(r'\$\{[-a-zA-Z0-9 ._]*')
line = doc.lines[pos.line]
index = 0
while True:
match = words_split.search(line, index)
assert match
if match.start() <= pos.character <= match.end():
start = match.start()
end = match.end()
return TextEdit(
range=Range(start=Position(line=pos.line, character=start),
end=Position(line=pos.line, character=end)),
new_text=new_text,
)
index = max(match.start(), index + 1)
return TextEdit(
Range(
start=Position(line=pos.line, character=pos.character),
end=Position(line=pos.line, character=pos.character),
),
new_text=new_text,
)
def getOptionReferenceTextEdit(
doc: Document,
pos: Position,
new_text: str,
) -> TextEdit:
"""Calculate the edition to insert option} a ${section:option}
"""
words_split = re.compile(
r'(?P<section>\${[-a-zA-Z0-9 ._]*\:)(?P<option>[ ]*[-a-zA-Z0-9._]*\}{0,1})'
)
line = doc.lines[pos.line]
index = 0
while True:
match = words_split.search(line, index)
assert match
section_len = len(match.group('section'))
if match.start() + section_len <= pos.character <= match.end():
start = match.start() + section_len
end = match.end()
return TextEdit(
range=Range(start=Position(line=pos.line, character=start),
end=Position(line=pos.line, character=end)),
new_text=new_text,
)
index = max(match.start(), index + 1)
return TextEdit(
range=Range(
start=Position(line=pos.line, character=pos.character),
end=Position(line=pos.line, character=pos.character),
),
new_text=new_text,
)
def getDefaultTextEdit(
doc: Document,
pos: Position,
new_text: str,
) -> TextEdit:
"""Calculate the edition to replace the current token at position by the new text.
"""
# regex to split the current token, basically we consider everything a word
# but stop at substitution start and end.
words_split = re.compile(r'[-a-zA-Z0-9\._\$\{\/]*')
line = ''
if len(doc.lines) > pos.line:
line = doc.lines[pos.line]
if not line.strip():
return TextEdit(
range=Range(
start=Position(line=pos.line, character=pos.character),
end=Position(line=pos.line, character=pos.character),
),
new_text=new_text,
)
index = 0
while True:
match = words_split.search(line, index)
assert match
if match.start() <= pos.character <= match.end():
start = match.start()
end = match.end()
# if end was a '}', erase it
if (line + ' ')[end] == '}':
end += 1
# TODO: test
return TextEdit(
range=Range(start=Position(line=pos.line, character=start),
end=Position(line=pos.line, character=end)),
new_text=new_text,
)
index = max(match.start(), index + 1)
parsed = await buildout.open(ls, params.text_document.uri)
if parsed is None:
return None
symbol = await parsed.getSymbolAtPosition(params.position)
logger.debug("getting completions on %s", symbol)
if symbol:
if symbol.kind == buildout.SymbolKind.Comment:
return None
if symbol.kind == buildout.SymbolKind.SectionReference:
for buildout_section_name, section_items in symbol._buildout.items():
documentation = '```ini\n{}\n```'.format(
'\n'.join('{} = {}'.format(k, v.value)
for (k, v) in section_items.items()
if v and not v.default_value), )
if section_items.get('recipe'):
recipe = section_items.getRecipe()
if recipe:
documentation = f'{recipe.documentation}\n\n---\n{documentation}'
else:
documentation = f'## `{section_items["recipe"].value}`\n\n---\n{documentation}'
items.append(
CompletionItem(label=buildout_section_name,
text_edit=getSectionReferenceCompletionTextEdit(
doc,
params.position,
'${' + buildout_section_name,
),
filter_text='${' + buildout_section_name,
kind=CompletionItemKind.Class,
documentation=MarkupContent(
kind=MarkupKind.Markdown,
value=documentation,
)))
elif symbol.kind == buildout.SymbolKind.OptionReference:
# complete referenced option:
# [section]
# option = ${another_section:|
valid_option_references: Iterable[Tuple[str, str]] = []
# We include the options of `another_section`
if symbol.referenced_section:
valid_option_references = [
(k, f'```\n{getOptionValue(v)}```')
for k, v in symbol.referenced_section.items()
]
# also if `another_section` uses a known recipe, includes
# the generated options of this recipe.
recipe = symbol.referenced_section_recipe
if recipe:
valid_option_references = itertools.chain(
valid_option_references,
((k, v.documentation)
for k, v in recipe.generated_options.items()),
)
for buildout_option_name, buildout_option_value in valid_option_references:
items.append(
CompletionItem(label=buildout_option_name,
text_edit=getOptionReferenceTextEdit(
doc,
params.position,
buildout_option_name + '}',
),
kind=CompletionItemKind.Property,
documentation=MarkupContent(
kind=MarkupKind.Markdown,
value=buildout_option_value,
)))
elif symbol.kind == buildout.SymbolKind.BuildoutOptionKey:
# complete options of a section, ie:
# [section]
# opt|
assert isinstance(parsed, buildout.BuildoutProfile)
# complete with existing options from this sections, to override
# options and for [buildout]
for option_name, option_default_value in symbol.current_section.items():
# skip some options that are not supposed to be defined, only referenced
if option_name in (
'_buildout_section_name_',
'_profile_base_location_',
):
continue
items.append(
CompletionItem(label=option_name,
text_edit=getDefaultTextEdit(
doc,
params.position,
option_name + ' = ',
),
kind=CompletionItemKind.Variable,
documentation=MarkupContent(
kind=MarkupKind.Markdown,
value=f'`{option_default_value.value}`',
)))
# if section is buildout, completes extends & parts which are usually
# multi lines already with an extra \n
if symbol.current_section_name == 'buildout':
for option_name, option_documentation in (
('extends', 'Profiles extended by this buildout'),
('parts', 'Parts that will be installed'),
):
items.append(
CompletionItem(label=option_name,
text_edit=getDefaultTextEdit(
doc,
params.position,
option_name + ' =\n ',
),
kind=CompletionItemKind.Variable,
documentation=MarkupContent(
kind=MarkupKind.Markdown,
value=option_documentation,
)))
else:
# if section uses a known recipe, complete with the options of this recipe.
recipe = symbol.current_section_recipe
if recipe:
for k, v in recipe.options.items():
items.append(
CompletionItem(
label=k,
text_edit=getDefaultTextEdit(
doc,
params.position,
k + ' = ',
),
kind=CompletionItemKind.Variable,
tags=([CompletionItemTag.Deprecated]
if v.deprecated else []),
documentation=MarkupContent(
kind=MarkupKind.Markdown,
value=
(f'**Deprecated**\n{v.deprecated}\n\n----\n{v.documentation}'
if v.deprecated else v.documentation),
)))
else:
# section has no recipe, complete `recipe` as an option name
items.append(
CompletionItem(label='recipe',
text_edit=getDefaultTextEdit(
doc,
params.position,
'recipe = ',
),
kind=CompletionItemKind.Variable))
elif symbol.kind == buildout.SymbolKind.BuildoutOptionValue:
# complete option = |
assert isinstance(parsed, buildout.BuildoutProfile)
if symbol.current_option_name == 'recipe':
# complete recipe = | with known recipes
for recipe_name, recipe in recipes.registry.items():
items.append(
CompletionItem(label=recipe_name,
text_edit=getDefaultTextEdit(
doc, params.position, recipe_name),
kind=CompletionItemKind.Constructor,
documentation=MarkupContent(
kind=MarkupKind.Markdown,
value=recipe.documentation,
)))
if symbol.current_option_name == '<':
# complete <= | with parts
for section_name in symbol._buildout:
if section_name != 'buildout':
items.append(
CompletionItem(label=section_name,
text_edit=getDefaultTextEdit(
doc, params.position, section_name),
kind=CompletionItemKind.Function))
if symbol.current_section_recipe:
# complete with recipe options if recipe is known
for k, v in symbol.current_section_recipe.options.items():
if k == symbol.current_option_name:
for valid in v.valid_values:
items.append(
CompletionItem(label=valid,
text_edit=getDefaultTextEdit(
doc, params.position, valid),
kind=CompletionItemKind.Keyword))
if symbol.current_section_name == 'buildout':
# complete options of [buildout]
if symbol.current_option_name == 'extends':
# complete extends = | with local files
doc_path = pathlib.Path(doc.path)
root_path = pathlib.Path(ls.workspace.root_path)
for profile in itertools.chain(root_path.glob('**/*.cfg'),
root_path.glob('*.cfg')):
profile_relative_path = os.path.relpath(profile, doc_path.parent)
items.append(
CompletionItem(
label=profile_relative_path,
text_edit=getDefaultTextEdit(
doc,
params.position,
profile_relative_path,
),
kind=CompletionItemKind.File,
# make current directory show first
sort_text='{}{}'.format(
'Z' if profile_relative_path.startswith('.') else 'A',
profile_relative_path)))
if symbol.current_option_name == 'parts':
# complete parts = | with sections
for section in parsed.keys():
if section != 'buildout':
items.append(
CompletionItem(label=section,
text_edit=getDefaultTextEdit(
doc,
params.position,
section + '\n',
),
kind=CompletionItemKind.Function))
return items
@server.feature(TEXT_DOCUMENT_DEFINITION)
async def lsp_definition(
ls: LanguageServer,
params: TextDocumentPositionParams,
) -> List[Location]:
parsed = await buildout.open(ls, params.text_document.uri)
if parsed is None:
return []
symbol = await parsed.getSymbolAtPosition(params.position)
logger.debug('definition @%s -> %s', params.position, symbol)
locations: List[Location] = []
if symbol:
if symbol.kind in (
buildout.SymbolKind.SectionReference,
buildout.SymbolKind.OptionReference,
):
assert symbol.referenced_section_name
if symbol.referenced_option:
locations.extend(symbol.referenced_option.locations)
else:
l = symbol._buildout.section_header_locations.get(
symbol.referenced_section_name)
if l:
locations.append(l)
elif symbol.kind == buildout.SymbolKind.BuildoutOptionValue:
assert isinstance(parsed, buildout.BuildoutProfile)
if symbol.current_option_name == '<':
l = parsed.section_header_locations.get(symbol.value)
if l:
locations.append(l)
elif symbol.current_section_name == 'buildout' and symbol.current_option_name == 'extends':
extend = symbol.value
if not buildout._isurl(extend):
uri = params.text_document.uri
base = uri[:uri.rfind('/')] + '/'
locations.append(
Location(uri=urllib.parse.urljoin(base, extend),
range=Range(start=Position(line=0, character=0),
end=Position(line=1, character=0))))
return locations
@server.feature(TEXT_DOCUMENT_REFERENCES)
async def lsp_references(
server: LanguageServer,
params: TextDocumentPositionParams,
) -> List[Location]:
references: List[Location] = []
searched_document = await buildout.parse(server, params.text_document.uri)
assert searched_document is not None
searched_symbol = await searched_document.getSymbolAtPosition(params.position
)
if searched_symbol is not None:
searched_option = None
if searched_symbol.kind in (
buildout.SymbolKind.SectionDefinition,
buildout.SymbolKind.BuildoutOptionKey,
):
searched_section = searched_symbol.current_section_name
if searched_symbol.kind == buildout.SymbolKind.BuildoutOptionKey:
searched_option = searched_symbol.current_option_name
else:
searched_section = searched_symbol.referenced_section_name
if searched_symbol.kind == buildout.SymbolKind.OptionReference:
searched_option = searched_symbol.referenced_option_name
logger.debug("Looking for references for %s ${%s:%s}", searched_symbol,
searched_section, searched_option)
assert searched_section
for profile_path in pathlib.Path(
server.workspace.root_path).glob('**/*.cfg'):
profile = await buildout.parse(server, profile_path.as_uri())
if profile is not None:
assert isinstance(profile, buildout.BuildoutProfile)
# listing a section in ${buildout:parts} is a reference
parts = profile['buildout'].get('parts')
if parts is not None and searched_section in parts.value:
for option_text, option_range in profile.getOptionValues(
'buildout', 'parts'):
if searched_section == option_text:
references.append(Location(uri=profile.uri, range=option_range))
async for symbol in profile.getAllOptionReferenceSymbols():
if symbol.referenced_section_name == searched_section:
if searched_option is None:
references.append(
Location(uri=profile.uri, range=symbol.section_range))
elif symbol.referenced_option_name == searched_option:
references.append(
Location(uri=profile.uri, range=symbol.option_range))
if searched_option is None:
# find references in <= macros
for options in profile.values():
for option_key, option_value in options.items():
if option_key == '<':
if option_value.value == searched_section:
loc = option_value.locations[-1]
assert loc.uri == profile.uri
references.append(loc)
return references
@server.feature(TEXT_DOCUMENT_HOVER)
async def lsp_hover(
ls: LanguageServer,
params: TextDocumentPositionParams,
) -> Optional[Hover]:
parsed = await buildout.open(ls, params.text_document.uri)
if parsed is None:
return None
symbol = await parsed.getSymbolAtPosition(params.position)
hover_text = ''
if symbol:
if symbol.kind == buildout.SymbolKind.OptionReference:
assert symbol.referenced_section_name
if symbol.referenced_option:
hover_text = symbol.referenced_option.value
if symbol.kind == buildout.SymbolKind.SectionReference:
assert symbol.referenced_section_name
recipe = symbol.referenced_section_recipe
if recipe:
hover_text = recipe.name
return Hover(contents=f'```\n{hover_text}\n```')
@server.feature(TEXT_DOCUMENT_DOCUMENT_LINK)
async def lsp_document_link(
ls: LanguageServer,
params: DocumentLinkParams,
) -> List[DocumentLink]:
links: List[DocumentLink] = []
uri = params.text_document.uri
parsed_buildout = await buildout.parse(ls, uri)
base = uri[:uri.rfind('/')] + '/'
if 'extends' in parsed_buildout['buildout']:
for extend, extend_range in parsed_buildout.getOptionValues(
'buildout', 'extends'):
target = extend
if target:
if not buildout._isurl(extend):
target = urllib.parse.urljoin(base, extend)
links.append(DocumentLink(range=extend_range, target=target))
return links | zc.buildout.languageserver | /zc.buildout.languageserver-0.9.2-py3-none-any.whl/buildoutls/server.py | server.py |
===========================================
Secure FTP (SFTP) Extension for zc.buildout
===========================================
The zc.buildoutsftp package provides a zc.buildout extension that
provides support for SFTP. To use it, simply provide the option::
extensions = zc.buildoutsftp
in your buildout section. Then you can use sftp URLs for find-links or
index URLs.
An SFTP URL is similar to an FTP URL and is of the form::
sftp://user:password@hostname:port/path
where the user name, password, and port are optional. Here are some
examples:
The following URL accesses the path /distribution on download.zope.org::
sftp://download.zope.org/distribution
The following URL accesses the path /distribution on download.zope.org
using the user id jim::
sftp://[email protected]/distribution
The following URL accesses the path /distribution on download.zope.org
using the user id jim and password 123::
sftp://jim:[email protected]/distribution
The following URL accesses the path /distribution on download.zope.org
using an ssh server running on port 1022::
sftp://download.zope.org:1022/distribution
The buildout extension actually installs a urllib2 handler for the
"sftp" protocol. This handler is actually setuptools specific because
it generates HTML directory listings, needed by setuptools, and makes
no effort to make directory listings useful for anything else.
It is possible that, in the future, setuptools will provide its own
extension mechanism for handling alternate protocols, in which case,
we might bypass the urllib2 extension mechanism.
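For illustration only (a sketch, assuming the package is importable; the URL
is just an example), the handler can also be exercised directly once the
extension is installed::
  import urllib2
  import zc.buildoutsftp
  zc.buildoutsftp.install()
  listing = urllib2.urlopen('sftp://download.zope.org/distribution').read()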
SSH Compatibility
=================
The extension works with Open SSH on unix-based systems and PuTTY on
Windows. Unless a password is given in the URL, private keys are
obtained from the ssh agent (Pageant on Windows).
Status and Change History
=========================
This package has been used for years on Linux and Mac OS X. The
author doesn't use it on Windows, but, presumably, other people do.
0.11.0 (2013/08/01)
-------------------
Compatibility fix for setuptools 0.7 and later.
0.10.0 (2013/05/22)
-------------------
Compatibility fix for paramiko 1.10.x
0.9.0 (2012/09/13)
------------------
Removed beta label.
0.9.0b1 (2012/06/29)
--------------------
Added support for:
- Global-configuration settings.
- Global known-hosts files.
- Host-specific ssh keys.
Added mock-based tests for unix-like systems. Unfortunately, these
tests will fail for Windows and windows support, while present, is
untested.
0.6.1 (2010/03/17)
------------------
Fixed documentation typo.
0.6.0 (2009/06/22)
------------------
Added an unload entry point. This is necessary so we don't hang when
the buildout process exits due to non-daemonic paramiko connection
threads.
0.5.0 (2008/12/08)
------------------
Added connection pooling. This speeds up multiple downloads from the
same server substantially.
Adjust the paramiko logging level relative to the buildout logging
level to make it less chatty.
0.4.0 (2007/12/6)
-----------------
Now reads user definitions from ~/.ssh/config, if possible.
0.3.2 (2007/03/22)
------------------
Fixed a serious bug that caused files to be downloaded incompletely.
0.3.1 (2007/03/22)
------------------
Fixed a serious bug that caused files read to be truncated to 0 bytes.
0.3 (2007/03/22)
----------------
Added debug logging to help diagnose problems.
Close transports after use to prevent leakage.
0.2.2
-----
Fixed a bug in handling multiple host keys for a given host.
0.2.1
-----
Fixed a bug in handling multiple user keys.
0.2
---
Added missing entry point.
Adjusted content-type information to work with setuptools.
0.1
---
Initial release
| zc.buildoutsftp | /zc.buildoutsftp-0.11.0.tar.gz/zc.buildoutsftp-0.11.0/README.txt | README.txt |
import atexit
import cStringIO
import getpass
import logging
import mimetypes
import os
import paramiko
import re
import stat
import sys
import urllib
try:
from setuptools.compat import urllib2
except ImportError:
# Older setuptools/distribute
import urllib2
original_build_opener = urllib2.build_opener
logger = logging.getLogger(__name__)
def install(buildout=None):
urllib2.build_opener = lambda *a: original_build_opener(SFTPHandler, *a)
urllib2.install_opener(urllib2.build_opener())
logging.getLogger('paramiko').setLevel(logger.getEffectiveLevel()+10)
def unload(buildout=None):
urllib2.build_opener = original_build_opener
urllib2.install_opener(urllib2.build_opener())
cleanup()
parse_url_host = re.compile(
'(?:' '([^@:]+)(?::([^@]*))?@' ')?'
'([^:]*)(?::(\d+))?$').match
def deunixpath(path):
return os.path.join(*path.split('/'))
_configs = None
def _get_config(host):
global _configs
if _configs is None:
_configs = []
for path in (
deunixpath('/etc/ssh/ssh_config'), deunixpath('/etc/ssh_config'),
os.path.expanduser(deunixpath('~/.ssh/config')),
):
if os.path.exists(path):
config = paramiko.SSHConfig()
with open(path) as f:
config.parse(f)
_configs.append(config)
r = {}
for config in _configs:
r.update(config.lookup(host))
return r
if sys.platform == 'win32':
import _winreg
parse_reg_key_name = re.compile('(rsa|dss)2?@22:(\S+)$').match
def _get_host_keys(config):
regkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
r'Software\SimonTatham\PuTTY\SshHostKeys',
)
keys = paramiko.HostKeys()
i = 0
while 1:
try:
name, value, type_ = _winreg.EnumValue(regkey, i)
i += 1
value = [long(v, 16) for v in value.split(',')]
ktype, host = parse_reg_key_name(name).groups()
if ktype == 'rsa':
key = paramiko.RSAKey(vals=value)
if ktype == 'dss':
key = paramiko.DSSKey(vals=value)
keys.add(host, 'ssh-'+ktype, key)
except WindowsError:
break
return keys
else:
def _get_host_keys(config):
user_host_keys = os.path.expanduser('~/.ssh/known_hosts')
if os.path.exists(user_host_keys):
host_keys = paramiko.HostKeys(user_host_keys)
else:
host_keys = {}
global_host_keys = config.get('globalknownhostsfile')
if not global_host_keys:
for path in ('/etc/ssh/ssh_known_hosts',
'/etc/ssh_known_hosts'):
if os.path.exists(path):
global_host_keys = path
break
if global_host_keys:
host_keys.update(paramiko.HostKeys(global_host_keys))
return host_keys
class Result:
def __init__(self, fp, url, info, trans):
self._fp = fp
self.url = url
self.headers = info
self.__trans = trans
def geturl(self):
return self.url
def info(self):
return self.headers
def __getattr__(self, name):
return getattr(self._fp, name)
def _open_key(key_path):
key = None
if os.path.exists(key_path):
try:
key = paramiko.RSAKey.from_private_key_file(key_path)
except paramiko.SSHException:
try:
key = paramiko.DSSKey.from_private_key_file(key_path)
except paramiko.SSHException:
logger.error('Invalid key file: %s', key_path)
return key
_connection_pool = {}
def cleanup():
for k in list(_connection_pool):
trans = _connection_pool.pop(k)
if trans is not False:
trans.close()
global _configs
_configs = None
atexit.register(cleanup)
class SFTPHandler(urllib2.BaseHandler):
def sftp_open(self, req):
host = req.get_host()
if not host:
raise IOError, ('sftp error', 'no host given')
parsed = parse_url_host(host)
if not parsed:
raise IOError, ('sftp error', 'invalid host', host)
user, pw, host, port = parsed.groups()
host = urllib.unquote(host or '')
config = _get_config(host)
host_keys = _get_host_keys(config).get(host)
if host_keys is None:
raise paramiko.AuthenticationException("No stored host key", host)
if user:
user = urllib.unquote(user)
else:
user = config.get('user', getpass.getuser())
if port:
port = int(port)
else:
port = 22
if pw:
pw = urllib.unquote(pw)
if pw is not None:
pool_key = (host, port, user, pw)
trans = _connection_pool.get(pool_key)
if trans is None:
trans = paramiko.Transport((host, port))
try:
trans.connect(username=user, password=pw)
except paramiko.AuthenticationException:
trans.close()
raise
else:
keys = list(paramiko.Agent().get_keys())
IdentityFile = config.get('identityfile')
if IdentityFile:
if isinstance(IdentityFile, basestring):
IdentityFile = [IdentityFile]
for key_path in IdentityFile:
key = _open_key(os.path.expanduser(key_path))
if key is None:
                        logger.error('IdentityFile, %s, does not exist',
                                     key_path)
else:
keys.insert(0, key)
else:
for path in (
'~/.ssh/identity', '~/.ssh/id_rsa', '~/.ssh/id_dsa'):
path = deunixpath(path)
key = _open_key(os.path.expanduser(path))
if key is not None:
keys.insert(0, key)
for key in keys:
pool_key = (host, port, str(key))
trans = _connection_pool.get(pool_key)
if trans is not None:
if trans is False:
# Failed previously, so don't try again
continue
break
trans = paramiko.Transport((host, port))
try:
trans.connect(username=user, pkey=key)
break
except paramiko.AuthenticationException:
trans.close()
_connection_pool[pool_key] = False
else:
raise paramiko.AuthenticationException(
"Authentication failed.")
if pool_key not in _connection_pool:
# Check host key
remote_server_key = trans.get_remote_server_key()
host_key = host_keys.get(remote_server_key.get_name())
if host_key != remote_server_key:
raise paramiko.AuthenticationException(
"Remote server authentication failed.", host)
_connection_pool[pool_key] = trans
sftp = paramiko.SFTPClient.from_transport(trans)
path = req.get_selector()
url = req.get_full_url()
logger.debug('sftp get: %s', url)
mode = sftp.stat(path).st_mode
if stat.S_ISDIR(mode):
if logger.getEffectiveLevel() < logging.DEBUG:
logger.log(1, "Dir %s:\n %s\n",
path, '\n '.join(sftp.listdir(path)))
return Result(
cStringIO.StringIO('\n'.join([
('<a href="%s/%s">%s</a><br />'
% (url, x, x)
)
for x in sorted(sftp.listdir(path))
])),
url, {'content-type': 'text/html'}, trans)
else:
mtype = mimetypes.guess_type(url)[0]
if mtype is None:
mtype = 'application/octet-stream'
return Result(sftp.open(path), url, {'content-type': mtype},
trans) | zc.buildoutsftp | /zc.buildoutsftp-0.11.0.tar.gz/zc.buildoutsftp-0.11.0/src/zc/buildoutsftp/__init__.py | __init__.py |
zc.cacheheaders
===============
This is a utility egg that provides functions for setting the expires
header, disabling caching, and enabling caching for a specified time.
We'll begin by creating a response and viewing its headers.
>>> import pprint
>>> import zope.publisher.http
>>> response = zope.publisher.http.HTTPResponse()
>>> pprint.pprint(response.getHeaders())
[('X-Powered-By', 'Zope (www.zope.org), Python (www.python.org)')]
Now we'll exercise each of the functions.
>>> import zc.cacheheaders
>>> zc.cacheheaders.set_expires_header(
... response, zc.cacheheaders.date_in_the_past)
>>> pprint.pprint(response.getHeaders())
[('X-Powered-By', 'Zope (www.zope.org), Python (www.python.org)'),
('Expires', 'Tue, 01 Jan 2008 00:00:00 GMT')]
>>> zc.cacheheaders.expires_header_set(
... dict(response.getHeaders()), zc.cacheheaders.date_in_the_past)
True
>>> response = zope.publisher.http.HTTPResponse()
>>> zc.cacheheaders.disable_caching(response)
>>> pprint.pprint(response.getHeaders())
[('X-Powered-By', 'Zope (www.zope.org), Python (www.python.org)'),
('Expires', 'Tue, 01 Jan 2008 00:00:00 GMT'),
('Pragma', 'no-cache'),
('Cache-Control', 'no-cache')]
>>> zc.cacheheaders.caching_disabled(dict(response.getHeaders()))
True
>>> response = zope.publisher.http.HTTPResponse()
>>> zc.cacheheaders.set_cache_headers(
... response, 3, now=lambda:zc.cacheheaders.date_in_the_past)
>>> pprint.pprint(response.getHeaders())
[('X-Powered-By', 'Zope (www.zope.org), Python (www.python.org)'),
('Expires', 'Tue, 01 Jan 2008 00:03:00 GMT'),
('Cache-Control', 'max-age=180')]
>>> zc.cacheheaders.cache_headers_set(
... dict(response.getHeaders()),
... 3,
... now=lambda:zc.cacheheaders.date_in_the_past)
True
We can set just the Cache-Control header (without creating an Expires header).
>>> response = zope.publisher.http.HTTPResponse()
>>> zc.cacheheaders.set_cache_control_header(
... response, minutes=3)
>>> pprint.pprint(response.getHeaders())
[('X-Powered-By', 'Zope (www.zope.org), Python (www.python.org)'),
('Cache-Control', 'max-age=180')]
Sometimes you want more control over the cache interval than just "minutes" can
afford. In those situations you can use a new invention called "seconds":
>>> response = zope.publisher.http.HTTPResponse()
>>> zc.cacheheaders.set_cache_headers(
... response, seconds=10, now=lambda:zc.cacheheaders.date_in_the_past)
>>> pprint.pprint(response.getHeaders())
[('X-Powered-By', 'Zope (www.zope.org), Python (www.python.org)'),
('Expires', 'Tue, 01 Jan 2008 00:00:10 GMT'),
('Cache-Control', 'max-age=10')]
>>> zc.cacheheaders.cache_headers_set(
... dict(response.getHeaders()),
... seconds=10,
... now=lambda:zc.cacheheaders.date_in_the_past)
True
| zc.cacheheaders | /zc.cacheheaders-1.4.0.tar.gz/zc.cacheheaders-1.4.0/src/zc/cacheheaders/README.txt | README.txt |
=========
CHANGES
=========
3.0 (2019-03-21)
================
- Drop support for Python 3.4 as it reached its end of life.
- Add support for Python 3.7 and 3.8a2.
2.0.1 (2017-06-15)
==================
- Add Python 3 compatibility for the ``zopyx.txng3.ext`` stemmer.
See `#4 <https://github.com/zopefoundation/zc.catalog/issues/4>`_.
2.0.0 (2017-05-09)
==================
- Add support for Python 3.4, 3.5, 3.6 and PyPy. Note that the
``zopyx.txng3.ext`` stemmer is not available on Python 3.
- Remove test dependency on zope.app.zcmlfiles and zope.app.testing,
among others.
1.6 (2013-07-04)
================
- Using Python's ``doctest`` module instead of deprecated
``zope.testing.doctest``.
- Move ``zope.intid`` to dependencies.
1.5.1 (2012-01-20)
==================
- Fix the extent catalog's `searchResults` method to work when using a
local uid source.
- Replaced a testing dependency on ``zope.app.authentication`` with
``zope.password``.
- Removed ``zope.app.server`` test dependency.
1.5 (2010-10-19)
================
- The package's ``configure.zcml`` does not include the browser subpackage's
``configure.zcml`` anymore.
This, together with ``browser`` and ``test_browser`` ``extras_require``,
  decouples the browser view registrations from the main code. As a result,
  projects that do not need the ZMI views to be registered no longer pull in
  the zope.app.* dependencies.
To enable the ZMI views for your project, you will have to do two things:
* list ``zc.catalog [browser]`` as a ``install_requires``.
* have your project's ``configure.zcml`` include the ``zc.catalog.browser``
subpackage.
- Only include the browser tests whenever the dependencies for the browser
tests are available.
- Python 2.7 test fix.
1.4.5 (2010-10-05)
==================
- Remove implicit test dependency on zope.app.dublincore, that was not needed
in the first place.
1.4.4 (2010-07-06)
==================
* Fixed a test failure with more recent ``mechanize`` (>= 2.0).
1.4.3 (2010-03-09)
==================
* Try to import the stemmer from the zopyx.txng3.ext package first, which
as of 3.3.2 contains stability and memory leak fixes.
1.4.2 (2010-01-20)
==================
* Fix missing testing dependencies when using ZTK by adding zope.login.
1.4.1 (2009-02-27)
==================
* Add FieldIndex-like sorting support for the ValueIndex.
* Add sorting indexes support for the NormalizationWrapper.
1.4.0 (2009-02-07)
==================
* Fixed a typo in ValueIndex addform and addMenuItem.
* Use ``zope.container`` instead of ``zope.app.container``.
* Use ``zope.keyreference`` instead of ``zope.app.keyreference``.
* Use ``zope.intid`` instead of ``zope.app.intid``.
* Use ``zope.catalog`` instead of ``zope.app.catalog``.
1.3.0 (2008-09-10)
==================
* Added hook point to allow extent catalog to be used with local UID sources.
1.2.0 (2007-11-03)
==================
* Updated package meta-data.
* zc.catalog can now use 64-bit BTrees ("L") as provided by ZODB 3.8.
* Albertas Agejavas ([email protected]) included the new CallableWrapper, for
when the typical Zope 3 index-by-adapter story
(zope.app.catalog.attribute) is unnecessary trouble, and you just want
to use a callable. See callablewrapper.txt. This can also be used for
other indexes based on the zope.index interfaces.
* Extents now have a __len__. The current implementation defers to the
standard BTree len implementation, and shares its performance
characteristics: it needs to wake up all of the buckets, but if all of the
buckets are awake it is a fairly quick operation.
* A simple ISelfPopulatingExtent was added to the extentcatalog module for
which populating is a no-op. This is directly useful for catalogs that
are used as implementation details of a component, in which objects are
indexed explicitly by your own calls rather than by the usual subscribers.
It is also potentially slightly useful as a base for other self-populating
extents.
1.1.1 (2007-03-17)
==================
Fixed a bug in which 'all_of' would return all results when one of the
values had no matches. Reported, with test and fix provided, by Nando
Quintana.
1.1 (2007-01-06)
================
Features removed
----------------
The queueing of events in the extent catalog has been entirely removed.
Subtransactions caused significant problems for the code introduced in 1.0.
Other solutions also have significant problems, and the win of this kind
of queueing is questionable. Here is a rundown of the approaches rejected
for getting the queueing to work:
* _p_invalidate (used in 1.0). Not really designed for use within a
transaction, and reverts to last savepoint, rather than the beginning of
the transaction. Could monkeypatch savepoints to iterate over
precommit transaction hooks but that just smells too bad.
* _p_resolveConflict. Requires application software to exist in ZEO and
even ZRS installations, which is counter to our software deployment goals.
Also causes useless repeated writes of empty queue to database, but that's
not the showstopper.
* vague hand-wavy ideas for separate storages or transaction managers for the
queue. Never panned out in discussion.
1.0 (2007-01-05)
================
Bugs fixed
----------
* adjusted extentcatalog tests to trigger (and discuss and test) the queueing
behavior.
* fixed problem with excessive conflict errors due to queueing code.
* updated stemming to work with newest version of TextIndexNG's extensions.
* omitted stemming test when TextIndexNG's extensions are unavailable, so
tests pass without it. Since TextIndexNG's extensions are optional, this
seems reasonable.
* removed use of zapi in extentcatalog.
0.2 (2006-11-22)
================
Features added
--------------
* First release on Cheeseshop.
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/CHANGES.rst | CHANGES.rst |
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog to check whether this version is up to date.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("--version",
action="store_true", default=False,
help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
help=("Allow for re-use of existing directory of "
"setuptools versions"))
options, args = parser.parse_args()
if options.version:
print("bootstrap.py version %s" % __version__)
sys.exit(0)
######################################################################
# load/install setuptools
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
if os.path.exists('ez_setup.py'):
exec(open('ez_setup.py').read(), ez)
else:
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
# Strip all site-packages directories from sys.path that
# are not sys.prefix; this is because on Windows
# sys.prefix is a site-package directory.
if sitepackage_path != sys.prefix:
sys.path[:] = [x for x in sys.path
if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
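        # Scan the available distributions, keeping the highest final
        # version (ties are collected and sorted so the preferred
        # distribution wins).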
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs) | zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/bootstrap.py | bootstrap.py |
=======
Stemmer
=======
The stemmer uses Andreas Jung's stemmer code, which is a Python wrapper of
M. F. Porter's Snowball project (http://snowball.tartarus.org/index.php).
It is designed to be used as part of a pipeline in a zope.index.text
lexicon, after a splitter. This combines the relevance ranking of the
zope.index.text code with the splitting functionality of TextIndexNG 3.x.
It requires that the TextIndexNG extensions--specifically txngstemmer--have
been compiled and installed in your Python installation. Inclusion of the
textindexng package is not necessary.
As of this writing (Jan 3, 2007), installing the necessary extensions can be
done with the following steps:
- `svn co https://svn.sourceforge.net/svnroot/textindexng/extension_modules/trunk ext_mod`
- `cd ext_mod`
- (using the python you use for Zope) `python setup.py install`
Another approach is to simply install TextIndexNG (see
http://opensource.zopyx.com/software/textindexng3)
The stemmer must be instantiated with the language for which stemming is
desired. It defaults to 'english'. For what it is worth, other languages
supported as of this writing, using the strings that the stemmer expects,
include the following: 'danish', 'dutch', 'english', 'finnish', 'french',
'german', 'italian', 'norwegian', 'portuguese', 'russian', 'spanish', and
'swedish'.
For instance, let's build an index with an english stemmer.
>>> from zope.index.text import textindex, lexicon
>>> import zc.catalog.stemmer
>>> lex = lexicon.Lexicon(
... lexicon.Splitter(), lexicon.CaseNormalizer(),
... lexicon.StopWordRemover(), zc.catalog.stemmer.Stemmer('english'))
>>> ix = textindex.TextIndex(lex)
>>> data = [
... (0, 'consigned consistency consoles the constables'),
... (1, 'knaves kneeled and knocked knees, knowing no knights')]
>>> for doc_id, text in data:
... ix.index_doc(doc_id, text)
...
>>> list(ix.apply('consoling a constable'))
[0]
>>> list(ix.apply('knightly kneel'))
[1]
Note that query terms with globbing characters are not stemmed.
>>> list(ix.apply('constables*'))
[]
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/stemmer.rst | stemmer.rst |
import sys
import BTrees
import persistent
from zope import interface
from zope.catalog import catalog
from zope.intid.interfaces import IIntIds
import zope.component
from zc.catalog import interfaces
@interface.implementer(interfaces.IExtent)
class Extent(persistent.Persistent):
__parent__ = None
family = BTrees.family32
def __init__(self, family=None):
if family is not None:
self.family = family
self.set = self.family.IF.TreeSet()
# Deprecated.
@property
def BTreeAPI(self):
return sys.modules[self.set.__class__.__module__]
def __len__(self):
return len(self.set)
def add(self, uid, obj):
self.set.insert(uid)
def clear(self):
self.set.clear()
def __or__(self, other):
"extent | set"
return self.union(other)
__ror__ = __or__
def union(self, other, self_weight=1, other_weight=1):
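        # weightedUnion returns a (weight, result) pair; only the
        # resulting set is of interest here.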
return self.family.IF.weightedUnion(
self.set, other, self_weight, other_weight)[1]
def __and__(self, other):
"extent & set"
return self.intersection(other)
__rand__ = __and__
def intersection(self, other, self_weight=1, other_weight=1):
return self.family.IF.weightedIntersection(
self.set, other, self_weight, other_weight)[1]
def __sub__(self, other):
"extent - set"
return self.difference(other)
def difference(self, other):
return self.family.IF.difference(self.set, other)
def __rsub__(self, other):
"set - extent"
return self.rdifference(other)
def rdifference(self, other):
return self.family.IF.difference(other, self.set)
def __iter__(self):
return iter(self.set)
def __nonzero__(self):
return bool(self.set)
__bool__ = __nonzero__
def __contains__(self, uid):
return uid in self.set
def remove(self, uid):
self.set.remove(uid)
def discard(self, uid):
try:
self.set.remove(uid)
except KeyError:
pass
@interface.implementer(interfaces.IFilterExtent)
class FilterExtent(Extent):
def __init__(self, filter, family=None):
super(FilterExtent, self).__init__(family=family)
self.filter = filter
def add(self, uid, obj):
if not self.addable(uid, obj):
raise ValueError
else:
self.set.insert(uid)
def addable(self, uid, obj):
return self.filter(self, uid, obj)
@interface.implementer(interfaces.ISelfPopulatingExtent)
class NonPopulatingExtent(Extent):
"""Base class for populating extent.
This simple, no-op implementation comes in handy surprisingly often
for catalogs that handle a very contained domain within an application.
"""
populated = False
def populate(self):
self.populated = True
@interface.implementer(interfaces.IExtentCatalog)
class Catalog(catalog.Catalog):
UIDSource = None
def __init__(self, extent, UIDSource=None):
"""Construct a catalog based on an extent.
Note that the `family` keyword parameter of the base class
constructor is not supported here; the family of the extent is
used.
"""
self.UIDSource = UIDSource
if extent.__parent__ is not None: # pragma: no cover
raise ValueError("extent's __parent__ must be None")
super(Catalog, self).__init__(family=extent.family)
self.extent = extent
extent.__parent__ = self # inform extent of catalog
def _getUIDSource(self):
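        # Fall back to the registered IIntIds utility when no explicit
        # uid source was passed to the constructor.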
res = self.UIDSource
if res is None:
res = zope.component.getUtility(IIntIds)
return res
def clear(self):
self.extent.clear()
super(Catalog, self).clear()
def index_doc(self, docid, texts):
"""Register the data in indexes of this catalog.
"""
try:
self.extent.add(docid, texts)
except ValueError:
self.unindex_doc(docid)
else:
super(Catalog, self).index_doc(docid, texts)
def unindex_doc(self, docid):
if docid in self.extent:
super(Catalog, self).unindex_doc(docid)
self.extent.remove(docid)
def searchResults(self, **kwargs):
res = super(Catalog, self).searchResults(**kwargs)
if res is not None:
res.uidutil = self._getUIDSource()
return res
def updateIndex(self, index):
if index.__parent__ is not self:
# not an index in us. Let the superclass handle it.
super(Catalog, self).updateIndex(index)
else:
uidutil = self._getUIDSource()
if interfaces.ISelfPopulatingExtent.providedBy(self.extent):
if not self.extent.populated:
self.extent.populate()
assert self.extent.populated
for uid in self.extent:
obj = uidutil.getObject(uid)
index.index_doc(uid, obj)
else:
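                # No self-populating extent: walk every registered object
                # and let the extent decide membership; rejected documents
                # are unindexed everywhere.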
for uid in uidutil:
obj = uidutil.getObject(uid)
try:
self.extent.add(uid, obj)
except ValueError:
self.unindex_doc(uid)
else:
index.index_doc(uid, obj)
def updateIndexes(self):
uidutil = self._getUIDSource()
if interfaces.ISelfPopulatingExtent.providedBy(self.extent):
if not self.extent.populated:
self.extent.populate()
assert self.extent.populated
for uid in self.extent:
self.index_doc(uid, uidutil.getObject(uid))
else:
for uid in uidutil:
self.index_doc(uid, uidutil.getObject(uid)) | zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/extentcatalog.py | extentcatalog.py |
from zope import interface, schema
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
import zope.index.interfaces
import zope.catalog.interfaces
from zc.catalog.i18n import _
class IExtent(interface.Interface):
"""An extent represents the full set of objects indexed by a catalog.
It is useful for a variety of index operations and catalog queries.
"""
__parent__ = interface.Attribute(
"""The catalog for which this is an extent; must be None before it is
set to a catalog""")
def add(uid, obj):
"""add uid to extent; raise ValueError if it is not addable.
If uid is already a member of the extent, calling add is a no-op,
except that if the uid and obj are no longer addable to the extent then
ValueError is still raised (but without removing the uid)"""
def remove(uid):
"""Remove uid from set. Raise KeyError if not a member"""
def discard(uid):
"""Remove uid from set. Ignore if not a member"""
def clear():
"""Remove all uids from set."""
def __len__():
"""the number of items in the extent."""
def __iter__():
"""return iterator of uids in set"""
def __or__(other):
"Given BTrees.IFBTree data structure, return weighted union"
def __ror__(other):
"Given BTrees.IFBTree data structure, return weighted union"
def union(other, self_weight, other_weight):
"Given BTrees.IFBTree data structure, return weighted union"
def __and__(other):
"Given BTrees.IFBTree data structure, return weighted intersection"
def __rand__(other):
"Given BTrees.IFBTree data structure, return weighted intersection"
def intersection(other, self_weight, other_weight):
"Given BTrees.IFBTree data structure, return weighted intersection"
def __sub__(other):
"extent - set: given BTrees.IFBTree data structure, return difference"
def difference(other):
"extent - set: given BTrees.IFBTree data structure, return difference"
def __rsub__(other):
"set - extent: given BTrees.IFBTree data structure, return difference"
def rdifference(other):
"set - extent: given BTrees.IFBTree data structure, return difference"
def __nonzero__():
"return boolean indicating if any uids are in set"
def __contains__(uid):
"return boolean indicating if uid is in set"
class IFilterExtent(IExtent):
filter = interface.Attribute(
"""A (persistent) callable that is passed the extent, a docid, and the
associated obj and should return a boolean True (is member of extent)
or False (is not member of extent).""")
def addable(uid, obj):
"""returns True or False, indicating whether the obj may be added to
the extent"""
class ISelfPopulatingExtent(IExtent):
"""An extent that knows how to create it's own initial population."""
populated = schema.Bool(
title=_("Populated"),
description=_(
"Flag indicating whether self-population has been performed."),
readonly=True,
)
def populate():
"""Populate the extent based on the current content of the database.
After a successful call, `populated` will be True. Unsuccessful calls
must raise exceptions.
If `populated` is true when called, this is a no-op. After the
initial population, updates should be maintained via other mechanisms.
"""
class IExtentCatalog(interface.Interface):
"""A catalog of only items within an extent.
Interface intended to be used with zope.catalog.interfaces.ICatalog"""
extent = interface.Attribute(
"""An IExtent of the objects cataloged""")
class IIndexValues(interface.Interface):
"""An index that allows introspection of the indexed values"""
def minValue(min=None):
"""return the minimum value in the index.
if min is provided, return the minimum value equal to or greater than
min.
Raises ValueError if no min.
"""
def maxValue(max=None):
"""return the maximum value in the index.
If max is provided, return the maximum value equal to or less than max.
Raises ValueError if no max.
"""
def values(min=None, max=None, excludemin=False, excludemax=False,
doc_id=None):
"""return an iterables of the values in the index.
if doc_id is provided, returns the values only for that document id.
If a min is specified, then output is constrained to values greater
than or equal to the given min, and, if excludemin is specified and
true, is further constrained to values strictly greater than min. A
min value of None is ignored. If min is None or not specified, and
excludemin is true, the smallest value is excluded.
If a max is specified, then output is constrained to values less than
or equal to the given max, and, if excludemax is specified and
true, is further constrained to values strictly less than max. A max
value of None is ignored. If max is None or not specified, and
excludemax is true, the largest value is excluded.
"""
def containsValue(value):
"""whether the value is used in any of the documents in the index"""
def ids():
"""return a BTrees.IFBTree data structure of the document ids in the
index--the ones that have values to be indexed. All document ids
should produce at least one value given a call of
IIndexValues.values(doc_id=id).
"""
class ISetIndex(interface.Interface):
def apply(query):
"""Return None or an IFBTree Set of the doc ids that match the query.
query is a dict with one of the following keys: any_of, any,
all_of, between, and none.
Any one of the keys may be used; using more than one is not allowed.
The any_of key should have a value of an iterable of values: the
result will be the docids whose values contain any of the given values.
The all_of key should have a value of an iterable of values: the
result will be the docids whose values contain all of the given values.
The between key should have a value of an iterable of one to four
members. The first is the minimum value, or None; the second is the
maximum value, or None; the third is boolean, defaulting to False,
declaring if the min should be excluded; and the last is also boolean,
defaulting to False, declaring if the max should be excluded.
The any key should take None or an extent. If the key is None, the
results will be all docids with any value. If the key is an extent,
the results will be the intersection of the extent and all docids with
any value.
The none key should take an extent. It returns the docids in
the extent that do not have any values in the index.
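
        For example (the values shown are illustrative)::

            index.apply({'any_of': ('a', 'b')})   # 'a' or 'b' in values
            index.apply({'all_of': ('a', 'b')})   # both 'a' and 'b'
            index.apply({'between': ('a', 'm')})  # values within ['a', 'm']
            index.apply({'any': None})            # any value at all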
"""
class IValueIndex(interface.Interface):
def apply(query):
"""Return None or an IFBTree Set of the doc ids that match the query.
query is a dict with one of the following keys: any_of, any,
between, and none.
Any one of the keys may be used; using more than one is not allowed.
The any_of key should have a value of an iterable of values: the
result will be the docids whose values contain any of the given values.
The between key should have a value of an iterable of one to four
members. The first is the minimum value, or None; the second is the
maximum value, or None; the third is boolean, defaulting to False,
declaring if the min should be excluded; and the last is also boolean,
defaulting to False, declaring if the max should be excluded.
The any key should take None or an extent. If the key is None, the
results will be all docids with any value. If the key is an extent,
the results will be the intersection of the extent and all docids with
any value.
The none key should take an extent. It returns the docids in
the extent that do not have any values in the index.
"""
class ICatalogValueIndex(zope.catalog.interfaces.IAttributeIndex,
zope.catalog.interfaces.ICatalogIndex):
"""Interface-based catalog value index
"""
class ICatalogSetIndex(zope.catalog.interfaces.IAttributeIndex,
zope.catalog.interfaces.ICatalogIndex):
"""Interface-based catalog set index
"""
class INormalizationWrapper(zope.index.interfaces.IInjection,
zope.index.interfaces.IIndexSearch,
zope.index.interfaces.IStatistics,
IIndexValues):
"""A wrapper for an index that uses a normalizer to normalize injection
and querying."""
index = interface.Attribute(
"""an index implementing IInjection, IIndexSearch, IStatistics, and
IIndexValues""")
normalizer = interface.Attribute("a normalizer, implementing INormalizer")
collection_index = interface.Attribute(
"""boolean: whether indexed values should be treated as collections
(each composite value normalized) or not (original value is
normalized)""")
class INormalizer(interface.Interface):
def value(value):
"""normalize or check constraints for an input value; raise an error
or return the value to be indexed."""
def any(value, index):
"""normalize a query value for a "any_of" search; return a sequence of
values."""
def all(value, index):
"""Normalize a query value for an "all_of" search; return the value
for query"""
def minimum(value, index, exclude=False):
"""normalize a query value for minimum of a range; return the value for
query"""
def maximum(value, index, exclude=False):
"""normalize a query value for maximum of a range; return the value for
query"""
resolution_vocabulary = SimpleVocabulary(
[SimpleTerm(i, t, t) for i, t in enumerate(
(_('day'), _('hour'), _('minute'), _('second'), _('microsecond')))])
# 0 1 2 3 4
class IDateTimeNormalizer(INormalizer):
resolution = schema.Choice(
vocabulary=resolution_vocabulary,
title=_('Resolution'),
default=2,
required=True)
class ICallableWrapper(zope.index.interfaces.IInjection,
zope.index.interfaces.IIndexSearch,
zope.index.interfaces.IStatistics,
IIndexValues):
"""A wrapper for an index that uses a callable to convert injection."""
index = interface.Attribute(
"""An index implementing IInjection, IIndexSearch, IStatistics, and
IIndexValues""")
converter = interface.Attribute("A callable converter") | zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/interfaces.py | interfaces.py |
"""Indexes appropriate for zope.catalog
"""
import zope.interface
import zope.catalog.attribute
import zope.container.contained
import zope.index.interfaces
import zc.catalog.index
import zc.catalog.interfaces
@zope.interface.implementer(zc.catalog.interfaces.ICatalogValueIndex)
class ValueIndex(zope.catalog.attribute.AttributeIndex,
zc.catalog.index.ValueIndex,
zope.container.contained.Contained):
pass
@zope.interface.implementer(zc.catalog.interfaces.ICatalogSetIndex)
class SetIndex(zope.catalog.attribute.AttributeIndex,
zc.catalog.index.SetIndex,
zope.container.contained.Contained):
pass
class NormalizationWrapper(
zope.catalog.attribute.AttributeIndex,
zc.catalog.index.NormalizationWrapper,
zope.container.contained.Contained):
pass
@zope.interface.implementer(zc.catalog.interfaces.ICallableWrapper)
class CallableWrapper(zc.catalog.index.CallableWrapper,
zope.container.contained.Contained):
pass
@zope.interface.implementer(
zope.interface.implementedBy(NormalizationWrapper),
zc.catalog.interfaces.IValueIndex,
zope.index.interfaces.IIndexSort)
def DateTimeValueIndex(
field_name=None, interface=None, field_callable=False,
resolution=2): # hour; good for per-day searches
ix = NormalizationWrapper(
field_name, interface, field_callable, zc.catalog.index.ValueIndex(),
zc.catalog.index.DateTimeNormalizer(resolution), False)
zope.interface.alsoProvides(ix, zc.catalog.interfaces.IValueIndex)
return ix
@zope.interface.implementer(
zope.interface.implementedBy(NormalizationWrapper),
zc.catalog.interfaces.ISetIndex)
def DateTimeSetIndex(
field_name=None, interface=None, field_callable=False,
resolution=2): # hour; good for per-day searches
ix = NormalizationWrapper(
field_name, interface, field_callable, zc.catalog.index.SetIndex(),
zc.catalog.index.DateTimeNormalizer(resolution), True)
zope.interface.alsoProvides(ix, zc.catalog.interfaces.ISetIndex)
return ix | zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/catalogindex.py | catalogindex.py |
=======================
Support for legacy data
=======================
Prior to the introduction of btree "families" and the
``BTrees.Interfaces.IBTreeFamily`` interface, the indexes defined by
the ``zc.catalog.index`` module used the instance attributes
``btreemodule`` and ``IOBTree``, initialized in the constructor, and
the ``BTreeAPI`` property. These are replaced by the ``family``
attribute in the current implementation.
This is a white-box test that verifies that the supported values in
existing data structures (loaded from pickles) can be used effectively
with the current implementation.
There are two supported sets of values; one for 32-bit btrees::
>>> import BTrees.IOBTree
>>> legacy32 = {
... "btreemodule": "BTrees.IFBTree",
... "IOBTree": BTrees.IOBTree.IOBTree,
... }
and another for 64-bit btrees::
>>> import BTrees.LOBTree
>>> legacy64 = {
... "btreemodule": "BTrees.LFBTree",
... "IOBTree": BTrees.LOBTree.LOBTree,
... }
In each case, actual legacy structures will also include index
structures that match the right integer size::
>>> import BTrees.OOBTree
>>> import BTrees.Length
>>> legacy32["values_to_documents"] = BTrees.OOBTree.OOBTree()
>>> legacy32["documents_to_values"] = BTrees.IOBTree.IOBTree()
>>> legacy32["documentCount"] = BTrees.Length.Length(0)
>>> legacy32["wordCount"] = BTrees.Length.Length(0)
>>> legacy64["values_to_documents"] = BTrees.OOBTree.OOBTree()
>>> legacy64["documents_to_values"] = BTrees.LOBTree.LOBTree()
>>> legacy64["documentCount"] = BTrees.Length.Length(0)
>>> legacy64["wordCount"] = BTrees.Length.Length(0)
What we want to do is verify that the ``family`` attribute is properly
computed for instances loaded from legacy data, and ensure that the
structure is updated cleanly without providing cause for a read-only
transaction to become a write transaction. We'll need to create
instances that conform to the old data structures, pickle them, and
show that unpickling them produces instances that use the correct
families.
Let's create new instances, and force the internal data to match the
old structures::
>>> import pickle
>>> import zc.catalog.index
>>> vi32 = zc.catalog.index.ValueIndex()
>>> vi32.__dict__ = legacy32.copy()
>>> legacy32_pickle = pickle.dumps(vi32)
>>> vi64 = zc.catalog.index.ValueIndex()
>>> vi64.__dict__ = legacy64.copy()
>>> legacy64_pickle = pickle.dumps(vi64)
Now, let's unpickle these structures and verify the structures. We'll
start with the 32-bit variety::
>>> vi32 = pickle.loads(legacy32_pickle)
>>> vi32.__dict__["btreemodule"]
'BTrees.IFBTree'
>>> vi32.__dict__["IOBTree"]
<type 'BTrees.IOBTree.IOBTree'>
>>> "family" in vi32.__dict__
False
>>> vi32._p_changed
False
The ``family`` property returns the ``BTrees.family32`` singleton::
>>> vi32.family is BTrees.family32
True
Once accessed, the legacy values have been cleaned out from the
instance dictionary::
>>> "btreemodule" in vi32.__dict__
False
>>> "IOBTree" in vi32.__dict__
False
>>> "BTreeAPI" in vi32.__dict__
False
Accessing these attributes as attributes provides the proper values
anyway::
>>> vi32.btreemodule
'BTrees.IFBTree'
>>> vi32.IOBTree
<type 'BTrees.IOBTree.IOBTree'>
>>> vi32.BTreeAPI
<module 'BTrees.IFBTree' from ...>
Even though the instance dictionary has been cleaned up, the change
flag hasn't been set. It is done this way to avoid turning a
read-only transaction into a write transaction::
>>> vi32._p_changed
False
The 64-bit variation provides equivalent behavior::
>>> vi64 = pickle.loads(legacy64_pickle)
>>> vi64.__dict__["btreemodule"]
'BTrees.LFBTree'
>>> vi64.__dict__["IOBTree"]
<type 'BTrees.LOBTree.LOBTree'>
>>> "family" in vi64.__dict__
False
>>> vi64._p_changed
False
>>> vi64.family is BTrees.family64
True
>>> "btreemodule" in vi64.__dict__
False
>>> "IOBTree" in vi64.__dict__
False
>>> "BTreeAPI" in vi64.__dict__
False
>>> vi64.btreemodule
'BTrees.LFBTree'
>>> vi64.IOBTree
<type 'BTrees.LOBTree.LOBTree'>
>>> vi64.BTreeAPI
<module 'BTrees.LFBTree' from ...>
>>> vi64._p_changed
False
Now, if we have a legacy structure and explicitly set the ``family``
attribute, the old data structures will be cleared and replaced with
the new structure. If the object is associated with a data manager,
the changed flag will be set as well::
>>> class DataManager(object):
... def register(self, ob):
... pass
>>> vi64 = pickle.loads(legacy64_pickle)
>>> vi64._p_jar = DataManager()
>>> vi64.family = BTrees.family64
>>> vi64._p_changed
True
>>> "btreemodule" in vi64.__dict__
False
>>> "IOBTree" in vi64.__dict__
False
>>> "BTreeAPI" in vi64.__dict__
False
>>> "family" in vi64.__dict__
True
>>> vi64.family is BTrees.family64
True
>>> vi64.btreemodule
'BTrees.LFBTree'
>>> vi64.IOBTree
<type 'BTrees.LOBTree.LOBTree'>
>>> vi64.BTreeAPI
<module 'BTrees.LFBTree' from ...>
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/legacy.rst | legacy.rst |
================
Callable Wrapper
================
If we want to index some value that is easily derivable from a
document, we have to define an interface with this value as an
attribute, and create an adapter that calculates this value and
implements this interface. All this is too much hassle if we want to
store a single easily derivable value. CallableWrapper solves this
problem by converting the document to the indexed value with a
callable converter.
Here's a contrived example. Suppose we have cars that know their
mileage expressed in miles per gallon, but we want to index their
economy in litres per 100 km.
>>> class Car(object):
... def __init__(self, mpg):
... self.mpg = mpg
>>> def mpg2lp100(car):
... return 100.0/(1.609344/3.7854118 * car.mpg)
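
(The constants are 1.609344 km per mile and 3.7854118 litres per US
gallon: mpg * 1.609344 / 3.7854118 is the car's mileage in km per litre,
and 100 divided by that gives litres per 100 km.)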
Let's create an index that would index cars' l/100 km rating.
>>> from zc.catalog import index, catalogindex
>>> idx = catalogindex.CallableWrapper(index.ValueIndex(), mpg2lp100)
Let's add a few cars to the index!
>>> hummer = Car(10.0)
>>> beamer = Car(22.0)
>>> civic = Car(45.0)
>>> idx.index_doc(1, hummer)
>>> idx.index_doc(2, beamer)
>>> idx.index_doc(3, civic)
The indexed values should be the converted l/100 km ratings:
>>> list(idx.values()) # doctest: +ELLIPSIS
[5.22699076283393..., 10.691572014887601, 23.521458432752723]
We can query for cars that consume fuel in some range:
>>> list(idx.apply({'between': (5.0, 7.0)}))
[3]
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/callablewrapper.rst | callablewrapper.rst |
================
Extent Catalog
================
An extent catalog is very similar to a normal catalog except that it
only indexes items addable to its extent. The extent is both a filter
and a set that may be merged with other result sets. The filtering is
an additional feature we will discuss below; we'll begin with a simple
"do nothing" extent that only supports the second use case.
We create the state that the test needs here.
>>> import zope.keyreference.persistent
>>> import zope.component
>>> import zope.intid
>>> import zope.component
>>> import zope.interface.interfaces
>>> import zope.component.persistentregistry
>>> from ZODB.MappingStorage import DB
>>> import transaction
>>> zope.component.provideAdapter(
... zope.keyreference.persistent.KeyReferenceToPersistent,
... adapts=(zope.interface.Interface,))
>>> zope.component.provideAdapter(
... zope.keyreference.persistent.connectionOfPersistent,
... adapts=(zope.interface.Interface,))
>>> site_manager = None
>>> def getSiteManager(context=None):
... if context is None:
... if site_manager is None:
... return zope.component.getGlobalSiteManager()
... else:
... return site_manager
... else:
... try:
... return zope.interface.interfaces.IComponentLookup(context)
... except TypeError as error:
... raise zope.component.ComponentLookupError(*error.args)
...
>>> def setSiteManager(sm):
... global site_manager
... site_manager = sm
... if sm is None:
... zope.component.getSiteManager.reset()
... else:
... zope.component.getSiteManager.sethook(getSiteManager)
...
>>> def makeRoot():
... db = DB()
... conn = db.open()
... root = conn.root()
... site_manager = root['components'] = (
... zope.component.persistentregistry.PersistentComponents())
... site_manager.__bases__ = (zope.component.getGlobalSiteManager(),)
... site_manager.registerUtility(
... zope.intid.IntIds(family=btrees_family),
... provided=zope.intid.interfaces.IIntIds)
... setSiteManager(site_manager)
... transaction.commit()
... return root
...
>>> @zope.component.adapter(zope.interface.Interface)
... @zope.interface.implementer(zope.interface.interfaces.IComponentLookup)
... def getComponentLookup(obj):
... return obj._p_jar.root()['components']
...
>>> zope.component.provideAdapter(getComponentLookup)
To show the extent catalog at work, we need an intid utility, an
index, some items to index. We'll do this within a real ZODB and a
real intid utility.
>>> import zc.catalog
>>> import zc.catalog.interfaces
>>> from zc.catalog import interfaces, extentcatalog
>>> from zope import interface, component
>>> from zope.interface import verify
>>> import persistent
>>> import BTrees.IFBTree
>>> root = makeRoot()
>>> intid = zope.component.getUtility(
... zope.intid.interfaces.IIntIds, context=root)
>>> TreeSet = btrees_family.IF.TreeSet
>>> from zope.container.interfaces import IContained
>>> @interface.implementer(IContained)
... class DummyIndex(persistent.Persistent):
... __parent__ = __name__ = None
... def __init__(self):
... self.uids = TreeSet()
... def unindex_doc(self, uid):
... if uid in self.uids:
... self.uids.remove(uid)
... def index_doc(self, uid, obj):
... self.uids.insert(uid)
... def clear(self):
... self.uids.clear()
... def apply(self, query):
... return [uid for uid in self.uids if uid <= query]
...
>>> class DummyContent(persistent.Persistent):
... def __init__(self, name, parent):
... self.id = name
... self.__parent__ = parent
...
>>> extent = extentcatalog.Extent(family=btrees_family)
>>> verify.verifyObject(interfaces.IExtent, extent)
True
>>> root['catalog'] = catalog = extentcatalog.Catalog(extent)
>>> verify.verifyObject(interfaces.IExtentCatalog, catalog)
True
>>> index = DummyIndex()
>>> catalog['index'] = index
>>> transaction.commit()
Now we have a catalog set up with an index and an extent. We can add
some data to the extent:
>>> matches = []
>>> for i in range(100):
... c = DummyContent(i, root)
... root[i] = c
... doc_id = intid.register(c)
... catalog.index_doc(doc_id, c)
... matches.append(doc_id)
>>> matches.sort()
>>> sorted(extent) == sorted(index.uids) == matches
True
We can get the size of the extent.
>>> len(extent)
100
Unindexing an object that is in the catalog should simply remove it from the
catalog and index as usual.
>>> matches[0] in catalog.extent
True
>>> matches[0] in catalog['index'].uids
True
>>> catalog.unindex_doc(matches[0])
>>> matches[0] in catalog.extent
False
>>> matches[0] in catalog['index'].uids
False
>>> doc_id = matches.pop(0)
>>> sorted(extent) == sorted(index.uids) == matches
True
Clearing the catalog clears both the extent and the contained indexes.
>>> catalog.clear()
>>> list(catalog.extent) == list(catalog['index'].uids) == []
True
Updating all indexes and an individual index both also update the extent.
>>> catalog.updateIndexes()
>>> matches.insert(0, doc_id)
>>> sorted(extent) == sorted(index.uids) == matches
True
>>> index2 = DummyIndex()
>>> catalog['index2'] = index2
>>> index2.__parent__ == catalog
True
>>> index.uids.remove(matches[0]) # to confirm that only index 2 is touched
>>> catalog.updateIndex(index2)
>>> sorted(extent) == sorted(index2.uids) == matches
True
>>> matches[0] in index.uids
False
>>> matches[0] in index2.uids
True
>>> res = index.uids.insert(matches[0])
So why have an extent in the first place? It gives indices a reliable
record of the full set of indexed documents; in particular, that is what
allows the indices in zc.catalog to perform NOT operations.
The extent itself provides a number of merging features to allow its
values to be merged with other BTrees.IFBTree data structures. These
include intersection, union, difference, and reverse difference.
Given an extent named 'extent' and another IFBTree data structure
named 'data', intersections can be spelled "extent & data" or "data &
extent"; unions can be spelled "extent | data" or "data | extent";
differences can be spelled "extent - data"; and reverse differences
can be spelled "data - extent". Unions and intersections are
weighted.
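
(The weighted variants take optional weight arguments, as in
extent.union(data, self_weight, other_weight); these are passed straight
through to the underlying BTrees weightedUnion and weightedIntersection
functions.)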
>>> extent = extentcatalog.Extent(family=btrees_family)
>>> for i in range(1, 100, 2):
... extent.add(i, None)
...
>>> alt_set = TreeSet()
>>> _ = alt_set.update(range(0, 166, 33)) # return value is unimportant here
>>> sorted(alt_set)
[0, 33, 66, 99, 132, 165]
>>> sorted(extent & alt_set)
[33, 99]
>>> sorted(alt_set & extent)
[33, 99]
>>> sorted(extent.intersection(alt_set))
[33, 99]
>>> original = set(extent)
>>> union_matches = original.copy()
>>> union_matches.update(alt_set)
>>> union_matches = sorted(union_matches)
>>> sorted(alt_set | extent) == union_matches
True
>>> sorted(extent | alt_set) == union_matches
True
>>> sorted(extent.union(alt_set)) == union_matches
True
>>> sorted(alt_set - extent)
[0, 66, 132, 165]
>>> sorted(extent.rdifference(alt_set))
[0, 66, 132, 165]
>>> original.remove(33)
>>> original.remove(99)
>>> set(extent - alt_set) == original
True
>>> set(extent.difference(alt_set)) == original
True
We can pass our own instantiated UID utility to extentcatalog.Catalog.
>>> extent = extentcatalog.Extent(family=btrees_family)
>>> uidutil = zope.intid.IntIds()
>>> cat = extentcatalog.Catalog(extent, uidutil)
>>> cat["index"] = DummyIndex()
>>> cat.UIDSource is uidutil
True
>>> cat._getUIDSource() is uidutil
True
The ResultSet instance returned by the catalog's `searchResults` method
uses our UID utility.
>>> obj = DummyContent(43, root)
>>> uid = uidutil.register(obj)
>>> cat.index_doc(uid, obj)
>>> res = cat.searchResults(index=uid)
>>> res.uidutil is uidutil
True
>>> list(res) == [obj]
True
`searchResults` may also return None.
>>> cat.searchResults() is None
True
Calling `updateIndex` and `updateIndexes` when the catalog has its uid source
set works as well.
>>> cat.clear()
>>> uid in cat.extent
False
All objects in the uid utility are indexed.
>>> cat.updateIndexes()
>>> uid in cat.extent
True
>>> len(cat.extent)
1
>>> obj2 = DummyContent(44, root)
>>> uid2 = uidutil.register(obj2)
>>> cat.updateIndexes()
>>> len(cat.extent)
2
>>> uid2 in cat.extent
True
>>> uidutil.unregister(obj2)
>>> cat.clear()
>>> uid in cat.extent
False
>>> cat.updateIndex(cat["index"])
>>> uid in cat.extent
True
With a self-populating extent, calling `updateIndex` or `updateIndexes` means
only the objects whose ids are in the extent are updated/reindexed; if present,
the catalog will use its uid source to look up the objects by id.
>>> extent = extentcatalog.NonPopulatingExtent(family=btrees_family)
>>> cat = extentcatalog.Catalog(extent, uidutil)
>>> cat["index"] = DummyIndex()
>>> extent.add(uid, obj)
>>> uid in cat["index"].uids
False
>>> cat.updateIndexes()
>>> uid in cat["index"].uids
True
>>> cat.clear()
>>> uid in cat["index"].uids
False
>>> uid in cat.extent
False
>>> cat.extent.add(uid, obj)
>>> cat.updateIndex(cat["index"])
>>> uid in cat["index"].uids
True
Unregister the objects of the previous tests from the intid utility:
>>> intid = zope.component.getUtility(
... zope.intid.interfaces.IIntIds, context=root)
>>> for doc_id in matches:
... intid.unregister(intid.queryObject(doc_id))
Catalog with a filter extent
============================
As discussed at the beginning of this document, extents not only help
with index operations, but can also act as a filter, so that a given
catalog can answer questions about a subset of the objects registered
with the intid utility.
The filter extent only stores objects that match a given filter.
>>> def filter(extent, uid, ob):
... assert interfaces.IFilterExtent.providedBy(extent)
... # This is an extent of objects with odd-numbered uids without a
... # True ignore attribute
... return uid % 2 and not getattr(ob, 'ignore', False)
...
>>> extent = extentcatalog.FilterExtent(filter, family=btrees_family)
>>> verify.verifyObject(interfaces.IFilterExtent, extent)
True
>>> root['catalog1'] = catalog = extentcatalog.Catalog(extent)
>>> verify.verifyObject(interfaces.IExtentCatalog, catalog)
True
>>> index = DummyIndex()
>>> catalog['index'] = index
>>> transaction.commit()
Now we have a catalog set up with an index and an extent. If we create
some content and ask the catalog to index it, only the ones that match
the filter will be in the extent and in the index.
>>> matches = []
>>> fails = []
>>> i = 0
>>> while True:
... c = DummyContent(i, root)
... root[i] = c
... doc_id = intid.register(c)
... catalog.index_doc(doc_id, c)
... if filter(extent, doc_id, c):
... matches.append(doc_id)
... else:
... fails.append(doc_id)
... i += 1
... if i > 99 and len(matches) > 4:
... break
...
>>> matches.sort()
>>> sorted(extent) == sorted(index.uids) == matches
True
If a content object is indexed that used to match the filter but no longer
does, it should be removed from the extent and indexes.
>>> matches[0] in catalog.extent
True
>>> obj = intid.getObject(matches[0])
>>> obj.ignore = True
>>> filter(extent, matches[0], obj)
False
>>> catalog.index_doc(matches[0], obj)
>>> doc_id = matches.pop(0)
>>> doc_id in catalog.extent
False
>>> sorted(extent) == sorted(index.uids) == matches
True
Unindexing an object that is not in the catalog should be a no-op.
>>> fails[0] in catalog.extent
False
>>> catalog.unindex_doc(fails[0])
>>> fails[0] in catalog.extent
False
>>> sorted(extent) == sorted(index.uids) == matches
True
Updating all indexes and an individual index both also update the extent.
>>> index2 = DummyIndex()
>>> catalog['index2'] = index2
>>> index2.__parent__ == catalog
True
>>> index.uids.remove(matches[0]) # to confirm that only index 2 is touched
>>> catalog.updateIndex(index2)
>>> sorted(extent) == sorted(index2.uids)
True
>>> matches[0] in index.uids
False
>>> matches[0] in index2.uids
True
>>> res = index.uids.insert(matches[0])
If you update a single index and an object is no longer a member of the extent,
it is removed from all indexes.
>>> matches[0] in catalog.extent
True
>>> matches[0] in index.uids
True
>>> matches[0] in index2.uids
True
>>> obj = intid.getObject(matches[0])
>>> obj.ignore = True
>>> catalog.updateIndex(index2)
>>> matches[0] in catalog.extent
False
>>> matches[0] in index.uids
False
>>> matches[0] in index2.uids
False
>>> doc_id = matches.pop(0)
>>> (matches == sorted(catalog.extent) == sorted(index.uids)
... == sorted(index2.uids))
True
Self-populating extents
=======================
An extent may know how to populate itself; this is especially useful if
the catalog should be initialized with fewer items than the default
policy coded in the basic Zope 3 catalog would select (all objects
available in the IIntIds utility that are also within the nearest Zope 3
site).
Such an extent must implement the `ISelfPopulatingExtent` interface,
which requires two attributes. Let's use the `FilterExtent` class as a
base for implementing such an extent, with a method that selects content item
0 (created and registered above)::
>>> class PopulatingExtent(
... extentcatalog.FilterExtent,
... extentcatalog.NonPopulatingExtent):
...
... def populate(self):
... if self.populated:
... return
... self.add(intid.getId(root[0]), root[0])
... super(PopulatingExtent, self).populate()
Creating a catalog based on this extent ignores the objects already in
the database::
>>> def accept_any(extent, uid, ob):
... return True
>>> extent = PopulatingExtent(accept_any, family=btrees_family)
>>> catalog = extentcatalog.Catalog(extent)
>>> index = DummyIndex()
>>> catalog['index'] = index
>>> root['catalog2'] = catalog
>>> transaction.commit()
At this point, our extent remains unpopulated::
>>> extent.populated
False
Iterating over the extent does not cause it to be automatically
populated::
>>> list(extent)
[]
Causing our new index to be filled will cause the `populate()` method
to be called, setting the `populated` flag as a side-effect::
>>> catalog.updateIndex(index)
>>> extent.populated
True
>>> list(extent) == [intid.getId(root[0])]
True
The index has been updated with the documents identified by the
extent::
>>> list(index.uids) == [intid.getId(root[0])]
True
Updating the same index repeatedly will continue to use the extent as
the source of documents to include::
>>> catalog.updateIndex(index)
>>> list(extent) == [intid.getId(root[0])]
True
>>> list(index.uids) == [intid.getId(root[0])]
True
The `updateIndexes()` method has a similar behavior. If we add an
additional index to the catalog, we see that it indexes only those
objects from the extent::
>>> index2 = DummyIndex()
>>> catalog['index2'] = index2
>>> catalog.updateIndexes()
>>> list(extent) == [intid.getId(root[0])]
True
>>> list(index.uids) == [intid.getId(root[0])]
True
>>> list(index2.uids) == [intid.getId(root[0])]
True
When we have a fresh catalog and extent (not yet populated), we see that
`updateIndexes()` will cause the extent to be populated::
>>> extent = PopulatingExtent(accept_any, family=btrees_family)
>>> root['catalog3'] = catalog = extentcatalog.Catalog(extent)
>>> index1 = DummyIndex()
>>> index2 = DummyIndex()
>>> catalog['index1'] = index1
>>> catalog['index2'] = index2
>>> transaction.commit()
>>> extent.populated
False
>>> catalog.updateIndexes()
>>> extent.populated
True
>>> list(extent) == [intid.getId(root[0])]
True
>>> list(index1.uids) == [intid.getId(root[0])]
True
>>> list(index2.uids) == [intid.getId(root[0])]
True
We'll make sure everything can be safely committed.
>>> transaction.commit()
>>> setSiteManager(None)
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/extentcatalog.rst | extentcatalog.rst |
=======
Globber
=======
The globber takes a query and makes any term that isn't already a glob into
something that ends in a star. It was originally envisioned as a *very* low-
rent stemming hack. The author now questions its value, and hopes that the new
stemming pipeline option can be used instead. Nonetheless, here is an example
of it at work.
>>> from zope.index.text import textindex
>>> index = textindex.TextIndex()
>>> lex = index.lexicon
>>> from zc.catalog import globber
>>> globber.glob('foo bar and baz or (b?ng not boo)', lex)
'(((foo* and bar*) and baz*) or (b?ng and not boo*))'
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/globber.rst | globber.rst |
=============
Value Index
=============
The valueindex is an index similar to, but more flexible than, a standard
Zope field index. The index allows searches for documents whose value is
any of a set of values; falls between given values; is any (non-None)
value; or is missing (empty).
Additionally, the index supports an interface that allows examination of the
indexed values.
It is as policy-free as possible, and is intended to be the engine for indexes
with more policy, as well as being useful itself.
On creation, the index has no wordCount, no documentCount, and is, as
expected, fairly empty.
>>> from zc.catalog.index import ValueIndex
>>> index = ValueIndex()
>>> index.documentCount()
0
>>> index.wordCount()
0
>>> index.maxValue() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError:...
>>> index.minValue() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError:...
>>> list(index.values())
[]
>>> len(index.apply({'any_of': (5,)}))
0
The index supports indexing any value. All values within a given index must
sort consistently across Python versions.
>>> data = {1: 'a',
... 2: 'b',
... 3: 'a',
... 4: 'c',
... 5: 'd',
... 6: 'c',
... 7: 'c',
... 8: 'b',
... 9: 'c',
... }
>>> for k, v in data.items():
... index.index_doc(k, v)
...
After indexing, the statistics and values match the newly entered content.
>>> list(index.values())
['a', 'b', 'c', 'd']
>>> index.documentCount()
9
>>> index.wordCount()
4
>>> index.maxValue()
'd'
>>> index.minValue()
'a'
>>> list(index.ids())
[1, 2, 3, 4, 5, 6, 7, 8, 9]
The index supports four types of query. The first is 'any_of'. It
takes an iterable of values and returns an iterable of ids of documents
that have any of the values. The results are not weighted.
>>> list(index.apply({'any_of': ('b', 'c')}))
[2, 4, 6, 7, 8, 9]
>>> list(index.apply({'any_of': ('b',)}))
[2, 8]
>>> list(index.apply({'any_of': ('d',)}))
[5]
>>> bool(index.apply({'any_of': (42,)}))
False
Another query is 'any'. If the key is None, all indexed document ids with any
values are returned. If the key is an extent, the intersection of the extent
and all document ids with any values is returned.
>>> list(index.apply({'any': None}))
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> from zc.catalog.extentcatalog import FilterExtent
>>> extent = FilterExtent(lambda extent, uid, obj: True)
>>> for i in range(15):
... extent.add(i, i)
...
>>> list(index.apply({'any': extent}))
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> limited_extent = FilterExtent(lambda extent, uid, obj: True)
>>> for i in range(5):
... limited_extent.add(i, i)
...
>>> list(index.apply({'any': limited_extent}))
[1, 2, 3, 4]
The 'between' argument takes from one to four values. The first is the
minimum, and defaults to None, indicating no minimum; the second is the
maximum, and defaults to None, indicating no maximum; the next is a boolean for
whether the minimum value should be excluded, and defaults to False; and the
last is a boolean for whether the maximum value should be excluded, and also
defaults to False. The results are not weighted.
>>> list(index.apply({'between': ('b', 'd')}))
[2, 4, 5, 6, 7, 8, 9]
>>> list(index.apply({'between': ('c', None)}))
[4, 5, 6, 7, 9]
>>> list(index.apply({'between': ('c',)}))
[4, 5, 6, 7, 9]
>>> list(index.apply({'between': ('b', 'd', True, True)}))
[4, 6, 7, 9]
Using an invalid (non-comparable on Python 3) argument to between produces
nothing:
>>> list(index.apply({'between': (1, 5)}))
[]
The 'none' argument takes an extent and returns the ids in the extent
that are not indexed; it is intended to be used to return docids that have
no (or empty) values.
>>> list(index.apply({'none': extent}))
[0, 10, 11, 12, 13, 14]
Trying to use more than one of these at a time generates an error.
>>> index.apply({'between': (5,), 'any_of': (3,)})
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError:...
Using none of them simply returns None.
>>> index.apply({}) # returns None
Invalid query names cause ValueErrors.
>>> index.apply({'foo': ()})
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError:...
When you unindex a document, the searches and statistics should be updated.
>>> index.unindex_doc(5)
>>> len(index.apply({'any_of': ('d',)}))
0
>>> index.documentCount()
8
>>> index.wordCount()
3
>>> list(index.values())
['a', 'b', 'c']
>>> list(index.ids())
[1, 2, 3, 4, 6, 7, 8, 9]
Reindexing a document that has a changed value is also reflected in
subsequent searches and statistics checks.
>>> list(index.apply({'any_of': ('b',)}))
[2, 8]
>>> data[8] = 'e'
>>> index.index_doc(8, data[8])
>>> index.documentCount()
8
>>> index.wordCount()
4
>>> list(index.apply({'any_of': ('e',)}))
[8]
>>> list(index.apply({'any_of': ('b',)}))
[2]
>>> data[2] = 'e'
>>> index.index_doc(2, data[2])
>>> index.documentCount()
8
>>> index.wordCount()
3
>>> list(index.apply({'any_of': ('e',)}))
[2, 8]
>>> list(index.apply({'any_of': ('b',)}))
[]
Reindexing a document for which the value is now None causes it to be removed
from the statistics.
>>> data[3] = None
>>> index.index_doc(3, data[3])
>>> index.documentCount()
7
>>> index.wordCount()
3
>>> list(index.ids())
[1, 2, 4, 6, 7, 8, 9]
This affects both ways of determining the ids that are and are not in the index
(that do and do not have values).
>>> list(index.apply({'any': None}))
[1, 2, 4, 6, 7, 8, 9]
>>> list(index.apply({'any': extent}))
[1, 2, 4, 6, 7, 8, 9]
>>> list(index.apply({'none': extent}))
[0, 3, 5, 10, 11, 12, 13, 14]
The values method can be used to examine the indexed values for a given
document id. For a valueindex, the "values" for a given doc_id will always
have a length of 0 or 1.
>>> index.values(doc_id=8)
('e',)
And the containsValue method provides a way of determining membership in the
values.
>>> index.containsValue('a')
True
>>> index.containsValue('q')
False
Sorting Value Indexes
=====================
Value indexes support sorting, just like zope.index.field.FieldIndex.
>>> index.clear()
>>> index.index_doc(1, 9)
>>> index.index_doc(2, 8)
>>> index.index_doc(3, 7)
>>> index.index_doc(4, 6)
>>> index.index_doc(5, 5)
>>> index.index_doc(6, 4)
>>> index.index_doc(7, 3)
>>> index.index_doc(8, 2)
>>> index.index_doc(9, 1)
>>> list(index.sort([4, 2, 9, 7, 3, 1, 5]))
[9, 7, 5, 4, 3, 2, 1]
We can also specify the ``reverse`` argument to reverse results:
>>> list(index.sort([4, 2, 9, 7, 3, 1, 5], reverse=True))
[1, 2, 3, 4, 5, 7, 9]
And as per IIndexSort, we can limit results by specifying the ``limit``
argument:
>>> list(index.sort([4, 2, 9, 7, 3, 1, 5], limit=3))
[9, 7, 5]
If we pass an id that is not indexed by this index, it won't be included
in the result.
>>> list(index.sort([2, 10]))
[2]
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/valueindex.rst | valueindex.rst |
==================
Normalized Index
==================
The index module provides a normalizing wrapper, a DateTime normalizer, and
a set index and a value index normalized with the DateTime normalizer.
The normalizing wrapper implements a full complement of index interfaces--
zope.index.interfaces.IInjection, zope.index.interfaces.IIndexSearch,
zope.index.interfaces.IStatistics, and zc.catalog.interfaces.IIndexValues--
and delegates all of the behavior to the wrapped index, normalizing values
using the normalizer before the index sees them.
The normalizing wrapper currently only supports queries offered by
zc.catalog.interfaces.ISetIndex and zc.catalog.interfaces.IValueIndex.
The normalizer interface requires the following methods, as defined in the
interface::
def value(value):
"""normalize or check constraints for an input value; raise an error
or return the value to be indexed."""
def any(value, index):
"""normalize a query value for a "any_of" search; return a sequence of
values."""
def all(value, index):
"""Normalize a query value for an "all_of" search; return the value
for query"""
def minimum(value, index):
"""normalize a query value for minimum of a range; return the value for
query"""
def maximum(value, index):
"""normalize a query value for maximum of a range; return the value for
query"""
The DateTime normalizer performs the following normalizations and validations.
Whenever a timezone is needed, it tries to get a request from the current
interaction and adapt it to zope.interface.common.idatetime.ITZInfo; failing
that (no request or no adapter) it uses the system local timezone.
- input values must be datetimes with a timezone. They are normalized to the
resolution specified when the normalizer is created: a resolution of 0
normalizes values to days; a resolution of 1 to hours; 2 to minutes; 3 to
seconds; and 4 to microseconds.
- 'any' values may be timezone-aware datetimes, timezone-naive datetimes,
or dates. dates are converted to any value from the start to the end of the
given date in the found timezone, as described above. timezone-naive
datetimes get the found timezone.
- 'all' values may be timezone-aware datetimes or timezone-naive datetimes.
timezone-naive datetimes get the found timezone.
- 'minimum' values may be timezone-aware datetimes, timezone-naive datetimes,
or dates. dates are converted to the start of the given date in the found
timezone, as described above. timezone-naive datetimes get the found
timezone.
- 'maximum' values may be timezone-aware datetimes, timezone-naive datetimes,
or dates. dates are converted to the end of the given date in the found
timezone, as described above. timezone-naive datetimes get the found
timezone.
Let's look at the DateTime normalizer first, and then an integration of it
with the normalizing wrapper and the value and set indexes.
The indexed values are parsed with 'value'.
>>> from zc.catalog.index import DateTimeNormalizer
>>> n = DateTimeNormalizer() # defaults to minutes
>>> import datetime
>>> import pytz
>>> naive_datetime = datetime.datetime(2005, 7, 15, 11, 21, 32, 104)
>>> date = naive_datetime.date()
>>> aware_datetime = naive_datetime.replace(
... tzinfo=pytz.timezone('US/Eastern'))
>>> n.value(naive_datetime)
Traceback (most recent call last):
...
ValueError: This index only indexes timezone-aware datetimes.
>>> n.value(date)
Traceback (most recent call last):
...
ValueError: This index only indexes timezone-aware datetimes.
>>> n.value(aware_datetime) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, tzinfo=<DstTzInfo 'US/Eastern'...>)
If we specify a different resolution, the results are different.
>>> another = DateTimeNormalizer(1) # hours
>>> another.value(aware_datetime) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 0, tzinfo=<DstTzInfo 'US/Eastern'...>)
Note that changing the resolution of an indexed value may create surprising
results, because queries do not change their resolution. Therefore, if you
index something with a datetime with a finer resolution than the normalizer's,
then searching for that datetime will not find the doc_id.
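
For instance (an illustrative sketch, not part of the test suite), with an
hours-resolution index the stored value is truncated, so querying with the
original full-resolution datetime finds nothing::

    from zc.catalog.index import DateTimeValueIndex

    ix = DateTimeValueIndex(resolution=1)  # values normalized to hours
    ix.index_doc(1, aware_datetime)        # stored as 2005-07-15 11:00
    list(ix.apply({'any_of': (aware_datetime,)}))  # [] -- 11:21:32 != 11:00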
Values in an 'any_of' query are parsed with 'any'. 'any' should return a
sequence of values. It requires an index, which we will mock up here.
>>> class DummyIndex(object):
... def values(self, start, stop, exclude_start, exclude_stop):
... assert not exclude_start and exclude_stop
... six_hours = datetime.timedelta(hours=6)
... res = []
... dt = start
... while dt < stop:
... res.append(dt)
... dt += six_hours
... return res
...
>>> index = DummyIndex()
>>> tuple(n.any(naive_datetime, index)) # doctest: +ELLIPSIS
(datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Local...>),)
>>> tuple(n.any(aware_datetime, index)) # doctest: +ELLIPSIS
(datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Eastern...>),)
>>> tuple(n.any(date, index)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
(datetime.datetime(2005, 7, 15, 0, 0, tzinfo=<...Local...>),
datetime.datetime(2005, 7, 15, 6, 0, tzinfo=<...Local...>),
datetime.datetime(2005, 7, 15, 12, 0, tzinfo=<...Local...>),
datetime.datetime(2005, 7, 15, 18, 0, tzinfo=<...Local...>))
Values in an 'all_of' query are parsed with 'all'.
>>> n.all(naive_datetime, index) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Local...>)
>>> n.all(aware_datetime, index) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Eastern...>)
>>> n.all(date, index) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...
Minimum values in a 'between' query as well as those in other methods are
parsed with 'minimum'. They also take an optional exclude boolean, which
indicates whether the minimum is to be excluded. For datetimes, it only
makes a difference if you pass in a date.
>>> n.minimum(naive_datetime, index) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Local...>)
>>> n.minimum(naive_datetime, index, exclude=True) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Local...>)
>>> n.minimum(aware_datetime, index) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Eastern...>)
>>> n.minimum(aware_datetime, index, True) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Eastern...>)
>>> n.minimum(date, index) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 0, 0, tzinfo=<...Local...>)
>>> n.minimum(date, index, True) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 23, 59, 59, 999999, tzinfo=<...Local...>)
Maximum values in a 'between' query as well as those in other methods are
parsed with 'maximum'. They also take an optional exclude boolean, which
indicates whether the maximum is to be excluded. For datetimes, it only
makes a difference if you pass in a date.
>>> n.maximum(naive_datetime, index) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Local...>)
>>> n.maximum(naive_datetime, index, exclude=True) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Local...>)
>>> n.maximum(aware_datetime, index) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Eastern...>)
>>> n.maximum(aware_datetime, index, True) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, 32, 104, tzinfo=<...Eastern...>)
>>> n.maximum(date, index) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 23, 59, 59, 999999, tzinfo=<...Local...>)
>>> n.maximum(date, index, True) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 0, 0, tzinfo=<...Local...>)
Now let's examine these normalizers in the context of a real index.
>>> from zc.catalog.index import DateTimeValueIndex, DateTimeSetIndex
>>> setindex = DateTimeSetIndex() # minutes resolution
>>> data = [] # generate some data
>>> def date_gen(
... start=aware_datetime,
... count=12,
... period=datetime.timedelta(hours=10)):
... dt = start
... ix = 0
... while ix < count:
... yield dt
... dt += period
... ix += 1
...
>>> gen = date_gen()
>>> count = 0
>>> while True:
... try:
... next_ = [next(gen) for i in range(6)]
... except StopIteration:
... break
... data.append((count, next_[0:1]))
... count += 1
... data.append((count, next_[1:3]))
... count += 1
... data.append((count, next_[3:6]))
... count += 1
...
>>> print(data) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[(0,
[datetime.datetime(2005, 7, 15, 11, 21, 32, 104, ...<...Eastern...>)]),
(1,
[datetime.datetime(2005, 7, 15, 21, 21, 32, 104, ...<...Eastern...>),
datetime.datetime(2005, 7, 16, 7, 21, 32, 104, ...<...Eastern...>)]),
(2,
[datetime.datetime(2005, 7, 16, 17, 21, 32, 104, ...<...Eastern...>),
datetime.datetime(2005, 7, 17, 3, 21, 32, 104, ...<...Eastern...>),
datetime.datetime(2005, 7, 17, 13, 21, 32, 104, ...<...Eastern...>)]),
(3,
[datetime.datetime(2005, 7, 17, 23, 21, 32, 104, ...<...Eastern...>)]),
(4,
[datetime.datetime(2005, 7, 18, 9, 21, 32, 104, ...<...Eastern...>),
datetime.datetime(2005, 7, 18, 19, 21, 32, 104, ...<...Eastern...>)]),
(5,
[datetime.datetime(2005, 7, 19, 5, 21, 32, 104, ...<...Eastern...>),
datetime.datetime(2005, 7, 19, 15, 21, 32, 104, ...<...Eastern...>),
datetime.datetime(2005, 7, 20, 1, 21, 32, 104, ...<...Eastern...>)])]
>>> data_dict = dict(data)
>>> for doc_id, value in data:
... setindex.index_doc(doc_id, value)
...
>>> list(setindex.ids())
[0, 1, 2, 3, 4, 5]
>>> set(setindex.values()) == set(
... setindex.normalizer.value(v) for v in date_gen())
True
For the searches, we will actually use a request and interaction, with an
adapter that returns the Eastern timezone. This makes the examples less
dependent on the machine on which they run.
>>> import zope.security.management
>>> import zope.publisher.browser
>>> import zope.interface.common.idatetime
>>> import zope.publisher.interfaces
>>> request = zope.publisher.browser.TestRequest()
>>> zope.security.management.newInteraction(request)
>>> from zope import interface, component
>>> @interface.implementer(zope.interface.common.idatetime.ITZInfo)
... @component.adapter(zope.publisher.interfaces.IRequest)
... def tzinfo(req):
... return pytz.timezone('US/Eastern')
...
>>> component.provideAdapter(tzinfo)
>>> n.all(naive_datetime, index).tzinfo is pytz.timezone('US/Eastern')
True
>>> set(setindex.apply({'any_of': (datetime.date(2005, 7, 17),
... datetime.date(2005, 7, 20),
... datetime.date(2005, 12, 31))})) == set(
... (2, 3, 5))
True
Note that this search is using the normalized values.
>>> set(setindex.apply({'all_of': (
... datetime.datetime(
... 2005, 7, 16, 7, 21, tzinfo=pytz.timezone('US/Eastern')),
... datetime.datetime(
... 2005, 7, 15, 21, 21, tzinfo=pytz.timezone('US/Eastern')),)})
... ) == set((1,))
True
>>> list(setindex.apply({'any': None}))
[0, 1, 2, 3, 4, 5]
>>> set(setindex.apply({'between': (
... datetime.datetime(2005, 4, 1, 12), datetime.datetime(2006, 5, 1))})
... ) == set((0, 1, 2, 3, 4, 5))
True
>>> set(setindex.apply({'between': (
... datetime.datetime(2005, 4, 1, 12), datetime.datetime(2006, 5, 1),
... True, True)})
... ) == set((0, 1, 2, 3, 4, 5))
True
'between' searches should deal with dates well.
>>> set(setindex.apply({'between': (
... datetime.date(2005, 7, 16), datetime.date(2005, 7, 17))})
... ) == set((1, 2, 3))
True
>>> len(setindex.apply({'between': (
... datetime.date(2005, 7, 16), datetime.date(2005, 7, 17))})
... ) == len(setindex.apply({'between': (
... datetime.date(2005, 7, 15), datetime.date(2005, 7, 18),
... True, True)})
... )
True
Removing docs works as usual.
>>> setindex.unindex_doc(1)
>>> list(setindex.ids())
[0, 2, 3, 4, 5]
The ``values``, ``minValue`` and ``maxValue`` methods can take timezone-naive
datetimes and dates.
>>> setindex.minValue() # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 15, 11, 21, ...<...Eastern...>)
>>> setindex.minValue(datetime.date(2005, 7, 17)) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 17, 3, 21, ...<...Eastern...>)
>>> setindex.maxValue() # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 20, 1, 21, ...<...Eastern...>)
>>> setindex.maxValue(datetime.date(2005, 7, 17)) # doctest: +ELLIPSIS
datetime.datetime(2005, 7, 17, 23, 21, ...<...Eastern...>)
>>> list(setindex.values(
... datetime.date(2005, 7, 17), datetime.date(2005, 7, 17)))
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[datetime.datetime(2005, 7, 17, 3, 21, ...<...Eastern...>),
datetime.datetime(2005, 7, 17, 13, 21, ...<...Eastern...>),
datetime.datetime(2005, 7, 17, 23, 21, ...<...Eastern...>)]
>>> zope.security.management.endInteraction() # TODO put in tests tearDown
Sorting
=======
The normalization wrapper provides the zope.index.interfaces.IIndexSort
interface if its upstream index provides it. For example, the
DateTimeValueIndex will provide IIndexSort, because ValueIndex provides
sorting. It will also delegate the ``sort`` method to the value index.
>>> from zc.catalog.index import DateTimeValueIndex
>>> from zope.index.interfaces import IIndexSort
>>> ix = DateTimeValueIndex()
>>> IIndexSort.providedBy(ix.index)
True
>>> IIndexSort.providedBy(ix)
True
>>> ix.sort.__self__ is ix.index
True
But it won't work for indexes that don't do sorting, for example
DateTimeSetIndex.
>>> ix = DateTimeSetIndex()
>>> IIndexSort.providedBy(ix.index)
False
>>> IIndexSort.providedBy(ix)
False
>>> ix.sort
Traceback (most recent call last):
...
AttributeError: 'SetIndex' object has no attribute 'sort'
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/normalizedindex.rst | normalizedindex.rst |
=========
Set Index
=========
The setindex is an index similar to, but more general than, a traditional
keyword index. The values indexed are expected to be iterables; the index
allows searches for documents that contain any of a set of values; all of a set
of values; or between a set of values.
Additionally, the index supports an interface that allows examination of the
indexed values.
It is as policy-free as possible, and is intended to be the engine for indexes
with more policy, as well as being useful itself.
On creation, the index has no wordCount, no documentCount, and is, as
expected, fairly empty.
>>> from zc.catalog.index import SetIndex
>>> index = SetIndex()
>>> index.documentCount()
0
>>> index.wordCount()
0
>>> index.maxValue() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError:...
>>> index.minValue() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError:...
>>> list(index.values())
[]
>>> len(index.apply({'any_of': (5,)}))
0
The index supports indexing any value. All values within a given index must
sort consistently across Python versions. In practice, in Python 3
this means that the values need to be homogeneous.
>>> data = {1: ['a', '1'],
... 2: ['b', 'a', '3', '4', '7'],
... 3: ['1'],
... 4: ['1', '4', 'c'],
... 5: ['7'],
... 6: ['5', '6', '7'],
... 7: ['c'],
... 8: ['1', '6'],
... 9: ['a', 'c', '2', '3', '4', '6',],
... }
>>> for k, v in data.items():
... index.index_doc(k, v)
...
After indexing, the statistics and values match the newly entered content.
>>> list(index.values())
['1', '2', '3', '4', '5', '6', '7', 'a', 'b', 'c']
>>> index.documentCount()
9
>>> index.wordCount()
10
>>> index.maxValue()
'c'
>>> index.minValue()
'1'
>>> list(index.ids())
[1, 2, 3, 4, 5, 6, 7, 8, 9]
The index supports five types of query. The first is 'any_of'. It
takes an iterable of values, and returns an iterable of document ids that
contain any of the values. The results are weighted.
>>> list(index.apply({'any_of': ('b', '1', '5')}))
[1, 2, 3, 4, 6, 8]
>>> list(index.apply({'any_of': ('42',)}))
[]
>>> index.apply({'any_of': ('a', '3', '7')}) # doctest: +ELLIPSIS
BTrees...FBucket([(1, 1.0), (2, 3.0), (5, 1.0), (6, 1.0), (9, 2.0)])
Using an invalid (non-comparable on Python 3) argument is ignored:
>>> list(index.apply({'any_of': (1,)}))
[]
>>> list(index.apply({'any_of': (1, '1')}))
[1, 3, 4, 8]
Another query is 'any'. If the key is None, all indexed document ids with any
values are returned. If the key is an extent, the intersection of the extent
and all document ids with any values is returned.
>>> list(index.apply({'any': None}))
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> from zc.catalog.extentcatalog import FilterExtent
>>> extent = FilterExtent(lambda extent, uid, obj: True)
>>> for i in range(15):
... extent.add(i, i)
...
>>> list(index.apply({'any': extent}))
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> limited_extent = FilterExtent(lambda extent, uid, obj: True)
>>> for i in range(5):
... limited_extent.add(i, i)
...
>>> list(index.apply({'any': limited_extent}))
[1, 2, 3, 4]
The 'all_of' argument also takes an iterable of values, but returns an
iterable of document ids that contain all of the values. The results are not
weighted.
>>> list(index.apply({'all_of': ('a',)}))
[1, 2, 9]
>>> list(index.apply({'all_of': ('3', '4')}))
[2, 9]
>>> list(index.apply({'all_of': (3, '4')}))
[]
>>> list(index.apply({'all_of': ('3', 4)}))
[]
These tests illustrate two related reported errors that have been fixed.
>>> list(index.apply({'all_of': ('z', '3', '4')}))
[]
>>> list(index.apply({'all_of': ('3', '4', 'z')}))
[]
The 'between' argument takes from one to four values. The first is the
minimum, and defaults to None, indicating no minimum; the second is the
maximum, and defaults to None, indicating no maximum; the next is a boolean for
whether the minimum value should be excluded, and defaults to False; and the
last is a boolean for whether the maximum value should be excluded, and also
defaults to False. The results are weighted.
>>> list(index.apply({'between': ('1', '7')}))
[1, 2, 3, 4, 5, 6, 8, 9]
>>> list(index.apply({'between': ('b', None)}))
[2, 4, 7, 9]
>>> list(index.apply({'between': ('b',)}))
[2, 4, 7, 9]
>>> list(index.apply({'between': ('1', '7', True, True)}))
[2, 4, 6, 8, 9]
>>> index.apply({'between': ('2', '6')}) # doctest: +ELLIPSIS
BTrees...FBucket([(2, 2.0), (4, 1.0), (6, 2.0), (8, 1.0), (9, 4.0)])
Using invalid (non-comparable on Python 3) arguments produces no results:
>>> list(index.apply({'between': (1, 7)}))
[]
The 'none' argument takes an extent and returns the ids in the extent
that are not indexed; it is intended to be used to return docids that have
no (or empty) values.
>>> list(index.apply({'none': extent}))
[0, 10, 11, 12, 13, 14]
Trying to use more than one of these at a time generates an error.
>>> index.apply({'all_of': ('5',), 'any_of': ('3',)})
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError:...
Using none of them simply returns None.
>>> index.apply({}) # returns None
Invalid query names cause ValueErrors.
>>> index.apply({'foo': ()})
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError:...
When you unindex a document, the searches and statistics should be updated.
>>> index.unindex_doc(6)
>>> len(index.apply({'any_of': ('5',)}))
0
>>> index.documentCount()
8
>>> index.wordCount()
9
>>> list(index.values())
['1', '2', '3', '4', '6', '7', 'a', 'b', 'c']
>>> list(index.ids())
[1, 2, 3, 4, 5, 7, 8, 9]
Reindexing a document that has new additional values is also reflected in
subsequent searches and statistics checks.
>>> data[8].extend(['5', 'c'])
>>> index.index_doc(8, data[8])
>>> index.documentCount()
8
>>> index.wordCount()
10
>>> list(index.apply({'any_of': ('5',)}))
[8]
>>> list(index.apply({'any_of': ('c',)}))
[4, 7, 8, 9]
The same is true for reindexing a document with both additions and removals.
>>> 2 in set(index.apply({'any_of': ('7',)}))
True
>>> 2 in set(index.apply({'any_of': ('2',)}))
False
>>> data[2].pop()
'7'
>>> data[2].append('2')
>>> index.index_doc(2, data[2])
>>> 2 in set(index.apply({'any_of': ('7',)}))
False
>>> 2 in set(index.apply({'any_of': ('2',)}))
True
Reindexing a document that no longer has any values causes it to be removed
from the statistics.
>>> del data[2][:]
>>> index.index_doc(2, data[2])
>>> index.documentCount()
7
>>> index.wordCount()
9
>>> list(index.ids())
[1, 3, 4, 5, 7, 8, 9]
This affects both ways of determining the ids that are and are not in the index
(that do and do not have values).
>>> list(index.apply({'any': None}))
[1, 3, 4, 5, 7, 8, 9]
>>> list(index.apply({'none': extent}))
[0, 2, 6, 10, 11, 12, 13, 14]
The values method can be used to examine the indexed values for a given
document id.
>>> set(index.values(doc_id=8)) == set(['1', '5', '6', 'c'])
True
And the containsValue method provides a way of determining membership in the
values.
>>> index.containsValue('5')
True
>>> index.containsValue(5)
False
>>> index.containsValue('20')
False
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/setindex.rst | setindex.rst |
import datetime
import pytz.reference
import BTrees
import persistent
from BTrees import Length
from zope import interface
import zope.component.interfaces
import zope.interface.common.idatetime
import zope.index.interfaces
from zope.index.field.sorting import SortingIndexMixin
import zope.security.management
from zope.publisher.interfaces import IRequest
import zc.catalog.interfaces
from zc.catalog.i18n import _
class FamilyProperty(object):
__name__ = "family"
def __get__(self, instance, type=None):
if instance is None:
return self
d = instance.__dict__
if "family" in d:
return d["family"]
if "btreemodule" in d:
iftype = d["btreemodule"].split(".")[-1][:2]
if iftype == "IF":
d["family"] = BTrees.family32
elif iftype == "LF":
d["family"] = BTrees.family64
else: # pragma: no cover
raise ValueError("can't determine btree family based on"
" btreemodule of %r" % (iftype,))
else:
d["family"] = BTrees.family32
self._clear_old_cruft(instance)
if isinstance(instance, persistent.Persistent):
# Mutating the dict directly is not guaranteed to
# register with the data manager.
instance._p_changed = True
return d["family"]
def __set__(self, instance, value):
instance.__dict__["family"] = value
self._clear_old_cruft(instance)
if isinstance(instance, persistent.Persistent):
# Mutating the dict directly is not guaranteed to
# register with the data manager.
instance._p_changed = True
def _clear_old_cruft(self, instance):
d = instance.__dict__
if "btreemodule" in d:
del d["btreemodule"]
if "IOBTree" in d:
del d["IOBTree"]
if "BTreeAPI" in d:
del d["BTreeAPI"]
@interface.implementer(
zope.index.interfaces.IInjection,
zope.index.interfaces.IIndexSearch,
zope.index.interfaces.IStatistics,
zc.catalog.interfaces.IIndexValues,
)
class AbstractIndex(persistent.Persistent):
family = FamilyProperty()
def __init__(self, family=None):
if family is not None:
self.family = family
self.clear()
    # These three are deprecated (they were never part of the interface),
    # but can all be computed from the family attribute:
@property
def btreemodule(self):
return self.family.IF.__name__
@property
def BTreeAPI(self):
return self.family.IF
@property
def IOBTree(self):
return self.family.IO.BTree
def clear(self):
self.values_to_documents = self.family.OO.BTree()
self.documents_to_values = self.family.IO.BTree()
self.documentCount = Length.Length(0)
self.wordCount = Length.Length(0)
def minValue(self, min=None):
if min is None:
return self.values_to_documents.minKey()
else:
return self.values_to_documents.minKey(min)
def maxValue(self, max=None):
if max is None:
return self.values_to_documents.maxKey()
else:
return self.values_to_documents.maxKey(max)
def values(self, min=None, max=None, excludemin=False, excludemax=False,
doc_id=None):
if doc_id is None:
return iter(self.values_to_documents.keys(
min, max, excludemin, excludemax))
else:
values = self.documents_to_values.get(doc_id)
if values is None:
return ()
else:
return iter(values.keys(min, max, excludemin, excludemax))
def containsValue(self, value):
try:
return bool(value in self.values_to_documents)
except TypeError:
return False
def ids(self):
return self.documents_to_values.keys()
def parseQuery(query):
if not isinstance(query, dict): # pragma: no cover
raise ValueError('may only pass a dict to apply')
if len(query) > 1:
        raise ValueError(
            'may only pass one key, value pair')
elif not query:
return None, None
query_type, query = list(query.items())[0]
query_type = query_type.lower()
return query_type, query
@interface.implementer(zc.catalog.interfaces.IValueIndex)
class ValueIndex(SortingIndexMixin, AbstractIndex):
# attributes used by sorting mixin
_sorting_num_docs_attr = 'documentCount' # Length object
_sorting_fwd_index_attr = 'values_to_documents' # forward BTree index
_sorting_rev_index_attr = 'documents_to_values' # reverse BTree index
def _add_value(self, doc_id, added):
values_to_documents = self.values_to_documents
docs = values_to_documents.get(added)
if docs is None:
values_to_documents[added] = self.family.IF.TreeSet((doc_id,))
self.wordCount.change(1)
else:
docs.insert(doc_id)
def index_doc(self, doc_id, value):
if value is None:
self.unindex_doc(doc_id)
else:
values_to_documents = self.values_to_documents
documents_to_values = self.documents_to_values
old = documents_to_values.get(doc_id)
documents_to_values[doc_id] = value
if old is None:
self.documentCount.change(1)
elif old != value:
docs = values_to_documents.get(old)
docs.remove(doc_id)
if not docs:
del values_to_documents[old]
self.wordCount.change(-1)
self._add_value(doc_id, value)
def unindex_doc(self, doc_id):
documents_to_values = self.documents_to_values
value = documents_to_values.get(doc_id)
if value is not None:
values_to_documents = self.values_to_documents
self.documentCount.change(-1)
del documents_to_values[doc_id]
docs = values_to_documents.get(value)
docs.remove(doc_id)
if not docs:
del values_to_documents[value]
self.wordCount.change(-1)
def apply(self, query): # any_of, any, between, none,
values_to_documents = self.values_to_documents
query_type, query = parseQuery(query)
if query_type is None:
res = None
elif query_type == 'any_of':
try:
res = self.family.IF.multiunion(
[s for s in (values_to_documents.get(v) for v in query)
if s is not None])
except TypeError:
return []
elif query_type == 'any':
if query is None:
res = self.family.IF.Set(self.ids())
else:
assert zc.catalog.interfaces.IExtent.providedBy(query)
res = query & self.family.IF.Set(self.ids())
elif query_type == 'between':
try:
res = self.family.IF.multiunion(
[s for s in (values_to_documents.get(v) for v in
values_to_documents.keys(*query))
if s is not None])
except TypeError:
return []
elif query_type == 'none':
assert zc.catalog.interfaces.IExtent.providedBy(query)
res = query - self.family.IF.Set(self.ids())
else:
raise ValueError(
"unknown query type", query_type)
return res
def values(self, min=None, max=None, excludemin=False, excludemax=False,
doc_id=None):
if doc_id is None:
return iter(self.values_to_documents.keys(
min, max, excludemin, excludemax))
else:
value = self.documents_to_values.get(doc_id)
if (value is None or
min is not None and (
value < min or excludemin and value == min) or
max is not None and (
value > max or excludemax and value == max)):
return ()
else:
return (value,)
@interface.implementer(zc.catalog.interfaces.ISetIndex)
class SetIndex(AbstractIndex):
def _add_values(self, doc_id, added):
values_to_documents = self.values_to_documents
for v in added:
docs = values_to_documents.get(v)
if docs is None:
values_to_documents[v] = self.family.IF.TreeSet((doc_id,))
self.wordCount.change(1)
else:
docs.insert(doc_id)
def index_doc(self, doc_id, value):
new = self.family.OO.TreeSet(v for v in value if v is not None)
if not new:
self.unindex_doc(doc_id)
else:
values_to_documents = self.values_to_documents
documents_to_values = self.documents_to_values
old = documents_to_values.get(doc_id)
if old is None:
documents_to_values[doc_id] = new
self.documentCount.change(1)
self._add_values(doc_id, new)
else:
removed = self.family.OO.difference(old, new)
added = self.family.OO.difference(new, old)
for v in removed:
old.remove(v)
docs = values_to_documents.get(v)
docs.remove(doc_id)
if not docs:
del values_to_documents[v]
self.wordCount.change(-1)
old.update(added)
self._add_values(doc_id, added)
def unindex_doc(self, doc_id):
documents_to_values = self.documents_to_values
values = documents_to_values.get(doc_id)
if values is not None:
values_to_documents = self.values_to_documents
self.documentCount.change(-1)
del documents_to_values[doc_id]
for v in values:
docs = values_to_documents.get(v)
docs.remove(doc_id)
if not docs:
del values_to_documents[v]
self.wordCount.change(-1)
def apply(self, query): # any_of, any, between, none, all_of
values_to_documents = self.values_to_documents
query_type, query = parseQuery(query)
if query_type is None:
res = None
elif query_type == 'any_of':
res = self.family.IF.Bucket()
for v in query:
try:
_, res = self.family.IF.weightedUnion(
res, values_to_documents.get(v))
except TypeError:
continue
elif query_type == 'any':
if query is None:
res = self.family.IF.Set(self.ids())
else:
assert zc.catalog.interfaces.IExtent.providedBy(query)
res = query & self.family.IF.Set(self.ids())
elif query_type == 'all_of':
res = None
values = iter(query)
empty = self.family.IF.TreeSet()
try:
res = values_to_documents.get(next(values), empty)
except StopIteration:
res = empty
except TypeError:
return []
while res:
try:
v = next(values)
except StopIteration:
break
try:
res = self.family.IF.intersection(
res, values_to_documents.get(v, empty))
except TypeError:
return []
elif query_type == 'between':
res = self.family.IF.Bucket()
try:
for v in values_to_documents.keys(*query):
_, res = self.family.IF.weightedUnion(
res, values_to_documents.get(v))
except TypeError:
return []
elif query_type == 'none':
assert zc.catalog.interfaces.IExtent.providedBy(query)
res = query - self.family.IF.Set(self.ids())
else:
raise ValueError(
"unknown query type", query_type)
return res
@interface.implementer(zc.catalog.interfaces.INormalizationWrapper)
class NormalizationWrapper(persistent.Persistent):
index = normalizer = None
collection_index = False
def documentCount(self):
return self.index.documentCount()
def wordCount(self):
return self.index.wordCount()
def clear(self):
"""See zope.index.interfaces.IInjection.clear"""
return self.index.clear()
def __init__(self, index, normalizer, collection_index=False):
self.index = index
if zope.index.interfaces.IIndexSort.providedBy(index):
zope.interface.alsoProvides(self, zope.index.interfaces.IIndexSort)
self.normalizer = normalizer
self.collection_index = collection_index
def index_doc(self, doc_id, value):
if self.collection_index:
self.index.index_doc(
doc_id, (self.normalizer.value(v) for v in value))
else:
self.index.index_doc(doc_id, self.normalizer.value(value))
def unindex_doc(self, doc_id):
self.index.unindex_doc(doc_id)
def apply(self, query):
query_type, query = parseQuery(query)
if query_type == 'any_of':
res = set()
for v in query:
res.update(self.normalizer.any(v, self.index))
elif query_type == 'all_of':
res = [self.normalizer.all(v, self.index) for v in query]
elif query_type == 'between':
query = tuple(query) # collect iterators
len_query = len(query)
max_exclude = len_query >= 4 and bool(query[3])
min_exclude = len_query >= 3 and bool(query[2])
max = len_query >= 2 and query[1] and self.normalizer.maximum(
query[1], self.index, max_exclude) or None
min = len_query >= 1 and query[0] and self.normalizer.minimum(
query[0], self.index, min_exclude) or None
res = (min, max, min_exclude, max_exclude)
else:
res = query
return self.index.apply({query_type: res})
def minValue(self, min=None):
if min is not None:
min = self.normalizer.minimum(min, self.index)
return self.index.minValue(min)
def maxValue(self, max=None):
if max is not None:
max = self.normalizer.maximum(max, self.index)
return self.index.maxValue(max)
def values(self, min=None, max=None, excludemin=False, excludemax=False,
doc_id=None):
if min is not None:
min = self.normalizer.minimum(min, self.index)
if max is not None:
max = self.normalizer.maximum(max, self.index)
return self.index.values(
min, max, excludemin, excludemax, doc_id=doc_id)
def containsValue(self, value):
return self.index.containsValue(value)
def ids(self):
return self.index.ids()
@property
def sort(self):
# delegate upstream or raise AttributeError
return self.index.sort
@interface.implementer(zc.catalog.interfaces.ICallableWrapper)
class CallableWrapper(persistent.Persistent):
converter = None
index = None
def __init__(self, index, converter):
self.index = index
self.converter = converter
def index_doc(self, docid, value):
"""See zope.index.interfaces.IInjection."""
self.index.index_doc(docid, self.converter(value))
def __getattr__(self, name):
return getattr(self.index, name)
def set_resolution(value, resolution):
    # resolution: 0=day, 1=hour, 2=minute, 3=second, 4=microsecond; the
    # offset of two accounts for the year and month entries of the
    # timetuple, which are always kept.  At microsecond resolution the
    # value is returned unchanged.
    resolution += 2
    if resolution < 6:
        # keep the leading timetuple entries, zero out the finer-grained
        # ones, and reattach the timezone
        args = []
        args.extend(value.timetuple()[:resolution+1])
        args.extend([0]*(6-resolution))
        args.append(value.tzinfo)
        value = datetime.datetime(*args)
    return value
def get_request():
i = zope.security.management.queryInteraction()
if i is not None:
for p in i.participations:
if IRequest.providedBy(p):
return p
return None
def get_tz(default=pytz.reference.Local):
request = get_request()
if request is None:
return default
return zope.interface.common.idatetime.ITZInfo(request, default)
def add_tz(value):
if type(value) is datetime.datetime:
if value.tzinfo is None:
value = value.replace(tzinfo=get_tz())
return value
else:
raise ValueError(value)
def day_end(value):
return (
datetime.datetime.combine(
value, datetime.time(tzinfo=get_tz())) +
datetime.timedelta(days=1) - # separate for daylight savings
datetime.timedelta(microseconds=1))
def day_begin(value):
return datetime.datetime.combine(
value, datetime.time(tzinfo=get_tz()))
@interface.implementer(zc.catalog.interfaces.IDateTimeNormalizer)
class DateTimeNormalizer(persistent.Persistent):
def __init__(self, resolution=2):
self.resolution = resolution
# 0, 1, 2, 3, 4
# day, hour, minute, second, microsecond
def value(self, value):
if not isinstance(value, datetime.datetime) or value.tzinfo is None:
raise ValueError(
_('This index only indexes timezone-aware datetimes.'))
return set_resolution(value, self.resolution)
def any(self, value, index):
if type(value) is datetime.date:
start = datetime.datetime.combine(
value, datetime.time(tzinfo=get_tz()))
stop = start + datetime.timedelta(days=1)
return index.values(start, stop, False, True)
return (add_tz(value),)
def all(self, value, index):
return add_tz(value)
def minimum(self, value, index, exclude=False):
if type(value) is datetime.date:
if exclude:
return day_end(value)
else:
return day_begin(value)
return add_tz(value)
def maximum(self, value, index, exclude=False):
if type(value) is datetime.date:
if exclude:
return day_begin(value)
else:
return day_end(value)
return add_tz(value)
@interface.implementer(
zope.interface.implementedBy(NormalizationWrapper),
zope.index.interfaces.IIndexSort,
zc.catalog.interfaces.IValueIndex)
def DateTimeValueIndex(resolution=2): # 2 == minute; note that hour is good
# for timezone-aware per-day searches
ix = NormalizationWrapper(ValueIndex(), DateTimeNormalizer(resolution))
interface.alsoProvides(ix, zc.catalog.interfaces.IValueIndex)
return ix
@interface.implementer(
zope.interface.implementedBy(NormalizationWrapper),
zc.catalog.interfaces.ISetIndex)
def DateTimeSetIndex(resolution=2): # 2 == minute; note that hour is good
# for timezone-aware per-day searches
ix = NormalizationWrapper(SetIndex(), DateTimeNormalizer(resolution), True)
interface.alsoProvides(ix, zc.catalog.interfaces.ISetIndex)
return ix | zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/index.py | index.py |
============================
zc.catalog Browser Support
============================
The zc.catalog.browser package adds simple TTW addition/inspection for SetIndex
and ValueIndex.
First, we need a browser so we can test the web UI.
>>> from zope.testbrowser.wsgi import Browser
>>> browser = Browser()
>>> browser.handleErrors = False
>>> browser.addHeader('Authorization', 'Basic mgr:mgrpw')
>>> browser.addHeader('Accept-Language', 'en-US')
>>> browser.open('http://localhost/')
Now we need to add the catalog that these indexes are going to reside within.
>>> browser.open('http://localhost/++etc++site/default/@@+/')
>>> browser.getControl('Catalog').click()
>>> browser.getControl(name='id').value = 'catalog'
>>> browser.getControl('Add').click()
SetIndex
========
Add the SetIndex to the catalog.
>>> browser.open(browser.getLink('Add').url + '/')
>>> browser.getControl('Set Index').click()
>>> browser.getControl(name='id').value = 'set_index'
>>> browser.getControl('Add').click()
The add form needs values for the interface to adapt candidate objects to,
the field name to use, and whether or not that field is a callable. (We'll
use a simple interface for demonstration purposes; it's not really
significant.)
>>> browser.getControl('Interface', index=0).displayValue = [
... 'zope.size.interfaces.ISized']
>>> browser.getControl('Field Name').value = 'sizeForDisplay'
>>> browser.getControl('Field Callable').click()
>>> browser.getControl(name='add_input_name').value = 'set_index'
>>> browser.getControl('Add').click()
Now we can look at the index and see how it is configured.
>>> browser.getLink('set_index').click()
>>> print(browser.contents)
<...
...Interface...zope.size.interfaces.ISized...
...Field Name...sizeForDisplay...
...Field Callable...True...
We need to go back to the catalog so we can add a different index.
>>> browser.open('/++etc++site/default/catalog/@@contents.html')
ValueIndex
==========
Add the ValueIndex to the catalog.
>>> browser.open(browser.getLink('Add').url + '/')
>>> browser.getControl('Value Index').click()
>>> browser.getControl(name='id').value = 'value_index'
>>> browser.getControl('Add').click()
The add form needs values for the interface to adapt candidate objects to,
the field name to use, and whether or not that field is a callable. (We'll
use a simple interface for demonstration purposes; it's not really
significant.)
>>> browser.getControl('Interface', index=0).displayValue = [
... 'zope.size.interfaces.ISized']
>>> browser.getControl('Field Name').value = 'sizeForDisplay'
>>> browser.getControl('Field Callable').click()
>>> browser.getControl(name='add_input_name').value = 'value_index'
>>> browser.getControl('Add').click()
Now we can look at the index and see how it is configured.
>>> browser.getLink('value_index').click()
>>> print(browser.contents)
<...
...Interface...zope.size.interfaces.ISized...
...Field Name...sizeForDisplay...
...Field Callable...True...
| zc.catalog | /zc.catalog-3.0.tar.gz/zc.catalog-3.0/src/zc/catalog/browser/README.rst | README.rst |
from zc.catalogqueue.CatalogEventQueue import REMOVED, CHANGED, ADDED
import BTrees.Length
import datetime
import logging
import persistent
import pytz
import zc.catalogqueue.CatalogEventQueue
import zc.catalogqueue.interfaces
import zope.interface
logger = logging.getLogger(__name__)
class CatalogQueue(persistent.Persistent):
zope.interface.implements(zc.catalogqueue.interfaces.ICatalogQueue)
lastProcessedTime = None
totalProcessed = 0
def __init__(self, buckets=1009):
self._buckets = buckets
self._queues = [
zc.catalogqueue.CatalogEventQueue.CatalogEventQueue()
for i in range(buckets)
]
def __len__(self):
try:
return self._length()
except AttributeError:
return 0
    def _change_length(self, change):
        try:
            length = self._length
        except AttributeError:
            # _length may be missing on instances created before it was
            # introduced; initialize it from the current bucket contents.
            length = self._length = BTrees.Length.Length()
            change = 0
            for queue in self._queues:
                change += len(queue)
        length.change(change)
def _notify(self, id, event):
self._change_length(
self._queues[hash(id) % self._buckets].update(id, event))
def add(self, id):
self._notify(id, ADDED)
def update(self, id):
self._notify(id, CHANGED)
def remove(self, id):
self._notify(id, REMOVED)
def process(self, ids, catalogs, limit):
done = 0
for queue in self._queues:
for id, (_, event) in queue.process(limit-done).iteritems():
if event is REMOVED:
for catalog in catalogs:
catalog.unindex_doc(id)
else:
ob = ids.queryObject(id)
if ob is None:
logger.warn("Couldn't find object for %s", id)
else:
for catalog in catalogs:
catalog.index_doc(id, ob)
done += 1
self._change_length(-1)
if done >= limit:
break
self.lastProcessedTime = datetime.datetime.now(pytz.UTC)
self.totalProcessed += done
return done | zc.catalogqueue | /zc.catalogqueue-0.3.1.tar.gz/zc.catalogqueue-0.3.1/src/zc/catalogqueue/queue.py | queue.py |
import logging
from persistent import Persistent
from ZODB.POSException import ConflictError
logger = logging.getLogger(__name__)
SAFE_POLICY = 0
ALTERNATIVE_POLICY = 1
REMOVED = 0
ADDED = 1
CHANGED = 2
CHANGED_ADDED = 3
EVENT_TYPES = (REMOVED, CHANGED, ADDED, CHANGED_ADDED)
antiEvent = {REMOVED: ADDED,
ADDED: REMOVED,
CHANGED: CHANGED,
CHANGED_ADDED: CHANGED_ADDED,
}.get
ADDED_EVENTS = (CHANGED, ADDED, CHANGED_ADDED)
class CatalogEventQueue(Persistent):
"""Event queue for catalog events
This is a rather odd queue. It organizes events by object, where
objects are identified by uids, which happen to be string paths.
One way that this queue is extremely odd is that it really only
keeps track of the last event for an object. This is because we
really only *care* about the last event for an object.
    There are four types of events:
ADDED -- An object was added to the catalog
CHANGED -- An object was changed
REMOVED -- An object was removed from the catalog
      CHANGED_ADDED -- An object was added and subsequently changed.
This event is a consequence of the queue implementation.
Note that, although we only keep track of the most recent
event. there are rules for how the most recent event can be
updated:
- It is illegal to update an ADDED, CHANGED, or CHANGED_ADDED
event with an ADDED event or
- to update a REMOVED event with a CHANGED event.
We have a problem because applications don't really indicate
    whether they are adding, or just updating. We deduce add
events by examining the catalog and event queue states.
Also note that, when events are applied to the catalog, events may
have no effect.
- If an object is in the catalog, ADDED events are equivalent to
CHANGED events.
- If an object is not in the catalog, REMOVED and CHANGED events
have no effect.
If we undo a transaction, we generate an anti-event. The anti
    event of ADDED is REMOVED, of REMOVED is ADDED, and of CHANGED is
CHANGED.
Note that these rules represent heuristics that attempt to provide
efficient and sensible behavior for most cases. They are not "correct" in
that they handle cases that may not seem handleable. For example,
consider a sequence of transactions:
T1 adds an object
T2 removes the object
T3 adds the object
T4 processes the queue
T5 undoes T1
    It's not clear what should be done in this case. We decide to
generate a remove event, even though a later transaction added the
object again. Is this correct? It's hard to say. The decision we
make is not horrible and it allows us to provide a very efficient
implementation. See the unit tests for other scenarios. Feel
free to think of cases for which our decisions are unacceptably
wrong and write unit tests for these cases.
There are two kinds of transactions that affect the queue:
- Application transactions always add or modify events. They never
remove events.
- Queue processing transactions always remove events.
"""
_conflict_policy = SAFE_POLICY
def __init__(self, conflict_policy=SAFE_POLICY):
# Mapping from uid -> (generation, event type)
self._data = {}
self._conflict_policy = conflict_policy
def __nonzero__(self):
return not not self._data
def __len__(self):
return len(self._data)
def update(self, uid, etype):
assert etype in EVENT_TYPES
data = self._data
current = data.get(uid)
if current is not None:
delta = 0
generation, current = current
if current in ADDED_EVENTS and etype is ADDED:
raise TypeError("Attempt to add an object that is already "
"in the catalog")
if current is REMOVED and etype is CHANGED:
raise TypeError("Attempt to change an object that has "
"been removed")
if ((current is ADDED or current is CHANGED_ADDED)
and etype is CHANGED):
etype = CHANGED_ADDED
else:
delta = 1
generation = 0
data[uid] = generation+1, etype
self._p_changed = 1
return delta
def getEvent(self, uid):
state = self._data.get(uid)
if state is not None:
state = state[1]
return state
def process(self, limit=None):
"""Removes and returns events from this queue.
If limit is specified, at most (limit) events are removed.
"""
data = self._data
if not limit or len(data) <= limit:
self._data = {}
return data
else:
self._p_changed = 1
res = {}
keys = data.keys()[:limit]
for key in keys:
res[key] = data[key]
del data[key]
return res
def _p_resolveConflict(self, oldstate, committed, newstate):
# Apply the changes made in going from old to newstate to
# committed
# Note that in the case of undo, the olddata is the data for
# the transaction being undone and newdata is the data for the
# transaction previous to the undone transaction.
# Find the conflict policy on the new state to make sure changes
# to it will be applied
policy = newstate['_conflict_policy']
# Committed is always the currently committed data.
oldstate_data = oldstate['_data']
committed_data = committed['_data']
newstate_data = newstate['_data']
# Merge newstate changes into committed
for uid, new in newstate_data.items():
# Decide if this is a change
old = oldstate_data.get(uid)
current = committed_data.get(uid)
if new != old:
# something changed
if old is not None:
# got a repeat event
if new[0] < old[0]:
# This was an undo, so give the event the undo
# time and convert to an anti event of the old
# (undone) event.
new = (0, antiEvent(old[1]))
elif new[1] is ADDED:
if policy == SAFE_POLICY:
logger.error(
'Queue conflict on %s: ADDED on existing item'
% uid)
raise ConflictError
else:
if current and current[1] == REMOVED:
new = current
else:
new = (current[0]+1, CHANGED_ADDED)
# remove this event from old, so that we don't
# mess with it later.
del oldstate_data[uid]
                # Check against current value. Either we want a
# different event, in which case we give up, or we
# do nothing.
if current is not None:
if current[1] != new[1]:
if policy == SAFE_POLICY:
# This is too complicated, bail
logger.error('Queue conflict on %s' % uid)
raise ConflictError
elif REMOVED not in (new[1], current[1]):
new = (current[0]+1, CHANGED_ADDED)
committed_data[uid] = new
elif ( current[0] < new[0] and
new[1] == REMOVED ):
committed_data[uid] = new
# remove this event from old, so that we don't
# mess with it later.
if oldstate_data.get(uid) is not None:
del oldstate_data[uid]
# nothing to do
continue
committed_data[uid] = new
else:
# Both old and new have this event so new didn't touch it.
# remove it from old so we don't treat it as undone below.
del oldstate_data[uid]
# Now handle remaining events in old that weren't in new.
# These *must* be undone events!
for uid, old in oldstate_data.items():
new = (0, antiEvent(old[1]))
# See above
current = committed_data.get(uid)
if current is not None:
if current[1] != new[1]:
# This is too complicated, bail
logger.error('Queue conflict on %s processing undos' % uid)
raise ConflictError
# nothing to do
continue
committed_data[uid] = new
return { '_data': committed_data
, '_conflict_policy' : policy
}
__doc__ = CatalogEventQueue.__doc__ + __doc__
# Old worries
# We have a problem. We have to make sure that we don't lose too
# much history to undo, but we don't want to retain the entire
# history. We certainly don't want to execute the entire history
# when we execute a trans.
#
# Baah, no worry, if we undo in a series of unprocessed events, we
# simply restore the old event, which we have in the old state. | zc.catalogqueue | /zc.catalogqueue-0.3.1.tar.gz/zc.catalogqueue-0.3.1/src/zc/catalogqueue/CatalogEventQueue.py | CatalogEventQueue.py |
========
Comments
========
The comment package is a simple way to add comments to any ``IAnnotatable``
Zope content. The datetime and current principals are stamped on to the
comment. The comment body is currently simply unicode text but intended to be
html snippets ("rich text") at a later date.
The inclusion of current principals requires an interaction, which is what we
need to set up before we can use the system here. Below, we set up a dummy
interaction with dummy participants, create some content that is
``IAttributeAnnotatable``, and then finally show the system in use.
In order to create a participation, we need a few principals:
>>> import zope.security.management
>>> import zope.security.interfaces
>>> from zope import interface
>>> class Principal(object):
... interface.implements(zope.security.interfaces.IPrincipal)
...
... def __init__(self, id, title, description):
... self.id = id
... self.title = title
... self.description = description
...
... def __repr__(self):
... return '<%s %r>' %(self.__class__.__name__, self.id)
>>> alice = Principal('alice', 'Alice Aal', 'first principal')
>>> betty = Principal('betty', 'Betty Barnes', 'second principal')
Now we can create a participation:
>>> class Participation(object):
... zope.interface.implements(
... zope.security.interfaces.IParticipation,
... zope.publisher.interfaces.IRequest)
... interaction = principal = None
...
... def __init__(self, principal):
... self.principal = principal
...
... def __repr__(self):
... return '<%s %r>' %(self.__class__.__name__, self.principal)
Next we need to make sure the annotation mechanism is set up, because the
comments adapter needs to be able to annotate the adapted object:
>>> import zope.component
>>> import zope.annotation
>>> zope.component.provideAdapter(
... zope.annotation.attribute.AttributeAnnotations)
Let's now make sure that all commentable objects can receive comments:
>>> from zc.comment import comment
>>> zope.component.provideAdapter(comment.CommentsFactory)
Now that we have everything set up, let's have a look at how it works. First we
need a simple content component:
>>> class SimpleContent(object):
... interface.implements(
... zope.annotation.interfaces.IAttributeAnnotatable)
... def __init__(self, name):
... self.name = name
... def __repr__(self):
... return '<%s %r>' %(self.__class__.__name__, self.name)
>>> content = SimpleContent(u'content')
In order to play with the comments, we now have to register a new
participation. In our case, Alice wants to create a comment:
>>> zope.security.management.endInteraction()
>>> zope.security.management.newInteraction(Participation(alice))
We can access the comments of an object by adapting to ``IComments``:
>>> from zc.comment import interfaces
>>> comments = interfaces.IComments(content)
Traceback (most recent call last):
...
TypeError: ('Could not adapt',
<SimpleContent u'content'>,
<InterfaceClass zc.comment.interfaces.IComments>)
Initially, the component is not commentable, because it does not provide the
correct interface. Once we declare ``ICommentable`` directly on the content
object, the adaptation succeeds:
>>> zope.interface.directlyProvides(content, interfaces.ICommentable)
>>> comments = interfaces.IComments(content)
>>> comments
<Comments (0) for <SimpleContent u'content'>>
Let's now add a comment:
>>> import datetime, pytz
>>> before = datetime.datetime.now(pytz.utc)
>>> comments.add(u"Foo! Bar!")
>>> after = datetime.datetime.now(pytz.utc)
As you can see, it was not necessary to create a comment object manually; we
simply passed in the text. Clearly a comment has been added:
>>> len(comments)
1
Let's now make sure that the data was set correctly:
>>> comments[0].body
u'Foo! Bar!'
>>> before <= comments[0].date <= after
True
>>> comments[0].principal_ids
('alice',)
Let's now log in as Betty:
>>> zope.security.management.endInteraction()
>>> zope.security.management.newInteraction(Participation(betty))
Betty can also add a comment:
>>> comments = interfaces.IComments(content)
>>> before = datetime.datetime.now(pytz.utc)
>>> comments.add(u"Shazam")
>>> after = datetime.datetime.now(pytz.utc)
>>> len(comments)
2
And her comment is also correctly stored:
>>> comments[1].body
u'Shazam'
>>> before <= comments[1].date <= after
True
>>> comments[1].principal_ids
('betty',)
Let's now make sure that if multiple participants are in the interaction,
all of them get picked up:
>>> zope.security.management.endInteraction()
>>> zope.security.management.newInteraction(
... Participation(alice), Participation(betty))
>>> comments.add(u"Boom.")
>>> len(comments)
3
>>> comments[2].body
u'Boom.'
>>> comments[2].principal_ids
('alice', 'betty')
Finally, note that we can only add unicode text as a valid comment:
>>> comments.add(42)
Traceback (most recent call last):
...
WrongType: (42, <type 'unicode'>)
If you like, you can always clear all comments:
>>> comments.clear()
>>> len(comments)
0
And of course some cleanup:
>>> zope.security.management.endInteraction()
| zc.comment | /zc.comment-0.1.0.tar.gz/zc.comment-0.1.0/src/zc/comment/README.txt | README.txt |
__docformat__ = "reStructuredText"
import HTMLParser
import xml.sax.saxutils
import zope.app.form.browser.textwidgets
import zope.app.form.browser.widget
class Input(zope.app.form.browser.textwidgets.TextAreaWidget):
cssClass = "zc-comment-text"
def _toFieldValue(self, value):
if value:
# normalize newlines:
value = value.replace("\r\n", "\n")
value = value.replace("\r", "\n")
# encode magical characters:
value = xml.sax.saxutils.escape(value)
# add <br/> tags:
value = value.replace("\n", "<br />\n")
return value
def _toFormValue(self, value):
if value == self.context.missing_value:
return ""
if value:
# rip out XHTML encoding, converting markup back to plain text
# (we're encoding simple rich text as plain text!)
p = ConversionParser()
p.feed(value)
p.close()
value = p.get_data()
return value
def __call__(self):
return zope.app.form.browser.widget.renderElement(
"textarea",
name=self.name,
id=self.name,
cssClass=self.cssClass,
rows=self.height,
cols=self.width,
style=self.style,
contents=self._getFormValue(), # already escaped
extra=self.extra,
)
class Display(zope.app.form.browser.widget.DisplayWidget):
cssClass = "zc-comment-text"
tag = "div"
def __call__(self):
if self._renderedValueSet():
value = self._data
else:
value = self.context.default
if value == self.context.missing_value:
return ""
if self.tag:
value = zope.app.form.browser.widget.renderElement(
self.tag, cssClass=self.cssClass, contents=value)
return value
class ConversionParser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.__buffer = []
def handle_data(self, data):
self.__buffer.append(data)
def handle_entityref(self, name):
self.__buffer.append("&%s;" % name)
def get_data(self):
return "".join(self.__buffer) | zc.comment | /zc.comment-0.1.0.tar.gz/zc.comment-0.1.0/src/zc/comment/browser/widget.py | widget.py |
=============
Commenting UI
=============
Create the browser object we'll be using.
>>> from zope.testbrowser.testing import Browser
>>> browser = Browser()
>>> browser.addHeader('Accept-Language', 'test')
To see how comments work, we'll create an instance of a simple content
object:
>>> browser.open('http://localhost/@@contents.html')
>>> browser.getLink('[[zope][[top]]]').click()
>>> browser.getLink('[[zc.comment][Content]]').click()
>>> browser.getControl(name='new_value').value = 'number'
>>> browser.getControl('[[zope][container-apply-button (Apply)]]').click()
Let's visit the object and click on the comments tab:
>>> browser.handleErrors = False
>>> browser.getLink('number').click()
>>> browser.getLink('[[zc.comment][Comments]]').click()
We see that no comments have been made yet:
>>> '[[zc.intranet][No comments have been made.]]' in browser.contents
True
Let's add a new multi-line comment:
>>> browser.getControl('[[zc.comment][New Comment]]').value = '''\
... I give my pledge, as an Earthling
... to save, and faithfully defend from waste
... the natural resources of my planet.
... It's soils, minerals, forests, waters, and wildlife.
... '''
>>> browser.getControl('[[zc.comment][Add Comment]]').click()
Now, we get a table that displays the comment with its date, text,
and the user who made it:
>>> print browser.contents
<...
<th>
...[[zc.comment][comment_column-date (Date)]]...
</th>
<th>
...[[zc.comment][comment_column-principals (Principals)]]...
</th>
<th>
[[zc.comment][comment_column-comment (Comment)]]
</th>
...
<td>
2005 11 14 12:00:55 -500
</td>
<td>
Unauthenticated User
</td>
<td>
I give my pledge, as an Earthling<br />
to save, and faithfully defend from waste<br />
the natural resources of my planet.<br />
It's soils, minerals, forests, waters, and wildlife.<br />
...
<label for="form.comment">
<span class="required">*</span><span>[[zc.comment][New Comment]]</span>
</label>
...<textarea class="zc-comment-text"
style="width: 50ex; height: 6em;"
cols="60" id="form.comment"
name="form.comment" rows="15" ></textarea></div>
...
<input type="submit"
id="form.actions.41646420436f6d6d656e74"
name="form.actions.41646420436f6d6d656e74"
value="[[zc.comment][Add Comment]]"
class="button" />
...
Now, we'll add another comment.
>>> browser.getControl('[[zc.comment][New Comment]]'
... ).value = 'another comment'
>>> browser.getControl('[[zc.comment][Add Comment]]').click()
>>> print browser.contents
<...
<th>
...[[zc.comment][comment_column-date (Date)]]...
</th>
<th>
...[[zc.comment][comment_column-principals (Principals)]]...
</th>
<th>
[[zc.comment][comment_column-comment (Comment)]]
</th>
</tr>
...
<td>
2005 11 14 12:10:18 -500
</td>
<td>
Unauthenticated User
</td>
<td>
I give my pledge, as an Earthling<br />
to save, and faithfully defend from waste<br />
the natural resources of my planet.<br />
It's soils, minerals, forests, waters, and wildlife.<br />
<BLANKLINE>
</td>
</tr>
...
<td>
2005 11 14 12:10:18 -500
</td>
<td>
Unauthenticated User
</td>
<td>
another comment
</td>
</tr>
...
<label for="form.comment">
<span class="required">*</span><span>[[zc.comment][New Comment]]</span>
</label>
...
...<textarea class="zc-comment-text"
style="width: 50ex; height: 6em;"
cols="60"
id="form.comment"
name="form.comment"
rows="15" ></textarea>...
<input type="submit"
id="form.actions.41646420436f6d6d656e74"
name="form.actions.41646420436f6d6d656e74"
value="[[zc.comment][Add Comment]]"
class="button" />
...
| zc.comment | /zc.comment-0.1.0.tar.gz/zc.comment-0.1.0/src/zc/comment/browser/README.txt | README.txt |
from zope import interface, schema, component
import zope.cachedescriptors.property
from zope.app import zapi
from zope.app.pagetemplate import ViewPageTemplateFile
import zope.formlib.form
import zc.table.column
import zc.table.interfaces
from zc.table import table
from zope.interface.common.idatetime import ITZInfo
from zc.comment import interfaces
from zc.comment.i18n import _
class SortableColumn(zc.table.column.GetterColumn):
interface.implements(zc.table.interfaces.ISortableColumn)
def dateFormatter(value, context, formatter):
value = value.astimezone(ITZInfo(formatter.request))
dateFormatter = formatter.request.locale.dates.getFormatter(
'dateTime', length='long')
return dateFormatter.format(value)
def principalsGetter(context, formatter):
principals = zapi.principals()
return [principals.getPrincipal(pid) for pid in context.principal_ids]
def principalsFormatter(value, context, formatter):
return ', '.join([v.title for v in value])
columns = [
SortableColumn(
_('comment_column-date','Date'), lambda c, f: c.date, dateFormatter),
SortableColumn(
_('comment_column-principals', 'Principals'), principalsGetter,
principalsFormatter),
zc.table.column.GetterColumn( # XXX escape?
_('comment_column-comment', 'Comment'), lambda c, f: c.body)
]
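# A sketch of adding another sortable column (``modified`` is a
# hypothetical attribute, not defined on the comment objects here):
#
#     columns.append(SortableColumn(
#         _('comment_column-modified', 'Modified'),
#         lambda c, f: c.modified, dateFormatter))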
class Comments(zope.formlib.form.PageForm):
label = _("Comments")
template = ViewPageTemplateFile('comments.pt')
form_fields = zope.formlib.form.Fields(
interfaces.CommentText(
__name__ = 'comment',
title=_("New Comment"),
),
)
def setUpWidgets(self, ignore_request=False):
super(Comments, self).setUpWidgets(ignore_request=ignore_request)
comment = self.widgets.get('comment')
if comment is not None:
comment.style="width: 50ex; height: 6em;"
comment.setRenderedValue(u'')
@zope.cachedescriptors.property.Lazy
def formatter(self):
adapted = interfaces.IComments(self.context)
factory = component.getUtility(zc.table.interfaces.IFormatterFactory)
formatter = factory(self.context, self.request, adapted,
columns=columns)
return formatter
@zope.formlib.form.action(_("Add Comment"))
def add(self, action, data):
comment = data.get('comment')
self.form_reset = True
if comment:
adapted = interfaces.IComments(self.context)
adapted.add(comment)
self.request.response.redirect(self.request.URL)
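# The redirect implements POST-redirect-GET: reloading the page after
# adding a comment will not re-submit the form.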
class CommentsSubPage(zope.formlib.form.SubPageForm, Comments):
label = u''
template = ViewPageTemplateFile('commentssub.pt')
class CommentsViewSubPage(CommentsSubPage):
actions = form_fields = () | zc.comment | /zc.comment-0.1.0.tar.gz/zc.comment-0.1.0/src/zc/comment/browser/views.py | views.py |
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("--version",
action="store_true", default=False,
help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
help=("Allow for re-use of existing directory of "
"setuptools versions"))
options, args = parser.parse_args()
if options.version:
print("bootstrap.py version %s" % __version__)
sys.exit(0)
######################################################################
# load/install setuptools
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
if os.path.exists('ez_setup.py'):
exec(open('ez_setup.py').read(), ez)
else:
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
# Strip all site-packages directories from sys.path that
# are not sys.prefix; this is because on Windows
# sys.prefix is a site-package directory.
if sitepackage_path != sys.prefix:
sys.path[:] = [x for x in sys.path
if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs) | zc.configuration | /zc.configuration-1.2.0.tar.gz/zc.configuration-1.2.0/bootstrap.py | bootstrap.py |
Configuration Extensions for Filtering or Inhibiting Configuration
==================================================================
The zc.configuration package used to provide the ``exclude`` directive
for inhibiting configuration. That directive has since been included in
zope.configuration itself; this package now provides backward-compatibility
imports and tests that ensure it keeps working for people who are already
using zc.configuration rather than the newer zope.configuration.
This package may contain more configuration extensions in the future, but
currently it is of little use on its own: the only feature it provided,
the ``exclude`` directive, has been merged into the original
``zope.configuration`` package.
First, let's look at an example. The zc.configuration.demo package
has a ZCML configuration that includes some other configuration files.
We'll set a log handler so we can see what's going on:
>>> import logging, sys
>>> logger = logging.getLogger('config')
>>> oldlevel = logger.level
>>> logger.setLevel(logging.DEBUG)
>>> handler = logging.StreamHandler(sys.stdout)
>>> logger.addHandler(handler)
Now, we'll include the zc.configuration.demo config:
>>> from zope.configuration import xmlconfig
>>> _ = xmlconfig.string('<include package="zc.configuration.demo" />')
include /zc.configuration/src/zc/configuration/demo/configure.zcml
include /zc.configuration/src/zc/configuration/demo/sub/configure.zcml
include /zc.configuration/src/zc/configuration/demo/spam.zcml
Each run of the configuration machinery runs with fresh state, so
rerunning gives the same thing:
>>> _ = xmlconfig.string('<include package="zc.configuration.demo" />')
include /zc.configuration/src/zc/configuration/demo/configure.zcml
include /zc.configuration/src/zc/configuration/demo/sub/configure.zcml
include /zc.configuration/src/zc/configuration/demo/spam.zcml
Now, we'll load the zc.configuration meta.zcml and use the exclude
directive to exclude the two files included by the configuration file
in zc.configuration.demo:
>>> _ = xmlconfig.string(
... '''
... <configure xmlns="http://namespaces.zope.org/zope">
... <include package="zc.configuration" file="meta.zcml" />
... <exclude package="zc.configuration.demo.sub" />
... <exclude package="zc.configuration.demo" file="spam.zcml" />
... <include package="zc.configuration.demo" />
... </configure>
... ''')
include /zc.configuration/src/zc/configuration/meta.zcml
include /zc.configuration/src/zc/configuration/demo/configure.zcml
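The same pattern applies in a real application: load this package's
``meta.zcml`` first, then place ``exclude`` directives before the
``include`` of the package they affect. A sketch (``some.package`` is a
placeholder, not a real package)::

  <configure xmlns="http://namespaces.zope.org/zope">
    <include package="zc.configuration" file="meta.zcml" />
    <exclude package="some.package" file="browser.zcml" />
    <include package="some.package" />
  </configure>

Note that ordering matters: an ``exclude`` directive only inhibits includes
that are processed after it.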
.. cleanup
>>> logger.setLevel(oldlevel)
>>> logger.removeHandler(handler)
| zc.configuration | /zc.configuration-1.2.0.tar.gz/zc.configuration-1.2.0/src/zc/configuration/README.txt | README.txt |
AMEX = 'AMEX'
DISCOVER = 'Discover'
MASTERCARD = 'MasterCard'
VISA = 'Visa'
UNKNOWN_CARD_TYPE = 'Unknown'
def identifyCreditCardType(card_num, card_len):
""" Identifies the credit card type based on information on the
following site(s):
http://en.wikipedia.org/wiki/Credit_card_number
http://www.beachnet.com/~hstiles/cardtype.html
This checks the prefix (up to the first four digits) and the length of the
card number to
identify the type of the card. This method is used because Authorize.net
does not provide this information. This method currently identifies only
the following four types:
1. VISA
2. MASTERCARD
3. Discover
4. AMEX
Before we test, lets create a few dummy credit-card numbers:
>>> amex_card_num = '370000000000002'
>>> disc_card_num = '6011000000000012'
>>> mc_card_num = '5424000000000015'
>>> visa_card_num = '4007000000027'
>>> unknown_card_num = '400700000002'
>>> identifyCreditCardType(amex_card_num, len(amex_card_num)) == AMEX
True
>>> identifyCreditCardType(disc_card_num,
... len(disc_card_num)) == DISCOVER
True
>>> identifyCreditCardType(mc_card_num, len(mc_card_num)) == MASTERCARD
True
>>> identifyCreditCardType(visa_card_num, len(visa_card_num)) == VISA
True
>>> identifyCreditCardType(unknown_card_num,
... len(unknown_card_num)) == UNKNOWN_CARD_TYPE
True
"""
card_type = UNKNOWN_CARD_TYPE
card_1_digit = card_num[0]
card_2_digits = card_num[:2]
card_4_digits = card_num[:4]
# AMEX
if (card_len == 15) and card_2_digits in ('34', '37'):
card_type = AMEX
# MASTERCARD, DISCOVER & VISA
elif card_len == 16:
# MASTERCARD
if card_2_digits in ('51', '52', '53', '54', '55'):
card_type = MASTERCARD
# DISCOVER
elif (card_4_digits == '6011') or (card_2_digits == '65'):
card_type = DISCOVER
# VISA
elif (card_1_digit == '4'):
card_type = VISA
# VISA
elif (card_len == 13) and (card_1_digit == '4'):
card_type = VISA
return card_type | zc.creditcard | /zc.creditcard-1.0.tar.gz/zc.creditcard-1.0/src/zc/creditcard/__init__.py | __init__.py |
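# Typical usage, as a sketch (the import assumes this package is installed
# as ``zc.creditcard``; the number below is a test card number, not a real
# account):
#
#     from zc.creditcard import identifyCreditCardType, VISA
#     card_num = '4007000000027'
#     identifyCreditCardType(card_num, len(card_num)) == VISA   # True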
import datetime
from zope.schema import TextLine, Bool, Int, Date, Choice
from zope.schema import getFieldsInOrder
from zope.interface import Interface, implements
from zope.interface.common.idatetime import ITZInfo
from zope.datetime import parseDatetimetz, DateTimeError
from zope.formlib import textwidgets
from zope.formlib.widget import renderElement
import zope.datetime
import zc.i18n.date
import zc.resourcelibrary
import glob
import os
# initialize the language files
LANGS = []
for langFile in glob.glob(
os.path.join(os.path.dirname(__file__),'resources','languages') + '/calendar-??.js'):
LANGS.append(os.path.basename(langFile)[9:11])
def normalizeDateTime(dt, request):
if dt is not None:
if (dt.tzinfo is not None and
isinstance(dt.tzinfo, zope.datetime._tzinfo)):
tzinfo = ITZInfo(request)
dt = dt.replace(tzinfo=None) # TODO: this is a hack
# to accomodate pre-Zope-3.2 datetime widgets that assume UTC
# timezone. Zope 3.2+ datetime widgets should use the
# request's timezone, or pytz.utc for UTC rather than the
# datetimeutils version.
dt = zc.i18n.date.normalize(request, dt)
return dt
def localizeDateTime(dt, request):
if (isinstance(dt, datetime.datetime) and
dt.tzinfo is not None and
dt.tzinfo.utcoffset(None) == datetime.timedelta(0)):
tzinfo = ITZInfo(request, None)
if tzinfo is not None:
dt = dt.astimezone(tzinfo)
return dt
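# Illustrative sketch (assumes the request is adaptable to ITZInfo, e.g.
# via an adapter returning US/Eastern):
#
#     dt = datetime.datetime(2006, 1, 1, 12, 0, tzinfo=pytz.utc)
#     localizeDateTime(dt, request)
#     # -> 2006-01-01 07:00:00-05:00 (the same instant, rendered in the
#     #    request's timezone)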
class JavascriptObject(TextLine):
pass
class ICalendarWidgetConfiguration(Interface):
"""Configuration schema for the calendar widget.
See
http://www.dynarch.com/demos/jscalendar/doc/html/reference.html#node_sec_2.1
"""
inputField = TextLine(
title=u"Id of input field",
default=None,
required=False)
displayArea = TextLine(
title=u"Id of element which displays the date",
default=None,
required=False)
button = TextLine(
title=u"Id of trigger",
default=None,
required=False)
eventName = TextLine(
title=u"Event name of trigger",
default=u'click',
required=False)
ifFormat = TextLine(
title=u"Input field date format",
default=u'%Y/%m/%d')
daFormat = TextLine(
title=u"displayArea date format",
default=u'%Y/%m/%d')
singleClick = Bool(
title=u"Calendar is in single-click mode",
default=True)
# disableFunc - deprecated
dateStatusFunc = JavascriptObject(
title=u"Date status function",
description=u"""
A function that receives a JS Date object and returns a boolean or a
string. This function allows one to set a certain CSS class to some
date, therefore making it look different. If it returns true then the
date will be disabled. If it returns false nothing special happens with
the given date. If it returns a string then that will be taken as a CSS
class and appended to the date element. If this string is ``disabled''
then the date is also disabled (therefore is like returning true).
""",
default=None,
required=False)
firstDay = Int(
title=u"First day of week (0 is Sunday, 1 is Monday, 6 is Saturday)",
default=0)
weekNumbers = Bool(
title=u"Display week numbers",
default=True)
align = TextLine(
title=u"Alingment of calendar",
default=u'Bl')
range = TextLine(
title=u"Range of allowed years",
default=u"[1900, 2999]")
flat = TextLine(
title=u"Id of parent object for flat calendars",
default=None,
required=False)
flatCallback = TextLine(
title=u"Function to call when the calendar is changed",
default=None,
required=False)
onSelect = TextLine(
title=u"Custom click-on-date handler",
default=None,
required=False)
onClose = JavascriptObject(
title=u"Custom handler of 'calendar closed' event",
default=None,
required=False)
onUpdate = JavascriptObject(
title=u"Custom handler of 'calendar updated' event",
default=None,
required=False)
date = Date(
title=u"Initial date",
default=None,
required=False)
showsTime = Bool(
title=u"Show time",
default=False)
timeFormat = Choice(
title=u"Time format (12 hours / 24 hours)",
values=['12', '24'],
default='24')
electric = Bool(
title=u"Update date field only when calendar is closed",
default=True)
position = TextLine(
title=u"Default [x, y] position of calendar",
default=None,
required=False)
cache = Bool(
title=u"Cache calendar object",
default=False)
showOthers = Bool(
title=u"Show days belonging to other months",
default=False)
multiple = JavascriptObject(
title=u"Multiple dates",
description=u"""
A JavaScript list of dates that stores the dates to be preselected
on the widget.
""",
default=None)
class CalendarWidgetConfiguration(object):
implements(ICalendarWidgetConfiguration)
_multiple_dates = None
def __init__(self, name, **kw):
self.name = name.replace('.', '_')
for name, field in getFieldsInOrder(ICalendarWidgetConfiguration):
if name in kw:
value = kw.pop(name)
else:
value = field.default
setattr(self, name, value)
if kw:
raise ValueError('unknown arguments: %s' % ', '.join(kw.keys()))
def setMultiple(self, dates):
self._multiple_dates = dates
self.multiple = 'multi_%s' % self.name
self.onClose = ('getMultipleDateClosedHandler("%s", multi_%s)'
% (self.inputField, self.name))
def setEnabledWeekdays(self, enabled_weekdays):
"""Enable just a set of weekdays.
`enabled_weekdays` is a list of ints (0 = Sunday, 1 = Monday).
"""
weekdays = ', '.join(str(weekday) for weekday in enabled_weekdays)
self.dateStatusFunc = 'enabledWeekdays([%s])' % weekdays
def dumpJS(self):
"""Dump configuration as a JavaScript Calendar.setup call."""
rows = []
for name, field in getFieldsInOrder(ICalendarWidgetConfiguration):
value = getattr(self, name)
if value != field.default:
if value is None:
value_repr = 'null'
elif isinstance(field, JavascriptObject):
value_repr = str(value)
elif isinstance(value, basestring):
value_repr = repr(str(value))
elif isinstance(value, bool):
value_repr = value and 'true' or 'false'
elif isinstance(value, datetime.date):
value_repr = 'new Date(%d, %d, %d)' % (value.year,
value.month-1, value.day)
elif isinstance(value, (int, long)):
# plain ints such as ``firstDay`` (bools were already handled above)
value_repr = str(value)
else:
raise ValueError(value)
row = ' %s: %s,' % (name, value_repr)
rows.append(row)
if rows:
rows[-1] = rows[-1][:-1] # remove last comma
return "Calendar.setup({\n" + '\n'.join(rows) + '\n});\n'
template = """
%(widget_html)s
<input type="button" value="..." id="%(trigger_name)s">
<script type="text/javascript">
%(langDef)s
%(multiple_init)s
%(calendarSetup)s
</script>
"""
class DatetimeBase(object):
enabled_weekdays = None
def __call__(self):
widget_html = super(DatetimeBase, self).__call__()
return self._render(widget_html)
def hidden(self):
"""Render the widget with the actual date list field hidden."""
widget_html = super(DatetimeBase, self).hidden()
return self._render(widget_html)
def _render(self, widget_html):
"""Render the date widget.
`widget_html` is the HTML for the simple date field. This method
wraps that field in some extra code for the advanced JavaScript widget.
"""
zc.resourcelibrary.need('zc.datetimewidget')
lang = self.request.locale.id.language
lang = lang in LANGS and lang or 'en'
if lang != 'en':
# en is always loaded via the resourcelibrary, so that all
# variables are defined in js
# TODO: do not hardcode this
langFile = '/++resource++zc.datetimewidget/'\
'languages/calendar-%s.js' % lang
langDef = "dateTimeWidgetLoadLanguageFile('%s');" % langFile
else:
langDef = ''
conf = self._configuration()
trigger_name = '%s_trigger' % self.name
multiple_init = ''
if getattr(conf, 'multiple', None):
initial_dates = self.datesInJS(conf._multiple_dates)
multi_varname = 'multi_' + self.name.replace('.', '_')
multiple_init = 'var %s = %s;' % (multi_varname, initial_dates)
return template % dict(widget_html=widget_html,
trigger_name=trigger_name,
langDef=langDef,
multiple_init=multiple_init,
calendarSetup=conf.dumpJS())
def datesInJS(self, dates):
"""Return a list of dates in JavaScript-ready format.
`dates` may be None or a set of datetime.date() objects.
"""
if not dates:
return '[]'
date_reprs = ['new Date(%d, %d, %d)' % (dt.year, dt.month-1, dt.day)
for dt in sorted(dates)]
return '[' + ', '.join(date_reprs) + ']'
def _configuration(self):
trigger_name = '%s_trigger' % self.name
conf = CalendarWidgetConfiguration(self.name,
showsTime=self._showsTime,
ifFormat=self._format,
button=trigger_name,
inputField=self.name)
if self.enabled_weekdays is not None:
conf.setEnabledWeekdays(self.enabled_weekdays)
return conf
def setEnabledWeekdays(self, enabled_weekdays):
"""Enable only particular weekdays.
Other weekdays will simply not be selectable in the calendar
widget.
`enabled_weekdays` is a set of integers (0 = Sunday, 1 = Monday).
"""
self.enabled_weekdays = enabled_weekdays
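# For example, ``widget.setEnabledWeekdays([1, 2, 3, 4, 5])`` restricts
# the calendar to Monday through Friday.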
def _toFieldValue(self, input):
# TODO: Manually check if weekday is enabled -- the user could have
# directly entered the date.
if input == self._missing:
return self.context.missing_value
else:
try:
dt = parseDatetimetz(input)
except (DateTimeError, ValueError, IndexError), v:
return super(DatetimeBase, self)._toFieldValue(input)
else:
if self._showsTime:
return dt
else:
return dt.date()
def _toFormValue(self, value):
if value == self.context.missing_value:
return self._missing
if value:
value = localizeDateTime(value, self.request)
return value.strftime(self._format)
else:
return u''
class DatetimeWidget(DatetimeBase, textwidgets.DatetimeWidget):
"""Datetime entry widget."""
_format = '%Y-%m-%d %H:%M:%S'
_showsTime = True
def _toFieldValue(self, input):
res = super(DatetimeWidget, self)._toFieldValue(input)
if res is not self.context.missing_value:
res = normalizeDateTime(res, self.request)
return res
class DateWidget(DatetimeBase, textwidgets.DateWidget):
"""Date entry widget."""
displayWidth = 10
_format = '%Y-%m-%d'
_showsTime = False
class DateSetWidget(DatetimeBase, textwidgets.DateWidget):
"""Widget for entry of sets of dates."""
displayWidth = 30
_format = '%Y-%m-%d'
_showsTime = False
def __init__(self, field, item, request):
super(DateSetWidget, self).__init__(field, request)
def _configuration(self):
conf = DatetimeBase._configuration(self)
value = self.context.query(self.context.context, default=[])
conf.setMultiple(value)
return conf
def _toFieldValue(self, input):
if input == self._missing:
return self.context.missing_value
else:
dates = input.split()
values = set()
for date in dates:
value = super(DateSetWidget, self)._toFieldValue(date)
values.add(value)
return values
def _toFormValue(self, value):
if value == self.context.missing_value:
return self._missing
date_strs = [super(DateSetWidget, self)._toFormValue(date)
for date in sorted(value)]
return ' '.join(date_strs)
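# The form value is simply space-separated dates; e.g. the set
# {date(2006, 1, 2), date(2006, 1, 5)} renders as '2006-01-02 2006-01-05'.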
class DatetimeDisplayBase(object):
def __call__(self):
if self._renderedValueSet():
content = self._data
else:
content = self.context.default
if content == self.context.missing_value:
return ""
content = localizeDateTime(content, self.request)
formatter = self.request.locale.dates.getFormatter(
self._category, (self.displayStyle or None))
content = formatter.format(content)
return renderElement("span", contents=textwidgets.escape(content),
cssClass=self.cssClass)
class DatetimeDisplayWidget(
DatetimeDisplayBase, textwidgets.DatetimeDisplayWidget):
pass
class DateDisplayWidget(DatetimeDisplayBase, textwidgets.DateDisplayWidget):
pass | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/datetimewidget.py | datetimewidget.py |
Calendar.setup = function (params) {
function param_default(pname, def) { if (typeof params[pname] == "undefined") { params[pname] = def; } };
param_default("inputField", null);
param_default("displayArea", null);
param_default("button", null);
param_default("eventName", "click");
param_default("ifFormat", "%Y/%m/%d");
param_default("daFormat", "%Y/%m/%d");
param_default("singleClick", true);
param_default("disableFunc", null);
param_default("dateStatusFunc", params["disableFunc"]); // takes precedence if both are defined
param_default("dateText", null);
param_default("firstDay", null);
param_default("align", "Br");
param_default("range", [1900, 2999]);
param_default("weekNumbers", true);
param_default("flat", null);
param_default("flatCallback", null);
param_default("onSelect", null);
param_default("onClose", null);
param_default("onUpdate", null);
param_default("date", null);
param_default("showsTime", false);
param_default("timeFormat", "24");
param_default("electric", true);
param_default("step", 2);
param_default("position", null);
param_default("cache", false);
param_default("showOthers", false);
param_default("multiple", null);
var tmp = ["inputField", "displayArea", "button"];
for (var i in tmp) {
if (typeof params[tmp[i]] == "string") {
params[tmp[i]] = document.getElementById(params[tmp[i]]);
}
}
if (!(params.flat || params.multiple || params.inputField || params.displayArea || params.button)) {
alert("Calendar.setup:\n Nothing to setup (no fields found). Please check your code");
return false;
}
function onSelect(cal) {
var p = cal.params;
var update = (cal.dateClicked || p.electric);
if (update && p.inputField) {
p.inputField.value = cal.date.print(p.ifFormat);
if (typeof p.inputField.onchange == "function")
p.inputField.onchange();
}
if (update && p.displayArea)
p.displayArea.innerHTML = cal.date.print(p.daFormat);
if (update && typeof p.onUpdate == "function")
p.onUpdate(cal);
if (update && p.flat) {
if (typeof p.flatCallback == "function")
p.flatCallback(cal);
}
if (update && p.singleClick && cal.dateClicked)
cal.callCloseHandler();
};
if (params.flat != null) {
if (typeof params.flat == "string")
params.flat = document.getElementById(params.flat);
if (!params.flat) {
alert("Calendar.setup:\n Flat specified but can't find parent.");
return false;
}
var cal = new Calendar(params.firstDay, params.date, params.onSelect || onSelect);
cal.showsOtherMonths = params.showOthers;
cal.showsTime = params.showsTime;
cal.time24 = (params.timeFormat == "24");
cal.params = params;
cal.weekNumbers = params.weekNumbers;
cal.setRange(params.range[0], params.range[1]);
cal.setDateStatusHandler(params.dateStatusFunc);
cal.getDateText = params.dateText;
if (params.ifFormat) {
cal.setDateFormat(params.ifFormat);
}
if (params.inputField && typeof params.inputField.value == "string") {
cal.parseDate(params.inputField.value);
}
cal.create(params.flat);
cal.show();
return false;
}
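// Popup mode: wire the trigger element. The Calendar instance is created
// lazily on first activation and, when params.cache is set, reused via
// window.calendar on subsequent openings.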
var triggerEl = params.button || params.displayArea || params.inputField;
triggerEl["on" + params.eventName] = function() {
var dateEl = params.inputField || params.displayArea;
var dateFmt = params.inputField ? params.ifFormat : params.daFormat;
var mustCreate = false;
var cal = window.calendar;
if (dateEl)
params.date = Date.parseDate(dateEl.value || dateEl.innerHTML, dateFmt);
if (!(cal && params.cache)) {
window.calendar = cal = new Calendar(params.firstDay,
params.date,
params.onSelect || onSelect,
params.onClose || function(cal) { cal.hide(); });
cal.showsTime = params.showsTime;
cal.time24 = (params.timeFormat == "24");
cal.weekNumbers = params.weekNumbers;
mustCreate = true;
} else {
if (params.date)
cal.setDate(params.date);
cal.hide();
}
if (params.multiple) {
cal.multiple = {};
for (var i = params.multiple.length; --i >= 0;) {
var d = params.multiple[i];
var ds = d.print("%Y%m%d");
cal.multiple[ds] = d;
}
}
cal.showsOtherMonths = params.showOthers;
cal.yearStep = params.step;
cal.setRange(params.range[0], params.range[1]);
cal.params = params;
cal.setDateStatusHandler(params.dateStatusFunc);
cal.getDateText = params.dateText;
cal.setDateFormat(dateFmt);
if (mustCreate)
cal.create();
cal.refresh();
if (!params.position)
cal.showAtElement(params.button || params.displayArea || params.inputField, params.align);
else
cal.showAt(params.position[0], params.position[1]);
return false;
};
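// Note: in popup mode the Calendar instance is created inside the event
// handler above, so this returns undefined; only flat calendars return
// the instance here.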
return cal;
}; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/calendar-setup.js | calendar-setup.js |
/** The Calendar object constructor. */
Calendar = function (firstDayOfWeek, dateStr, onSelected, onClose) {
// member variables
this.activeDiv = null;
this.currentDateEl = null;
this.getDateStatus = null;
this.getDateToolTip = null;
this.getDateText = null;
this.timeout = null;
this.onSelected = onSelected || null;
this.onClose = onClose || null;
this.dragging = false;
this.hidden = false;
this.minYear = 1970;
this.maxYear = 2050;
this.dateFormat = Calendar._TT["DEF_DATE_FORMAT"];
this.ttDateFormat = Calendar._TT["TT_DATE_FORMAT"];
this.isPopup = true;
this.weekNumbers = true;
this.firstDayOfWeek = typeof firstDayOfWeek == "number" ? firstDayOfWeek : Calendar._FD; // 0 for Sunday, 1 for Monday, etc.
this.showsOtherMonths = false;
this.dateStr = dateStr;
this.ar_days = null;
this.showsTime = false;
this.time24 = true;
this.yearStep = 2;
this.hiliteToday = true;
this.multiple = null;
// HTML elements
this.table = null;
this.element = null;
this.tbody = null;
this.firstdayname = null;
// Combo boxes
this.monthsCombo = null;
this.yearsCombo = null;
this.hilitedMonth = null;
this.activeMonth = null;
this.hilitedYear = null;
this.activeYear = null;
// Information
this.dateClicked = false;
// one-time initializations
if (typeof Calendar._SDN == "undefined") {
// table of short day names
if (typeof Calendar._SDN_len == "undefined")
Calendar._SDN_len = 3;
var ar = new Array();
for (var i = 8; i > 0;) {
ar[--i] = Calendar._DN[i].substr(0, Calendar._SDN_len);
}
Calendar._SDN = ar;
// table of short month names
if (typeof Calendar._SMN_len == "undefined")
Calendar._SMN_len = 3;
ar = new Array();
for (var i = 12; i > 0;) {
ar[--i] = Calendar._MN[i].substr(0, Calendar._SMN_len);
}
Calendar._SMN = ar;
}
};
// ** constants
/// "static", needed for event handlers.
Calendar._C = null;
/// detect a special case of "web browser"
Calendar.is_ie = ( /msie/i.test(navigator.userAgent) &&
!/opera/i.test(navigator.userAgent) );
Calendar.is_ie5 = ( Calendar.is_ie && /msie 5\.0/i.test(navigator.userAgent) );
/// detect Opera browser
Calendar.is_opera = /opera/i.test(navigator.userAgent);
/// detect KHTML-based browsers
Calendar.is_khtml = /Konqueror|Safari|KHTML/i.test(navigator.userAgent);
// BEGIN: UTILITY FUNCTIONS; beware that these might be moved into a separate
// library, at some point.
Calendar.getAbsolutePos = function(el) {
var SL = 0, ST = 0;
var is_div = /^div$/i.test(el.tagName);
if (is_div && el.scrollLeft)
SL = el.scrollLeft;
if (is_div && el.scrollTop)
ST = el.scrollTop;
var r = { x: el.offsetLeft - SL, y: el.offsetTop - ST };
if (el.offsetParent) {
var tmp = this.getAbsolutePos(el.offsetParent);
r.x += tmp.x;
r.y += tmp.y;
}
return r;
};
Calendar.isRelated = function (el, evt) {
var related = evt.relatedTarget;
if (!related) {
var type = evt.type;
if (type == "mouseover") {
related = evt.fromElement;
} else if (type == "mouseout") {
related = evt.toElement;
}
}
while (related) {
if (related == el) {
return true;
}
related = related.parentNode;
}
return false;
};
Calendar.removeClass = function(el, className) {
if (!(el && el.className)) {
return;
}
var cls = el.className.split(" ");
var ar = new Array();
for (var i = cls.length; i > 0;) {
if (cls[--i] != className) {
ar[ar.length] = cls[i];
}
}
el.className = ar.join(" ");
};
Calendar.addClass = function(el, className) {
Calendar.removeClass(el, className);
el.className += " " + className;
};
// FIXME: the following 2 functions totally suck, are useless and should be replaced immediately.
Calendar.getElement = function(ev) {
var f = Calendar.is_ie ? window.event.srcElement : ev.currentTarget;
while (f.nodeType != 1 || /^div$/i.test(f.tagName))
f = f.parentNode;
return f;
};
Calendar.getTargetElement = function(ev) {
var f = Calendar.is_ie ? window.event.srcElement : ev.target;
while (f.nodeType != 1)
f = f.parentNode;
return f;
};
Calendar.stopEvent = function(ev) {
ev || (ev = window.event);
if (Calendar.is_ie) {
ev.cancelBubble = true;
ev.returnValue = false;
} else {
ev.preventDefault();
ev.stopPropagation();
}
return false;
};
Calendar.addEvent = function(el, evname, func) {
if (el.attachEvent) { // IE
el.attachEvent("on" + evname, func);
} else if (el.addEventListener) { // Gecko / W3C
el.addEventListener(evname, func, true);
} else {
el["on" + evname] = func;
}
};
Calendar.removeEvent = function(el, evname, func) {
if (el.detachEvent) { // IE
el.detachEvent("on" + evname, func);
} else if (el.removeEventListener) { // Gecko / W3C
el.removeEventListener(evname, func, true);
} else {
el["on" + evname] = null;
}
};
Calendar.createElement = function(type, parent) {
var el = null;
if (document.createElementNS) {
// use the XHTML namespace; IE won't normally get here unless
// _they_ "fix" the DOM2 implementation.
el = document.createElementNS("http://www.w3.org/1999/xhtml", type);
} else {
el = document.createElement(type);
}
if (typeof parent != "undefined") {
parent.appendChild(el);
}
return el;
};
// END: UTILITY FUNCTIONS
// BEGIN: CALENDAR STATIC FUNCTIONS
/** Internal -- adds a set of events to make some element behave like a button. */
Calendar._add_evs = function(el) {
with (Calendar) {
addEvent(el, "mouseover", dayMouseOver);
addEvent(el, "mousedown", dayMouseDown);
addEvent(el, "mouseout", dayMouseOut);
if (is_ie) {
addEvent(el, "dblclick", dayMouseDblClick);
el.setAttribute("unselectable", true);
}
}
};
Calendar.findMonth = function(el) {
if (typeof el.month != "undefined") {
return el;
} else if (typeof el.parentNode.month != "undefined") {
return el.parentNode;
}
return null;
};
Calendar.findYear = function(el) {
if (typeof el.year != "undefined") {
return el;
} else if (typeof el.parentNode.year != "undefined") {
return el.parentNode;
}
return null;
};
Calendar.showMonthsCombo = function () {
var cal = Calendar._C;
if (!cal) {
return false;
}
var cd = cal.activeDiv;
var mc = cal.monthsCombo;
if (cal.hilitedMonth) {
Calendar.removeClass(cal.hilitedMonth, "hilite");
}
if (cal.activeMonth) {
Calendar.removeClass(cal.activeMonth, "active");
}
var mon = cal.monthsCombo.getElementsByTagName("div")[cal.date.getMonth()];
Calendar.addClass(mon, "active");
cal.activeMonth = mon;
var s = mc.style;
s.display = "block";
if (cd.navtype < 0)
s.left = cd.offsetLeft + "px";
else {
var mcw = mc.offsetWidth;
if (typeof mcw == "undefined")
// Konqueror brain-dead techniques
mcw = 50;
s.left = (cd.offsetLeft + cd.offsetWidth - mcw) + "px";
}
s.top = (cd.offsetTop + cd.offsetHeight) + "px";
};
Calendar.showYearsCombo = function (fwd) {
var cal = Calendar._C;
if (!cal) {
return false;
}
var cd = cal.activeDiv;
var yc = cal.yearsCombo;
if (cal.hilitedYear) {
Calendar.removeClass(cal.hilitedYear, "hilite");
}
if (cal.activeYear) {
Calendar.removeClass(cal.activeYear, "active");
}
cal.activeYear = null;
var Y = cal.date.getFullYear() + (fwd ? 1 : -1);
var yr = yc.firstChild;
var show = false;
for (var i = 12; i > 0; --i) {
if (Y >= cal.minYear && Y <= cal.maxYear) {
yr.innerHTML = Y;
yr.year = Y;
yr.style.display = "block";
show = true;
} else {
yr.style.display = "none";
}
yr = yr.nextSibling;
Y += fwd ? cal.yearStep : -cal.yearStep;
}
if (show) {
var s = yc.style;
s.display = "block";
if (cd.navtype < 0)
s.left = cd.offsetLeft + "px";
else {
var ycw = yc.offsetWidth;
if (typeof ycw == "undefined")
// Konqueror brain-dead techniques
ycw = 50;
s.left = (cd.offsetLeft + cd.offsetWidth - ycw) + "px";
}
s.top = (cd.offsetTop + cd.offsetHeight) + "px";
}
};
// event handlers
Calendar.tableMouseUp = function(ev) {
var cal = Calendar._C;
if (!cal) {
return false;
}
if (cal.timeout) {
clearTimeout(cal.timeout);
}
var el = cal.activeDiv;
if (!el) {
return false;
}
var target = Calendar.getTargetElement(ev);
ev || (ev = window.event);
Calendar.removeClass(el, "active");
if (target == el || target.parentNode == el) {
Calendar.cellClick(el, ev);
}
var mon = Calendar.findMonth(target);
var date = null;
if (mon) {
date = new Date(cal.date);
if (mon.month != date.getMonth()) {
date.setMonth(mon.month);
cal.setDate(date);
cal.dateClicked = false;
cal.callHandler();
}
} else {
var year = Calendar.findYear(target);
if (year) {
date = new Date(cal.date);
if (year.year != date.getFullYear()) {
date.setFullYear(year.year);
cal.setDate(date);
cal.dateClicked = false;
cal.callHandler();
}
}
}
with (Calendar) {
removeEvent(document, "mouseup", tableMouseUp);
removeEvent(document, "mouseover", tableMouseOver);
removeEvent(document, "mousemove", tableMouseOver);
cal._hideCombos();
_C = null;
return stopEvent(ev);
}
};
Calendar.tableMouseOver = function (ev) {
var cal = Calendar._C;
if (!cal) {
return;
}
var el = cal.activeDiv;
var target = Calendar.getTargetElement(ev);
if (target == el || target.parentNode == el) {
Calendar.addClass(el, "hilite active");
Calendar.addClass(el.parentNode, "rowhilite");
} else {
if (typeof el.navtype == "undefined" || (el.navtype != 50 && (el.navtype == 0 || Math.abs(el.navtype) > 2)))
Calendar.removeClass(el, "active");
Calendar.removeClass(el, "hilite");
Calendar.removeClass(el.parentNode, "rowhilite");
}
ev || (ev = window.event);
if (el.navtype == 50 && target != el) {
var pos = Calendar.getAbsolutePos(el);
var w = el.offsetWidth;
var x = ev.clientX;
var dx;
var decrease = true;
if (x > pos.x + w) {
dx = x - pos.x - w;
decrease = false;
} else
dx = pos.x - x;
if (dx < 0) dx = 0;
var range = el._range;
var current = el._current;
var count = Math.floor(dx / 10) % range.length;
for (var i = range.length; --i >= 0;)
if (range[i] == current)
break;
while (count-- > 0)
if (decrease) {
if (--i < 0)
i = range.length - 1;
} else if ( ++i >= range.length )
i = 0;
var newval = range[i];
el.innerHTML = newval;
cal.onUpdateTime();
}
var mon = Calendar.findMonth(target);
if (mon) {
if (mon.month != cal.date.getMonth()) {
if (cal.hilitedMonth) {
Calendar.removeClass(cal.hilitedMonth, "hilite");
}
Calendar.addClass(mon, "hilite");
cal.hilitedMonth = mon;
} else if (cal.hilitedMonth) {
Calendar.removeClass(cal.hilitedMonth, "hilite");
}
} else {
if (cal.hilitedMonth) {
Calendar.removeClass(cal.hilitedMonth, "hilite");
}
var year = Calendar.findYear(target);
if (year) {
if (year.year != cal.date.getFullYear()) {
if (cal.hilitedYear) {
Calendar.removeClass(cal.hilitedYear, "hilite");
}
Calendar.addClass(year, "hilite");
cal.hilitedYear = year;
} else if (cal.hilitedYear) {
Calendar.removeClass(cal.hilitedYear, "hilite");
}
} else if (cal.hilitedYear) {
Calendar.removeClass(cal.hilitedYear, "hilite");
}
}
return Calendar.stopEvent(ev);
};
Calendar.tableMouseDown = function (ev) {
if (Calendar.getTargetElement(ev) == Calendar.getElement(ev)) {
return Calendar.stopEvent(ev);
}
};
Calendar.calDragIt = function (ev) {
var cal = Calendar._C;
if (!(cal && cal.dragging)) {
return false;
}
var posX;
var posY;
if (Calendar.is_ie) {
posY = window.event.clientY;
posX = window.event.clientX;
} else {
posX = ev.pageX;
posY = ev.pageY;
}
cal.hideShowCovered();
var st = cal.element.style;
st.left = (posX - cal.xOffs) + "px";
st.top = (posY - cal.yOffs) + "px";
return Calendar.stopEvent(ev);
};
Calendar.calDragEnd = function (ev) {
var cal = Calendar._C;
if (!cal) {
return false;
}
cal.dragging = false;
with (Calendar) {
removeEvent(document, "mousemove", calDragIt);
removeEvent(document, "mouseup", calDragEnd);
tableMouseUp(ev);
}
cal.hideShowCovered();
};
Calendar.dayMouseDown = function(ev) {
var el = Calendar.getElement(ev);
if (el.disabled) {
return false;
}
var cal = el.calendar;
cal.activeDiv = el;
Calendar._C = cal;
if (el.navtype != 300) with (Calendar) {
if (el.navtype == 50) {
el._current = el.innerHTML;
addEvent(document, "mousemove", tableMouseOver);
} else
addEvent(document, Calendar.is_ie5 ? "mousemove" : "mouseover", tableMouseOver);
addClass(el, "hilite active");
addEvent(document, "mouseup", tableMouseUp);
} else if (cal.isPopup) {
cal._dragStart(ev);
}
if (el.navtype == -1 || el.navtype == 1) {
if (cal.timeout) clearTimeout(cal.timeout);
cal.timeout = setTimeout("Calendar.showMonthsCombo()", 250);
} else if (el.navtype == -2 || el.navtype == 2) {
if (cal.timeout) clearTimeout(cal.timeout);
cal.timeout = setTimeout((el.navtype > 0) ? "Calendar.showYearsCombo(true)" : "Calendar.showYearsCombo(false)", 250);
} else {
cal.timeout = null;
}
return Calendar.stopEvent(ev);
};
Calendar.dayMouseDblClick = function(ev) {
Calendar.cellClick(Calendar.getElement(ev), ev || window.event);
if (Calendar.is_ie) {
document.selection.empty();
}
};
Calendar.dayMouseOver = function(ev) {
var el = Calendar.getElement(ev);
if (Calendar.isRelated(el, ev) || Calendar._C || el.disabled) {
return false;
}
if (el.ttip) {
if (el.ttip.substr(0, 1) == "_") {
el.ttip = el.caldate.print(el.calendar.ttDateFormat) + el.ttip.substr(1);
}
el.calendar.tooltips.innerHTML = el.ttip;
}
if (el.navtype != 300) {
Calendar.addClass(el, "hilite");
if (el.caldate) {
Calendar.addClass(el.parentNode, "rowhilite");
}
}
return Calendar.stopEvent(ev);
};
Calendar.dayMouseOut = function(ev) {
with (Calendar) {
var el = getElement(ev);
if (isRelated(el, ev) || _C || el.disabled)
return false;
removeClass(el, "hilite");
if (el.caldate)
removeClass(el.parentNode, "rowhilite");
if (el.calendar)
el.calendar.tooltips.innerHTML = _TT["SEL_DATE"];
return stopEvent(ev);
}
};
/**
* A generic "click" handler :) handles all types of buttons defined in this
* calendar.
*/
Calendar.cellClick = function(el, ev) {
var cal = el.calendar;
var closing = false;
var newdate = false;
var date = null;
if (typeof el.navtype == "undefined") {
if (cal.currentDateEl) {
Calendar.removeClass(cal.currentDateEl, "selected");
Calendar.addClass(el, "selected");
closing = (cal.currentDateEl == el);
if (!closing) {
cal.currentDateEl = el;
}
}
cal.date.setDateOnly(el.caldate);
date = cal.date;
var other_month = !(cal.dateClicked = !el.otherMonth);
if (!other_month && !cal.currentDateEl)
cal._toggleMultipleDate(new Date(date));
else
newdate = !el.disabled;
// a date was clicked
if (other_month)
cal._init(cal.firstDayOfWeek, date);
} else {
if (el.navtype == 200) {
Calendar.removeClass(el, "hilite");
cal.callCloseHandler();
return;
}
date = new Date(cal.date);
if (el.navtype == 0)
date.setDateOnly(new Date()); // TODAY
// unless "today" was clicked, we assume no date was clicked so
// the selected handler will know not to close the calenar when
// in single-click mode.
// cal.dateClicked = (el.navtype == 0);
cal.dateClicked = false;
var year = date.getFullYear();
var mon = date.getMonth();
function setMonth(m) {
var day = date.getDate();
var max = date.getMonthDays(m);
if (day > max) {
date.setDate(max);
}
date.setMonth(m);
};
switch (el.navtype) {
case 400:
Calendar.removeClass(el, "hilite");
var text = Calendar._TT["ABOUT"];
if (typeof text != "undefined") {
text += cal.showsTime ? Calendar._TT["ABOUT_TIME"] : "";
} else {
// FIXME: this should be removed as soon as lang files get updated!
text = "Help and about box text is not translated into this language.\n" +
"If you know this language and you feel generous please update\n" +
"the corresponding file in \"lang\" subdir to match calendar-en.js\n" +
"and send it back to <[email protected]> to get it into the distribution ;-)\n\n" +
"Thank you!\n" +
"http://dynarch.com/mishoo/calendar.epl\n";
}
alert(text);
return;
case -2:
if (year > cal.minYear) {
date.setFullYear(year - 1);
}
break;
case -1:
if (mon > 0) {
setMonth(mon - 1);
} else if (year-- > cal.minYear) {
date.setFullYear(year);
setMonth(11);
}
break;
case 1:
if (mon < 11) {
setMonth(mon + 1);
} else if (year < cal.maxYear) {
date.setFullYear(year + 1);
setMonth(0);
}
break;
case 2:
if (year < cal.maxYear) {
date.setFullYear(year + 1);
}
break;
case 100:
cal.setFirstDayOfWeek(el.fdow);
return;
case 50:
var range = el._range;
var current = el.innerHTML;
for (var i = range.length; --i >= 0;)
if (range[i] == current)
break;
if (ev && ev.shiftKey) {
if (--i < 0)
i = range.length - 1;
} else if ( ++i >= range.length )
i = 0;
var newval = range[i];
el.innerHTML = newval;
cal.onUpdateTime();
return;
case 0:
// TODAY will bring us here
if ((typeof cal.getDateStatus == "function") &&
cal.getDateStatus(date, date.getFullYear(), date.getMonth(), date.getDate())) {
return false;
}
break;
}
if (!date.equalsTo(cal.date)) {
cal.setDate(date);
newdate = true;
} else if (el.navtype == 0)
newdate = closing = true;
}
if (newdate) {
ev && cal.callHandler();
}
if (closing) {
Calendar.removeClass(el, "hilite");
ev && cal.callCloseHandler();
}
};
// END: CALENDAR STATIC FUNCTIONS
// BEGIN: CALENDAR OBJECT FUNCTIONS
/**
* This function creates the calendar inside the given parent. If _par is
* null than it creates a popup calendar inside the BODY element. If _par is
* an element, be it BODY, then it creates a non-popup calendar (still
* hidden). Some properties need to be set before calling this function.
*/
Calendar.prototype.create = function (_par) {
var parent = null;
if (! _par) {
// default parent is the document body, in which case we create
// a popup calendar.
parent = document.getElementsByTagName("body")[0];
this.isPopup = true;
} else {
parent = _par;
this.isPopup = false;
}
this.date = this.dateStr ? new Date(this.dateStr) : new Date();
var table = Calendar.createElement("table");
this.table = table;
table.cellSpacing = 0;
table.cellPadding = 0;
table.calendar = this;
Calendar.addEvent(table, "mousedown", Calendar.tableMouseDown);
var div = Calendar.createElement("div");
this.element = div;
div.className = "calendar";
if (this.isPopup) {
div.style.position = "absolute";
div.style.display = "none";
}
div.appendChild(table);
var thead = Calendar.createElement("thead", table);
var cell = null;
var row = null;
var cal = this;
var hh = function (text, cs, navtype) {
cell = Calendar.createElement("td", row);
cell.colSpan = cs;
cell.className = "button";
if (navtype != 0 && Math.abs(navtype) <= 2)
cell.className += " nav";
Calendar._add_evs(cell);
cell.calendar = cal;
cell.navtype = navtype;
cell.innerHTML = "<div unselectable='on'>" + text + "</div>";
return cell;
};
row = Calendar.createElement("tr", thead);
var title_length = 6;
(this.isPopup) && --title_length;
(this.weekNumbers) && ++title_length;
hh("?", 1, 400).ttip = Calendar._TT["INFO"];
this.title = hh("", title_length, 300);
this.title.className = "title";
if (this.isPopup) {
this.title.ttip = Calendar._TT["DRAG_TO_MOVE"];
this.title.style.cursor = "move";
hh("×", 1, 200).ttip = Calendar._TT["CLOSE"];
}
row = Calendar.createElement("tr", thead);
row.className = "headrow";
this._nav_py = hh("«", 1, -2);
this._nav_py.ttip = Calendar._TT["PREV_YEAR"];
this._nav_pm = hh("‹", 1, -1);
this._nav_pm.ttip = Calendar._TT["PREV_MONTH"];
this._nav_now = hh(Calendar._TT["TODAY"], this.weekNumbers ? 4 : 3, 0);
this._nav_now.ttip = Calendar._TT["GO_TODAY"];
this._nav_nm = hh("›", 1, 1);
this._nav_nm.ttip = Calendar._TT["NEXT_MONTH"];
this._nav_ny = hh("»", 1, 2);
this._nav_ny.ttip = Calendar._TT["NEXT_YEAR"];
// day names
row = Calendar.createElement("tr", thead);
row.className = "daynames";
if (this.weekNumbers) {
cell = Calendar.createElement("td", row);
cell.className = "name wn";
cell.innerHTML = Calendar._TT["WK"];
}
for (var i = 7; i > 0; --i) {
cell = Calendar.createElement("td", row);
if (!i) {
cell.navtype = 100;
cell.calendar = this;
Calendar._add_evs(cell);
}
}
this.firstdayname = (this.weekNumbers) ? row.firstChild.nextSibling : row.firstChild;
this._displayWeekdays();
var tbody = Calendar.createElement("tbody", table);
this.tbody = tbody;
for (i = 6; i > 0; --i) {
row = Calendar.createElement("tr", tbody);
if (this.weekNumbers) {
cell = Calendar.createElement("td", row);
}
for (var j = 7; j > 0; --j) {
cell = Calendar.createElement("td", row);
cell.calendar = this;
Calendar._add_evs(cell);
}
}
if (this.showsTime) {
row = Calendar.createElement("tr", tbody);
row.className = "time";
cell = Calendar.createElement("td", row);
cell.className = "time";
cell.colSpan = 2;
cell.innerHTML = Calendar._TT["TIME"] || " ";
cell = Calendar.createElement("td", row);
cell.className = "time";
cell.colSpan = this.weekNumbers ? 4 : 3;
(function(){
function makeTimePart(className, init, range_start, range_end) {
var part = Calendar.createElement("span", cell);
part.className = className;
part.innerHTML = init;
part.calendar = cal;
part.ttip = Calendar._TT["TIME_PART"];
part.navtype = 50;
part._range = [];
if (typeof range_start != "number")
part._range = range_start;
else {
for (var i = range_start; i <= range_end; ++i) {
var txt;
if (i < 10 && range_end >= 10) txt = '0' + i;
else txt = '' + i;
part._range[part._range.length] = txt;
}
}
Calendar._add_evs(part);
return part;
};
var hrs = cal.date.getHours();
var mins = cal.date.getMinutes();
var t12 = !cal.time24;
var pm = (hrs > 12);
if (t12 && pm) hrs -= 12;
var H = makeTimePart("hour", hrs, t12 ? 1 : 0, t12 ? 12 : 23);
var span = Calendar.createElement("span", cell);
span.innerHTML = ":";
span.className = "colon";
var M = makeTimePart("minute", mins, 0, 59);
var AP = null;
cell = Calendar.createElement("td", row);
cell.className = "time";
cell.colSpan = 2;
if (t12)
AP = makeTimePart("ampm", pm ? "pm" : "am", ["am", "pm"]);
else
cell.innerHTML = " ";
cal.onSetTime = function() {
var pm, hrs = this.date.getHours(),
mins = this.date.getMinutes();
if (t12) {
pm = (hrs >= 12);
if (pm) hrs -= 12;
if (hrs == 0) hrs = 12;
AP.innerHTML = pm ? "pm" : "am";
}
H.innerHTML = (hrs < 10) ? ("0" + hrs) : hrs;
M.innerHTML = (mins < 10) ? ("0" + mins) : mins;
};
cal.onUpdateTime = function() {
var date = this.date;
var h = parseInt(H.innerHTML, 10);
if (t12) {
if (/pm/i.test(AP.innerHTML) && h < 12)
h += 12;
else if (/am/i.test(AP.innerHTML) && h == 12)
h = 0;
}
var d = date.getDate();
var m = date.getMonth();
var y = date.getFullYear();
date.setHours(h);
date.setMinutes(parseInt(M.innerHTML, 10));
date.setFullYear(y);
date.setMonth(m);
date.setDate(d);
this.dateClicked = false;
this.callHandler();
};
})();
} else {
this.onSetTime = this.onUpdateTime = function() {};
}
var tfoot = Calendar.createElement("tfoot", table);
row = Calendar.createElement("tr", tfoot);
row.className = "footrow";
cell = hh(Calendar._TT["SEL_DATE"], this.weekNumbers ? 8 : 7, 300);
cell.className = "ttip";
if (this.isPopup) {
cell.ttip = Calendar._TT["DRAG_TO_MOVE"];
cell.style.cursor = "move";
}
this.tooltips = cell;
// Customization: add an extra "Close" button at the bottom for
// multi-select calendars to make them more intuitive.
if (this.multiple != null) {
row = Calendar.createElement("tr", tfoot);
row.className = "footrow";
cell = Calendar.createElement("td", row);
hh(Calendar._TT["CLOSE"], this.weekNumbers ? 6 : 5, 200);
cell = Calendar.createElement("td", row);
}
div = Calendar.createElement("div", this.element);
this.monthsCombo = div;
div.className = "combo";
for (i = 0; i < Calendar._MN.length; ++i) {
var mn = Calendar.createElement("div");
mn.className = Calendar.is_ie ? "label-IEfix" : "label";
mn.month = i;
mn.innerHTML = Calendar._SMN[i];
div.appendChild(mn);
}
div = Calendar.createElement("div", this.element);
this.yearsCombo = div;
div.className = "combo";
for (i = 12; i > 0; --i) {
var yr = Calendar.createElement("div");
yr.className = Calendar.is_ie ? "label-IEfix" : "label";
div.appendChild(yr);
}
this._init(this.firstDayOfWeek, this.date);
parent.appendChild(this.element);
};
/** keyboard navigation, only for popup calendars */
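// Bindings implemented below: Ctrl+arrows switch month/year, plain arrows
// move the selected day (left/right by 1, up/down by 7), Space jumps to
// today, Enter picks the highlighted date and Esc closes the popup.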
Calendar._keyEvent = function(ev) {
var cal = window._dynarch_popupCalendar;
if (!cal || cal.multiple)
return false;
(Calendar.is_ie) && (ev = window.event);
var act = (Calendar.is_ie || ev.type == "keypress"),
K = ev.keyCode;
if (ev.ctrlKey) {
switch (K) {
case 37: // KEY left
act && Calendar.cellClick(cal._nav_pm);
break;
case 38: // KEY up
act && Calendar.cellClick(cal._nav_py);
break;
case 39: // KEY right
act && Calendar.cellClick(cal._nav_nm);
break;
case 40: // KEY down
act && Calendar.cellClick(cal._nav_ny);
break;
default:
return false;
}
} else switch (K) {
case 32: // KEY space (now)
Calendar.cellClick(cal._nav_now);
break;
case 27: // KEY esc
act && cal.callCloseHandler();
break;
case 37: // KEY left
case 38: // KEY up
case 39: // KEY right
case 40: // KEY down
if (act) {
var prev, x, y, ne, el, step;
prev = K == 37 || K == 38;
step = (K == 37 || K == 39) ? 1 : 7;
function setVars() {
el = cal.currentDateEl;
var p = el.pos;
x = p & 15;
y = p >> 4;
ne = cal.ar_days[y][x];
};
setVars();
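// note: despite their names, prevMonth()/nextMonth() move the selection
// by "step" days (1 or 7); the setDate() call re-renders the calendar,
// which is what carries the focus across a month boundary.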
function prevMonth() {
var date = new Date(cal.date);
date.setDate(date.getDate() - step);
cal.setDate(date);
};
function nextMonth() {
var date = new Date(cal.date);
date.setDate(date.getDate() + step);
cal.setDate(date);
};
while (1) {
switch (K) {
case 37: // KEY left
if (--x >= 0)
ne = cal.ar_days[y][x];
else {
x = 6;
K = 38;
continue;
}
break;
case 38: // KEY up
if (--y >= 0)
ne = cal.ar_days[y][x];
else {
prevMonth();
setVars();
}
break;
case 39: // KEY right
if (++x < 7)
ne = cal.ar_days[y][x];
else {
x = 0;
K = 40;
continue;
}
break;
case 40: // KEY down
if (++y < cal.ar_days.length)
ne = cal.ar_days[y][x];
else {
nextMonth();
setVars();
}
break;
}
break;
}
if (ne) {
if (!ne.disabled)
Calendar.cellClick(ne);
else if (prev)
prevMonth();
else
nextMonth();
}
}
break;
case 13: // KEY enter
if (act)
Calendar.cellClick(cal.currentDateEl, ev);
break;
default:
return false;
}
return Calendar.stopEvent(ev);
};
/**
* (RE)Initializes the calendar to the given date and firstDayOfWeek
*/
Calendar.prototype._init = function (firstDayOfWeek, date) {
var today = new Date(),
TY = today.getFullYear(),
TM = today.getMonth(),
TD = today.getDate();
this.table.style.visibility = "hidden";
var year = date.getFullYear();
if (year < this.minYear) {
year = this.minYear;
date.setFullYear(year);
} else if (year > this.maxYear) {
year = this.maxYear;
date.setFullYear(year);
}
this.firstDayOfWeek = firstDayOfWeek;
this.date = new Date(date);
var month = date.getMonth();
var mday = date.getDate();
var no_days = date.getMonthDays();
// calendar voodoo for computing the first day that would actually be
// displayed in the calendar, even if it's from the previous month.
// WARNING: this is magic. ;-)
date.setDate(1);
var day1 = (date.getDay() - this.firstDayOfWeek) % 7;
if (day1 < 0)
day1 += 7;
date.setDate(-day1);
date.setDate(date.getDate() + 1);
var row = this.tbody.firstChild;
var MN = Calendar._SMN[month];
var ar_days = this.ar_days = new Array();
var weekend = Calendar._TT["WEEKEND"];
var dates = this.multiple ? (this.datesCells = {}) : null;
for (var i = 0; i < 6; ++i, row = row.nextSibling) {
var cell = row.firstChild;
if (this.weekNumbers) {
cell.className = "day wn";
cell.innerHTML = date.getWeekNumber();
cell = cell.nextSibling;
}
row.className = "daysrow";
var hasdays = false, iday, dpos = ar_days[i] = [];
for (var j = 0; j < 7; ++j, cell = cell.nextSibling, date.setDate(iday + 1)) {
iday = date.getDate();
var wday = date.getDay();
cell.className = "day";
cell.pos = i << 4 | j;
dpos[j] = cell;
var current_month = (date.getMonth() == month);
if (!current_month) {
if (this.showsOtherMonths) {
cell.className += " othermonth";
cell.otherMonth = true;
} else {
cell.className = "emptycell";
cell.innerHTML = " ";
cell.disabled = true;
continue;
}
} else {
cell.otherMonth = false;
hasdays = true;
}
cell.disabled = false;
cell.innerHTML = this.getDateText ? this.getDateText(date, iday) : iday;
if (dates)
dates[date.print("%Y%m%d")] = cell;
if (this.getDateStatus) {
var status = this.getDateStatus(date, year, month, iday);
if (this.getDateToolTip) {
var toolTip = this.getDateToolTip(date, year, month, iday);
if (toolTip)
cell.title = toolTip;
}
if (status === true) {
cell.className += " disabled";
cell.disabled = true;
} else {
if (/disabled/i.test(status))
cell.disabled = true;
cell.className += " " + status;
}
}
if (!cell.disabled) {
cell.caldate = new Date(date);
cell.ttip = "_";
if (!this.multiple && current_month
&& iday == mday && this.hiliteToday) {
cell.className += " selected";
this.currentDateEl = cell;
}
if (date.getFullYear() == TY &&
date.getMonth() == TM &&
iday == TD) {
cell.className += " today";
cell.ttip += Calendar._TT["PART_TODAY"];
}
if (weekend.indexOf(wday.toString()) != -1)
cell.className += cell.otherMonth ? " oweekend" : " weekend";
}
}
if (!(hasdays || this.showsOtherMonths))
row.className = "emptyrow";
}
this.title.innerHTML = Calendar._MN[month] + ", " + year;
this.onSetTime();
this.table.style.visibility = "visible";
this._initMultipleDates();
// PROFILE
// this.tooltips.innerHTML = "Generated in " + ((new Date()) - today) + " ms";
};
Calendar.prototype._initMultipleDates = function() {
if (this.multiple) {
for (var i in this.multiple) {
var cell = this.datesCells[i];
var d = this.multiple[i];
if (!d)
continue;
if (cell)
cell.className += " selected";
}
}
};
Calendar.prototype._toggleMultipleDate = function(date) {
if (this.multiple) {
var ds = date.print("%Y%m%d");
var cell = this.datesCells[ds];
if (cell) {
var d = this.multiple[ds];
if (!d) {
Calendar.addClass(cell, "selected");
this.multiple[ds] = date;
} else {
Calendar.removeClass(cell, "selected");
delete this.multiple[ds];
}
}
}
};
Calendar.prototype.setDateToolTipHandler = function (unaryFunction) {
this.getDateToolTip = unaryFunction;
};
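// Illustrative sketch (assuming "calendar" is a Calendar instance): the
// handler receives (date, year, month, day) -- see the getDateToolTip
// call in _init -- and returns the tooltip string, or a falsy value for
// no tooltip:
//
//   calendar.setDateToolTipHandler(function (date) {
//       return date.print("%A, %e %B %Y");
//   });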
/**
* Calls the _init function above to go to a certain date (but only if the
* date is different from the currently selected one).
*/
Calendar.prototype.setDate = function (date) {
if (!date.equalsTo(this.date)) {
this._init(this.firstDayOfWeek, date);
}
};
/**
* Refreshes the calendar. Useful if the "disabledHandler" function is
* dynamic, meaning that the list of disabled dates can change at runtime.
* Just call this function if you think the list of disabled dates should
* change.
*/
Calendar.prototype.refresh = function () {
this._init(this.firstDayOfWeek, this.date);
};
/** Modifies the "firstDayOfWeek" parameter (pass 0 for Synday, 1 for Monday, etc.). */
Calendar.prototype.setFirstDayOfWeek = function (firstDayOfWeek) {
this._init(firstDayOfWeek, this.date);
this._displayWeekdays();
};
/**
* Allows customization of what dates are enabled. The "unaryFunction"
* parameter must be a function object that receives the date (as a JS Date
* object) and returns a boolean value. If the returned value is true then
* the passed date will be marked as disabled.
*/
Calendar.prototype.setDateStatusHandler = Calendar.prototype.setDisabledHandler = function (unaryFunction) {
this.getDateStatus = unaryFunction;
};
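// Illustrative sketch (assuming "calendar" is a Calendar instance): a
// handler that disables weekends. Besides true/false, the handler may
// return a string, which _init appends to the cell's CSS class:
//
//   calendar.setDateStatusHandler(function (date) {
//       var day = date.getDay();
//       return (day == 0 || day == 6); // true => disabled
//   });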
/** Customization of allowed year range for the calendar. */
Calendar.prototype.setRange = function (a, z) {
this.minYear = a;
this.maxYear = z;
};
/** Calls the first user handler (selectedHandler). */
Calendar.prototype.callHandler = function () {
if (this.onSelected) {
this.onSelected(this, this.date.print(this.dateFormat));
}
};
/** Calls the second user handler (closeHandler). */
Calendar.prototype.callCloseHandler = function () {
if (this.onClose) {
this.onClose(this);
}
this.hideShowCovered();
};
/** Removes the calendar object from the DOM tree and destroys it. */
Calendar.prototype.destroy = function () {
var el = this.element.parentNode;
el.removeChild(this.element);
Calendar._C = null;
window._dynarch_popupCalendar = null;
};
/**
* Moves the calendar element to a different section in the DOM tree (changes
* its parent).
*/
Calendar.prototype.reparent = function (new_parent) {
var el = this.element;
el.parentNode.removeChild(el);
new_parent.appendChild(el);
};
// This gets called when the user presses a mouse button anywhere in the
// document, if the calendar is shown. If the click was outside the open
// calendar this function closes it.
Calendar._checkCalendar = function(ev) {
var calendar = window._dynarch_popupCalendar;
if (!calendar) {
return false;
}
var el = Calendar.is_ie ? Calendar.getElement(ev) : Calendar.getTargetElement(ev);
for (; el != null && el != calendar.element; el = el.parentNode);
if (el == null) {
// calls closeHandler which should hide the calendar.
window._dynarch_popupCalendar.callCloseHandler();
return Calendar.stopEvent(ev);
}
};
/** Shows the calendar. */
Calendar.prototype.show = function () {
var rows = this.table.getElementsByTagName("tr");
for (var i = rows.length; i > 0;) {
var row = rows[--i];
Calendar.removeClass(row, "rowhilite");
var cells = row.getElementsByTagName("td");
for (var j = cells.length; j > 0;) {
var cell = cells[--j];
Calendar.removeClass(cell, "hilite");
Calendar.removeClass(cell, "active");
}
}
this.element.style.display = "block";
this.hidden = false;
if (this.isPopup) {
window._dynarch_popupCalendar = this;
Calendar.addEvent(document, "keydown", Calendar._keyEvent);
Calendar.addEvent(document, "keypress", Calendar._keyEvent);
Calendar.addEvent(document, "mousedown", Calendar._checkCalendar);
}
this.hideShowCovered();
};
/**
* Hides the calendar and, for popups, removes the document-level key and
* mouse handlers that show() installed.
*/
Calendar.prototype.hide = function () {
if (this.isPopup) {
Calendar.removeEvent(document, "keydown", Calendar._keyEvent);
Calendar.removeEvent(document, "keypress", Calendar._keyEvent);
Calendar.removeEvent(document, "mousedown", Calendar._checkCalendar);
}
this.element.style.display = "none";
this.hidden = true;
this.hideShowCovered();
};
/**
* Shows the calendar at a given absolute position (beware that, depending on
* the calendar element style -- position property -- this might be relative
* to the parent's containing rectangle).
*/
Calendar.prototype.showAt = function (x, y) {
var s = this.element.style;
s.left = x + "px";
s.top = y + "px";
this.show();
};
/** Shows the calendar near a given element. */
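// "opts", when given, is a one- or two-letter alignment string: a vertical
// code first (T/B/C/t/b), then an optional horizontal one (L/R/C/l/r), as
// interpreted by the switches below; without it the calendar simply drops
// below "el".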
Calendar.prototype.showAtElement = function (el, opts) {
var self = this;
var p = Calendar.getAbsolutePos(el);
if (!opts || typeof opts != "string") {
this.showAt(p.x, p.y + el.offsetHeight);
return true;
}
function fixPosition(box) {
if (box.x < 0)
box.x = 0;
if (box.y < 0)
box.y = 0;
var cp = document.createElement("div");
var s = cp.style;
s.position = "absolute";
s.right = s.bottom = s.width = s.height = "0px";
document.body.appendChild(cp);
var br = Calendar.getAbsolutePos(cp);
document.body.removeChild(cp);
if (Calendar.is_ie) {
br.y += document.body.scrollTop || document.documentElement.scrollTop;
br.x += document.body.scrollLeft || document.documentElement.scrollLeft;
} else {
br.y += window.scrollY;
br.x += window.scrollX;
}
var tmp = box.x + box.width - br.x;
if (tmp > 0) box.x -= tmp;
tmp = box.y + box.height - br.y;
if (tmp > 0) box.y -= tmp;
};
this.element.style.display = "block";
Calendar.continuation_for_the_fucking_khtml_browser = function() {
var w = self.element.offsetWidth;
var h = self.element.offsetHeight;
self.element.style.display = "none";
var valign = opts.substr(0, 1);
var halign = "l";
if (opts.length > 1) {
halign = opts.substr(1, 1);
}
// vertical alignment
switch (valign) {
case "T": p.y -= h; break;
case "B": p.y += el.offsetHeight; break;
case "C": p.y += (el.offsetHeight - h) / 2; break;
case "t": p.y += el.offsetHeight - h; break;
case "b": break; // already there
}
// horizontal alignment
switch (halign) {
case "L": p.x -= w; break;
case "R": p.x += el.offsetWidth; break;
case "C": p.x += (el.offsetWidth - w) / 2; break;
case "l": p.x += el.offsetWidth - w; break;
case "r": break; // already there
}
p.width = w;
p.height = h + 40;
self.monthsCombo.style.display = "none";
fixPosition(p);
self.showAt(p.x, p.y);
};
if (Calendar.is_khtml)
setTimeout("Calendar.continuation_for_the_fucking_khtml_browser()", 10);
else
Calendar.continuation_for_the_fucking_khtml_browser();
};
/** Customizes the date format. */
Calendar.prototype.setDateFormat = function (str) {
this.dateFormat = str;
};
/** Customizes the tooltip date format. */
Calendar.prototype.setTtDateFormat = function (str) {
this.ttDateFormat = str;
};
/**
* Tries to identify the date represented in a string. If successful it also
* calls this.setDate which moves the calendar to the given date.
*/
Calendar.prototype.parseDate = function(str, fmt) {
if (!fmt)
fmt = this.dateFormat;
this.setDate(Date.parseDate(str, fmt));
};
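// Workaround for old IE/Opera, where windowed controls (applet, iframe,
// select) paint above positioned DIVs: while the popup is visible, any such
// element underneath it is hidden, and its original visibility is restored
// afterwards.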
Calendar.prototype.hideShowCovered = function () {
if (!Calendar.is_ie && !Calendar.is_opera)
return;
function getVisib(obj){
var value = obj.style.visibility;
if (!value) {
if (document.defaultView && typeof (document.defaultView.getComputedStyle) == "function") { // Gecko, W3C
if (!Calendar.is_khtml)
value = document.defaultView.
getComputedStyle(obj, "").getPropertyValue("visibility");
else
value = '';
} else if (obj.currentStyle) { // IE
value = obj.currentStyle.visibility;
} else
value = '';
}
return value;
};
var tags = new Array("applet", "iframe", "select");
var el = this.element;
var p = Calendar.getAbsolutePos(el);
var EX1 = p.x;
var EX2 = el.offsetWidth + EX1;
var EY1 = p.y;
var EY2 = el.offsetHeight + EY1;
for (var k = tags.length; k > 0; ) {
var ar = document.getElementsByTagName(tags[--k]);
var cc = null;
for (var i = ar.length; i > 0;) {
cc = ar[--i];
p = Calendar.getAbsolutePos(cc);
var CX1 = p.x;
var CX2 = cc.offsetWidth + CX1;
var CY1 = p.y;
var CY2 = cc.offsetHeight + CY1;
if (this.hidden || (CX1 > EX2) || (CX2 < EX1) || (CY1 > EY2) || (CY2 < EY1)) {
if (!cc.__msh_save_visibility) {
cc.__msh_save_visibility = getVisib(cc);
}
cc.style.visibility = cc.__msh_save_visibility;
} else {
if (!cc.__msh_save_visibility) {
cc.__msh_save_visibility = getVisib(cc);
}
cc.style.visibility = "hidden";
}
}
}
};
/** Internal function; it displays the bar with the names of the weekdays. */
Calendar.prototype._displayWeekdays = function () {
var fdow = this.firstDayOfWeek;
var cell = this.firstdayname;
var weekend = Calendar._TT["WEEKEND"];
for (var i = 0; i < 7; ++i) {
cell.className = "day name";
var realday = (i + fdow) % 7;
if (i) {
cell.ttip = Calendar._TT["DAY_FIRST"].replace("%s", Calendar._DN[realday]);
cell.navtype = 100;
cell.calendar = this;
cell.fdow = realday;
Calendar._add_evs(cell);
}
if (weekend.indexOf(realday.toString()) != -1) {
Calendar.addClass(cell, "weekend");
}
cell.innerHTML = Calendar._SDN[(i + fdow) % 7];
cell = cell.nextSibling;
}
};
/** Internal function. Hides all combo boxes that might be displayed. */
Calendar.prototype._hideCombos = function () {
this.monthsCombo.style.display = "none";
this.yearsCombo.style.display = "none";
};
/** Internal function. Starts dragging the element. */
Calendar.prototype._dragStart = function (ev) {
if (this.dragging) {
return;
}
this.dragging = true;
var posX;
var posY;
if (Calendar.is_ie) {
posY = window.event.clientY;
posX = window.event.clientX;
} else {
posY = ev.clientY + window.scrollY;
posX = ev.clientX + window.scrollX;
}
var st = this.element.style;
this.xOffs = posX - parseInt(st.left);
this.yOffs = posY - parseInt(st.top);
Calendar.addEvent(document, "mousemove", Calendar.calDragIt);
Calendar.addEvent(document, "mouseup", Calendar.calDragEnd);
};
// BEGIN: DATE OBJECT PATCHES
/** Array with the number of days in each month (February is adjusted for leap years in getMonthDays). */
Date._MD = new Array(31,28,31,30,31,30,31,31,30,31,30,31);
/** Constants used for time computations */
Date.SECOND = 1000 /* milliseconds */;
Date.MINUTE = 60 * Date.SECOND;
Date.HOUR = 60 * Date.MINUTE;
Date.DAY = 24 * Date.HOUR;
Date.WEEK = 7 * Date.DAY;
Date.parseDate = function(str, fmt) {
var today = new Date();
var y = 0;
var m = -1;
var d = 0;
var a = str.split(/\W+/);
var b = fmt.match(/%./g);
var i = 0, j = 0;
var hr = 0;
var min = 0;
for (i = 0; i < a.length; ++i) {
if (!a[i])
continue;
switch (b[i]) {
case "%d":
case "%e":
d = parseInt(a[i], 10);
break;
case "%m":
m = parseInt(a[i], 10) - 1;
break;
case "%Y":
case "%y":
y = parseInt(a[i], 10);
(y < 100) && (y += (y > 29) ? 1900 : 2000);
break;
case "%b":
case "%B":
for (j = 0; j < 12; ++j) {
if (Calendar._MN[j].substr(0, a[i].length).toLowerCase() == a[i].toLowerCase()) { m = j; break; }
}
break;
case "%H":
case "%I":
case "%k":
case "%l":
hr = parseInt(a[i], 10);
break;
case "%P":
case "%p":
if (/pm/i.test(a[i]) && hr < 12)
hr += 12;
else if (/am/i.test(a[i]) && hr >= 12)
hr -= 12;
break;
case "%M":
min = parseInt(a[i], 10);
break;
}
}
if (isNaN(y)) y = today.getFullYear();
if (isNaN(m)) m = today.getMonth();
if (isNaN(d)) d = today.getDate();
if (isNaN(hr)) hr = today.getHours();
if (isNaN(min)) min = today.getMinutes();
if (y != 0 && m != -1 && d != 0)
return new Date(y, m, d, hr, min, 0);
y = 0; m = -1; d = 0;
for (i = 0; i < a.length; ++i) {
if (a[i].search(/[a-zA-Z]+/) != -1) {
var t = -1;
for (j = 0; j < 12; ++j) {
if (Calendar._MN[j].substr(0, a[i].length).toLowerCase() == a[i].toLowerCase()) { t = j; break; }
}
if (t != -1) {
if (m != -1) {
d = m+1;
}
m = t;
}
} else if (parseInt(a[i], 10) <= 12 && m == -1) {
m = a[i]-1;
} else if (parseInt(a[i], 10) > 31 && y == 0) {
y = parseInt(a[i], 10);
(y < 100) && (y += (y > 29) ? 1900 : 2000);
} else if (d == 0) {
d = a[i];
}
}
if (y == 0)
y = today.getFullYear();
if (m != -1 && d != 0)
return new Date(y, m, d, hr, min, 0);
return today;
};
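// Usage sketch: parse against an explicit format first; if the tokens do
// not line up, the heuristic second pass above takes over.
//
//   Date.parseDate("2005-06-01 14:05", "%Y-%m-%d %H:%M");
//   // -> June 1st 2005, 14:05 local time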
/** Returns the number of days in the given month (defaults to this date's month). */
Date.prototype.getMonthDays = function(month) {
var year = this.getFullYear();
if (typeof month == "undefined") {
month = this.getMonth();
}
if (((0 == (year%4)) && ( (0 != (year%100)) || (0 == (year%400)))) && month == 1) {
return 29;
} else {
return Date._MD[month];
}
};
/** Returns the day of the year (1-366). */
Date.prototype.getDayOfYear = function() {
var now = new Date(this.getFullYear(), this.getMonth(), this.getDate(), 0, 0, 0);
var then = new Date(this.getFullYear(), 0, 0, 0, 0, 0);
var time = now - then;
return Math.floor(time / Date.DAY);
};
/** Returns the number of the week in year, as defined in ISO 8601. */
Date.prototype.getWeekNumber = function() {
var d = new Date(this.getFullYear(), this.getMonth(), this.getDate(), 0, 0, 0);
var DoW = d.getDay();
d.setDate(d.getDate() - (DoW + 6) % 7 + 3); // Nearest Thu
var ms = d.valueOf(); // GMT
d.setMonth(0);
d.setDate(4); // Thu in Week 1
return Math.round((ms - d.valueOf()) / (7 * 864e5)) + 1;
};
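// Worked example: 2005-01-01 falls on a Saturday, so its nearest Thursday
// is 2004-12-30 and the method returns 53 -- the last ISO week of 2004:
//
//   new Date(2005, 0, 1).getWeekNumber(); // -> 53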
/** Checks date and time equality, down to minute precision. */
Date.prototype.equalsTo = function(date) {
return ((this.getFullYear() == date.getFullYear()) &&
(this.getMonth() == date.getMonth()) &&
(this.getDate() == date.getDate()) &&
(this.getHours() == date.getHours()) &&
(this.getMinutes() == date.getMinutes()));
};
/** Set only the year, month, date parts (keep existing time) */
Date.prototype.setDateOnly = function(date) {
var tmp = new Date(date);
this.setDate(1);
this.setFullYear(tmp.getFullYear());
this.setMonth(tmp.getMonth());
this.setDate(tmp.getDate());
};
/** Prints the date in a string according to the given format. */
Date.prototype.print = function (str) {
var m = this.getMonth();
var d = this.getDate();
var y = this.getFullYear();
var wn = this.getWeekNumber();
var w = this.getDay();
var s = {};
var hr = this.getHours();
var pm = (hr >= 12);
var ir = (pm) ? (hr - 12) : hr;
var dy = this.getDayOfYear();
if (ir == 0)
ir = 12;
var min = this.getMinutes();
var sec = this.getSeconds();
s["%a"] = Calendar._SDN[w]; // abbreviated weekday name [FIXME: I18N]
s["%A"] = Calendar._DN[w]; // full weekday name
s["%b"] = Calendar._SMN[m]; // abbreviated month name [FIXME: I18N]
s["%B"] = Calendar._MN[m]; // full month name
// FIXME: %c : preferred date and time representation for the current locale
s["%C"] = 1 + Math.floor(y / 100); // the century number
s["%d"] = (d < 10) ? ("0" + d) : d; // the day of the month (range 01 to 31)
s["%e"] = d; // the day of the month (range 1 to 31)
// FIXME: %D : american date style: %m/%d/%y
// FIXME: %E, %F, %G, %g, %h (man strftime)
s["%H"] = (hr < 10) ? ("0" + hr) : hr; // hour, range 00 to 23 (24h format)
s["%I"] = (ir < 10) ? ("0" + ir) : ir; // hour, range 01 to 12 (12h format)
s["%j"] = (dy < 100) ? ((dy < 10) ? ("00" + dy) : ("0" + dy)) : dy; // day of the year (range 001 to 366)
s["%k"] = hr; // hour, range 0 to 23 (24h format)
s["%l"] = ir; // hour, range 1 to 12 (12h format)
s["%m"] = (m < 9) ? ("0" + (1+m)) : (1+m); // month, range 01 to 12
s["%M"] = (min < 10) ? ("0" + min) : min; // minute, range 00 to 59
s["%n"] = "\n"; // a newline character
s["%p"] = pm ? "PM" : "AM";
s["%P"] = pm ? "pm" : "am";
// FIXME: %r : the time in am/pm notation %I:%M:%S %p
// FIXME: %R : the time in 24-hour notation %H:%M
s["%s"] = Math.floor(this.getTime() / 1000);
s["%S"] = (sec < 10) ? ("0" + sec) : sec; // seconds, range 00 to 59
s["%t"] = "\t"; // a tab character
// FIXME: %T : the time in 24-hour notation (%H:%M:%S)
s["%U"] = s["%W"] = s["%V"] = (wn < 10) ? ("0" + wn) : wn;
s["%u"] = w + 1; // the day of the week (range 1 to 7, 1 = MON)
s["%w"] = w; // the day of the week (range 0 to 6, 0 = SUN)
// FIXME: %x : preferred date representation for the current locale without the time
// FIXME: %X : preferred time representation for the current locale without the date
s["%y"] = ('' + y).substr(2, 2); // year without the century (range 00 to 99)
s["%Y"] = y; // year with the century
s["%%"] = "%"; // a literal '%' character
var re = /%./g;
if (!Calendar.is_ie5 && !Calendar.is_khtml)
return str.replace(re, function (par) { return s[par] || par; });
var a = str.match(re);
for (var i = 0; i < a.length; i++) {
var tmp = s[a[i]];
if (tmp) {
re = new RegExp(a[i], 'g');
str = str.replace(re, tmp);
}
}
return str;
};
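// Usage sketch for the specifiers defined above:
//
//   var d = new Date(2005, 5, 1, 14, 5, 9); // June 1st 2005, 14:05:09
//   d.print("%Y-%m-%d %H:%M:%S");           // -> "2005-06-01 14:05:09"
//   d.print("%A, %e %B (week %W)");         // weekday, day, month, week no.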
Date.prototype.__msh_oldSetFullYear = Date.prototype.setFullYear;
Date.prototype.setFullYear = function(y) {
var d = new Date(this);
d.__msh_oldSetFullYear(y);
if (d.getMonth() != this.getMonth())
this.setDate(28);
this.__msh_oldSetFullYear(y);
};
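// Rationale for the patch above: a plain setFullYear on Feb 29 rolls over
// into March 1st when the target year is not a leap year; probing on a copy
// first and clamping the day to the 28th keeps the month stable.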
// END: DATE OBJECT PATCHES
// global object that remembers the calendar
window._dynarch_popupCalendar = null; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/calendar.js | calendar.js |
// Calendar RU language
// Translation: Sly Golovanov, http://golovanov.net, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("воскресенье",
"понедельник",
"вторник",
"среда",
"четверг",
"пятница",
"суббота",
"воскресенье");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
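// For example, if the short names were just the first two letters of the
// full names, the two arrays below could be replaced with:
//
// Calendar._SDN_len = 2;
// Calendar._SMN_len = 2;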
// short day names
Calendar._SDN = new Array
("вск",
"пон",
"втр",
"срд",
"чет",
"пят",
"суб",
"вск");
// full month names
Calendar._MN = new Array
("январь",
"февраль",
"март",
"апрель",
"май",
"июнь",
"июль",
"август",
"сентябрь",
"октябрь",
"ноябрь",
"декабрь");
// short month names
Calendar._SMN = new Array
("янв",
"фев",
"мар",
"апр",
"май",
"июн",
"июл",
"авг",
"сен",
"окт",
"ноя",
"дек");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "О календаре...";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Как выбрать дату:\n" +
"- При помощи кнопок \xab, \xbb можно выбрать год\n" +
"- При помощи кнопок " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " можно выбрать месяц\n" +
"- Подержите эти кнопки нажатыми, чтобы появилось меню быстрого выбора.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Как выбрать время:\n" +
"- При клике на часах или минутах они увеличиваются\n" +
"- при клике с нажатой клавишей Shift они уменьшаются\n" +
"- если нажать и двигать мышкой влево/вправо, они будут меняться быстрее.";
Calendar._TT["PREV_YEAR"] = "На год назад (удерживать для меню)";
Calendar._TT["PREV_MONTH"] = "На месяц назад (удерживать для меню)";
Calendar._TT["GO_TODAY"] = "Сегодня";
Calendar._TT["NEXT_MONTH"] = "На месяц вперед (удерживать для меню)";
Calendar._TT["NEXT_YEAR"] = "На год вперед (удерживать для меню)";
Calendar._TT["SEL_DATE"] = "Выберите дату";
Calendar._TT["DRAG_TO_MOVE"] = "Перетаскивайте мышкой";
Calendar._TT["PART_TODAY"] = " (сегодня)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Первый день недели будет %s";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Закрыть";
Calendar._TT["TODAY"] = "Сегодня";
Calendar._TT["TIME_PART"] = "(Shift-)клик или нажать и двигать";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%e %b, %a";
Calendar._TT["WK"] = "нед";
Calendar._TT["TIME"] = "Время:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-ru.js | calendar-ru.js |
// Calendar HU language
// Author: ???
// Modifier: KARASZI Istvan, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Vasárnap",
"Hétfõ",
"Kedd",
"Szerda",
"Csütörtök",
"Péntek",
"Szombat",
"Vasárnap");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("v",
"h",
"k",
"sze",
"cs",
"p",
"szo",
"v");
// full month names
Calendar._MN = new Array
("január",
"február",
"március",
"április",
"május",
"június",
"július",
"augusztus",
"szeptember",
"október",
"november",
"december");
// short month names
Calendar._SMN = new Array
("jan",
"feb",
"már",
"ápr",
"máj",
"jún",
"júl",
"aug",
"sze",
"okt",
"nov",
"dec");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "A kalendáriumról";
Calendar._TT["ABOUT"] =
"DHTML dátum/idõ kiválasztó\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"a legfrissebb verzió megtalálható: http://www.dynarch.com/projects/calendar/\n" +
"GNU LGPL alatt terjesztve. Lásd a http://gnu.org/licenses/lgpl.html oldalt a részletekhez." +
"\n\n" +
"Dátum választás:\n" +
"- használja a \xab, \xbb gombokat az év kiválasztásához\n" +
"- használja a " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " gombokat a hónap kiválasztásához\n" +
"- tartsa lenyomva az egérgombot a gyors választáshoz.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Idõ választás:\n" +
"- kattintva növelheti az idõt\n" +
"- shift-tel kattintva csökkentheti\n" +
"- lenyomva tartva és húzva gyorsabban kiválaszthatja.";
Calendar._TT["PREV_YEAR"] = "Elõzõ év (tartsa nyomva a menühöz)";
Calendar._TT["PREV_MONTH"] = "Elõzõ hónap (tartsa nyomva a menühöz)";
Calendar._TT["GO_TODAY"] = "Mai napra ugrás";
Calendar._TT["NEXT_MONTH"] = "Köv. hónap (tartsa nyomva a menühöz)";
Calendar._TT["NEXT_YEAR"] = "Köv. év (tartsa nyomva a menühöz)";
Calendar._TT["SEL_DATE"] = "Válasszon dátumot";
Calendar._TT["DRAG_TO_MOVE"] = "Húzza a mozgatáshoz";
Calendar._TT["PART_TODAY"] = " (ma)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "%s legyen a hét elsõ napja";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Bezár";
Calendar._TT["TODAY"] = "Ma";
Calendar._TT["TIME_PART"] = "(Shift-)Klikk vagy húzás az érték változtatásához";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%b %e, %a";
Calendar._TT["WK"] = "hét";
Calendar._TT["TIME"] = "idõ:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-hu.js | calendar-hu.js |
// Calendar big5 language
// Author: Gary Fu, <[email protected]>
// Encoding: UTF-8
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("¬P´Á¤é",
"¬P´Á¤@",
"¬P´Á¤G",
"¬P´Á¤T",
"¬P´Á¥|",
"¬P´Á¤",
"¬P´Á¤»",
"¬P´Á¤é");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("¤é",
"¤@",
"¤G",
"¤T",
"¥|",
"¤",
"¤»",
"¤é");
// full month names
Calendar._MN = new Array
("¤@¤ë",
"¤G¤ë",
"¤T¤ë",
"¥|¤ë",
"¤¤ë",
"¤»¤ë",
"¤C¤ë",
"¤K¤ë",
"¤E¤ë",
"¤Q¤ë",
"¤Q¤@¤ë",
"¤Q¤G¤ë");
// short month names
Calendar._SMN = new Array
("¤@¤ë",
"¤G¤ë",
"¤T¤ë",
"¥|¤ë",
"¤¤ë",
"¤»¤ë",
"¤C¤ë",
"¤K¤ë",
"¤E¤ë",
"¤Q¤ë",
"¤Q¤@¤ë",
"¤Q¤G¤ë");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Ãö©ó";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"¤é´Á¿ï¾Ü¤èªk:\n" +
"- ¨Ï¥Î \xab, \xbb «ö¶s¥i¿ï¾Ü¦~¥÷\n" +
"- ¨Ï¥Î " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " «ö¶s¥i¿ï¾Ü¤ë¥÷\n" +
"- «ö¦í¤W±ªº«ö¶s¥i¥H¥[§Ö¿ï¨ú";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"®É¶¡¿ï¾Ü¤èªk:\n" +
"- ÂIÀ»¥ô¦óªº®É¶¡³¡¥÷¥i¼W¥[¨äÈ\n" +
"- ¦P®É«öShiftÁä¦AÂIÀ»¥i´î¤Ö¨äÈ\n" +
"- ÂIÀ»¨Ã©ì¦²¥i¥[§Ö§ïÅܪºÈ";
Calendar._TT["PREV_YEAR"] = "¤W¤@¦~ («ö¦í¿ï³æ)";
Calendar._TT["PREV_MONTH"] = "¤U¤@¦~ («ö¦í¿ï³æ)";
Calendar._TT["GO_TODAY"] = "¨ì¤µ¤é";
Calendar._TT["NEXT_MONTH"] = "¤W¤@¤ë («ö¦í¿ï³æ)";
Calendar._TT["NEXT_YEAR"] = "¤U¤@¤ë («ö¦í¿ï³æ)";
Calendar._TT["SEL_DATE"] = "¿ï¾Ü¤é´Á";
Calendar._TT["DRAG_TO_MOVE"] = "©ì¦²";
Calendar._TT["PART_TODAY"] = " (¤µ¤é)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "±N %s Åã¥Ü¦b«e";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Ãö³¬";
Calendar._TT["TODAY"] = "¤µ¤é";
Calendar._TT["TIME_PART"] = "ÂIÀ»or©ì¦²¥i§ïÅܮɶ¡(¦P®É«öShift¬°´î)";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %b %e";
Calendar._TT["WK"] = "¶g";
Calendar._TT["TIME"] = "Time:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-big5.js | calendar-big5.js |
// Calendar IT language
// Author: Mihai Bazon, <[email protected]>
// Translator: Fabio Di Bernardini, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Domenica",
"Lunedì",
"Martedì",
"Mercoledì",
"Giovedì",
"Venerdì",
"Sabato",
"Domenica");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("Dom",
"Lun",
"Mar",
"Mer",
"Gio",
"Ven",
"Sab",
"Dom");
// full month names
Calendar._MN = new Array
("Gennaio",
"Febbraio",
"Marzo",
"Aprile",
"Maggio",
"Giugno",
"Luglio",
"Augosto",
"Settembre",
"Ottobre",
"Novembre",
"Dicembre");
// short month names
Calendar._SMN = new Array
("Gen",
"Feb",
"Mar",
"Apr",
"Mag",
"Giu",
"Lug",
"Ago",
"Set",
"Ott",
"Nov",
"Dic");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Informazioni sul calendario";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"Per gli aggiornamenti: http://www.dynarch.com/projects/calendar/\n" +
"Distribuito sotto licenza GNU LGPL. Vedi http://gnu.org/licenses/lgpl.html per i dettagli." +
"\n\n" +
"Selezione data:\n" +
"- Usa \xab, \xbb per selezionare l'anno\n" +
"- Usa " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " per i mesi\n" +
"- Tieni premuto a lungo il mouse per accedere alle funzioni di selezione veloce.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Selezione orario:\n" +
"- Clicca sul numero per incrementarlo\n" +
"- o Shift+click per decrementarlo\n" +
"- o click e sinistra o destra per variarlo.";
Calendar._TT["PREV_YEAR"] = "Anno prec.(clicca a lungo per il menù)";
Calendar._TT["PREV_MONTH"] = "Mese prec. (clicca a lungo per il menù)";
Calendar._TT["GO_TODAY"] = "Oggi";
Calendar._TT["NEXT_MONTH"] = "Pross. mese (clicca a lungo per il menù)";
Calendar._TT["NEXT_YEAR"] = "Pross. anno (clicca a lungo per il menù)";
Calendar._TT["SEL_DATE"] = "Seleziona data";
Calendar._TT["DRAG_TO_MOVE"] = "Trascina per spostarlo";
Calendar._TT["PART_TODAY"] = " (oggi)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Mostra prima %s";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Chiudi";
Calendar._TT["TODAY"] = "Oggi";
Calendar._TT["TIME_PART"] = "(Shift-)Click o trascina per cambiare il valore";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d-%m-%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%a:%b:%e";
Calendar._TT["WK"] = "set";
Calendar._TT["TIME"] = "Ora:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-it.js | calendar-it.js |
// Calendar BG language
// Author: Mihai Bazon, <[email protected]>
// Translator: Valentin Sheiretsky, <[email protected]>
// Encoding: UTF-8
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Íåäåëÿ",
"Ïîíåäåëíèê",
"Âòîðíèê",
"Ñðÿäà",
"×åòâúðòúê",
"Ïåòúê",
"Ñúáîòà",
"Íåäåëÿ");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("Íåä",
"Ïîí",
"Âòî",
"Ñðÿ",
"×åò",
"Ïåò",
"Ñúá",
"Íåä");
// full month names
Calendar._MN = new Array
("ßíóàðè",
"Ôåâðóàðè",
"Ìàðò",
"Àïðèë",
"Ìàé",
"Þíè",
"Þëè",
"Àâãóñò",
"Ñåïòåìâðè",
"Îêòîìâðè",
"Íîåìâðè",
"Äåêåìâðè");
// short month names
Calendar._SMN = new Array
("ßíó",
"Ôåâ",
"Ìàð",
"Àïð",
"Ìàé",
"Þíè",
"Þëè",
"Àâã",
"Ñåï",
"Îêò",
"Íîå",
"Äåê");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Èíôîðìàöèÿ çà êàëåíäàðà";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Date selection:\n" +
"- Use the \xab, \xbb buttons to select year\n" +
"- Use the " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " buttons to select month\n" +
"- Hold mouse button on any of the above buttons for faster selection.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Time selection:\n" +
"- Click on any of the time parts to increase it\n" +
"- or Shift-click to decrease it\n" +
"- or click and drag for faster selection.";
Calendar._TT["PREV_YEAR"] = "Ïðåäíà ãîäèíà (çàäðúæòå çà ìåíþ)";
Calendar._TT["PREV_MONTH"] = "Ïðåäåí ìåñåö (çàäðúæòå çà ìåíþ)";
Calendar._TT["GO_TODAY"] = "Èçáåðåòå äíåñ";
Calendar._TT["NEXT_MONTH"] = "Ñëåäâàù ìåñåö (çàäðúæòå çà ìåíþ)";
Calendar._TT["NEXT_YEAR"] = "Ñëåäâàùà ãîäèíà (çàäðúæòå çà ìåíþ)";
Calendar._TT["SEL_DATE"] = "Èçáåðåòå äàòà";
Calendar._TT["DRAG_TO_MOVE"] = "Ïðåìåñòâàíå";
Calendar._TT["PART_TODAY"] = " (äíåñ)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "%s êàòî ïúðâè äåí";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Çàòâîðåòå";
Calendar._TT["TODAY"] = "Äíåñ";
Calendar._TT["TIME_PART"] = "(Shift-)Click èëè drag çà äà ïðîìåíèòå ñòîéíîñòòà";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%A - %e %B %Y";
Calendar._TT["WK"] = "Ñåäì";
Calendar._TT["TIME"] = "×àñ:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-bg.js | calendar-bg.js |
// Calendar LV language
// Author: Juris Valdovskis, <[email protected]>
// Encoding: UTF-8
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Svçtdiena",
"Pirmdiena",
"Otrdiena",
"Treðdiena",
"Ceturdiena",
"Piektdiena",
"Sestdiena",
"Svçtdiena");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("Sv",
"Pr",
"Ot",
"Tr",
"Ce",
"Pk",
"Se",
"Sv");
// full month names
Calendar._MN = new Array
("Janvâris",
"Februâris",
"Marts",
"Aprîlis",
"Maijs",
"Jûnijs",
"Jûlijs",
"Augusts",
"Septembris",
"Oktobris",
"Novembris",
"Decembris");
// short month names
Calendar._SMN = new Array
("Jan",
"Feb",
"Mar",
"Apr",
"Mai",
"Jûn",
"Jûl",
"Aug",
"Sep",
"Okt",
"Nov",
"Dec");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Par kalendâru";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Datuma izvçle:\n" +
"- Izmanto \xab, \xbb pogas, lai izvçlçtos gadu\n" +
"- Izmanto " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + "pogas, lai izvçlçtos mçnesi\n" +
"- Turi nospiestu peles pogu uz jebkuru no augstâk minçtajâm pogâm, lai paâtrinâtu izvçli.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Laika izvçle:\n" +
"- Uzklikðíini uz jebkuru no laika daïâm, lai palielinâtu to\n" +
"- vai Shift-klikðíis, lai samazinâtu to\n" +
"- vai noklikðíini un velc uz attiecîgo virzienu lai mainîtu âtrâk.";
Calendar._TT["PREV_YEAR"] = "Iepr. gads (turi izvçlnei)";
Calendar._TT["PREV_MONTH"] = "Iepr. mçnesis (turi izvçlnei)";
Calendar._TT["GO_TODAY"] = "Ðodien";
Calendar._TT["NEXT_MONTH"] = "Nâkoðais mçnesis (turi izvçlnei)";
Calendar._TT["NEXT_YEAR"] = "Nâkoðais gads (turi izvçlnei)";
Calendar._TT["SEL_DATE"] = "Izvçlies datumu";
Calendar._TT["DRAG_TO_MOVE"] = "Velc, lai pârvietotu";
Calendar._TT["PART_TODAY"] = " (ðodien)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Attçlot %s kâ pirmo";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "1,7";
Calendar._TT["CLOSE"] = "Aizvçrt";
Calendar._TT["TODAY"] = "Ðodien";
Calendar._TT["TIME_PART"] = "(Shift-)Klikðíis vai pârvieto, lai mainîtu";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d-%m-%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %e %b";
Calendar._TT["WK"] = "wk";
Calendar._TT["TIME"] = "Laiks:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-lv.js | calendar-lv.js |
// ** I18N
// Calendar pt-BR language
// Author: Fernando Dourado, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Domingo",
"Segunda",
"Terça",
"Quarta",
"Quinta",
"Sexta",
"Sabádo",
"Domingo");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
// [No changes using default values]
// full month names
Calendar._MN = new Array
("Janeiro",
"Fevereiro",
"Março",
"Abril",
"Maio",
"Junho",
"Julho",
"Agosto",
"Setembro",
"Outubro",
"Novembro",
"Dezembro");
// short month names
// [No changes using default values]
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Sobre o calendário";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Translate to portuguese Brazil (pt-BR) by Fernando Dourado ([email protected])\n" +
"Tradução para o português Brasil (pt-BR) por Fernando Dourado ([email protected])" +
"\n\n" +
"Selecionar data:\n" +
"- Use as teclas \xab, \xbb para selecionar o ano\n" +
"- Use as teclas " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " para selecionar o mês\n" +
"- Clique e segure com o mouse em qualquer botão para selecionar rapidamente.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Selecionar hora:\n" +
"- Clique em qualquer uma das partes da hora para aumentar\n" +
"- ou Shift-clique para diminuir\n" +
"- ou clique e arraste para selecionar rapidamente.";
Calendar._TT["PREV_YEAR"] = "Ano anterior (clique e segure para menu)";
Calendar._TT["PREV_MONTH"] = "Mês anterior (clique e segure para menu)";
Calendar._TT["GO_TODAY"] = "Ir para a data atual";
Calendar._TT["NEXT_MONTH"] = "Próximo mês (clique e segure para menu)";
Calendar._TT["NEXT_YEAR"] = "Próximo ano (clique e segure para menu)";
Calendar._TT["SEL_DATE"] = "Selecione uma data";
Calendar._TT["DRAG_TO_MOVE"] = "Clique e segure para mover";
Calendar._TT["PART_TODAY"] = " (hoje)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Exibir %s primeiro";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Fechar";
Calendar._TT["TODAY"] = "Hoje";
Calendar._TT["TIME_PART"] = "(Shift-)Clique ou arraste para mudar o valor";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d/%m/%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%d de %B de %Y";
Calendar._TT["WK"] = "sem";
Calendar._TT["TIME"] = "Hora:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-br.js | calendar-br.js |
// Calendar LT language
// Author: Martynas Majeris, <[email protected]>
// Encoding: UTF-8
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Sekmadienis",
"Pirmadienis",
"Antradienis",
"Trečiadienis",
"Ketvirtadienis",
"Penktadienis",
"Šeštadienis",
"Sekmadienis");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// First day of the week. "0" means display Sunday first, "1" means display
// Monday first, etc.
Calendar._FD = 1;
// short day names
Calendar._SDN = new Array
("Sek",
"Pir",
"Ant",
"Tre",
"Ket",
"Pen",
"Šeš",
"Sek");
// full month names
Calendar._MN = new Array
("Sausis",
"Vasaris",
"Kovas",
"Balandis",
"Gegužė",
"Birželis",
"Liepa",
"Rugpjūtis",
"Rugsėjis",
"Spalis",
"Lapkritis",
"Gruodis");
// short month names
Calendar._SMN = new Array
("Sau",
"Vas",
"Kov",
"Bal",
"Geg",
"Bir",
"Lie",
"Rgp",
"Rgs",
"Spa",
"Lap",
"Gru");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Apie kalendorių";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"Naujausią versiją rasite: http://www.dynarch.com/projects/calendar/\n" +
"Platinamas pagal GNU LGPL licenciją. Aplankykite http://gnu.org/licenses/lgpl.html" +
"\n\n" +
"Datos pasirinkimas:\n" +
"- Metų pasirinkimas: \xab, \xbb\n" +
"- Mėnesio pasirinkimas: " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + "\n" +
"- Nuspauskite ir laikykite pelės klavišą greitesniam pasirinkimui.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Laiko pasirinkimas:\n" +
"- Spustelkite ant valandų arba minučių - skaičius padidės vienetu.\n" +
"- Jei spausite kartu su Shift, skaičius sumažės.\n" +
"- Greitam pasirinkimui spustelkite ir pajudinkite pelę.";
Calendar._TT["PREV_YEAR"] = "Ankstesni metai (laikykite, jei norite meniu)";
Calendar._TT["PREV_MONTH"] = "Ankstesnis mėnuo (laikykite, jei norite meniu)";
Calendar._TT["GO_TODAY"] = "Pasirinkti šiandieną";
Calendar._TT["NEXT_MONTH"] = "Kitas mėnuo (laikykite, jei norite meniu)";
Calendar._TT["NEXT_YEAR"] = "Kiti metai (laikykite, jei norite meniu)";
Calendar._TT["SEL_DATE"] = "Pasirinkite datą";
Calendar._TT["DRAG_TO_MOVE"] = "Tempkite";
Calendar._TT["PART_TODAY"] = " (šiandien)";
Calendar._TT["DAY_FIRST"] = "Pirma savaitės diena - %s";
Calendar._TT["MON_FIRST"] = "Pirma savaitės diena - pirmadienis";
Calendar._TT["SUN_FIRST"] = "Pirma savaitės diena - sekmadienis";
Calendar._TT["CLOSE"] = "Uždaryti";
Calendar._TT["TODAY"] = "Šiandien";
Calendar._TT["TIME_PART"] = "Spustelkite arba tempkite jei norite pakeisti";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%A, %Y-%m-%d";
Calendar._TT["WK"] = "sav";
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["TIME"] = "Laikas:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-lt.js | calendar-lt.js |
// ditet
Calendar._DN = new Array
("E Diele",
"E Hene",
"E Marte",
"E Merkure",
"E Enjte",
"E Premte",
"E Shtune",
"E Diele");
//ditet shkurt
Calendar._SDN = new Array
("Die",
"Hen",
"Mar",
"Mer",
"Enj",
"Pre",
"Sht",
"Die");
// muajt
Calendar._MN = new Array
("Janar",
"Shkurt",
"Mars",
"Prill",
"Maj",
"Qeshor",
"Korrik",
"Gusht",
"Shtator",
"Tetor",
"Nentor",
"Dhjetor");
// muajte shkurt
Calendar._SMN = new Array
("Jan",
"Shk",
"Mar",
"Pri",
"Maj",
"Qes",
"Kor",
"Gus",
"Sht",
"Tet",
"Nen",
"Dhj");
// ndihmesa
Calendar._TT = {};
Calendar._TT["INFO"] = "Per kalendarin";
Calendar._TT["ABOUT"] =
"Zgjedhes i ores/dates ne DHTML \n" +
"\n\n" +"Zgjedhja e Dates:\n" +
"- Perdor butonat \xab, \xbb per te zgjedhur vitin\n" +
"- Perdor butonat" + String.fromCharCode(0x2039) + ", " +
String.fromCharCode(0x203a) +
" per te zgjedhur muajin\n" +
"- Mbani shtypur butonin e mousit per nje zgjedje me te shpejte.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Zgjedhja e kohes:\n" +
"- Kliko tek ndonje nga pjeset e ores per ta rritur ate\n" +
"- ose kliko me Shift per ta zvogeluar ate\n" +
"- ose cliko dhe terhiq per zgjedhje me te shpejte.";
Calendar._TT["PREV_YEAR"] = "Viti i shkuar (prit per menune)";
Calendar._TT["PREV_MONTH"] = "Muaji i shkuar (prit per menune)";
Calendar._TT["GO_TODAY"] = "Sot";
Calendar._TT["NEXT_MONTH"] = "Muaji i ardhshem (prit per menune)";
Calendar._TT["NEXT_YEAR"] = "Viti i ardhshem (prit per menune)";
Calendar._TT["SEL_DATE"] = "Zgjidh daten";
Calendar._TT["DRAG_TO_MOVE"] = "Terhiqe per te levizur";
Calendar._TT["PART_TODAY"] = " (sot)";
// "%s" eshte dita e pare e javes
// %s do te zevendesohet me emrin e dite
Calendar._TT["DAY_FIRST"] = "Trego te %s te paren";
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Mbyll";
Calendar._TT["TODAY"] = "Sot";
Calendar._TT["TIME_PART"] = "Kliko me (Shift-)ose terhiqe per te ndryshuar
vleren";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %b %e";
Calendar._TT["WK"] = "Java";
Calendar._TT["TIME"] = "Koha:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-al.js | calendar-al.js |
// Calendar FR language
// Author: Mihai Bazon, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// Translator: David Duret, <[email protected]> from previous french version
// full day names
Calendar._DN = new Array
("Dimanche",
"Lundi",
"Mardi",
"Mercredi",
"Jeudi",
"Vendredi",
"Samedi",
"Dimanche");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// First day of the week. "0" means display Sunday first, "1" means display
// Monday first, etc.
Calendar._FD = 1;
// short day names
Calendar._SDN = new Array
("Dim",
"Lun",
"Mar",
"Mer",
"Jeu",
"Ven",
"Sam",
"Dim");
// full month names
Calendar._MN = new Array
("Janvier",
"Février",
"Mars",
"Avril",
"Mai",
"Juin",
"Juillet",
"Août",
"Septembre",
"Octobre",
"Novembre",
"Décembre");
// short month names
Calendar._SMN = new Array
("Jan",
"Fev",
"Mar",
"Avr",
"Mai",
"Juin",
"Juil",
"Aout",
"Sep",
"Oct",
"Nov",
"Dec");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "A propos du calendrier";
Calendar._TT["ABOUT"] =
"DHTML Date/Heure Selecteur\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this this ;-)
"Pour la derniere version visitez : http://www.dynarch.com/projects/calendar/\n" +
"Distribué par GNU LGPL. Voir http://gnu.org/licenses/lgpl.html pour les details." +
"\n\n" +
"Selection de la date :\n" +
"- Utiliser les bouttons \xab, \xbb pour selectionner l\'annee\n" +
"- Utiliser les bouttons " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " pour selectionner les mois\n" +
"- Garder la souris sur n'importe quels boutons pour une selection plus rapide";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Selection de l\'heure :\n" +
"- Cliquer sur heures ou minutes pour incrementer\n" +
"- ou Maj-clic pour decrementer\n" +
"- ou clic et glisser-deplacer pour une selection plus rapide";
Calendar._TT["PREV_YEAR"] = "Année préc. (maintenir pour menu)";
Calendar._TT["PREV_MONTH"] = "Mois préc. (maintenir pour menu)";
Calendar._TT["GO_TODAY"] = "Atteindre la date du jour";
Calendar._TT["NEXT_MONTH"] = "Mois suiv. (maintenir pour menu)";
Calendar._TT["NEXT_YEAR"] = "Année suiv. (maintenir pour menu)";
Calendar._TT["SEL_DATE"] = "Sélectionner une date";
Calendar._TT["DRAG_TO_MOVE"] = "Déplacer";
Calendar._TT["PART_TODAY"] = " (Aujourd'hui)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Afficher %s en premier";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Fermer";
Calendar._TT["TODAY"] = "Aujourd'hui";
Calendar._TT["TIME_PART"] = "(Maj-)Clic ou glisser pour modifier la valeur";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d/%m/%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %b %e";
Calendar._TT["WK"] = "Sem.";
Calendar._TT["TIME"] = "Heure :"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-fr.js | calendar-fr.js |
// Calendar pt_BR language
// Author: Adalberto Machado, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Domingo",
"Segunda",
"Terca",
"Quarta",
"Quinta",
"Sexta",
"Sabado",
"Domingo");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("Dom",
"Seg",
"Ter",
"Qua",
"Qui",
"Sex",
"Sab",
"Dom");
// full month names
Calendar._MN = new Array
("Janeiro",
"Fevereiro",
"Marco",
"Abril",
"Maio",
"Junho",
"Julho",
"Agosto",
"Setembro",
"Outubro",
"Novembro",
"Dezembro");
// short month names
Calendar._SMN = new Array
("Jan",
"Fev",
"Mar",
"Abr",
"Mai",
"Jun",
"Jul",
"Ago",
"Set",
"Out",
"Nov",
"Dez");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Sobre o calendario";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"Ultima versao visite: http://www.dynarch.com/projects/calendar/\n" +
"Distribuido sobre GNU LGPL. Veja http://gnu.org/licenses/lgpl.html para detalhes." +
"\n\n" +
"Selecao de data:\n" +
"- Use os botoes \xab, \xbb para selecionar o ano\n" +
"- Use os botoes " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " para selecionar o mes\n" +
"- Segure o botao do mouse em qualquer um desses botoes para selecao rapida.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Selecao de hora:\n" +
"- Clique em qualquer parte da hora para incrementar\n" +
"- ou Shift-click para decrementar\n" +
"- ou clique e segure para selecao rapida.";
Calendar._TT["PREV_YEAR"] = "Ant. ano (segure para menu)";
Calendar._TT["PREV_MONTH"] = "Ant. mes (segure para menu)";
Calendar._TT["GO_TODAY"] = "Hoje";
Calendar._TT["NEXT_MONTH"] = "Prox. mes (segure para menu)";
Calendar._TT["NEXT_YEAR"] = "Prox. ano (segure para menu)";
Calendar._TT["SEL_DATE"] = "Selecione a data";
Calendar._TT["DRAG_TO_MOVE"] = "Arraste para mover";
Calendar._TT["PART_TODAY"] = " (hoje)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Mostre %s primeiro";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Fechar";
Calendar._TT["TODAY"] = "Hoje";
Calendar._TT["TIME_PART"] = "(Shift-)Click ou arraste para mudar valor";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d/%m/%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %e %b";
Calendar._TT["WK"] = "sm";
Calendar._TT["TIME"] = "Hora:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-pt.js | calendar-pt.js |
Calendar._DN = new Array
("Zondag",
"Maandag",
"Dinsdag",
"Woensdag",
"Donderdag",
"Vrijdag",
"Zaterdag",
"Zondag");
Calendar._SDN_len = 2;
Calendar._MN = new Array
("Januari",
"Februari",
"Maart",
"April",
"Mei",
"Juni",
"Juli",
"Augustus",
"September",
"Oktober",
"November",
"December");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Info";
Calendar._TT["ABOUT"] =
"DHTML Datum/Tijd Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" +
"Ga voor de meest recente versie naar: http://www.dynarch.com/projects/calendar/\n" +
"Verspreid onder de GNU LGPL. Zie http://gnu.org/licenses/lgpl.html voor details." +
"\n\n" +
"Datum selectie:\n" +
"- Gebruik de \xab \xbb knoppen om een jaar te selecteren\n" +
"- Gebruik de " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " knoppen om een maand te selecteren\n" +
"- Houd de muis ingedrukt op de genoemde knoppen voor een snellere selectie.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Tijd selectie:\n" +
"- Klik op een willekeurig onderdeel van het tijd gedeelte om het te verhogen\n" +
"- of Shift-klik om het te verlagen\n" +
"- of klik en sleep voor een snellere selectie.";
//Calendar._TT["TOGGLE"] = "Selecteer de eerste week-dag";
Calendar._TT["PREV_YEAR"] = "Vorig jaar (ingedrukt voor menu)";
Calendar._TT["PREV_MONTH"] = "Vorige maand (ingedrukt voor menu)";
Calendar._TT["GO_TODAY"] = "Ga naar Vandaag";
Calendar._TT["NEXT_MONTH"] = "Volgende maand (ingedrukt voor menu)";
Calendar._TT["NEXT_YEAR"] = "Volgend jaar (ingedrukt voor menu)";
Calendar._TT["SEL_DATE"] = "Selecteer datum";
Calendar._TT["DRAG_TO_MOVE"] = "Klik en sleep om te verplaatsen";
Calendar._TT["PART_TODAY"] = " (vandaag)";
//Calendar._TT["MON_FIRST"] = "Toon Maandag eerst";
//Calendar._TT["SUN_FIRST"] = "Toon Zondag eerst";
Calendar._TT["DAY_FIRST"] = "Toon %s eerst";
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Sluiten";
Calendar._TT["TODAY"] = "Vandaag";
Calendar._TT["TIME_PART"] = "(Shift-)Klik of sleep om de waarde te veranderen";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d-%m-%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %e %b %Y";
Calendar._TT["WK"] = "wk";
Calendar._TT["TIME"] = "Tijd:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-nl.js | calendar-nl.js |
// Calendar CA language
// Author: Mihai Bazon, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Diumenge",
"Dilluns",
"Dimarts",
"Dimecres",
"Dijous",
"Divendres",
"Dissabte",
"Diumenge");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("Diu",
"Dil",
"Dmt",
"Dmc",
"Dij",
"Div",
"Dis",
"Diu");
// full month names
Calendar._MN = new Array
("Gener",
"Febrer",
"Març",
"Abril",
"Maig",
"Juny",
"Juliol",
"Agost",
"Setembre",
"Octubre",
"Novembre",
"Desembre");
// short month names
Calendar._SMN = new Array
("Gen",
"Feb",
"Mar",
"Abr",
"Mai",
"Jun",
"Jul",
"Ago",
"Set",
"Oct",
"Nov",
"Des");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Sobre el calendari";
Calendar._TT["ABOUT"] =
"DHTML Selector de Data/Hora\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Sel.lecció de Dates:\n" +
"- Fes servir els botons \xab, \xbb per sel.leccionar l'any\n" +
"- Fes servir els botons " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " per se.lecciconar el mes\n" +
"- Manté el ratolí apretat en qualsevol dels anteriors per sel.lecció ràpida.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Time selection:\n" +
"- claca en qualsevol de les parts de la hora per augmentar-les\n" +
"- o Shift-click per decrementar-la\n" +
"- or click and arrastra per sel.lecció ràpida.";
Calendar._TT["PREV_YEAR"] = "Any anterior (Mantenir per menu)";
Calendar._TT["PREV_MONTH"] = "Mes anterior (Mantenir per menu)";
Calendar._TT["GO_TODAY"] = "Anar a avui";
Calendar._TT["NEXT_MONTH"] = "Mes següent (Mantenir per menu)";
Calendar._TT["NEXT_YEAR"] = "Any següent (Mantenir per menu)";
Calendar._TT["SEL_DATE"] = "Sel.leccionar data";
Calendar._TT["DRAG_TO_MOVE"] = "Arrastrar per moure";
Calendar._TT["PART_TODAY"] = " (avui)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Mostra %s primer";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Tanca";
Calendar._TT["TODAY"] = "Avui";
Calendar._TT["TIME_PART"] = "(Shift-)Click a arrastra per canviar el valor";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %b %e";
Calendar._TT["WK"] = "st";
Calendar._TT["TIME"] = "Hora:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-ca.js | calendar-ca.js |
// Calendar ES (spanish) language
// Author: Mihai Bazon, <[email protected]>
// Updater: Servilio Afre Puentes <[email protected]>
// Updated: 2004-06-03
// Encoding: utf-8
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Domingo",
"Lunes",
"Martes",
"Miércoles",
"Jueves",
"Viernes",
"Sábado",
"Domingo");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("Dom",
"Lun",
"Mar",
"Mié",
"Jue",
"Vie",
"Sáb",
"Dom");
// First day of the week. "0" means display Sunday first, "1" means display
// Monday first, etc.
Calendar._FD = 1;
// full month names
Calendar._MN = new Array
("Enero",
"Febrero",
"Marzo",
"Abril",
"Mayo",
"Junio",
"Julio",
"Agosto",
"Septiembre",
"Octubre",
"Noviembre",
"Diciembre");
// short month names
Calendar._SMN = new Array
("Ene",
"Feb",
"Mar",
"Abr",
"May",
"Jun",
"Jul",
"Ago",
"Sep",
"Oct",
"Nov",
"Dic");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Acerca del calendario";
Calendar._TT["ABOUT"] =
"Selector DHTML de Fecha/Hora\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"Para conseguir la última versión visite: http://www.dynarch.com/projects/calendar/\n" +
"Distribuido bajo licencia GNU LGPL. Visite http://gnu.org/licenses/lgpl.html para más detalles." +
"\n\n" +
"Selección de fecha:\n" +
"- Use los botones \xab, \xbb para seleccionar el año\n" +
"- Use los botones " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " para seleccionar el mes\n" +
"- Mantenga pulsado el ratón en cualquiera de estos botones para una selección rápida.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Selección de hora:\n" +
"- Pulse en cualquiera de las partes de la hora para incrementarla\n" +
"- o pulse las mayúsculas mientras hace clic para decrementarla\n" +
"- o haga clic y arrastre el ratón para una selección más rápida.";
Calendar._TT["PREV_YEAR"] = "Año anterior (mantener para menú)";
Calendar._TT["PREV_MONTH"] = "Mes anterior (mantener para menú)";
Calendar._TT["GO_TODAY"] = "Ir a hoy";
Calendar._TT["NEXT_MONTH"] = "Mes siguiente (mantener para menú)";
Calendar._TT["NEXT_YEAR"] = "Año siguiente (mantener para menú)";
Calendar._TT["SEL_DATE"] = "Seleccionar fecha";
Calendar._TT["DRAG_TO_MOVE"] = "Arrastrar para mover";
Calendar._TT["PART_TODAY"] = " (hoy)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Hacer %s primer día de la semana";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Cerrar";
Calendar._TT["TODAY"] = "Hoy";
Calendar._TT["TIME_PART"] = "(Mayúscula-)Clic o arrastre para cambiar valor";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d/%m/%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%A, %e de %B de %Y";
Calendar._TT["WK"] = "sem";
Calendar._TT["TIME"] = "Hora:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-es.js | calendar-es.js |
// Calendar EN language
// Author: Mihai Bazon, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("Sun",
"Mon",
"Tue",
"Wed",
"Thu",
"Fri",
"Sat",
"Sun");
// First day of the week. "0" means display Sunday first, "1" means display
// Monday first, etc.
Calendar._FD = 0;
// full month names
Calendar._MN = new Array
("January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December");
// short month names
Calendar._SMN = new Array
("Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "About the calendar";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Date selection:\n" +
"- Use the \xab, \xbb buttons to select year\n" +
"- Use the " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " buttons to select month\n" +
"- Hold mouse button on any of the above buttons for faster selection.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Time selection:\n" +
"- Click on any of the time parts to increase it\n" +
"- or Shift-click to decrease it\n" +
"- or click and drag for faster selection.";
Calendar._TT["PREV_YEAR"] = "Prev. year (hold for menu)";
Calendar._TT["PREV_MONTH"] = "Prev. month (hold for menu)";
Calendar._TT["GO_TODAY"] = "Go Today";
Calendar._TT["NEXT_MONTH"] = "Next month (hold for menu)";
Calendar._TT["NEXT_YEAR"] = "Next year (hold for menu)";
Calendar._TT["SEL_DATE"] = "Select date";
Calendar._TT["DRAG_TO_MOVE"] = "Drag to move";
Calendar._TT["PART_TODAY"] = " (today)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Display %s first";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Close";
Calendar._TT["TODAY"] = "Today";
Calendar._TT["TIME_PART"] = "(Shift-)Click or drag to change value";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %b %e";
Calendar._TT["WK"] = "wk";
Calendar._TT["TIME"] = "Time:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-en.js | calendar-en.js |
// Calendar SP language
// Author: Rafael Velasco <rvu_at_idecnet_dot_com>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Domingo",
"Lunes",
"Martes",
"Miercoles",
"Jueves",
"Viernes",
"Sabado",
"Domingo");
Calendar._SDN = new Array
("Dom",
"Lun",
"Mar",
"Mie",
"Jue",
"Vie",
"Sab",
"Dom");
// full month names
Calendar._MN = new Array
("Enero",
"Febrero",
"Marzo",
"Abril",
"Mayo",
"Junio",
"Julio",
"Agosto",
"Septiembre",
"Octubre",
"Noviembre",
"Diciembre");
// short month names
Calendar._SMN = new Array
("Ene",
"Feb",
"Mar",
"Abr",
"May",
"Jun",
"Jul",
"Ago",
"Sep",
"Oct",
"Nov",
"Dic");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Información del Calendario";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"Nuevas versiones en: http://www.dynarch.com/projects/calendar/\n" +
"Distribuida bajo licencia GNU LGPL. Para detalles vea http://gnu.org/licenses/lgpl.html ." +
"\n\n" +
"Selección de Fechas:\n" +
"- Use \xab, \xbb para seleccionar el año\n" +
"- Use " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " para seleccionar el mes\n" +
"- Mantenga presionado el botón del ratón en cualquiera de las opciones superiores para un acceso rapido .";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Selección del Reloj:\n" +
"- Seleccione la hora para cambiar el reloj\n" +
"- o presione Shift-click para disminuirlo\n" +
"- o presione click y arrastre del ratón para una selección rapida.";
Calendar._TT["PREV_YEAR"] = "Año anterior (Presione para menu)";
Calendar._TT["PREV_MONTH"] = "Mes Anterior (Presione para menu)";
Calendar._TT["GO_TODAY"] = "Ir a Hoy";
Calendar._TT["NEXT_MONTH"] = "Mes Siguiente (Presione para menu)";
Calendar._TT["NEXT_YEAR"] = "Año Siguiente (Presione para menu)";
Calendar._TT["SEL_DATE"] = "Seleccione fecha";
Calendar._TT["DRAG_TO_MOVE"] = "Arrastre y mueva";
Calendar._TT["PART_TODAY"] = " (Hoy)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Mostrar %s primero";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Cerrar";
Calendar._TT["TODAY"] = "Hoy";
Calendar._TT["TIME_PART"] = "(Shift-)Click o arrastra para cambar el valor";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d-%m-%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%A, %e de %B de %Y";
Calendar._TT["WK"] = "Sm";
Calendar._TT["TIME"] = "Hora:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-sp.js | calendar-sp.js |
Calendar._DN = new Array
("Duminică",
"Luni",
"Marţi",
"Miercuri",
"Joi",
"Vineri",
"Sâmbătă",
"Duminică");
Calendar._SDN_len = 2;
Calendar._MN = new Array
("Ianuarie",
"Februarie",
"Martie",
"Aprilie",
"Mai",
"Iunie",
"Iulie",
"August",
"Septembrie",
"Octombrie",
"Noiembrie",
"Decembrie");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Despre calendar";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"Pentru ultima versiune vizitaţi: http://www.dynarch.com/projects/calendar/\n" +
"Distribuit sub GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Selecţia datei:\n" +
"- Folosiţi butoanele \xab, \xbb pentru a selecta anul\n" +
"- Folosiţi butoanele " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " pentru a selecta luna\n" +
"- Tineţi butonul mouse-ului apăsat pentru selecţie mai rapidă.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Selecţia orei:\n" +
"- Click pe ora sau minut pentru a mări valoarea cu 1\n" +
"- Sau Shift-Click pentru a micşora valoarea cu 1\n" +
"- Sau Click şi drag pentru a selecta mai repede.";
Calendar._TT["PREV_YEAR"] = "Anul precedent (lung pt menu)";
Calendar._TT["PREV_MONTH"] = "Luna precedentă (lung pt menu)";
Calendar._TT["GO_TODAY"] = "Data de azi";
Calendar._TT["NEXT_MONTH"] = "Luna următoare (lung pt menu)";
Calendar._TT["NEXT_YEAR"] = "Anul următor (lung pt menu)";
Calendar._TT["SEL_DATE"] = "Selectează data";
Calendar._TT["DRAG_TO_MOVE"] = "Trage pentru a mişca";
Calendar._TT["PART_TODAY"] = " (astăzi)";
Calendar._TT["DAY_FIRST"] = "Afişează %s prima zi";
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Închide";
Calendar._TT["TODAY"] = "Astăzi";
Calendar._TT["TIME_PART"] = "(Shift-)Click sau drag pentru a selecta";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d-%m-%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%A, %d %B";
Calendar._TT["WK"] = "spt";
Calendar._TT["TIME"] = "Ora:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-ro.js | calendar-ro.js |
// Calendar DE language
// Author: Jack (tR), <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Sonntag",
"Montag",
"Dienstag",
"Mittwoch",
"Donnerstag",
"Freitag",
"Samstag",
"Sonntag");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("So",
"Mo",
"Di",
"Mi",
"Do",
"Fr",
"Sa",
"So");
// full month names
Calendar._MN = new Array
("Januar",
"Februar",
"M\u00e4rz",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember");
// short month names
Calendar._SMN = new Array
("Jan",
 "Feb",
 "M\u00e4r",
 "Apr",
 "Mai",
 "Jun",
 "Jul",
 "Aug",
 "Sep",
 "Okt",
 "Nov",
 "Dez");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "\u00DCber dieses Kalendarmodul";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Datum ausw\u00e4hlen:\n" +
"- Benutzen Sie die \xab, \xbb Buttons um das Jahr zu w\u00e4hlen\n" +
"- Benutzen Sie die " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " Buttons um den Monat zu w\u00e4hlen\n" +
"- F\u00fcr eine Schnellauswahl halten Sie die Maustaste \u00fcber diesen Buttons fest.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Zeit ausw\u00e4hlen:\n" +
"- Klicken Sie auf die Teile der Uhrzeit, um diese zu erh\u00F6hen\n" +
"- oder klicken Sie mit festgehaltener Shift-Taste um diese zu verringern\n" +
"- oder klicken und festhalten f\u00fcr Schnellauswahl.";
Calendar._TT["TOGGLE"] = "Ersten Tag der Woche w\u00e4hlen";
Calendar._TT["PREV_YEAR"] = "Voriges Jahr (Festhalten f\u00fcr Schnellauswahl)";
Calendar._TT["PREV_MONTH"] = "Voriger Monat (Festhalten f\u00fcr Schnellauswahl)";
Calendar._TT["GO_TODAY"] = "Heute ausw\u00e4hlen";
Calendar._TT["NEXT_MONTH"] = "N\u00e4chst. Monat (Festhalten f\u00fcr Schnellauswahl)";
Calendar._TT["NEXT_YEAR"] = "N\u00e4chst. Jahr (Festhalten f\u00fcr Schnellauswahl)";
Calendar._TT["SEL_DATE"] = "Datum ausw\u00e4hlen";
Calendar._TT["DRAG_TO_MOVE"] = "Zum Bewegen festhalten";
Calendar._TT["PART_TODAY"] = " (Heute)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Woche beginnt mit %s ";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Schlie\u00dfen";
Calendar._TT["TODAY"] = "Heute";
Calendar._TT["TIME_PART"] = "(Shift-)Klick oder Festhalten und Ziehen um den Wert zu \u00e4ndern";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d.%m.%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %b %e";
Calendar._TT["WK"] = "wk";
Calendar._TT["TIME"] = "Zeit:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-de.js | calendar-de.js |
// Calendar DA language
// Author: Michael Thingmand Henriksen, <michael (a) thingmand dot dk>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("Søndag",
"Mandag",
"Tirsdag",
"Onsdag",
"Torsdag",
"Fredag",
"Lørdag",
"Søndag");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("Søn",
"Man",
"Tir",
"Ons",
"Tor",
"Fre",
"Lør",
"Søn");
// full month names
Calendar._MN = new Array
("Januar",
"Februar",
"Marts",
"April",
"Maj",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"December");
// short month names
Calendar._SMN = new Array
("Jan",
"Feb",
"Mar",
"Apr",
"Maj",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dec");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "Om Kalenderen";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"For den seneste version besøg: http://www.dynarch.com/projects/calendar/\n" +
"Distribueret under GNU LGPL. Se http://gnu.org/licenses/lgpl.html for detajler." +
"\n\n" +
"Valg af dato:\n" +
"- Brug \xab, \xbb knapperne for at vælge år\n" +
"- Brug " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " knapperne for at vælge måned\n" +
"- Hold knappen på musen nede på knapperne ovenfor for hurtigere valg.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Valg af tid:\n" +
"- Klik på en vilkårlig del for større værdi\n" +
"- eller Shift-klik for for mindre værdi\n" +
"- eller klik og træk for hurtigere valg.";
Calendar._TT["PREV_YEAR"] = "Ét år tilbage (hold for menu)";
Calendar._TT["PREV_MONTH"] = "Én måned tilbage (hold for menu)";
Calendar._TT["GO_TODAY"] = "Gå til i dag";
Calendar._TT["NEXT_MONTH"] = "Én måned frem (hold for menu)";
Calendar._TT["NEXT_YEAR"] = "Ét år frem (hold for menu)";
Calendar._TT["SEL_DATE"] = "Vælg dag";
Calendar._TT["DRAG_TO_MOVE"] = "Træk vinduet";
Calendar._TT["PART_TODAY"] = " (i dag)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "Vis %s først";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "Luk";
Calendar._TT["TODAY"] = "I dag";
Calendar._TT["TIME_PART"] = "(Shift-)klik eller træk for at ændre værdi";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%d-%m-%Y";
Calendar._TT["TT_DATE_FORMAT"] = "%a, %b %e";
Calendar._TT["WK"] = "Uge";
Calendar._TT["TIME"] = "Tid:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-da.js | calendar-da.js |
// Calendar JA language
// Author: Mihai Bazon, <[email protected]>
// Encoding: any
// Distributed under the same terms as the calendar itself.
// For translators: please use UTF-8 if possible. We strongly believe that
// Unicode is the answer to a real internationalized world. Also please
// include your contact information in the header, as can be seen above.
// full day names
Calendar._DN = new Array
("日曜",
"月曜",
"火曜",
"水曜",
"木曜",
"金曜",
"土曜",
"日曜");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("日",
"月",
"火",
"水",
"木",
"金",
"土",
"日");
// First day of the week. "0" means display Sunday first, "1" means display
// Monday first, etc.
Calendar._FD = 0;
// full month names
Calendar._MN = new Array
("一月",
"二月",
"三月",
"四月",
"五月",
"六月",
"七月",
"八月",
"九月",
"十月",
"十一月",
"十二月");
// short month names
Calendar._SMN = new Array
("1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "カレンダーについて";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"Date selection:\n" +
"- Use the \xab, \xbb buttons to select year\n" +
"- Use the " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " buttons to select month\n" +
"- Hold mouse button on any of the above buttons for faster selection.";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"Time selection:\n" +
"- Click on any of the time parts to increase it\n" +
"- or Shift-click to decrease it\n" +
"- or click and drag for faster selection.";
Calendar._TT["PREV_YEAR"] = "前年";
Calendar._TT["PREV_MONTH"] = "先月";
Calendar._TT["GO_TODAY"] = "今日に行く";
Calendar._TT["NEXT_MONTH"] = "次月";
Calendar._TT["NEXT_YEAR"] = "次年";
Calendar._TT["SEL_DATE"] = "日付を選択";
Calendar._TT["DRAG_TO_MOVE"] = "ドラッグして動かす";
Calendar._TT["PART_TODAY"] = " (今日)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "%s を先頭に表示";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "閉じる";
Calendar._TT["TODAY"] = "今日";
Calendar._TT["TIME_PART"] = "(Shift-)Click or drag to change value";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%b %e (%a)";
Calendar._TT["WK"] = "週";
Calendar._TT["TIME"] = "時間:"; | zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/resources/languages/calendar-ja.js | calendar-ja.js |
// Calendar ZH language
// Author: muziq, <[email protected]>
// Encoding: UTF-8
// Distributed under the same terms as the calendar itself.
// full day names
Calendar._DN = new Array
("星期日",
 "星期一",
 "星期二",
 "星期三",
 "星期四",
 "星期五",
 "星期六",
 "星期日");
// Please note that the following array of short day names (and the same goes
// for short month names, _SMN) isn't absolutely necessary. We give it here
// for exemplification on how one can customize the short day names, but if
// they are simply the first N letters of the full name you can simply say:
//
// Calendar._SDN_len = N; // short day name length
// Calendar._SMN_len = N; // short month name length
//
// If N = 3 then this is not needed either since we assume a value of 3 if not
// present, to be compatible with translation files that were written before
// this feature.
// short day names
Calendar._SDN = new Array
("日",
 "一",
 "二",
 "三",
 "四",
 "五",
 "六",
 "日");
// full month names
Calendar._MN = new Array
("一月",
 "二月",
 "三月",
 "四月",
 "五月",
 "六月",
 "七月",
 "八月",
 "九月",
 "十月",
 "十一月",
 "十二月");
// short month names
Calendar._SMN = new Array
("一月",
 "二月",
 "三月",
 "四月",
 "五月",
 "六月",
 "七月",
 "八月",
 "九月",
 "十月",
 "十一月",
 "十二月");
// tooltips
Calendar._TT = {};
Calendar._TT["INFO"] = "帮助";
Calendar._TT["ABOUT"] =
"DHTML Date/Time Selector\n" +
"(c) dynarch.com 2002-2005 / Author: Mihai Bazon\n" + // don't translate this ;-)
"For latest version visit: http://www.dynarch.com/projects/calendar/\n" +
"Distributed under GNU LGPL. See http://gnu.org/licenses/lgpl.html for details." +
"\n\n" +
"选择日期:\n" +
"- 点击 \xab, \xbb 按钮选择年份\n" +
"- 点击 " + String.fromCharCode(0x2039) + ", " + String.fromCharCode(0x203a) + " 按钮选择月份\n" +
"- 长按以上按钮可从菜单中快速选择年份或月份";
Calendar._TT["ABOUT_TIME"] = "\n\n" +
"选择时间:\n" +
"- 点击小时或分钟可使该数值加一\n" +
"- 按住Shift键点击小时或分钟可使该数值减一\n" +
"- 点击拖动鼠标可进行快速选择";
Calendar._TT["PREV_YEAR"] = "上一年 (按住出菜单)";
Calendar._TT["PREV_MONTH"] = "上一月 (按住出菜单)";
Calendar._TT["GO_TODAY"] = "转到今日";
Calendar._TT["NEXT_MONTH"] = "下一月 (按住出菜单)";
Calendar._TT["NEXT_YEAR"] = "下一年 (按住出菜单)";
Calendar._TT["SEL_DATE"] = "选择日期";
Calendar._TT["DRAG_TO_MOVE"] = "拖动";
Calendar._TT["PART_TODAY"] = " (今日)";
// the following is to inform that "%s" is to be the first day of week
// %s will be replaced with the day name.
Calendar._TT["DAY_FIRST"] = "最左边显示%s";
// This may be locale-dependent. It specifies the week-end days, as an array
// of comma-separated numbers. The numbers are from 0 to 6: 0 means Sunday, 1
// means Monday, etc.
Calendar._TT["WEEKEND"] = "0,6";
Calendar._TT["CLOSE"] = "关闭";
Calendar._TT["TODAY"] = "今日";
Calendar._TT["TIME_PART"] = "(Shift-)点击鼠标或拖动改变值";
// date formats
Calendar._TT["DEF_DATE_FORMAT"] = "%Y-%m-%d";
Calendar._TT["TT_DATE_FORMAT"] = "%A, %b %e日";
Calendar._TT["WK"] = "周";
Calendar._TT["TIME"] = "时间:";
====================
Datetime Widget Demo
====================
This demo package provides a simple content class which uses the
zc.datetimewidget widgets.
>>> from zope.testbrowser.testing import Browser
>>> browser = Browser()
>>> browser.handleErrors = False
>>> browser.addHeader('Authorization', 'Basic mgr:mgrpw')
>>> browser.open('http://localhost/@@contents.html')
It can be added by clicking on the "Datetimewidget Demo" link in the
add menu and giving it a name.
>>> link = browser.getLink('Datetimewidget Demo')
>>> link.click()
>>> nameCtrl = browser.getControl(name='new_value')
>>> nameCtrl.value = 'mydemo'
>>> applyCtrl = browser.getControl('Apply')
>>> applyCtrl.click()
>>> link = browser.getLink('mydemo')
>>> link.click()
>>> browser.url
'http://localhost/mydemo/@@edit.html'
We can fill in the values
>>> browser.getControl('Start Date').value = '2006-11-15'
>>> browser.getControl('End Date').value = '2006-11-16'
>>> browser.getControl('Start Datetime').value = '2006-11-15T07:49:31Z'
>>> browser.getControl('End Datetime').value = '2006-11-16T19:46:00Z'
>>> browser.getControl('Several dates').value = '2006-11-20 2006-11-21 2006-11-22'
>>> browser.getControl('Change').click()
And they will be saved:
>>> 'Required input is missing' in browser.contents
False
>>> '2006-11-15' in browser.contents
True
>>> '2006-11-16' in browser.contents
True
>>> '07:49' in browser.contents
True
>>> '19:46' in browser.contents
True
>>> '2006-11-20 2006-11-21 2006-11-22' in browser.contents
True
If we do not fill in a required field, we get a missing-value error:
>>> browser.getControl('Start Date').value = ''
>>> browser.getControl('Change').click()
>>> 'Required input is missing' in browser.contents
True
Let's put the value back:
>>> browser.getControl('Start Date').value = '2006-11-15'
>>> browser.getControl('Change').click()
>>> 'Required input is missing' in browser.contents
False
Now let's try leaving a date-set field empty:
>>> browser.getControl('Several dates').value = ''
>>> browser.getControl('Change').click()
>>> 'Required input is missing' in browser.contents
True
| zc.datetimewidget | /zc.datetimewidget-0.8.0.tar.gz/zc.datetimewidget-0.8.0/src/zc/datetimewidget/demo/README.txt | README.txt |
Second-generation demo storage
==============================
The zc.demostorage2 module provides a storage implementation that
wraps two storages, a base storage and a storage to hold changes.
The base storage is never written to. All new records are written to
the changes storage. Both storages are expected to:
- Use packed 64-bit unsigned integers as object ids,
- Allocate object ids sequentially, starting from 0, and
- in the case of the changes storage, accept object ids assigned externally.
In addition, it is assumed that fewer than 2**62 object ids have been
allocated in the base storage.
Note that DemoStorage also assumes that its base storage uses 64-bit
unsigned integer object ids allocated sequentially.
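As a quick illustration (not part of the package; ``boundary`` and
``first_new_oid`` are just names used here), the oid arithmetic works out
like this::

    from ZODB.utils import p64, u64

    boundary = 1 << 62
    # Oids are packed 64-bit unsigned integers; p64/u64 convert between
    # the packed form and Python integers.
    assert u64(p64(boundary)) == boundary
    # The base storage counts up from 0, while DemoStorage2 hands out new
    # oids starting just above the boundary, e.g. 4611686018427387905.
    first_new_oid = p64(boundary + 1)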
.. contents::
Change History
--------------
0.1.1 (2008-02-07)
******************
Fixed a packaging bug that caused some files to be omitted.
0.1 (2008-02-04)
****************
Initial release.
Configuration
-------------
The section below shows how to create zc.demostorage2 storages from
Python. If you're using ZConfig, you need to:
- %import zc.demostorage2
- include a demostorage2 section
Here's an example that shows how to configure demo storage and how to
use the configuration from python:
>>> import ZODB.config
>>> storage = ZODB.config.storageFromString("""
...
... %import zc.demostorage2
...
... <demostorage2>
... <filestorage base>
... path base.fs
... </filestorage>
... <filestorage changes>
... path changes.fs
... </filestorage>
... </demostorage2>
... """)
This creates a demo storage that gets base data from a file storage
named base.fs and stores changes in a file storage named changes.fs.
>>> storage
<DemoStorage2: DemoStorage2(base.fs, changes.fs)>
>>> storage.close()
Demo (doctest)
--------------
Note that most people will configure the storage through ZConfig. If
you are one of those people, you may want to stop here. :) The
examples below show you how to use the storage from Python, but they
also exercise lots of details you might not be interested in.
To see how this works, we'll start by creating a base storage and
putting an object (in addition to the root object) in it:
>>> from ZODB.FileStorage import FileStorage
>>> base = FileStorage('base.fs')
>>> from ZODB.DB import DB
>>> db = DB(base)
>>> from persistent.mapping import PersistentMapping
>>> conn = db.open()
>>> conn.root()['1'] = PersistentMapping({'a': 1, 'b':2})
>>> import transaction
>>> transaction.commit()
>>> db.close()
>>> import os
>>> original_size = os.path.getsize('base.fs')
Now, let's reopen the base storage in read-only mode:
>>> base = FileStorage('base.fs', read_only=True)
And open a new storage to store changes:
>>> changes = FileStorage('changes.fs')
and combine the two in a DemoStorage2:
>>> from zc.demostorage2 import DemoStorage2
>>> storage = DemoStorage2(base, changes)
If there are no transactions, the storage reports the lastTransaction
of the base database:
>>> storage.lastTransaction() == base.lastTransaction()
True
Let's add some data:
>>> db = DB(storage)
>>> conn = db.open()
>>> items = conn.root()['1'].items()
>>> items.sort()
>>> items
[('a', 1), ('b', 2)]
>>> conn.root()['2'] = PersistentMapping({'a': 3, 'b':4})
>>> transaction.commit()
>>> conn.root()['2']['c'] = 5
>>> transaction.commit()
Here we can see that we haven't modified the base storage:
>>> original_size == os.path.getsize('base.fs')
True
But we have modified the changes database:
>>> len(changes)
2
Our lastTransaction reflects the lastTransaction of the changes:
>>> storage.lastTransaction() > base.lastTransaction()
True
>>> storage.lastTransaction() == changes.lastTransaction()
True
Let's walk over some of the methods so we can see how we delegate to
the two underlying storages:
>>> from ZODB.utils import p64, u64
>>> storage.load(p64(0), '') == changes.load(p64(0), '')
True
>>> storage.load(p64(0), '') == base.load(p64(0), '')
False
>>> storage.load(p64(1), '') == base.load(p64(1), '')
True
>>> serial = base.getTid(p64(0))
>>> storage.loadSerial(p64(0), serial) == base.loadSerial(p64(0), serial)
True
>>> serial = changes.getTid(p64(0))
>>> storage.loadSerial(p64(0), serial) == changes.loadSerial(p64(0),
... serial)
True
The object id of the new object is quite large:
>>> u64(conn.root()['2']._p_oid)
4611686018427387905L
Let's look at some other methods:
>>> storage.getName()
'DemoStorage2(base.fs, changes.fs)'
>>> storage.sortKey() == changes.sortKey()
True
>>> storage.getSize() == changes.getSize()
True
>>> len(storage) == len(changes)
True
Undo methods are simply copied from the changes storage:
>>> [getattr(storage, name) == getattr(changes, name)
... for name in ('supportsUndo', 'undo', 'undoLog', 'undoInfo')
... ]
[True, True, True, True]
| zc.demostorage2 | /zc.demostorage2-0.1.3.tar.gz/zc.demostorage2-0.1.3/src/zc/demostorage2/README.txt | README.txt |
import threading
import ZODB.POSException
from ZODB.utils import p64, u64, z64
from zc.demostorage2.synchronized import synchronized
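# ``synchronized`` (defined in zc.demostorage2.synchronized) is assumed to
# serialize calls to the decorated methods on the instance's _lock.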
class DemoStorage2:
def __init__(self, base, changes):
self.changes = changes
self.base = base
supportsUndo = getattr(changes, 'supportsUndo', None)
if supportsUndo is not None and supportsUndo():
for meth in ('supportsUndo', 'undo', 'undoLog', 'undoInfo'):
setattr(self, meth, getattr(changes, meth))
for meth in ('getSize', 'history', 'isReadOnly', 'sortKey',
'tpc_transaction'):
setattr(self, meth, getattr(changes, meth))
lastInvalidations = getattr(changes, 'lastInvalidations', None)
if lastInvalidations is not None:
self.lastInvalidations = lastInvalidations
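        # New oids are handed out starting just above 2**62 (or past
        # whatever the changes storage has already allocated), so they can
        # never collide with oids from the read-only base storage.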
        self._oid = max(u64(changes.new_oid()), 1L << 62)
self._lock = threading.RLock()
self._commit_lock = threading.Lock()
self._transaction = None
def close(self):
self.base.close()
self.changes.close()
def getName(self):
return "DemoStorage2(%s, %s)" % (
self.base.getName(), self.changes.getName())
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.getName())
def getTid(self, oid):
try:
return self.changes.getTid(oid)
except ZODB.POSException.POSKeyError:
return self.base.getTid(oid)
@synchronized
def lastTransaction(self):
t = self.changes.lastTransaction()
if t == z64:
t = self.base.lastTransaction()
return t
def __len__(self):
return len(self.changes)
@synchronized
def load(self, oid, version=''):
try:
return self.changes.load(oid, version)
except ZODB.POSException.POSKeyError:
return self.base.load(oid, version)
@synchronized
def loadBefore(self, oid, tid):
try:
            return self.changes.loadBefore(oid, tid)
except ZODB.POSException.POSKeyError:
return self.base.loadBefore(oid, tid)
@synchronized
def loadSerial(self, oid, serial):
try:
return self.changes.loadSerial(oid, serial)
except ZODB.POSException.POSKeyError:
return self.base.loadSerial(oid, serial)
@synchronized
def new_oid(self):
self._oid += 1
return p64(self._oid)
def pack(self, pack_time, referencesf):
pass
def registerDB(self, db):
self.base.registerDB(db)
self.changes.registerDB(db)
@synchronized
def store(self, oid, serial, data, version, transaction):
assert version==''
if transaction is not self._transaction:
raise ZODB.POSException.StorageTransactionError(self, transaction)
# See if we already have changes for this oid
try:
old = self.changes.load(oid, '')[1]
except ZODB.POSException.POSKeyError:
try:
old = self.base.load(oid, '')[1]
except ZODB.POSException.POSKeyError:
old = serial
if old != serial:
raise ZODB.POSException.ConflictError(
oid=oid, serials=(old, serial)) # XXX untested branch
return self.changes.store(oid, serial, data, '', transaction)
@synchronized
def tpc_abort(self, transaction):
if self._transaction is not transaction:
return
self._transaction = None
try:
self.changes.tpc_abort(transaction)
finally:
self._commit_lock.release()
def tpc_begin(self, transaction, tid=None, status=' '):
if self._transaction is transaction:
return
self._commit_lock.acquire()
self._begin(transaction, tid, status)
@synchronized
def _begin(self, transaction, tid, status):
self._transaction = transaction
self.changes.tpc_begin(transaction, tid, status)
@synchronized
def tpc_finish(self, transaction, func = lambda: None):
if self._transaction is not transaction:
return
self._transaction = None
self.changes.tpc_finish(transaction)
self._commit_lock.release()
@synchronized
def tpc_vote(self, transaction):
if self._transaction is not transaction:
return
return self.changes.tpc_vote(transaction)
# Gaaaaaa! Work around ZEO bug.
def modifiedInVersion(self, oid):
return ''
class ZConfig:
def __init__(self, config):
self.config = config
self.name = config.getSectionName()
def open(self):
base = self.config.base.open()
changes = self.config.changes.open()
return DemoStorage2(base, changes) | zc.demostorage2 | /zc.demostorage2-0.1.3.tar.gz/zc.demostorage2-0.1.3/src/zc/demostorage2/__init__.py | __init__.py |
import copy
import BTrees.Length
import BTrees.OOBTree
import persistent
import zc.blist
class Dict(persistent.Persistent):
"""A BTree-based dict-like persistent object that can be safely
inherited from.
"""
def __init__(self, *args, **kwargs):
self._data = BTrees.OOBTree.OOBTree()
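        # len() of a BTree is O(n), so the size is tracked separately in a
        # Length object, which also reduces write conflicts on the counter.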
self._len = BTrees.Length.Length()
if args or kwargs:
self.update(*args, **kwargs)
def __setitem__(self, key, value):
delta = 1
if key in self._data:
delta = 0
self._data[key] = value
if delta:
self._len.change(delta)
def __delitem__(self, key):
self.pop(key)
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError(
'update expected at most 1 arguments, got %d' %
(len(args),))
if getattr(args[0], 'keys', None):
for k in args[0].keys():
self[k] = args[0][k]
else:
for k, v in args[0]:
self[k] = v
for k, v in kwargs.items():
self[k] = v
def setdefault(self, key, failobj=None):
# we can't use BTree's setdefault because then we don't know to
# increment _len
try:
res = self._data[key]
except KeyError:
res = failobj
self[key] = res
return res
def pop(self, key, *args):
try:
res = self._data.pop(key)
except KeyError:
if args:
res = args[0]
else:
raise
else:
self._len.change(-1)
return res
def clear(self):
self._data.clear()
self._len.set(0)
def __len__(self):
return self._len()
def keys(self):
return list(self._data.keys())
def values(self):
return list(self._data.values())
def items(self):
return list(self._data.items())
def copy(self):
if self.__class__ is Dict:
return Dict(self._data)
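        # For subclasses, temporarily swap in an empty tree so copy.copy()
        # does not duplicate the data, then refill the new instance.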
data = self._data
try:
self._data = BTrees.OOBTree.OOBTree()
c = copy.copy(self)
finally:
self._data = data
c.update(self._data)
return c
def __getitem__(self, key): return self._data[key]
def __iter__(self): return iter(self._data)
def iteritems(self): return self._data.iteritems()
def iterkeys(self): return self._data.iterkeys()
def itervalues(self): return self._data.itervalues()
def has_key(self, key): return bool(self._data.has_key(key))
def get(self, key, failobj=None): return self._data.get(key, failobj)
def __contains__(self, key): return self._data.__contains__(key)
def popitem(self):
try:
key = self._data.minKey()
except ValueError:
raise KeyError, 'container is empty'
return (key, self.pop(key))
class OrderedDict(Dict):
"""An ordered BTree-based dict-like persistent object that can be safely
inherited from.
"""
# what do we get from the superclass:
# update, setdefault, __len__, popitem, __getitem__, has_key, __contains__,
# get, __delitem__
def __init__(self, *args, **kwargs):
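        # Key insertion order is kept in a zc.blist.BList, a persistent
        # BTree-backed sequence.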
self._order = zc.blist.BList()
super(OrderedDict, self).__init__(*args, **kwargs)
def keys(self):
return list(self._order)
def __iter__(self):
return iter(self._order)
def values(self):
return [self._data[key] for key in self._order]
def items(self):
return [(key, self._data[key]) for key in self._order]
def __setitem__(self, key, value):
if key not in self._data:
self._order.append(key)
self._len.change(1)
self._data[key] = value
def updateOrder(self, order):
order = list(order)
if len(order) != len(self._order):
raise ValueError("Incompatible key set.")
order_set = set(order)
if len(order) != len(order_set):
raise ValueError("Duplicate keys in order.")
if order_set.difference(self._order):
raise ValueError("Incompatible key set.")
self._order[:] = order
def clear(self):
super(OrderedDict, self).clear()
del self._order[:]
def copy(self):
if self.__class__ is OrderedDict:
return OrderedDict(self)
data = self._data
order = self._order
try:
            self._data = BTrees.OOBTree.OOBTree()
self._order = zc.blist.BList()
c = copy.copy(self)
finally:
self._data = data
self._order = order
c.update(self)
return c
def iteritems(self):
return ((key, self._data[key]) for key in self._order)
def iterkeys(self):
return iter(self._order)
def itervalues(self):
return (self._data[key] for key in self._order)
def pop(self, key, *args):
try:
res = self._data.pop(key)
except KeyError:
if args:
res = args[0]
else:
raise
else:
self._len.change(-1)
self._order.remove(key)
return res | zc.dict | /zc.dict-1.3b1.tar.gz/zc.dict-1.3b1/src/zc/dict/dict.py | dict.py |
from zope import interface, component, i18n, proxy
from zope.i18nmessageid import Message
from zope.security.interfaces import Unauthorized
import zope.dublincore.interfaces
import zope.location.interfaces
from zope.publisher.interfaces import IRequest
from zope.publisher.interfaces.http import IHTTPRequest
from zope.publisher.browser import BrowserView
from zope.traversing.browser.absoluteurl import absoluteURL
from zope.traversing.interfaces import IContainmentRoot
from zc.displayname import interfaces
from zc.displayname.i18n import _
INSUFFICIENT_CONTEXT = _("There isn't enough context to get URL information. "
"This is probably due to a bug in setting up location "
"information.")
class DefaultDisplayNameGenerator(BrowserView):
component.adapts(zope.location.interfaces.ILocation, IRequest)
interface.implementsOnly(interfaces.IDisplayNameGenerator)
def __call__(self, maxlength=None):
ob = self.context
try:
try:
dc = zope.dublincore.interfaces.IDCDescriptiveProperties(ob)
except TypeError:
name = ob.__name__
else:
name = dc.title or ob.__name__
except Unauthorized:
name = ob.__name__ # __name__ should always be available; if it is
# not, we consider that a configuration error.
return convertName(name, self.request, maxlength)
class SiteDisplayNameGenerator(BrowserView):
component.adapts(IContainmentRoot, IRequest)
interface.implementsOnly(interfaces.IDisplayNameGenerator)
def __call__(self, maxlength=None):
return convertName(_('[root]'), self.request, maxlength)
def convertName(name, request, maxlength):
"""trim name to maxlength, if given. Translate, if appropriate.
Not appropriate for names with HTML.
"""
if isinstance(name, Message):
name = i18n.translate(name, context=request, default=name)
if maxlength is not None:
if not isinstance(maxlength, (int, long)):
raise TypeError('maxlength must be int', maxlength)
if maxlength < 0:
raise ValueError('maxlength must be 0 or greater', maxlength)
name_len = len(name)
if name_len > maxlength:
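            # Truncate with a trailing "..." when there is room for it;
            # below 4 characters nothing useful fits, so use placeholders.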
if maxlength < 4:
name = '?' * maxlength
else:
name = name[:maxlength-3] + "..."
return name
class Breadcrumbs(BrowserView):
interface.implementsOnly(interfaces.IBreadcrumbs)
component.adapts(None, IHTTPRequest)
def __call__(self, maxlength=None):
context = self.context
request = self.request
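        # Stop climbing at the virtual-host root; objects above it are
        # outside the URL space visible to the client.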
if proxy.sameProxiedObjects(context, request.getVirtualHostRoot()):
base = ()
else:
container = getattr(context, '__parent__', None)
if container is None:
raise TypeError, INSUFFICIENT_CONTEXT
view = component.getMultiAdapter(
(container, request), interfaces.IBreadcrumbs)
base = tuple(view(maxlength))
name_gen = component.getMultiAdapter(
(context, request), interfaces.IDisplayNameGenerator)
url = absoluteURL(context, request)
return base + (
{"name_gen": name_gen, "url": url, "name": name_gen(maxlength),
"object": context},)
class TerminalBreadcrumbs(BrowserView):
interface.implementsOnly(interfaces.IBreadcrumbs)
component.adapts(IContainmentRoot, IHTTPRequest) # may adapt others
def __call__(self, maxlength=None):
context = self.context
request = self.request
name_gen = component.getMultiAdapter(
(context, request), interfaces.IDisplayNameGenerator)
url = absoluteURL(context, request)
return ({"name_gen": name_gen, "url": url, "name": name_gen(maxlength),
"object": context},)
class HiddenBreadcrumbs(BrowserView):
"""Breadcrumbs for an object that doesn't want to be in the breadcrumbs"""
interface.implementsOnly(interfaces.IBreadcrumbs)
component.adapts(None, IHTTPRequest)
def __call__(self, maxlength=None):
context = self.context
request = self.request
if proxy.sameProxiedObjects(context, request.getVirtualHostRoot()):
base = ()
else:
container = getattr(context, '__parent__', None)
if container is None:
raise TypeError, INSUFFICIENT_CONTEXT
view = component.getMultiAdapter(
(container, request), interfaces.IBreadcrumbs)
base = tuple(view(maxlength))
return base
@component.adapter(zope.location.interfaces.ILocation, IHTTPRequest)
@interface.implementer(interface.Interface)
def breadcrumbs(context, request):
"breadcrumbs; unlimited display name length for each traversed object"
return component.getMultiAdapter(
(context, request), interfaces.IBreadcrumbs)()
@component.adapter(zope.location.interfaces.ILocation, IHTTPRequest)
@interface.implementer(interface.Interface)
def breadcrumbs20char(context, request):
"breadcrumbs; display name length limited to 20 characters for each object"
return component.getMultiAdapter(
(context, request), interfaces.IBreadcrumbs)(20) | zc.displayname | /zc.displayname-1.1.tar.gz/zc.displayname-1.1/src/zc/displayname/adapters.py | adapters.py |
import zc.ajaxform.application
import zc.ajaxform.calculator_example
import zc.ajaxform.calculator_subapplication_example
import zc.ajaxform.form_example
class Calculator(zc.ajaxform.calculator_example.Calculator):
def template(self):
return """<html><head>
<style type="text/css">@import "http://o.aolcdn.com/dojo/1.4.0/dijit/themes/tundra/tundra.css";</style>
<script type="text/javascript" src="http://o.aolcdn.com/dojo/1.4.0/dojo/dojo.xd.js"></script>
<script type="text/javascript" src="/@@/zc.dojoform/zc.dojo.js"></script>
<script type="text/javascript" src="/@@/zc.dojoform/calculator_example.js"></script>
</head><body class=tundra></body></html>"""
class Container(zc.ajaxform.calculator_subapplication_example.Container):
def template(self):
return """<html><head>
<style type="text/css">@import "http://o.aolcdn.com/dojo/1.4.0/dijit/themes/tundra/tundra.css";</style>
<script type="text/javascript" src="http://o.aolcdn.com/dojo/1.4.0/dojo/dojo.xd.js"></script>
<script type="text/javascript" src="/@@/zc.dojoform/zc.dojo.js"></script>
<script type="text/javascript" src="/@@/zc.dojoform/container_example.js"></script>
</head><body class=tundra></body></html>"""
class Form(zc.ajaxform.form_example.FormExample):
def template(self):
return """<html><head>
<style type="text/css">
@import "http://o.aolcdn.com/dojo/1.4.0/dijit/themes/tundra/tundra.css";</style>
<style type="text/css" media="all">
@import url("http://o.aolcdn.com/dojo/1.4.0/dojox/grid/enhanced/resources/tundraEnhancedGrid.css");
</style>
<script type="text/javascript" src="http://o.aolcdn.com/dojo/1.4.0/dojo/dojo.xd.js"></script>
<script type="text/javascript" src="/@@/zc.dojoform/zc.dojo.js"></script>
<script type="text/javascript" src="/@@/zc.dojoform/form_example.js"></script>
</head><body class=tundra></body></html>""" | zc.dojoform | /zc.dojoform-0.15.0.tar.gz/zc.dojoform-0.15.0/src/zc/dojoform/examples.py | examples.py |