===========
Utilities
===========
The utils module contains various helpers for working with data governed
by MIME content type information, as found in the HTTP Content-Type header:
MIME types and character sets.
The decode function takes a byte string and an IANA character set name and
returns a unicode object decoded from the string, using the codec associated
with the character set name. Errors can arise either from mapping the
character set to a codec or from the unicode conversion itself, and will be
LookupErrors (the character set did not map to a codec that Python knows
about) or UnicodeDecodeErrors (the string included bytes outside the range
of the codec associated with the character set).
>>> original = b'This is an o with a slash through it: \xb8.'
>>> charset = 'Latin-7' # Baltic Rim or iso-8859-13
>>> from zope.mimetype import utils
>>> utils.decode(original, charset)
'This is an o with a slash through it: \xf8.'
>>> utils.decode(original, 'foo bar baz')
Traceback (most recent call last):
...
LookupError: unknown encoding: foo bar baz
>>> utils.decode(original, 'iso-ir-6') # alias for ASCII
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
UnicodeDecodeError: 'ascii' codec can't decode...
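Internally, such a helper amounts to resolving the IANA charset name to a
codec and then decoding the bytes with it. A minimal sketch, assuming the
charset name is resolved through the charset/codec utilities described
elsewhere in this package (the actual implementation in
``zope.mimetype.utils`` may differ in detail), would be::

    from zope import component
    from zope.mimetype.interfaces import ICharsetCodec

    def decode_sketch(data, charset):
        # An unknown charset name surfaces as a LookupError
        # (ComponentLookupError is a LookupError subclass).
        codec = component.getUtility(ICharsetCodec, name=charset.lower())
        # ICodec.decode returns (text, length consumed); bytes outside
        # the codec's range raise UnicodeDecodeError.
        return codec.decode(data)[0]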
---- end of src/zope/mimetype/utils.rst ----
import os.path
from zope.configuration import fields
from zope import interface
from zope import schema
from zope.mimetype import codec
from zope.mimetype import interfaces
from zope.mimetype import mtypes
from zope.mimetype.i18n import _
try:
from zope.browserresource.metaconfigure import icon
except ImportError: # pragma: no cover
def icon(*args):
import warnings
warnings.warn("No icon support: zope.browserresource is not installed")
import zope.component.interface
import zope.component.zcml
class IMimeTypesDirective(interface.Interface):
"""Request loading of a MIME type definition table.
Example::
<zope:mimeTypes file='types.csv' module="zope.mimetype.interfaces" />
"""
file = fields.Path(
title=_("File"),
description=_("Path of the CSV file to load registrations from."),
required=True,
)
module = fields.GlobalObject(
title=_("Module"),
description=_("Module which contains the interfaces"
" referenced from the CSV file."),
required=True,
)
def mimeTypesDirective(_context, file, module):
codec.initialize(_context)
directory = os.path.dirname(file)
data = mtypes.read(file)
provides = interfaces.IContentTypeInterface
for name, info in data.items():
iface = getattr(module, name, None)
if iface is None:
# create missing interface
iface = mtypes.makeInterface(
name, info, getattr(module, "__name__", None))
setattr(module, name, iface)
# Register the interface as a utility:
_context.action(
discriminator=None,
callable=zope.component.interface.provideInterface,
args=(iface.__module__ + '.' + iface.getName(), iface)
)
for mime_type in info[2]:
# Register the interface as the IContentTypeInterface
# utility for each appropriate MIME type:
_context.action(
discriminator=('utility', provides, mime_type),
callable=zope.component.zcml.handler,
args=('registerUtility', iface, provides, mime_type),
)
icon_path = os.path.join(directory, info[3])
if icon_path and os.path.isfile(icon_path):
icon(_context, "zmi_icon", iface, icon_path)
class ICodecDirective(interface.Interface):
"""Defines a codec.
Example::
<zope:codec name="iso8859-1" title="Western (ISO-8859-1)">
...
</zope:codec>
"""
name = schema.ASCIILine(
title=_('Name'),
description=_('The name of the Python codec.'),
required=True,
)
title = fields.MessageID(
title=_('Title'),
description=_('The human-readable name for this codec.'),
required=False,
)
class ICharsetDirective(interface.Interface):
"""Defines a charset in a codec.
Example::
<charset name="iso8859-1" preferred="True" />
<charset name="latin1" />
"""
name = schema.ASCIILine(
title=_('Name'),
description=_('The name of the charset.'),
required=True,
)
preferred = schema.Bool(
title=_('Preferred'),
description=_('Whether this is the preferred charset for the encoding.'),
required=False,
)
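# Taken together, <charset> is handled as a subdirective of <codec> (the
# CodecDirective class below provides the ``charset`` method for it), so a
# hypothetical configuration combining the docstring examples above might
# look like:
#
#   <zope:codec name="iso8859-1" title="Western (ISO-8859-1)">
#     <charset name="iso8859-1" preferred="True" />
#     <charset name="latin1" />
#   </zope:codec>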
class CodecDirective:
def __init__(self, _context, name, title):
self.name = name
self.title = title
_context.action(
discriminator=None,
callable=codec.addCodec,
args=(name, title),
)
def charset(self, _context, name, preferred=False):
_context.action(
discriminator=(self.name, name),
callable=codec.addCharset,
args=(self.name, name, preferred),
)
---- end of src/zope/mimetype/zcml.py ----
"""interfaces for mimetype package
"""
import re
import zope.interface.interfaces
import zope.schema.interfaces
from zope.configuration.fields import MessageID
from zope import interface
from zope import schema
from zope.mimetype.i18n import _
# Note that MIME types and content type parameter names are considered
# case-insensitive. For our purposes, they should always be
# lower-cased on input. This should be handled by the input machinery
# (widgets) rather than in the application or MIME-handling code.
# Constraints defined here specify lower case values.
#
# The MIME type is defined to be two tokens separated by a slash; for
# our purposes, any whitespace between the tokens must be normalized
# by removing it. This too should be handled by input mechanisms.
# This RE really assumes you know the ASCII codes; note that upper
# case letters are not accepted; tokens must be normalized.
# http://czyborra.com/charsets/iso646.html
# http://www.faqs.org/rfcs/rfc2045.html
_token_re = r"[!#$%&'*+\-.\d^_`a-z{|}~]+"
_token_rx = re.compile("%s$" % _token_re)
_mime_type_rx = re.compile("{}/{}$".format(_token_re, _token_re))
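# For example, the normalized forms "text/plain" and
# "application/xhtml+xml" match _mime_type_rx, while "Text/Plain"
# (upper case) and "text / plain" (embedded whitespace) do not.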
# These helpers are used to define constraints for specific schema
# fields. Documentation and tests for these are located in
# constraints.txt.
def mimeTypeConstraint(value):
"""Return `True` iff `value` is a syntactically legal MIME type."""
return _mime_type_rx.match(value) is not None
def tokenConstraint(value):
"""Return `True` iff `value` is a syntactically legal RFC 2045 token."""
return _token_rx.match(value) is not None
class IContentTypeAware(interface.Interface):
"""Interface for MIME content type information.
Objects that can provide content type information about the data
they contain, such as file objects, should be adaptable to this
interface.
"""
parameters = schema.Dict(
title=_('Mime Type Parameters'),
description=_("The MIME type parameters (such as charset)."),
required=True,
key_type=schema.ASCIILine(constraint=tokenConstraint)
)
mimeType = schema.ASCIILine(
title=_('Mime Type'),
description=_("The mime type explicitly specified for the object"
" that this MIME information describes, if any. "
" May be None, or an ASCII MIME type string of the"
" form major/minor."),
constraint=mimeTypeConstraint,
required=False,
)
class IContentTypeInterface(interface.Interface):
"""Interface that describes a logical mime type.
Interfaces that provide this interface are content-type
interfaces.
Most MIME types are described by the IANA MIME-type registry
(http://www.iana.org/assignments/media-types/).
"""
class IContentType(interface.Interface):
"""Marker interface for objects that represent content with a MIME type.
"""
interface.directlyProvides(IContentType, IContentTypeInterface)
class IContentTypeEncoded(IContentType):
"""Marker interface for content types that care about encoding.
This does not imply that encoding information is known for a
specific object.
Content types that derive from `IContentTypeEncoded` support a
content type parameter named 'charset', and that parameter is used
to control encoding and decoding of the text.
For example, interfaces for text/* content types all derive from
this base interface.
"""
interface.directlyProvides(IContentTypeEncoded, IContentTypeInterface)
class IContentTypeChangedEvent(zope.interface.interfaces.IObjectEvent):
"""The content type for an object has changed.
All changes of the `IContentTypeInterface` for an object are
reported by this event, including the setting of an initial
content type and the removal of the content type interface.
This event should only be used if the content type actually
changes.
"""
newContentType = interface.Attribute(
"""Content type interface after the change, if any, or `None`.
""")
oldContentType = interface.Attribute(
"""Content type interface before the change, if any, or `None`.
""")
class IContentTypeTerm(zope.schema.interfaces.ITitledTokenizedTerm):
"""Extended term that describes a content type interface."""
mimeTypes = schema.List(
title=_("MIME types"),
description=_("List of MIME types represented by this interface;"
" the first should be considered the preferred"
" MIME type."),
required=True,
min_length=1,
value_type=schema.ASCIILine(constraint=mimeTypeConstraint),
readonly=True,
)
extensions = schema.List(
title=_("Extensions"),
description=_("Filename extensions commonly associated with this"
" type of file."),
required=True,
min_length=0,
readonly=True,
)
class IContentTypeSource(zope.schema.interfaces.IIterableSource,
zope.schema.interfaces.ISource):
"""Source for content types."""
class IContentInfo(interface.Interface): # XXX
"""Interface describing effective MIME type information.
When using MIME data from an object, an application should adapt
the object to this interface to determine how it should be
interpreted. This may be different from the information provided
by the object itself (for example via `IContentTypeAware`).
"""
effectiveMimeType = schema.ASCIILine(
title=_("Effective MIME type"),
description=_("MIME type that should be reported when"
" downloading the document this `IContentInfo`"
" object is for."),
required=False,
constraint=mimeTypeConstraint,
)
effectiveParameters = schema.Dict(
title=_("Effective parameters"),
description=_("Content-Type parameters that should be reported "
" when downloading the document this `IContentInfo`"
" object is for."),
required=True,
key_type=schema.ASCIILine(constraint=tokenConstraint),
value_type=schema.ASCII(),
)
contentType = schema.ASCIILine(
title=_("Content type"),
description=_("The value of the Content-Type header,"
" including both the MIME type and any parameters."),
required=False,
)
def getCodec():
"""Return an `ICodec` that should be used to decode/encode data.
This should return `None` if the object's `IContentType` interface
does not derive from `IContentTypeEncoded`.
If the content type is encoded and no encoding information is
available in the `effectiveParameters`, this method may return
None, or may provide a codec based on application policy.
If `effectiveParameters` indicates a specific charset, and no
codec is registered to support that charset, `ValueError` will
be raised.
"""
def decode(s):
"""Return the decoding of `s` based on the effective encoding.
The effective encoding is determined by the return from the
`getCodec()` method.
`ValueError` is raised if no codec can be found for the
effective charset.
"""
class IMimeTypeGetter(interface.Interface):
"""A utility that looks up a MIME type string."""
def __call__(name=None, data=None, content_type=None):
"""Look up a MIME type.
If a MIME type cannot be determined based on the input,
this returns `None`.
:keyword bytes data: If given, the bytes data to get a MIME
type for. This may be examined for clues about the type.
"""
class ICharsetGetter(interface.Interface):
"""A utility that looks up a character set (charset)."""
def __call__(name=None, data=None, content_type=None):
"""Look up a charset.
If a charset cannot be determined based on the input,
this returns `None`.
"""
class ICodec(interface.Interface):
"""Information about a codec."""
name = schema.ASCIILine(
title=_('Name'),
description=_('The name of the Python codec.'),
required=True,
)
title = MessageID(
title=_('Title'),
description=_('The human-readable name of this codec.'),
required=True,
)
def encode(input, errors='strict'):
"""Encodes the input and returns a tuple (output, length consumed).
"""
def decode(input, errors='strict'):
"""Decodes the input and returns a tuple (output, length consumed).
"""
def reader(stream, errors='strict'):
"""Construct a StreamReader object for this codec."""
def writer(stream, errors='strict'):
"""Construct a StramWriter object for this codec."""
class ICharset(interface.Interface):
"""Information about a charset"""
name = schema.ASCIILine(
title=_('Name'),
description=_("The charset name. This is what is used for the "
"'charset' parameter in content-type headers."),
required=True,
)
encoding = schema.ASCIILine(
# This *must* match the `name` of the ICodec that's used to
# handle this charset.
title=_('Encoding'),
description=_("The id of the encoding used for this charset."),
required=True,
)
class ICodecPreferredCharset(interface.Interface):
"""Marker interface for locating the preferred charset for a Codec."""
class ICharsetCodec(interface.Interface):
"""Marker interface for locating the codec for a given charset."""
class ICodecTerm(zope.schema.interfaces.ITitledTokenizedTerm):
"""Extended term that describes a content type interface."""
preferredCharset = schema.ASCIILine(
title=_("Preferred Charset"),
description=_("Charset that should be used to represent the codec"),
required=False,
readonly=True,
)
class ICodecSource(zope.schema.interfaces.IIterableSource):
"""Source for codecs.""" | zope.mimetype | /zope.mimetype-3.0.tar.gz/zope.mimetype-3.0/src/zope/mimetype/interfaces.py | interfaces.py |
=================================
Source for MIME type interfaces
=================================
.. currentmodule:: zope.mimetype.interfaces
Some sample interfaces have been created in the zope.mimetype.tests
module for use in this test. Let's import them::
>>> from zope.mimetype.tests import (
... ISampleContentTypeOne, ISampleContentTypeTwo)
The source should only include `IContentTypeInterface` interfaces that
have been registered. Let's register one of these two interfaces so
we can test this::
>>> import zope.component
>>> from zope.mimetype.interfaces import IContentTypeInterface
>>> zope.component.provideUtility(
... ISampleContentTypeOne, IContentTypeInterface, name="type/one")
>>> zope.component.provideUtility(
... ISampleContentTypeOne, IContentTypeInterface, name="type/two")
We should see that these interfaces are included in the source::
>>> from zope.mimetype import source
>>> s = source.ContentTypeSource()
>>> ISampleContentTypeOne in s
True
>>> ISampleContentTypeTwo in s
False
Interfaces that do not provide `IContentTypeInterface` are not
included in the source::
>>> import zope.interface
>>> class ISomethingElse(zope.interface.Interface):
... """This isn't a content type interface."""
>>> ISomethingElse in s
False
The source is iterable, so we can get a list of the values::
>>> values = list(s)
>>> len(values)
1
>>> values[0] is ISampleContentTypeOne
True
We can get terms for the allowed values::
>>> terms = source.ContentTypeTerms(s, None)
>>> t = terms.getTerm(ISampleContentTypeOne)
>>> terms.getValue(t.token) is ISampleContentTypeOne
True
Interfaces that are not in the source cause an error when a term is
requested::
>>> terms.getTerm(ISomethingElse)
Traceback (most recent call last):
...
LookupError: value is not an element in the source
The term provides a token based on the module name of the interface::
>>> t.token
'zope.mimetype.tests.ISampleContentTypeOne'
The term also provides the title based on the "title" tagged value
from the interface::
>>> t.title
'Type One'
Each interface provides a list of MIME types with which the interface
is associated. The term object provides access to this list::
>>> t.mimeTypes
['type/one', 'type/foo']
A list of common extensions for files of this type is also available,
though it may be empty::
>>> t.extensions
[]
The term's value, of course, is the interface passed in::
>>> t.value is ISampleContentTypeOne
True
This extended term API is defined by the `IContentTypeTerm`
interface::
>>> from zope.mimetype.interfaces import IContentTypeTerm
>>> IContentTypeTerm.providedBy(t)
True
The value can also be retrieved using the `getValue()` method::
>>> iface = terms.getValue('zope.mimetype.tests.ISampleContentTypeOne')
>>> iface is ISampleContentTypeOne
True
Attempting to retrieve an interface that isn't in the source using the
terms object generates a LookupError::
>>> terms.getValue('zope.mimetype.tests.ISampleContentTypeTwo')
Traceback (most recent call last):
...
LookupError: token does not represent an element in the source
Attempting to look up a junk token also generates an error::
>>> terms.getValue('just.some.dotted.name.that.does.not.exist')
Traceback (most recent call last):
...
LookupError: could not import module for token
---- end of src/zope/mimetype/source.rst ----
import codecs
import os
import re
from zope import component
from zope import interface
from zope.mimetype.interfaces import ICharset
from zope.mimetype.interfaces import ICharsetCodec
from zope.mimetype.interfaces import ICodec
from zope.mimetype.interfaces import ICodecPreferredCharset
@interface.implementer(ICodec)
class Codec:
def __init__(self, name, title):
self.name = name
self.title = title
(self.encode,
self.decode,
self.reader,
self.writer
) = codecs.lookup(name)
def addCodec(name, title=None):
codec = Codec(name, title)
component.provideUtility(codec, provides=ICodec, name=name)
@interface.implementer(ICharset)
class Charset:
def __init__(self, name, encoding):
self.name = name
self.encoding = encoding
def addCharset(encoding, name, preferred=False):
codec = component.getUtility(ICodec, name=encoding)
charset = Charset(name, codec.name)
component.provideUtility(charset, provides=ICharset, name=name)
interface.alsoProvides(codec, ICharsetCodec)
component.provideUtility(codec, provides=ICharsetCodec, name=name)
if preferred:
utility = component.queryUtility(
ICodecPreferredCharset, name=codec.name)
if utility is not None:
raise ValueError("Codec already has a preferred charset.")
interface.alsoProvides(charset, ICodecPreferredCharset)
component.provideUtility(
charset, provides=ICodecPreferredCharset, name=codec.name)
FILENAME = "character-sets.txt"
DATA_RE = re.compile(
r'(Name|Alias|MIBenum):\s*(\S+)\s*(\(preferred MIME name\))?'
)
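# The bundled registry file contains records roughly of the form
#
#   Name: ANSI_X3.4-1968                                   [RFC1345,KXS2]
#   Alias: iso-ir-6
#   Alias: US-ASCII (preferred MIME name)
#
# DATA_RE picks out the record type ("Name", "Alias", or "MIBenum"), the
# value, and the optional "(preferred MIME name)" marker.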
def initialize(_context):
# if any ICodec has been registered, we're done:
for _ in component.getUtilitiesFor(ICodec):
return
_names = []
_codecs = {}
_aliases = {} # alias -> codec name
here = os.path.dirname(os.path.abspath(__file__))
fn = os.path.join(here, FILENAME)
f = open(fn)
class Codec:
preferred_alias = None
def __init__(self, name):
self.name = name
self.aliases = [name.lower()]
def findPyCodecs(self):
self.pyCodecs = {}
for alias in self.aliases:
try:
codec = codecs.lookup(alias)
except LookupError:
pass
else:
self.pyCodecs[alias] = codec
for line in f:
if not line.strip():
lastname = None
continue
m = DATA_RE.match(line)
if m is None:
continue
type, name, preferred = m.groups()
if type == "Name":
if name in _codecs: # pragma: no cover
raise ValueError("codec %s already exists" % name)
_names.append(name)
lastname = name
_codecs[name] = Codec(name)
if preferred:
_codecs[name].preferred_alias = name.lower()
elif type == "Alias" and name != "None":
if not lastname: # pragma: no cover
raise ValueError("Parsing failed. Alias found without a name.")
name = name.lower()
if name in _aliases: # pragma: no cover
raise ValueError("Alias %s already exists." % name)
codec = _codecs[lastname]
codec.aliases.append(name)
_aliases[name] = lastname
if preferred:
codec.preferred_alias = name
f.close()
for name in _names:
codec = _codecs[name]
codec.findPyCodecs()
if codec.pyCodecs.get(codec.preferred_alias):
pyName = codec.preferred_alias
else:
for pyName in codec.aliases:
if pyName in codec.pyCodecs:
break
else:
continue # not found under any name
_context.action(
discriminator=None,
callable=addCodec,
args=(pyName, codec.name),
)
if not codec.preferred_alias:
codec.preferred_alias = codec.aliases[0]
for alias in codec.aliases:
_context.action(
discriminator=(pyName, alias),
callable=addCharset,
args=(pyName, alias, alias == codec.preferred_alias)
)
---- end of src/zope/mimetype/codec.py ----
__docformat__ = "reStructuredText"
import zope.formlib.source
import zope.i18n
class TranslatableSourceSelectWidget(
zope.formlib.source.SourceSelectWidget):
def __init__(self, field, source, request):
super().__init__(
field, source, request)
self.displays = {} # value --> (display, token)
self.order = [] # values in sorted order
# XXX need a better way to sort in an internationalized context
sortable = []
for value in source:
t = self.vocabulary.terms.getTerm(value)
title = zope.i18n.translate(t.title, context=request)
self.displays[value] = title, t.token
lower = title.lower()
sortable.append((lower, value))
sortable.sort()
self.order = [value for (lower, value) in sortable]
def renderItemsWithValues(self, values):
"""Render the list of possible values, with those found in
`values` being marked as selected."""
cssClass = self.cssClass
# multiple items with the same value are not allowed from a
# vocabulary, so that need not be considered here
rendered_items = []
count = 0
# Handle case of missing value
missing = self._toFormValue(self.context.missing_value)
if self._displayItemForMissingValue and not self.context.required:
render = (self.renderSelectedItem if missing in values
else self.renderItem)
missing_item = render(
count,
self.translate(self._messageNoValue),
missing,
self.name,
cssClass)
rendered_items.append(missing_item)
count += 1
# Render normal values
for value in self.order:
item_text, token = self.displays[value]
render = (self.renderSelectedItem if value in values
else self.renderItem)
rendered_item = render(
count,
item_text,
token,
self.name,
cssClass)
rendered_items.append(rendered_item)
count += 1
return rendered_items
def textForValue(self, term):
return self.displays[term.value]
class TranslatableSourceDropdownWidget(TranslatableSourceSelectWidget):
size = 1
---- end of src/zope/mimetype/widget.py ----
=====================================
Minimal IContentInfo Implementation
=====================================
.. currentmodule:: zope.mimetype.interfaces
The `zope.mimetype.contentinfo` module provides a minimal
`IContentInfo` implementation that adds no information to what's
provided by a content object. This represents the most conservative
content-type policy that might be useful.
Let's take a look at how this operates by creating a couple of
concrete content-type interfaces::
>>> from zope.mimetype import interfaces
>>> class ITextPlain(interfaces.IContentTypeEncoded):
... """text/plain"""
>>> class IApplicationOctetStream(interfaces.IContentType):
... """application/octet-stream"""
Now, we'll create a minimal content object that provides the necessary
information::
>>> import zope.interface
>>> @zope.interface.implementer(interfaces.IContentTypeAware)
... class Content(object):
... def __init__(self, mimeType, charset=None):
... self.mimeType = mimeType
... self.parameters = {}
... if charset:
... self.parameters["charset"] = charset
We can now create examples of both encoded and non-encoded content::
>>> encoded = Content("text/plain", "utf-8")
>>> zope.interface.alsoProvides(encoded, ITextPlain)
>>> unencoded = Content("application/octet-stream")
>>> zope.interface.alsoProvides(unencoded, IApplicationOctetStream)
The minimal ``IContentInfo`` implementation only exposes the information
available to it from the base content object. Let's take a look at
the unencoded content first::
>>> from zope.mimetype import contentinfo
>>> ci = contentinfo.ContentInfo(unencoded)
>>> ci.effectiveMimeType
'application/octet-stream'
>>> ci.effectiveParameters
{}
>>> ci.contentType
'application/octet-stream'
For unencoded content, there is never a codec::
>>> print(ci.getCodec())
None
It is also disallowed to try decoding such content::
>>> ci.decode("foo")
Traceback (most recent call last):
...
ValueError: no matching codec found
Attempting to decode data using an unencoded object likewise raises an
exception::
>>> ci.decode("data")
Traceback (most recent call last):
...
ValueError: no matching codec found
If we try this with encoded data, we get somewhat different behavior::
>>> ci = contentinfo.ContentInfo(encoded)
>>> ci.effectiveMimeType
'text/plain'
>>> ci.effectiveParameters
{'charset': 'utf-8'}
>>> ci.contentType
'text/plain;charset=utf-8'
The `IContentInfo.getCodec()` and `IContentInfo.decode()` methods can be used to handle encoded
data using the encoding indicated by the ``charset`` parameter. Let's
store some UTF-8 data in a variable::
>>> utf8_data = b"\xAB\xBB".decode("iso-8859-1").encode("utf-8")
>>> utf8_data
b'\xc2\xab\xc2\xbb'
We want to be able to decode the data using the `IContentInfo`
object. Let's try getting the corresponding `ICodec` object using
`IContentInfo.getCodec()`::
>>> codec = ci.getCodec()
Traceback (most recent call last):
...
ValueError: unsupported charset: 'utf-8'
So, we can't proceed without some further preparation. What we need
is to register an `ICharset` for UTF-8. The `ICharset` will need a
reference (by name) to an `ICodec` for UTF-8. So let's create those
objects and register them::
>>> import codecs
>>> from zope.mimetype.i18n import _
>>> @zope.interface.implementer(interfaces.ICodec)
... class Utf8Codec(object):
...
... name = "utf-8"
... title = _("UTF-8")
...
... def __init__(self):
... ( self.encode,
... self.decode,
... self.reader,
... self.writer
... ) = codecs.lookup(self.name)
>>> utf8_codec = Utf8Codec()
>>> @zope.interface.implementer(interfaces.ICharset)
... class Utf8Charset(object):
...
... name = utf8_codec.name
... encoding = name
>>> utf8_charset = Utf8Charset()
>>> import zope.component
>>> zope.component.provideUtility(
... utf8_codec, interfaces.ICodec, utf8_codec.name)
>>> zope.component.provideUtility(
... utf8_charset, interfaces.ICharset, utf8_charset.name)
Now that that's been initialized, let's try getting the codec again::
>>> codec = ci.getCodec()
>>> codec.name
'utf-8'
>>> codec.decode(utf8_data)
('\xab\xbb', 4)
We can now check that the ``decode()`` method of the `IContentInfo` will
decode the entire data, returning the Unicode representation of the
text::
>>> ci.decode(utf8_data)
'\xab\xbb'
Another possibility, of course, is that you have content that you know
is encoded text of some sort, but you don't actually know what
encoding it's in::
>>> encoded2 = Content("text/plain")
>>> zope.interface.alsoProvides(encoded2, ITextPlain)
>>> ci = contentinfo.ContentInfo(encoded2)
>>> ci.effectiveMimeType
'text/plain'
>>> ci.effectiveParameters
{}
>>> ci.contentType
'text/plain'
>>> ci.getCodec()
Traceback (most recent call last):
...
ValueError: charset not known
It's also possible that the initial content type information for an
object is incorrect for some reason. If the browser provides a
content type of "text/plain; charset=utf-8", the content will be seen
as encoded. A user correcting this content type using UI elements
can cause the content to be considered un-encoded. At this point,
there should no longer be a charset parameter to the content type, and
the content info object should reflect this, though the previous
encoding information will be retained in case the content type should
be changed to an encoded type in the future.
Let's see how this behavior will be exhibited in this API. We'll
start by creating some encoded content::
>>> content = Content("text/plain", "utf-8")
>>> zope.interface.alsoProvides(content, ITextPlain)
We can see that the encoding information is included in the effective
MIME type information provided by the content-info object::
>>> ci = contentinfo.ContentInfo(content)
>>> ci.effectiveMimeType
'text/plain'
>>> ci.effectiveParameters
{'charset': 'utf-8'}
We now change the content type information for the object::
>>> ifaces = zope.interface.directlyProvidedBy(content)
>>> ifaces -= ITextPlain
>>> ifaces += IApplicationOctetStream
>>> zope.interface.directlyProvides(content, *ifaces)
>>> content.mimeType = 'application/octet-stream'
At this point, a content info object would provide different
information::
>>> ci = contentinfo.ContentInfo(content)
>>> ci.effectiveMimeType
'application/octet-stream'
>>> ci.effectiveParameters
{}
The underlying content type parameters still contain the original
encoding information, however::
>>> content.parameters
{'charset': 'utf-8'}
---- end of src/zope/mimetype/contentinfo.rst ----
import codecs
# There's a zope.contenttype module that exports a similar API,
# but that's pretty heuristic. Some of this should perhaps be folded
# back into that, or this package could provide a replacement.
#
import mimetypes
import zope.contenttype.parse
from zope import interface
from zope.mimetype import interfaces
def mimeTypeGetter(name=None, data=None, content_type=None):
"""A minimal extractor that never attempts to guess."""
if name is None and data is None and content_type is None:
return None
if content_type:
try:
major, minor, _params = zope.contenttype.parse.parseOrdered(
content_type)
except ValueError:
pass
else:
return "{}/{}".format(major, minor)
return None
interface.directlyProvides(mimeTypeGetter, interfaces.IMimeTypeGetter)
def mimeTypeGuesser(name=None, data=None, content_type=None):
"""An extractor that tries to guess the content type based on the
name and data if the input contains no content type information.
"""
if name is None and data is None and content_type is None:
return None
mimeType = mimeTypeGetter(name=name, data=data, content_type=content_type)
if name and not mimeType:
mimeType, _encoding = mimetypes.guess_type(name, strict=True)
if not mimeType:
mimeType, _encoding = mimetypes.guess_type(name, strict=False)
#
# XXX If `encoding` is not None, we should re-consider the
# guess, since the encoding here is Content-Encoding, not
# charset. In particular, things like .tar.gz map to
# ('application/x-tar', 'gzip'), which may require different
# handling, or at least a separate content-type.
if data and not mimeType:
# no idea, really, but let's sniff a few common things:
for prefix, sniffed_type, _charset in _prefix_table:
if data.startswith(prefix):
mimeType = sniffed_type
break
return mimeType
interface.directlyProvides(mimeTypeGuesser, interfaces.IMimeTypeGetter)
def smartMimeTypeGuesser(name=None, data=None, content_type=None):
"""An extractor that checks the content for a variety of
constructs to try and refine the results of the
`mimeTypeGuesser()`. This is able to do things like check for
XHTML that's labelled as HTML in upload data.
"""
mimeType = mimeTypeGuesser(name=name, data=data, content_type=content_type)
if data and mimeType == "text/html":
for prefix, _mimetype, _charset in _xml_prefix_table:
if data.startswith(prefix):
# don't use text/xml from the table, but take
# advantage of the text/html hint (from the upload
# or mimetypes.guess_type())
mimeType = "application/xhtml+xml"
break
return mimeType
interface.directlyProvides(smartMimeTypeGuesser, interfaces.IMimeTypeGetter)
# Very simple magic numbers table for a few things we want to be good
# at identifying even if we get no help from the input:
#
_xml_prefix_table = (
# prefix, mimeType, charset
(b"<?xml", "text/xml", None),
(b"\xef\xbb\xbf<?xml", "text/xml", "utf-8"), # w/ BOM
(b"\0<\0?\0x\0m\0l", "text/xml", "utf-16be"),
(b"<\0?\0x\0m\0l\0", "text/xml", "utf-16le"),
(b"\xfe\xff\0<\0?\0x\0m\0l", "text/xml", "utf-16be"), # w/ BOM
(b"\xff\xfe<\0?\0x\0m\0l\0", "text/xml", "utf-16le"), # w/ BOM
)
_prefix_table = _xml_prefix_table + (
(b"<html", "text/html", None),
(b"<HTML", "text/html", None),
(b"GIF89a", "image/gif", None),
# PNG Signature: bytes 137 80 78 71 13 10 26 10
(b"\x89PNG\r\n\x1a\n", "image/png", None),
)
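# For example, with no usable name or content_type,
# mimeTypeGuesser(data=b"GIF89a...") falls through to this table and
# returns 'image/gif'.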
def charsetGetter(name=None, data=None, content_type=None):
"""Default implementation of `zope.mimetype.interfaces.ICharsetGetter`."""
if name is None and data is None and content_type is None:
return None
if content_type:
try:
major, minor, params = zope.contenttype.parse.parse(content_type)
except ValueError:
pass
else:
if params.get("charset"):
return params["charset"].lower()
if data:
if data.startswith(codecs.BOM_UTF16_LE):
return 'utf-16le'
elif data.startswith(codecs.BOM_UTF16_BE):
return 'utf-16be'
try:
data.decode('ascii')
return 'ascii'
except UnicodeDecodeError:
try:
data.decode('utf-8')
return 'utf-8'
except UnicodeDecodeError:
pass
return None
interface.directlyProvides(charsetGetter, interfaces.ICharsetGetter)
---- end of src/zope/mimetype/typegetter.py ----
=====================================
Constraint Functions for Interfaces
=====================================
.. currentmodule:: zope.mimetype.interfaces
The :mod:`zope.mimetype.interfaces` module defines interfaces that use some
helper functions to define constraints on the accepted data. These
helpers are used to determine whether values conform to what's
allowed for parts of a MIME type specification and other parts of a
Content-Type header as specified in RFC 2045.
Single Token
============
The first is the simplest: the `tokenConstraint()` function returns
`True` if the ASCII string it is passed conforms to the ``token``
production in section 5.1 of the RFC. Let's import the function::
>>> from zope.mimetype.interfaces import tokenConstraint
Typical tokens are the major and minor parts of the MIME type and the
parameter names for the Content-Type header. The function should
return `True` for these values::
>>> tokenConstraint("text")
True
>>> tokenConstraint("plain")
True
>>> tokenConstraint("charset")
True
The function should also return `True` for unusual but otherwise
normal tokens that may be used in some situations::
>>> tokenConstraint("not-your-fathers-token")
True
It must also allow extension tokens and vendor-specific tokens::
>>> tokenConstraint("x-magic")
True
>>> tokenConstraint("vnd.zope.special-data")
True
Since we expect input handlers to normalize values to lower case,
upper case text is not allowed::
>>> tokenConstraint("Text")
False
Non-ASCII text is also not allowed::
>>> tokenConstraint("\x80")
False
>>> tokenConstraint("\xC8")
False
>>> tokenConstraint("\xFF")
False
Note that lots of characters are allowed in tokens, and there are no
constraints that the token "look like" something a person would want
to read::
>>> tokenConstraint(".-.-.-.")
True
Other characters are disallowed, however, including all forms of
whitespace::
>>> tokenConstraint("foo bar")
False
>>> tokenConstraint("foo\tbar")
False
>>> tokenConstraint("foo\nbar")
False
>>> tokenConstraint("foo\rbar")
False
>>> tokenConstraint("foo\x7Fbar")
False
Whitespace before or after the token is not accepted either::
>>> tokenConstraint(" text")
False
>>> tokenConstraint("plain ")
False
Other disallowed characters are defined in the ``tspecials`` production
from the RFC (also in section 5.1)::
>>> tokenConstraint("(")
False
>>> tokenConstraint(")")
False
>>> tokenConstraint("<")
False
>>> tokenConstraint(">")
False
>>> tokenConstraint("@")
False
>>> tokenConstraint(",")
False
>>> tokenConstraint(";")
False
>>> tokenConstraint(":")
False
>>> tokenConstraint("\\")
False
>>> tokenConstraint('"')
False
>>> tokenConstraint("/")
False
>>> tokenConstraint("[")
False
>>> tokenConstraint("]")
False
>>> tokenConstraint("?")
False
>>> tokenConstraint("=")
False
A token must contain at least one character, so `tokenConstraint()`
returns false for an empty string::
>>> tokenConstraint("")
False
MIME Type
=========
A MIME type is specified using two tokens separated by a slash;
whitespace between the tokens and the slash must be normalized away in
the input handler.
The `mimeTypeConstraint()` function is available to test a normalized
MIME type value; let's import that function now::
>>> from zope.mimetype.interfaces import mimeTypeConstraint
Let's test some common MIME types to make sure the function isn't
obviously insane::
>>> mimeTypeConstraint("text/plain")
True
>>> mimeTypeConstraint("application/xml")
True
>>> mimeTypeConstraint("image/svg+xml")
True
If parts of the MIME type are missing, it isn't accepted::
>>> mimeTypeConstraint("text")
False
>>> mimeTypeConstraint("text/")
False
>>> mimeTypeConstraint("/plain")
False
As for individual tokens, whitespace is not allowed::
>>> mimeTypeConstraint("foo bar/plain")
False
>>> mimeTypeConstraint("text/foo bar")
False
Whitespace is not accepted around the slash either::
>>> mimeTypeConstraint("text /plain")
False
>>> mimeTypeConstraint("text/ plain")
False
Surrounding whitespace is also not accepted::
>>> mimeTypeConstraint(" text/plain")
False
>>> mimeTypeConstraint("text/plain ")
False
---- end of src/zope/mimetype/constraints.rst ----
"""Mime-Types management
"""
import csv
import os
import zope.interface
from zope.component import provideUtility
from zope.interface.interface import InterfaceClass
from zope.mimetype.i18n import _
from zope.mimetype.interfaces import IContentType
from zope.mimetype.interfaces import IContentTypeEncoded
from zope.mimetype.interfaces import IContentTypeInterface
def read(file_name):
file = open(file_name)
result = {}
for name, title, extensions, mime_types, icon_name, encoded in csv.reader(
file):
extensions = extensions.split()
mime_types = mime_types.split()
encoded = (encoded.strip().lower() == 'yes')
result[name] = (title.strip(), extensions, mime_types,
icon_name.strip(), encoded)
file.close()
return result
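# A hypothetical CSV row (the real data ships in types.csv alongside this
# module) and the entry read() would build from it:
#
#   IContentTypeTextPlain,Plain Text,.txt .text,text/plain,text.png,yes
#
#   {'IContentTypeTextPlain': ('Plain Text', ['.txt', '.text'],
#                              ['text/plain'], 'text.png', True)}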
def getInterfaces(data, module=None):
results = {}
if module is None:
module = __name__
globs = globals()
for name, info in data.items():
interface = globs.get(name)
if interface is None:
interface = makeInterface(name, info, module)
globs[name] = interface
results[name] = interface
return results
def makeInterface(name, info, module):
title, extensions, mime_types, icon_name, encoded = info
if encoded:
base = IContentTypeEncoded
else:
base = IContentType
interface = InterfaceClass(name, bases=(base,), __module__=module)
zope.interface.directlyProvides(interface, IContentTypeInterface)
interface.setTaggedValue('extensions', extensions)
interface.setTaggedValue('mimeTypes', mime_types)
interface.setTaggedValue('title', _(title, default=title))
return interface
def registerUtilities(interfaces, data):
for name, interface in interfaces.items():
for mime_type in data[name][2]:
provideUtility(interface, provides=IContentTypeInterface,
name=mime_type)
here = os.path.dirname(os.path.abspath(__file__))
types_data = os.path.join(here, "types.csv")
def setup():
data = read(types_data)
interfaces = getInterfaces(data)
registerUtilities(interfaces, data)
---- end of src/zope/mimetype/mtypes.py ----
========================================
MIME type and character set extraction
========================================
.. currentmodule:: zope.mimetype.typegetter
The `zope.mimetype.typegetter` module provides a selection of MIME
type extractors (implementations of
`zope.mimetype.interfaces.IMimeTypeGetter`) and charset extractors
(implementations of `zope.mimetype.interfaces.ICharsetGetter`).
These may be used to determine what the MIME type and character set
for uploaded data should be.
These two interfaces represent the site policy regarding interpreting
upload data in the face of missing or inaccurate input.
Let's go ahead and import the module::
>>> from zope.mimetype import typegetter
MIME types
==========
There are a number of interesting MIME-type extractors:
`mimeTypeGetter()`
A minimal extractor that never attempts to guess.
`mimeTypeGuesser()`
An extractor that tries to guess the content type based on the name
and data if the input contains no content type information.
`smartMimeTypeGuesser()`
An extractor that checks the content for a variety of constructs to
try and refine the results of the `mimeTypeGuesser()`. This is able
to do things like check for XHTML that's labelled as HTML in upload
data.
`mimeTypeGetter()`
------------------
We'll start with the simplest, which does no content-based guessing at
all, but uses the information provided by the browser directly. If
the browser did not provide any content-type information, or if it
cannot be parsed, the extractor simply returns None; a site that wants
a "safe" fallback can then substitute application/octet-stream. (The
rationale for that fallback type is that since there's really nothing
productive that can be done with it other than download it, it's
impossible to mis-interpret the data.)
When there's no information at all about the content, the extractor
returns None::
>>> print(typegetter.mimeTypeGetter())
None
Providing only the upload filename or data, or both, still produces
None, since no guessing is being done::
>>> print(typegetter.mimeTypeGetter(name="file.html"))
None
>>> print(typegetter.mimeTypeGetter(data=b"<html>...</html>"))
None
>>> print(typegetter.mimeTypeGetter(
... name="file.html", data=b"<html>...</html>"))
None
If a content type header is available for the input, that is used
since that represents explicit input from outside the application
server. The major and minor parts of the content type are extracted
and returned as a single string::
>>> typegetter.mimeTypeGetter(content_type="text/plain")
'text/plain'
>>> typegetter.mimeTypeGetter(content_type="text/plain; charset=utf-8")
'text/plain'
If the content-type information is provided but malformed (not in
conformance with RFC 2045), it is ignored, since the intent cannot be
reliably guessed::
>>> print(typegetter.mimeTypeGetter(content_type="foo bar"))
None
This combines with ignoring the other values that may be provided as
expected::
>>> print(typegetter.mimeTypeGetter(
... name="file.html", data=b"<html>...</html>", content_type="foo bar"))
None
`mimeTypeGuesser()`
-------------------
A more elaborate extractor that tries to work around completely
missing information can be found as the `mimeTypeGuesser()` function.
This function will only guess if there is no usable content type
information in the input. This extractor can be thought of as having
the following pseudo-code::
def mimeTypeGuesser(name=None, data=None, content_type=None):
type = mimeTypeGetter(name=name, data=data, content_type=content_type)
if type is None:
type = guess the content type
return type
Let's see how this affects the results we saw earlier. When there's
no input to use, we still get None::
>>> print(typegetter.mimeTypeGuesser())
None
Providing only the upload filename or data, or both, now produces a
non-None guess for common content types::
>>> typegetter.mimeTypeGuesser(name="file.html")
'text/html'
>>> typegetter.mimeTypeGuesser(data=b"<html>...</html>")
'text/html'
>>> typegetter.mimeTypeGuesser(name="file.html", data=b"<html>...</html>")
'text/html'
Note that if the filename and data provided separately produce
different MIME types, the result of providing both will be one of
those types, but which is unspecified::
>>> mt_1 = typegetter.mimeTypeGuesser(name="file.html")
>>> mt_1
'text/html'
>>> mt_2 = typegetter.mimeTypeGuesser(data=b"<?xml version='1.0'?>...")
>>> mt_2
'text/xml'
>>> mt = typegetter.mimeTypeGuesser(
... data=b"<?xml version='1.0'?>...", name="file.html")
>>> mt in (mt_1, mt_2)
True
If a content type header is available for the input, that is used in
the same way as for the `mimeTypeGetter()` function::
>>> typegetter.mimeTypeGuesser(content_type="text/plain")
'text/plain'
>>> typegetter.mimeTypeGuesser(content_type="text/plain; charset=utf-8")
'text/plain'
If the content-type information is provided but malformed, it is
ignored::
>>> print(typegetter.mimeTypeGuesser(content_type="foo bar"))
None
When combined with values for the filename or content data, those are
still used to provide reasonable guesses for the content type::
>>> typegetter.mimeTypeGuesser(name="file.html", content_type="foo bar")
'text/html'
>>> typegetter.mimeTypeGuesser(
... data=b"<html>...</html>", content_type="foo bar")
'text/html'
Information from a parsable content-type is still used even if a guess
from the data or filename would provide a different or more-refined
result::
>>> typegetter.mimeTypeGuesser(
... data=b"GIF89a...", content_type="application/octet-stream")
'application/octet-stream'
`smartMimeTypeGuesser()`
------------------------
The `smartMimeTypeGuesser()` function applies more knowledge to the
process of determining the MIME-type to use. Essentially, it takes
the result of the `mimeTypeGuesser()` function and attempts to refine
the content-type based on various heuristics.
We still see the basic behavior that no input produces None::
>>> print(typegetter.smartMimeTypeGuesser())
None
An unparsable content-type is still ignored::
>>> print(typegetter.smartMimeTypeGuesser(content_type="foo bar"))
None
The interpretation of uploaded data will be different in at least some
interesting cases. For instance, the `mimeTypeGuesser()` function
provides these results for some XHTML input data::
>>> typegetter.mimeTypeGuesser(
... data=b"<?xml version='1.0' encoding='utf-8'?><html>...</html>",
... name="file.html")
'text/html'
The smart extractor is able to refine this into more usable data::
>>> typegetter.smartMimeTypeGuesser(
... data=b"<?xml version='1.0' encoding='utf-8'?>...",
... name="file.html")
'application/xhtml+xml'
In this case, the smart extractor has refined the information
determined from the filename using information from the uploaded
data. The specific approach taken by the extractor is not part of the
interface, however.
`charsetGetter()`
-----------------
If you're interested in the character set of textual data, you can use
the `charsetGetter` function (which can also be registered as the
`ICharsetGetter` utility):
The simplest case is when the character set is already specified in the
content type.
>>> typegetter.charsetGetter(content_type='text/plain; charset=mambo-42')
'mambo-42'
Note that the charset name is lowercased, because all the default ICharset
and ICharsetCodec utilities are registered for lowercase names.
>>> typegetter.charsetGetter(content_type='text/plain; charset=UTF-8')
'utf-8'
If it isn't, `charsetGetter` can try to guess by looking at actual data
>>> typegetter.charsetGetter(content_type='text/plain', data=b'just text')
'ascii'
>>> typegetter.charsetGetter(content_type='text/plain', data=b'\xe2\x98\xba')
'utf-8'
>>> import codecs
>>> typegetter.charsetGetter(data=codecs.BOM_UTF16_BE + b'\x12\x34')
'utf-16be'
>>> typegetter.charsetGetter(data=codecs.BOM_UTF16_LE + b'\x12\x34')
'utf-16le'
If the character set cannot be determined, `charsetGetter` returns None.
>>> typegetter.charsetGetter(content_type='text/plain', data=b'\xff')
>>> typegetter.charsetGetter()
---- end of src/zope/mimetype/typegetter.rst ----
Retrieving Content Type Information
===================================
MIME Types
----------
We'll start by initializing the interfaces and registrations for the
content type interfaces. This is normally done via ZCML.
>>> from zope.mimetype import mtypes
>>> mtypes.setup()
A utility is used to retrieve MIME types.
>>> from zope import component
>>> from zope.mimetype import typegetter
>>> from zope.mimetype.interfaces import IMimeTypeGetter
>>> component.provideUtility(typegetter.smartMimeTypeGuesser,
... provides=IMimeTypeGetter)
>>> mime_getter = component.getUtility(IMimeTypeGetter)
The utility maps a particular file name, file contents, and content type to a
MIME type.
>>> mime_getter(name='file.txt', data=b'A text file.',
... content_type='text/plain')
'text/plain'
In the default implementation if not enough information is given to discern a
MIME type, None is returned.
>>> mime_getter() is None
True
Character Sets
--------------
A utility is also used to retrieve character sets (charsets).
>>> from zope.mimetype.interfaces import ICharsetGetter
>>> component.provideUtility(typegetter.charsetGetter,
... provides=ICharsetGetter)
>>> charset_getter = component.getUtility(ICharsetGetter)
The utility maps a particular file name, file contents, and content type to a
charset.
>>> charset_getter(name='file.txt', data=b'This is a text file.',
... content_type='text/plain;charset=ascii')
'ascii'
In the default implementation if not enough information is given to discern a
charset, None is returned.
>>> charset_getter() is None
True
Finding Interfaces
------------------
Given a MIME type we need to be able to find the appropriate interface.
>>> from zope.mimetype.interfaces import IContentTypeInterface
>>> component.getUtility(IContentTypeInterface, name=u'text/plain')
<InterfaceClass zope.mimetype.mtypes.IContentTypeTextPlain>
It is also possible to enumerate all content type interfaces.
>>> utilities = list(component.getUtilitiesFor(IContentTypeInterface))
If you want to find an interface from a MIME type string, you can use the
utilities.
>>> component.getUtility(IContentTypeInterface, name='text/plain')
<InterfaceClass zope.mimetype.mtypes.IContentTypeTextPlain>
---- end of src/zope/mimetype/retrieving_mime_types.rst ----
=================================
Events and content-type changes
=================================
.. currentmodule:: zope.mimetype.interfaces
The `IContentTypeChangedEvent` is fired whenever an object's
`IContentTypeInterface` is changed. This includes the cases when a
content type interface is applied to an object that doesn't have one,
and when the content type interface is removed from an object.
Let's start the demonstration by defining a subscriber for the event
that simply prints out the information from the event object::
>>> def handler(event):
... print("changed content type interface:")
... print(" from:", repr(event.oldContentType))
... print(" to:", repr(event.newContentType))
We'll also define a simple content object::
>>> import zope.interface
>>> class IContent(zope.interface.Interface):
... pass
>>> @zope.interface.implementer(IContent)
... class Content(object):
... def __str__(self):
... return "<MyContent>"
>>> obj = Content()
We'll also need a couple of content type interfaces::
>>> from zope.mimetype import interfaces
>>> class ITextPlain(interfaces.IContentTypeEncoded):
... """text/plain"""
>>> ITextPlain.setTaggedValue("mimeTypes", ["text/plain"])
>>> ITextPlain.setTaggedValue("extensions", [".txt"])
>>> zope.interface.directlyProvides(
... ITextPlain, interfaces.IContentTypeInterface)
>>> class IOctetStream(interfaces.IContentType):
... """application/octet-stream"""
>>> IOctetStream.setTaggedValue("mimeTypes", ["application/octet-stream"])
>>> IOctetStream.setTaggedValue("extensions", [".bin"])
>>> zope.interface.directlyProvides(
... IOctetStream, interfaces.IContentTypeInterface)
Let's register our subscriber::
>>> import zope.component
>>> import zope.interface.interfaces
>>> zope.component.provideHandler(
... handler,
... (zope.interface.interfaces.IObjectEvent,))
Changing the content type interface on an object is handled by the
`zope.mimetype.event.changeContentType()` function. Let's import that
module and demonstrate that the expected event is fired
appropriately::
>>> from zope.mimetype import event
Since the object currently has no content type interface, "removing"
the interface does not affect the object and the event is not fired::
>>> event.changeContentType(obj, None)
Setting a content type interface on an object that doesn't have one
will cause the event to be fired, with the ``oldContentType`` attribute
on the event set to `None`::
>>> event.changeContentType(obj, ITextPlain)
changed content type interface:
from: None
to: <InterfaceClass builtins.ITextPlain>
Calling the `~.changeContentType()` function again with the same "new"
content type interface causes no change, so the event is not fired
again::
>>> event.changeContentType(obj, ITextPlain)
Providing a new interface does cause the event to be fired again::
>>> event.changeContentType(obj, IOctetStream)
changed content type interface:
from: <InterfaceClass builtins.ITextPlain>
to: <InterfaceClass builtins.IOctetStream>
Similarly, removing the content type interface triggers the event as
well::
>>> event.changeContentType(obj, None)
changed content type interface:
from: <InterfaceClass builtins.IOctetStream>
to: None
---- end of src/zope/mimetype/event.rst ----
__docformat__ = "reStructuredText"
import sys
import zope.component
import zope.publisher.interfaces.browser
from zope.browser.interfaces import ITerms
import zope.mimetype.interfaces
# Base classes
class UtilitySource:
"""Source of utilities providing a specific interface."""
def __init__(self):
self._length = None
def __contains__(self, value):
ok = self._interface.providedBy(value)
if ok:
for name, interface in zope.component.getUtilitiesFor(
self._interface):
if interface is value:
return True
return False
def __iter__(self):
length = 0
seen = set()
# iterate over the registered utilities, de-duplicating interfaces
# that are registered under more than one name
for name, interface in zope.component.getUtilitiesFor(
self._interface):
if interface not in seen:
seen.add(interface)
length += 1
yield interface
# cache the number of distinct values for __len__
self._length = length
def __len__(self):
if self._length is None:
self._length = len(list(iter(self)))
return self._length
@zope.interface.implementer(ITerms)
class Terms:
"""Utility to provide terms for content type interfaces."""
def __init__(self, source, request):
self.context = source
self.request = request
def getTerm(self, value):
if value in self.context:
return self._createTerm(value)
raise LookupError("value is not an element in the source")
# Source & vocabulary for `IContentTypeInterface` providers
@zope.interface.implementer(zope.mimetype.interfaces.IContentTypeSource)
class ContentTypeSource(UtilitySource):
"""Source of IContentTypeInterface providers."""
_interface = zope.mimetype.interfaces.IContentTypeInterface
@zope.component.adapter(
zope.mimetype.interfaces.IContentTypeSource,
zope.publisher.interfaces.browser.IBrowserRequest)
class ContentTypeTerms(Terms):
"""Utility to provide terms for content type interfaces."""
def getValue(self, token):
module, name = token.rsplit(".", 1)
if module not in sys.modules:
try:
__import__(module)
except ImportError:
raise LookupError("could not import module for token")
interface = getattr(sys.modules[module], name)
if interface in self.context:
return interface
raise LookupError("token does not represent an element in the source")
def _createTerm(self, value):
return ContentTypeTerm(value)
@zope.interface.implementer(zope.mimetype.interfaces.IContentTypeTerm)
class ContentTypeTerm:
def __init__(self, interface):
self.value = interface
@property
def token(self):
return "{}.{}".format(self.value.__module__, self.value.__name__)
@property
def title(self):
return self.value.getTaggedValue("title")
@property
def mimeTypes(self):
return self.value.getTaggedValue("mimeTypes")
@property
def extensions(self):
return self.value.getTaggedValue("extensions")
contentTypeSource = ContentTypeSource()
# Source & vocabulary for `IContentTypeInterface` providers
@zope.interface.implementer(zope.mimetype.interfaces.ICodecSource)
class CodecSource(UtilitySource):
"""Source of ICodec providers."""
_interface = zope.mimetype.interfaces.ICodec
@zope.component.adapter(
zope.mimetype.interfaces.ICodecSource,
zope.publisher.interfaces.browser.IBrowserRequest)
class CodecTerms(Terms):
"""Utility to provide terms for codecs."""
def getValue(self, token):
codec = zope.component.queryUtility(
zope.mimetype.interfaces.ICodec, token)
if codec is None:
raise LookupError("no matching code: %r" % token)
if codec not in self.context:
raise LookupError("codec not in source: %r" % token)
return codec
def _createTerm(self, value):
return CodecTerm(value)
@zope.interface.implementer(zope.mimetype.interfaces.ICodecTerm)
class CodecTerm:
def __init__(self, codec):
self.value = codec
@property
def token(self):
return self.value.name
@property
def title(self):
return self.value.title
@property
def preferredCharset(self):
charset = zope.component.queryUtility(
zope.mimetype.interfaces.ICodecPreferredCharset,
name=self.value.name)
if charset is None:
available = [(name, charset)
for (name, charset) in zope.component.getUtilitiesFor(
zope.mimetype.interfaces.ICharset)
if charset.encoding == self.value.name]
if available:
# no charset marked preferred; pick one
available.sort()
charset = available[0][1]
# the case that charset is None, meaning no charsets are
# available, should not happen in practice
return charset.name if charset is not None else None
codecSource = CodecSource() | zope.mimetype | /zope.mimetype-3.0.tar.gz/zope.mimetype-3.0/src/zope/mimetype/source.py | source.py |
================
Codec handling
================
.. currentmodule:: zope.mimetype.interfaces
We can create codecs programmatically.  Codecs are registered as
utilities for `ICodec` with the name of their python codec.
>>> from zope import component
>>> from zope.mimetype.interfaces import ICodec
>>> from zope.mimetype.codec import addCodec
>>> sorted(component.getUtilitiesFor(ICodec))
[]
>>> addCodec('iso8859-1', 'Western (ISO-8859-1)')
>>> codec = component.getUtility(ICodec, name='iso8859-1')
>>> codec
<zope.mimetype.codec.Codec ...>
>>> codec.name
'iso8859-1'
>>> addCodec('utf-8', 'Unicode (UTF-8)')
>>> codec2 = component.getUtility(ICodec, name='utf-8')
We can programmatically add charsets to a given codec. This registers
each charset as a named utility for `ICharset`. It also registers the codec
as a utility for `ICharsetCodec` with the name of the charset.
>>> from zope.mimetype.codec import addCharset
>>> from zope.mimetype.interfaces import ICharset, ICharsetCodec
>>> sorted(component.getUtilitiesFor(ICharset))
[]
>>> sorted(component.getUtilitiesFor(ICharsetCodec))
[]
>>> addCharset(codec.name, 'latin1')
>>> charset = component.getUtility(ICharset, name='latin1')
>>> charset
<zope.mimetype.codec.Charset ...>
>>> charset.name
'latin1'
>>> component.getUtility(ICharsetCodec, name='latin1') is codec
True
When adding a charset we can state that we want that charset to be the
preferred charset for its codec.
>>> addCharset(codec.name, 'iso8859-1', preferred=True)
>>> addCharset(codec2.name, 'utf-8', preferred=True)
A codec can have at most one preferred charset.
>>> addCharset(codec.name, 'test', preferred=True)
Traceback (most recent call last):
...
ValueError: Codec already has a preferred charset.
Preferred charsets are registered as utilities for
`ICodecPreferredCharset` under the name of the python codec.
>>> from zope.mimetype.interfaces import ICodecPreferredCharset
>>> preferred = component.getUtility(ICodecPreferredCharset, name='iso8859-1')
>>> preferred
<zope.mimetype.codec.Charset ...>
>>> preferred.name
'iso8859-1'
>>> sorted(component.getUtilitiesFor(ICodecPreferredCharset))
[('iso8859-1', <zope.mimetype.codec.Charset ...>),
('utf-8', <zope.mimetype.codec.Charset ...>)]
We can look up a codec by the name of its charset:
>>> component.getUtility(ICharsetCodec, name='latin1') is codec
True
>>> component.getUtility(ICharsetCodec, name='utf-8') is codec2
True
Or we can look up all codecs:
>>> sorted(component.getUtilitiesFor(ICharsetCodec))
[('iso8859-1', <zope.mimetype.codec.Codec ...>),
('latin1', <zope.mimetype.codec.Codec ...>),
('test', <zope.mimetype.codec.Codec ...>),
('utf-8', <zope.mimetype.codec.Codec ...>)]
| zope.mimetype | /zope.mimetype-3.0.tar.gz/zope.mimetype-3.0/src/zope/mimetype/codec.rst | codec.rst |
=========
Widgets
=========
TranslatableSourceSelectWidget
==============================
.. currentmodule:: zope.mimetype.widget
`TranslatableSourceSelectWidget` is a `.SourceSelectWidget` that translates
and sorts the choices.
We will borrow the boring setup code from the SourceSelectWidget test
(``source.txt`` in ``zope.formlib``).
>>> import zope.interface
>>> import zope.component
>>> import zope.schema
>>> import zope.schema.interfaces
>>> @zope.interface.implementer(zope.schema.interfaces.IIterableSource)
... class SourceList(list):
... pass
>>> import base64, binascii
>>> import zope.publisher.interfaces.browser
>>> from zope.browser.interfaces import ITerms
>>> from zope.schema.vocabulary import SimpleTerm
>>> @zope.interface.implementer(ITerms)
... class ListTerms(object):
...
... def __init__(self, source, request):
... pass # We don't actually need the source or the request :)
...
... def getTerm(self, value):
... title = value.decode() if isinstance(value, bytes) else value
... try:
... token = base64.b64encode(title.encode()).strip().decode()
... except binascii.Error:
...             raise LookupError(value)
... return SimpleTerm(value, token=token, title=title)
...
... def getValue(self, token):
...         return base64.b64decode(token).decode()
>>> zope.component.provideAdapter(
... ListTerms,
... (SourceList, zope.publisher.interfaces.browser.IBrowserRequest))
>>> dog = zope.schema.Choice(
... __name__ = 'dog',
... title=u"Dogs",
... source=SourceList(['spot', 'bowser', 'prince', 'duchess', 'lassie']),
... )
>>> dog = dog.bind(object())
Now that we have a field and a working source, we can construct and render
a widget.
>>> from zope.mimetype.widget import TranslatableSourceSelectWidget
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> widget = TranslatableSourceSelectWidget(
... dog, dog.source, request)
>>> print(widget())
<div>
<div class="value">
<select id="field.dog" name="field.dog" size="5" >
<option value="Ym93c2Vy">bowser</option>
<option value="ZHVjaGVzcw==">duchess</option>
<option value="bGFzc2ll">lassie</option>
<option value="cHJpbmNl">prince</option>
<option value="c3BvdA==">spot</option>
</select>
</div>
<input name="field.dog-empty-marker" type="hidden" value="1" />
</div>
>>> widget.textForValue(widget.vocabulary.terms.getTerm('spot'))
('spot', 'c3BvdA==')
Note that the options are ordered alphabetically.
If the field is not required, we will also see a special choice labeled
"(nothing selected)" at the top of the list:
>>> dog.required = False
>>> print(widget())
<div>
<div class="value">
<select id="field.dog" name="field.dog" size="5" >
<option selected="selected" value="">(nothing selected)</option>
<option value="Ym93c2Vy">bowser</option>
<option value="ZHVjaGVzcw==">duchess</option>
<option value="bGFzc2ll">lassie</option>
<option value="cHJpbmNl">prince</option>
<option value="c3BvdA==">spot</option>
</select>
</div>
<input name="field.dog-empty-marker" type="hidden" value="1" />
</div>
| zope.mimetype | /zope.mimetype-3.0.tar.gz/zope.mimetype-3.0/src/zope/mimetype/widget.rst | widget.rst |
__docformat__ = "reStructuredText"
import zope.component
import zope.contenttype.parse
import zope.interface
import zope.mimetype.interfaces
@zope.interface.implementer(zope.mimetype.interfaces.IContentInfo)
@zope.component.adapter(zope.interface.Interface)
class ContentInfo:
"""Basic IContentInfo that provides information from an IContentTypeAware.
"""
def __init__(self, context):
self.context = context
aware = zope.mimetype.interfaces.IContentTypeAware(context)
self.effectiveMimeType = aware.mimeType
self.effectiveParameters = dict(aware.parameters)
if self.effectiveParameters:
encoded = zope.mimetype.interfaces.IContentTypeEncoded.providedBy(
context)
if "charset" in self.effectiveParameters and not encoded:
del self.effectiveParameters["charset"]
major, minor = self.effectiveMimeType.split("/")
self.contentType = zope.contenttype.parse.join(
(major, minor, self.effectiveParameters))
else:
self.contentType = self.effectiveMimeType
def getCodec(self):
if "_codec" in self.__dict__:
return self._codec
isencoded = zope.mimetype.interfaces.IContentTypeEncoded.providedBy(
self.context)
if isencoded:
charset = self.effectiveParameters.get("charset")
if charset:
utility = zope.component.queryUtility(
zope.mimetype.interfaces.ICharset, charset)
if utility is None:
raise ValueError("unsupported charset: %r" % charset)
codec = zope.component.getUtility(
zope.mimetype.interfaces.ICodec, utility.encoding)
self._codec = codec
else:
raise ValueError("charset not known")
else:
self._codec = None
return self._codec
def decode(self, s):
codec = self.getCodec()
if codec is not None:
text, consumed = codec.decode(s)
if consumed != len(s):
raise ValueError("data not completely consumed")
return text
else:
raise ValueError("no matching codec found") | zope.mimetype | /zope.mimetype-3.0.tar.gz/zope.mimetype-3.0/src/zope/mimetype/contentinfo.py | contentinfo.py |
Package configuration
=====================
The ``zope.mimetype`` package provides a ZCML file that configures some
adapters and utilities and a couple of views:
>>> from zope.configuration.xmlconfig import XMLConfig
>>> import zope.mimetype
>>> len(list(zope.component.getGlobalSiteManager().registeredUtilities()))
0
>>> XMLConfig('configure.zcml', zope.mimetype)()
>>> len(list(zope.component.getGlobalSiteManager().registeredUtilities())) >= 755
True
The 'zmi_icon' adapters are only installed if zope.browserresource
is available:
>>> try:
... import zope.browserresource
... except ImportError:
... expected = 1
... else:
... expected = 107
>>> len(list(zope.component.getGlobalSiteManager().registeredAdapters())) == expected
True
| zope.mimetype | /zope.mimetype-3.0.tar.gz/zope.mimetype-3.0/src/zope/mimetype/configure.rst | configure.rst |
===============
API Reference
===============
zope.mimetype.interfaces
========================
.. automodule:: zope.mimetype.interfaces
zope.mimetype.codec
===================
.. automodule:: zope.mimetype.codec
zope.mimetype.contentinfo
=========================
.. automodule:: zope.mimetype.contentinfo
zope.mimetype.event
===================
.. automodule:: zope.mimetype.event
zope.mimetype.i18n
==================
.. automodule:: zope.mimetype.i18n
zope.mimetype.mtypes
====================
.. automodule:: zope.mimetype.mtypes
zope.mimetype.source
====================
.. automodule:: zope.mimetype.source
zope.mimetype.typegetter
========================
.. automodule:: zope.mimetype.typegetter
zope.mimetype.utils
===================
.. automodule:: zope.mimetype.utils
zope.mimetype.widget
====================
.. automodule:: zope.mimetype.widget
zope.mimetype.zcml
==================
.. automodule:: zope.mimetype.zcml
| zope.mimetype | /zope.mimetype-3.0.tar.gz/zope.mimetype-3.0/docs/api/index.rst | index.rst |
=========
Changes
=========
2.3 (2022-08-30)
================
- Drop support for Python 3.4.
- Add support for Python 3.7, 3.8, 3.9, 3.10.
- Make the ``AbstractValue`` class public in ``zope.minmax``. It was
already documented to be public.
2.2.0 (2017-08-14)
==================
- Add support for Python 3.5 and 3.6.
- Drop support for Python 2.6 and 3.3.
- Bring unit test coverage to 100% (including branches).
- Convert doctests to Sphinx documentation, including building docs
and running doctest snippets under ``tox``.
- Host documentation at https://zopeminmax.readthedocs.io
2.1.0 (2014-12-27)
==================
- Add support for PyPy3.
- Add support Python 3.4.
2.0.0 (2013-02-19)
==================
- Add support for Python 3.3 and PyPy.
- Replace deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Drop support for Python 2.4 and 2.5.
1.1.2 (2009-09-24)
==================
- Use the standard Python doctest module instead of the deprecated
zope.testing.doctest.
1.1.1 (2009-09-09)
==================
- Fix homepage link and mailing list address.
1.1 (2007-10-02)
================
- Refactor package setup.
1.0 (2007-09-28)
================
- No further changes since 1.0b2
1.0b2 (2007-07-09)
==================
- Remove ``_p_independent`` method from ``AbstractValue`` class.
1.0b1 (2007-07-03)
==================
- Initial release.
| zope.minmax | /zope.minmax-2.3.tar.gz/zope.minmax-2.3/CHANGES.rst | CHANGES.rst |
=================
``zope.minmax``
=================
.. image:: https://img.shields.io/pypi/v/zope.minmax.svg
:target: https://pypi.python.org/pypi/zope.minmax/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/zope.minmax.svg
:target: https://pypi.org/project/zope.minmax/
:alt: Supported Python versions
.. image:: https://github.com/zopefoundation/zope.minmax/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zope.minmax/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/zope.minmax/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zope.minmax?branch=master
.. image:: https://readthedocs.org/projects/zopeminmax/badge/?version=latest
:target: https://zopeminmax.readthedocs.io/en/latest/
:alt: Documentation Status
This package provides support for homogeneous values favoring maximum
or minimum (e.g., numbers) for ZODB conflict resolution.
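For example, a persistent high-water mark might be kept roughly like this (a
sketch; the names and the stored number are purely illustrative)::

    from zope.minmax import Maximum

    high_water = Maximum(0)
    high_water.value = 42
    # If two transactions store different values concurrently, ZODB conflict
    # resolution keeps the larger one instead of raising a ConflictError.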
See https://zopeminmax.readthedocs.io for a detailed description.
| zope.minmax | /zope.minmax-2.3.tar.gz/zope.minmax-2.3/README.rst | README.rst |
Hacking on :mod:`zope.minmax`
=============================
Getting the Code
################
The main repository for :mod:`zope.minmax` is in the Zope Foundation
Github repository:
https://github.com/zopefoundation/zope.minmax
You can get a read-only checkout from there:
.. code-block:: sh
$ git clone https://github.com/zopefoundation/zope.minmax.git
or fork it and get a writeable checkout of your fork:
.. code-block:: sh
$ git clone [email protected]/jrandom/zope.minmax.git
The project also mirrors the trunk from the Github repository as a
Bazaar branch on Launchpad:
https://code.launchpad.net/zope.minmax
You can branch the trunk from there using Bazaar:
.. code-block:: sh
$ bzr branch lp:zope.minmax
Working in a ``virtualenv``
###########################
Installing
----------
If you use the ``virtualenv`` package to create lightweight Python
development environments, you can run the tests using nothing more
than the ``python`` binary in a virtualenv. First, create a scratch
environment:
.. code-block:: sh
$ /path/to/virtualenv --no-site-packages /tmp/hack-zope.minmax
Next, get this package registered as a "development egg" in the
environment:
.. code-block:: sh
$ /tmp/hack-zope.minmax/bin/python setup.py develop
Running the tests
-----------------
Run the tests using the built-in ``setuptools`` testrunner:
.. code-block:: sh
$ /tmp/hack-zope.minmax/bin/python setup.py test
running test
.........
----------------------------------------------------------------------
Ran 9 tests in 0.000s
OK
If you have the :mod:`nose` package installed in the virtualenv, you can
use its testrunner too:
.. code-block:: sh
$ /tmp/hack-zope.minmax/bin/easy_install nose
...
$ /tmp/hack-zope.minmax/bin/nosetests
.........
----------------------------------------------------------------------
Ran 18 tests in 0.000s
OK
If you have the :mod:`coverage` package installed in the virtualenv,
you can see how well the tests cover the code:
.. code-block:: sh
$ /tmp/hack-zope.minmax/bin/easy_install nose coverage
...
$ /tmp/hack-zope.minmax/bin/nosetests --with-coverage
running nosetests
..................
Name Stmts Miss Branch BrPart Cover Missing
-----------------------------------------------------------------------
zope/minmax.py 1 0 0 0 100%
zope/minmax/_minmax.py 22 0 2 0 100%
zope/minmax/interfaces.py 2 0 0 0 100%
-----------------------------------------------------------------------
TOTAL 25 0 2 0 100%
----------------------------------------------------------------------
Ran 18 tests in 0.027s
OK
Building the documentation
--------------------------
:mod:`zope.minmax` uses the nifty :mod:`Sphinx` documentation system
for building its docs. Using the same virtualenv you set up to run the
tests, you can build the docs:
.. code-block:: sh
$ /tmp/hack-zope.minmax/bin/easy_install Sphinx
...
$ bin/sphinx-build -b html -d docs/_build/doctrees docs docs/_build/html
...
build succeeded.
You can also test the code snippets in the documentation:
.. code-block:: sh
$ bin/sphinx-build -b doctest -d docs/_build/doctrees docs docs/_build/doctest
...
Doctest summary
===============
42 tests
0 failures in tests
0 failures in setup code
build succeeded.
Testing of doctests in the sources finished, look at the \
results in _build/doctest/output.txt.
Using :mod:`zc.buildout`
########################
Setting up the buildout
-----------------------
:mod:`zope.minmax` ships with its own :file:`buildout.cfg` file and
:file:`bootstrap.py` for setting up a development buildout:
.. code-block:: sh
$ /path/to/python2.7 bootstrap.py
...
Generated script '.../bin/buildout'
$ bin/buildout
Develop: '/home/jrandom/projects/Zope/zope.minmax/.'
...
Generated script '.../bin/sphinx-quickstart'.
Generated script '.../bin/sphinx-build'.
Running the tests
-----------------
Run the tests:
.. code-block:: sh
$ bin/test --all
Running zope.testing.testrunner.layer.UnitTests tests:
Set up zope.testing.testrunner.layer.UnitTests in 0.000 seconds.
Ran 400 tests with 0 failures and 0 errors in 0.366 seconds.
Tearing down left over layers:
Tear down zope.testing.testrunner.layer.UnitTests in 0.000 seconds.
Using :mod:`tox`
################
Running Tests on Multiple Python Versions
-----------------------------------------
`tox <http://tox.testrun.org/latest/>`_ is a Python-based test automation
tool designed to run tests against multiple Python versions. It creates
a ``virtualenv`` for each configured version, installs the current package
and configured dependencies into each ``virtualenv``, and then runs the
configured commands.
:mod:`zope.minmax` configures the following :mod:`tox` environments via
its ``tox.ini`` file:
- The ``py26``, ``py27``, ``py33``, ``py34``, ``pypy``, and ``pypy3``
  environments build a ``virtualenv`` with the appropriate interpreter,
  install :mod:`zope.minmax` and dependencies, and run the tests
  via ``python setup.py test -q``.
- The ``coverage`` environment builds a ``virtualenv`` with ``python2.6``,
installs :mod:`zope.minmax`, installs
:mod:`nose` and :mod:`coverage`, and runs ``nosetests`` with statement
coverage.
- The ``docs`` environment builds a virtualenv with ``python2.6``, installs
:mod:`zope.minmax`, installs ``Sphinx`` and
dependencies, and then builds the docs and exercises the doctest snippets.
This example requires that you have a working ``python2.6`` on your path,
as well as having ``tox`` installed:
.. code-block:: sh
$ tox -e py26
GLOB sdist-make: .../zope.interface/setup.py
py26 sdist-reinst: .../zope.interface/.tox/dist/zope.interface-4.0.2dev.zip
py26 runtests: commands[0]
.........
----------------------------------------------------------------------
Ran 9 tests in 0.152s
OK
___________________________________ summary ____________________________________
py26: commands succeeded
congratulations :)
Running ``tox`` with no arguments runs all the configured environments,
including building the docs and testing their snippets:
.. code-block:: sh
$ tox
GLOB sdist-make: .../zope.interface/setup.py
py26 sdist-reinst: .../zope.interface/.tox/dist/zope.interface-4.0.2dev.zip
py26 runtests: commands[0]
...
Doctest summary
===============
42 tests
0 failures in tests
0 failures in setup code
0 failures in cleanup code
build succeeded.
___________________________________ summary ____________________________________
py26: commands succeeded
py27: commands succeeded
py33: commands succeeded
py34: commands succeeded
pypy: commands succeeded
coverage: commands succeeded
docs: commands succeeded
congratulations :)
Contributing to :mod:`zope.minmax`
##################################
Submitting a Bug Report
-----------------------
:mod:`zope.minmax` tracks its bugs on Github:
https://github.com/zopefoundation/zope.minmax/issues
Please submit bug reports and feature requests there.
Sharing Your Changes
--------------------
.. note::
Please ensure that all tests are passing before you submit your code.
If possible, your submission should include new tests for new features
or bug fixes, although it is possible that you may have tested your
new code by updating existing tests.
If you have made a change you would like to share, the best route is to fork
the GitHub repository, check out your fork, make your changes on a branch
in your fork, and push it. You can then submit a pull request from your
branch:
https://github.com/zopefoundation/zope.minmax/pulls
If you branched the code from Launchpad using Bazaar, you have another
option: you can "push" your branch to Launchpad:
.. code-block:: sh
$ bzr push lp:~jrandom/zope.minmax/cool_feature
After pushing your branch, you can link it to a bug report on Launchpad,
or request that the maintainers merge your branch using the Launchpad
"merge request" feature.
| zope.minmax | /zope.minmax-2.3.tar.gz/zope.minmax-2.3/docs/hacking.rst | hacking.rst |
=====================================================
Conflict Resolution using Maximum or Minimum Values
=====================================================
The :class:`zope.minmax.AbstractValue` class provides a superclass which can
be subclassed to store arbitrary *homogeneous* values in persistent
storage and apply different conflict-resolution policies.
.. autoclass:: zope.minmax.interfaces.IAbstractValue
.. autoclass:: zope.minmax.AbstractValue
:private-members:
The subclasses defined here always resolve conflicts using either the
maximum or the minimum of the conflicting values.
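Other policies can be implemented in the same way by subclassing
:class:`zope.minmax.AbstractValue` and overriding ``_p_resolveConflict``.
For instance, a hypothetical subclass favoring the longest of the two
concurrently written values might look roughly like this (a sketch, not part
of the package)::

    import zope.minmax

    class Longest(zope.minmax.AbstractValue):
        """Resolve ZODB conflicts by keeping the longest value."""

        def _p_resolveConflict(self, old, committed, new):
            # As with Maximum and Minimum, the old value is disregarded;
            # only the two concurrently written states compete.
            return max(committed, new, key=len)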
Maximum
=======
.. autoclass:: zope.minmax.Maximum
The :class:`zope.minmax.Maximum` class always resolves conflicts favoring the
maximum value. Let's instantiate one object and verify that it
satisfies the interface.
.. doctest::
>>> import zope.minmax
>>> import zope.interface.verify
>>> max_favored = zope.minmax.Maximum()
>>> zope.interface.verify.verifyObject(
... zope.minmax.interfaces.IAbstractValue, max_favored)
True
We can confirm that initially no value is set: the object evaluates as false
and its value is ``None``.
.. doctest::
>>> bool(max_favored)
False
>>> print(max_favored.value)
None
Now, we can store a new value in the object.
.. doctest::
>>> max_favored.value = 11
>>> print(max_favored.value)
11
>>> bool(max_favored)
True
Or we can use the methods.
.. doctest::
>>> max_favored.__setstate__(4532)
>>> max_favored.__getstate__()
4532
>>> print(max_favored.value)
4532
>>> bool(max_favored)
True
Do notice that assigning directly to the ``value`` attribute is the more
natural usage.
Minimum
=======
.. autoclass:: zope.minmax.Minimum
The :class:`zope.minmax.Minimum` class always resolves conflicts favoring the
minimum value. Again, we instantiate an object and verify that it
satisfies the interface.
.. doctest::
>>> min_favored = zope.minmax.Minimum()
>>> zope.interface.verify.verifyObject(
... zope.minmax.interfaces.IAbstractValue, min_favored)
True
Again, we confirm that initially no value is set.
.. doctest::
>>> bool(min_favored)
False
>>> print(min_favored.value)
None
Let's populate this one too.
.. doctest::
>>> min_favored.value = 22
>>> print(min_favored.value)
22
>>> bool(min_favored)
True
Or we can use the methods, again.
.. doctest::
>>> min_favored.__setstate__(8796)
>>> min_favored.__getstate__()
8796
>>> print(min_favored.value)
8796
>>> bool(min_favored)
True
Please notice, again, that assigning directly to the ``value`` attribute is
the more natural usage.
Conflict Resolution
===================
Now, we need to exercise the conflict resolution interface.
First for the :class:`zope.minmax.Maximum`:
Let's try differing values larger than the old value.
.. doctest::
>>> max_favored._p_resolveConflict(max_favored.value, 4536, 4535)
4536
>>> max_favored._p_resolveConflict(max_favored.value, 4573, 4574)
4574
What happens when all the values are equal, including the old.
.. doctest::
>>> max_favored._p_resolveConflict(max_favored.value, 4532, 4532)
4532
Notice that when the old value is larger than both the committed and
new, it is still disregarded.
.. doctest::
>>> max_favored._p_resolveConflict(max_favored.value, 4531, 4530)
4531
Now, the :class:`zope.minmax.Minimum`:
Let's try differing values smaller than the old value.
.. doctest::
>>> min_favored._p_resolveConflict(min_favored.value, 8792, 8791)
8791
>>> min_favored._p_resolveConflict(min_favored.value, 8785, 8786)
8785
What happens when all the values are equal, including the old.
.. doctest::
>>> min_favored._p_resolveConflict(min_favored.value, 8796, 8796)
8796
Notice that when the old value is smaller than both the committed and
new, it is still disregarded.
.. doctest::
>>> min_favored._p_resolveConflict(min_favored.value, 8798, 8799)
8798
How about an example that is not numerical?
.. doctest::
>>> max_word = zope.minmax.Maximum('joy')
>>> print(max_word.value)
joy
>>> bool(max_word)
True
>>> max_word._p_resolveConflict(max_word.value, 'happiness', 'exuberance')
'happiness'
>>> max_word._p_resolveConflict(max_word.value, 'exuberance', 'happiness')
'happiness'
>>> min_word = zope.minmax.Minimum(max_word.value)
>>> print(min_word.value)
joy
>>> bool(min_word)
True
>>> min_word._p_resolveConflict(min_word.value, 'happiness', 'exuberance')
'exuberance'
>>> min_word._p_resolveConflict(min_word.value, 'exuberance', 'happiness')
'exuberance'
As indicated, we don't need to have numbers, just *homogeneous* items.
Strictly speaking, homogeneity is not enforced, but it makes no sense to
apply ``min()`` or ``max()`` to, say, one number and one string: the
ordering relations simply do not work on heterogeneous values.
| zope.minmax | /zope.minmax-2.3.tar.gz/zope.minmax-2.3/docs/narrative.rst | narrative.rst |
Changelog
=========
5.1.1 (2023-05-05)
------------------
- Make ``blob_dir`` parameter added in 5.1 optional.
(`#18 <https://github.com/zopefoundation/zope.mkzeoinstance/pull/18>`_)
5.1 (2023-04-28)
----------------
- Add configuration option ``-b`` resp. ``--blobs`` for passing blob directory
path. (`#16 <https://github.com/zopefoundation/zope.mkzeoinstance/pull/16>`_)
5.0 (2023-02-09)
----------------
- Drop support for Python 2.7, 3.4, 3.5, 3.6.
- Add support for Python 3.7, 3.8, 3.9, 3.10, 3.11.
- Drop support for running tests using ``python setup.py test``.
4.1 (2017-05-26)
----------------
- Fix generated ``runzeo`` and ``zeoctl`` scripts to run with ZEO 5.
4.0 (2017-02-28)
----------------
- 100% unit test coverage.
- Drop support for Python 2.6.
- Add support for Python 3.4, 3.5, and 3.6.
- Move dependency from ``ZODB3`` -> [``zdaemon``, ``ZODB``, ``ZEO``].
Even though this package doesn't actually import anything from the last
two, the generated instance won't be usable unless the host python
has them installed.
3.9.6 (2014-12-23)
------------------
- Add support for testing on Travis, and with tox.
3.9.5 (2011-10-31)
------------------
- Place the socket used by the ``zeoctl`` control process to communicate
  with its ``runzeo`` daemon in ``$INSTANCE_HOME/var``, instead of
  ``$INSTANCE_HOME/etc`` (which would ideally not be writable by the process).
See: https://bugs.launchpad.net/zope.mkzeoinstance/+bug/175981
3.9.4 (2010-04-22)
------------------
- Rename the script / package ``mkzeoinstance`` to avoid clashing with the
script bundled with ZODB.
- Add an option to spell the host interface to be listened on, as well as
  the port, in the generated ZEO server configuration. Thanks to Igor Stroh
for the patch. See: https://bugs.launchpad.net/zodb/+bug/143361
- Fix generated templates to cope with the move of ``zdaemon`` code into
its own project.
- Fork from the version of the ``mkzeoinst`` script contained in
ZODB 3.9.4.
| zope.mkzeoinstance | /zope.mkzeoinstance-5.1.1.tar.gz/zope.mkzeoinstance-5.1.1/CHANGES.rst | CHANGES.rst |
# WARNING! Several templates and functions here are reused by ZRS.
# So be careful with changes.
import argparse
import os
import stat
import sys
import zdaemon
import ZODB
PROGRAM = os.path.basename(sys.argv[0])
ZEO_CONF_TEMPLATE = """\
# ZEO configuration file
%%define INSTANCE %(instance_home)s
<zeo>
address %(address)s
read-only false
invalidation-queue-size 100
# pid-filename $INSTANCE/var/ZEO.pid
# monitor-address PORT
# transaction-timeout SECONDS
</zeo>
<filestorage 1>
path $INSTANCE/var/Data.fs
%(blob_dir)s
</filestorage>
<eventlog>
level info
<logfile>
path $INSTANCE/log/zeo.log
</logfile>
</eventlog>
<runner>
program $INSTANCE/bin/runzeo
socket-name $INSTANCE/var/%(package)s.zdsock
daemon true
forever false
backoff-limit 10
exit-codes 0, 2
directory $INSTANCE
default-to-interactive true
# user zope
python %(python)s
zdrun %(zdaemon_home)s/zdaemon/zdrun.py
# This logfile should match the one in the %(package)s.conf file.
# It is used by zdctl's logtail command, zdrun/zdctl doesn't write it.
logfile $INSTANCE/log/%(package)s.log
</runner>
"""
ZEOCTL_TEMPLATE = """\
#!/bin/sh
# %(PACKAGE)s instance control script
# The following two lines are for chkconfig. On Red Hat Linux (and
# some other systems), you can copy or symlink this script into
# /etc/rc.d/init.d/ and then use chkconfig(8) to automatically start
# %(PACKAGE)s at boot time.
# chkconfig: 345 90 10
# description: start a %(PACKAGE)s server
PYTHON="%(python)s"
INSTANCE_HOME="%(instance_home)s"
ZODB3_HOME="%(zodb_home)s"
CONFIG_FILE="%(instance_home)s/etc/%(package)s.conf"
PYTHONPATH="$ZODB3_HOME"
export PYTHONPATH INSTANCE_HOME
exec "$PYTHON" -m ZEO.zeoctl -C "$CONFIG_FILE" ${1+"$@"}
"""
RUNZEO_TEMPLATE = """\
#!/bin/sh
# %(PACKAGE)s instance start script
PYTHON="%(python)s"
INSTANCE_HOME="%(instance_home)s"
ZODB3_HOME="%(zodb_home)s"
CONFIG_FILE="%(instance_home)s/etc/%(package)s.conf"
PYTHONPATH="$ZODB3_HOME"
export PYTHONPATH INSTANCE_HOME
exec "$PYTHON" -m ZEO.runzeo -C "$CONFIG_FILE" ${1+"$@"}
"""
ZEO_DEFAULT_BLOB_DIR = '$INSTANCE/var/blobs'
def print_(msg, *args, **kw):
if args:
msg = msg % args
if kw:
msg = msg % kw
if not isinstance(msg, str):
msg = msg.decode('utf8')
sys.stdout.write('%s\n' % msg)
def usage(msg='', rc=1,
exit=sys.exit, # testing hook
):
if not isinstance(msg, str):
msg = str(msg)
print_(__doc__, program=PROGRAM)
if msg:
print_(msg)
exit(rc)
class ZEOInstanceBuilder:
def get_params(self, zodb_home, zdaemon_home,
instance_home, address, blob_dir):
return {
"package": "zeo",
"PACKAGE": "ZEO",
"zodb_home": zodb_home,
"zdaemon_home": zdaemon_home,
"instance_home": instance_home,
"blob_dir": f"blob-dir {blob_dir}" if blob_dir else "",
"address": address,
"python": sys.executable,
}
def create(self, home, params):
makedir(home)
makedir(home, "etc")
makedir(home, "var")
makedir(home, "log")
makedir(home, "bin")
# Create dir only when default is selected
if ZEO_DEFAULT_BLOB_DIR in params.setdefault('blob_dir', ''):
makedir(home, "var/blobs")
makefile(ZEO_CONF_TEMPLATE, home, "etc", "zeo.conf", **params)
makexfile(ZEOCTL_TEMPLATE, home, "bin", "zeoctl", **params)
makexfile(RUNZEO_TEMPLATE, home, "bin", "runzeo", **params)
def run(self, argv,
usage=usage, # testing hook
):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('instance_home', nargs='?', default=None)
parser.add_argument('addr_string', nargs='?', default='9999')
parser.add_argument('-h', '--help', action='store_true')
parser.add_argument('-b', '--blobs', required=False, default=None,
const=ZEO_DEFAULT_BLOB_DIR, nargs='?')
parsed_args, unknown_args = parser.parse_known_args(argv)
if len(unknown_args) > 0:
usage(rc=1)
if parsed_args.help:
usage(rc=2)
elif parsed_args.instance_home is None:
usage(rc=1)
instance_home = os.path.abspath(parsed_args.instance_home)
zodb_home = os.path.split(ZODB.__path__[0])[0]
zdaemon_home = os.path.split(zdaemon.__path__[0])[0]
addr_string = parsed_args.addr_string
if ':' in addr_string:
host, port = addr_string.split(':', 1)
address = host + ':' + port
elif addr_string.isdigit():
address = int(addr_string)
else:
usage(rc=1)
blob_dir = parsed_args.blobs if parsed_args.blobs else None
params = self.get_params(
zodb_home, zdaemon_home, instance_home, address, blob_dir)
self.create(instance_home, params)
def makedir(*args):
path = ""
for arg in args:
path = os.path.join(path, arg)
mkdirs(path)
return path
def mkdirs(path):
if os.path.isdir(path):
return
head, tail = os.path.split(path)
if head and tail and not os.path.isdir(head):
mkdirs(head)
os.mkdir(path)
print_("Created directory %s", path)
def makefile(template, *args, **kwds):
path = makedir(*args[:-1])
path = os.path.join(path, args[-1])
data = template % kwds
if os.path.exists(path):
with open(path) as f:
olddata = f.read().strip()
if olddata:
if olddata != data.strip():
print_("Warning: not overwriting existing file %s", path)
return path
with open(path, "w") as f:
f.write(data)
print_("Wrote file %s", path)
return path
def makexfile(template, *args, **kwds):
path = makefile(template, *args, **kwds)
umask = os.umask(0o022)
os.umask(umask)
mode = 0o0777 & ~umask
if stat.S_IMODE(os.stat(path)[stat.ST_MODE]) != mode:
os.chmod(path, mode)
print_("Changed mode for %s to %o", path, mode)
return path
def main(): # pragma: nocover
ZEOInstanceBuilder().run(sys.argv[1:])
print_("All done.") | zope.mkzeoinstance | /zope.mkzeoinstance-5.1.1.tar.gz/zope.mkzeoinstance-5.1.1/src/zope/mkzeoinstance/__init__.py | __init__.py |
``zope.optionalextension`` README
=================================
This package provides a distutils extension for building optional C
extensions. It is intended for use in projects which have a Python reference
implementation of one or more features, and which can still function when
the C extensions fail to compile.
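In such a project the importing code typically falls back to the pure Python
implementation when the C extension could not be built. A minimal sketch of
that pattern (the module and function names below are purely illustrative)::

    try:
        # Fast C implementation, present only if the optional extension
        # compiled successfully.
        from your.package._speedups import frobnicate
    except ImportError:
        # Pure Python reference implementation; always available.
        from your.package._frobnicate import frobnicate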
Using the Command with bare ``distutils``
-----------------------------------------
In the ``setup.py`` for your package::
from distutils.core import setup
setup(name='your.package',
...
command_packages = ['zope.optionalextension',
'distutils.command',
]
...
)
With bare ``distutils``, you need to ensure yourself that
``zope.optionalextension`` is installed first.
Using the Command with bare ``setuptools``
------------------------------------------
In the ``setup.py`` for your package::
from setuptools import setup
setup(name='your.package',
...
setup_requires=['zope.optionalextension'],
command_packages=['zope.optionalextension',
'distutils.command',
]
...
)
| zope.optionalextension | /zope.optionalextension-1.1.tar.gz/zope.optionalextension-1.1/README.txt | README.txt |
==============
Output Checker
==============
The output checker normalizes HTML markup and applies regular-expression
replacements. I have already set up a checker for this file, so let's check
it out.
>>> print('<p id="greet" class="highlight">Hi</p>')
<p class="highlight" id="greet">Hi</p>
You can see that the order of the attributes does not matter. Next, a regular
expression replacement:
>>> print('Good morning Stephan!')
Guten Tag Stephan!
We can also combine the two:
>>> print('<p id="greet" class="highlight">Good morning Stephan!</p>')
<p class="highlight" id="greet">Guten Tag Stephan!</p>
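For reference, a checker along these lines can be built from the
``OutputChecker`` class and a list of ``(compiled regex, replacement)``
pairs. The exact patterns used for this file live in its test setup, so the
values below are only illustrative::

    import doctest
    import re

    from zope.outputchecker.checker import OutputChecker

    checker = OutputChecker(
        doctest=doctest,
        patterns=[
            (re.compile(r'Good morning'), 'Guten Tag'),
        ])

    # Pass ``checker=checker`` to doctest.DocFileSuite() when building the
    # test suite that runs this file.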
| zope.outputchecker | /zope.outputchecker-1.0.0.zip/zope.outputchecker-1.0.0/src/zope/outputchecker/README.txt | README.txt |
"""Custom Output Checker
"""
import doctest as pythondoctest
import re
import lxml.etree
import lxml.doctestcompare
from lxml.doctestcompare import LHTMLOutputChecker
from zope.testing.renormalizing import RENormalizing
class OutputChecker(LHTMLOutputChecker, RENormalizing):
"""Doctest output checker which is better equippied to identify
HTML markup than the checker from the ``lxml.doctestcompare``
module. It also uses the text comparison function from the
built-in ``doctest`` module to allow the use of ellipsis.
    It also supports the pattern-based replacements of ``RENormalizing``.
"""
_repr_re = re.compile(
r'^<([A-Z]|[^>]+ (at|object) |[a-z]+ \'[A-Za-z0-9_.]+\'>)')
def __init__(self, doctest=pythondoctest, patterns=()):
RENormalizing.__init__(self, patterns)
self.doctest = doctest
# make sure these optionflags are registered
doctest.register_optionflag('PARSE_HTML')
doctest.register_optionflag('PARSE_XML')
doctest.register_optionflag('NOPARSE_MARKUP')
def _looks_like_markup(self, s):
s = s.replace('<BLANKLINE>', '\n').strip()
return (s.startswith('<')
and not self._repr_re.search(s))
def text_compare(self, want, got, strip):
if want is None:
want = ""
if got is None:
got = ""
checker = self.doctest.OutputChecker()
return checker.check_output(
want, got, self.doctest.ELLIPSIS|self.doctest.NORMALIZE_WHITESPACE)
def check_output(self, want, got, optionflags):
if got == want:
return True
for transformer in self.transformers:
want = transformer(want)
got = transformer(got)
return LHTMLOutputChecker.check_output(self, want, got, optionflags)
def output_difference(self, example, got, optionflags):
want = example.want
if not want.strip():
return LHTMLOutputChecker.output_difference(
self, example, got, optionflags)
# Dang, this isn't as easy to override as we might wish
original = want
for transformer in self.transformers:
want = transformer(want)
got = transformer(got)
# temporarily hack example with normalized want:
example.want = want
result = LHTMLOutputChecker.output_difference(
self, example, got, optionflags)
example.want = original
return result
def get_parser(self, want, got, optionflags):
NOPARSE_MARKUP = self.doctest.OPTIONFLAGS_BY_NAME.get(
"NOPARSE_MARKUP", 0)
PARSE_HTML = self.doctest.OPTIONFLAGS_BY_NAME.get(
"PARSE_HTML", 0)
PARSE_XML = self.doctest.OPTIONFLAGS_BY_NAME.get(
"PARSE_XML", 0)
parser = None
if NOPARSE_MARKUP & optionflags:
return None
if PARSE_HTML & optionflags:
parser = lxml.doctestcompare.html_fromstring
elif PARSE_XML & optionflags:
parser = lxml.etree.XML
elif (want.strip().lower().startswith('<html')
and got.strip().startswith('<html')):
parser = lxml.doctestcompare.html_fromstring
elif (self._looks_like_markup(want)
and self._looks_like_markup(got)):
parser = self.get_default_parser()
return parser | zope.outputchecker | /zope.outputchecker-1.0.0.zip/zope.outputchecker-1.0.0/src/zope/outputchecker/checker.py | checker.py |
=========
Changes
=========
5.0 (2023-02-07)
================
- Add support for ``zope.untrustedpython`` on Python 3. With it, Python
expressions are now protected. It is activated using the ``untrusted`` extra.
- Add support for Python 3.11.
- Drop support for Python 2.7, 3.5, 3.6.
4.6.0 (2021-11-04)
==================
- Avoid traceback reference cycle in ``PageTemplate._cook``.
- Add support for Python 3.9 and 3.10.
4.5.0 (2020-02-10)
==================
- Add support for Python 3.8.
- Drop support for Python 3.4.
4.4.1 (2018-10-16)
==================
- Fix DeprecationWarnings for ``ComponentLookupError`` by
importing them from ``zope.interface.interfaces``. See `issue 17
<https://github.com/zopefoundation/zope.pagetemplate/issues/17>`_.
4.4 (2018-10-05)
================
- Add support for Python 3.7.
- Host documentation at https://zopepagetemplate.readthedocs.io/
4.3.0 (2017-09-04)
==================
- Add support for Python 3.5 and 3.6.
- Drop support for Python 2.6, 3.2 and 3.3.
- Certain internal test support objects in the ``tests`` package were
removed or modified.
- The ``TraversableModuleImporter`` properly turns ``ImportError``
into ``TraversalError``. Previously it was catching ``KeyError``,
which cannot be raised.
- Reach 100% code coverage and maintain it through automated testing.
4.2.1 (2015-06-06)
==================
- Add support for Python 3.2.
4.2.0 (2015-06-02)
==================
- Allow short-circuit traversal for non-proxied dict subclasses. See:
https://github.com/zopefoundation/zope.pagetemplate/pull/3 .
- Add support for PyPy / PyPy3.
4.1.0 (2014-12-27)
==================
- Add support for Python 3.4.
- Add support for testing on Travis.
4.0.4 (2013-03-15)
==================
- Ensure that ``ZopePythonExpr`` and ``PythonExpr`` are separate classes even
when ``zope.untrustedpython`` is not available. Fixes a ZCML conflict error
in ``zope.app.pagetemplate``.
4.0.3 (2013-02-28)
==================
- Only allow ``zope.untrustedpython`` to be a dependency in Python 2.
- Fix buildout to work properly.
4.0.2 (2013-02-22)
==================
- Migrate from ``zope.security.untrustedpython`` to ``zope.untrustedpython``.
- Make ``zope.untrustedpython`` an extra dependency. Without it, python
expressions are not protected, even though path expressions are still
security wrapped.
- Add support for Python 3.3.
4.0.1 (2012-01-23)
==================
- LP#732972: PageTemplateTracebackSupplement no longer passes
``check_macro_expansion=False`` to old templates which do not
accept this argument.
4.0.0 (2012-12-13)
==================
- Replace deprecated ``zope.interface.classProvides`` usage with equivalent
``zope.interface.provider`` decorator.
- Replace deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Drop support for Python 2.4 and 2.5.
- PageTemplate.pt_render() has a new argument, ``check_macro_expansion``,
defaulting to True.
- PageTemplateTracebackSupplement passes ``check_macro_expansion=False``, to
avoid LP#732972.
3.6.3 (2011-09-21)
==================
- Fix test assertions to be compatible with ``zope.tal`` 3.6.
3.6.2 (2011-09-21)
==================
- Change interface for engine and program such that the return type of
the ``cook`` method is a tuple ``(program, macros)``. This follows
the interface for the TAL parser's ``getCode`` method.
Fixes a legacy compatibility issue where code would expect an
``_v_macros`` volatile attribute which was missing.
3.6.1 (2011-08-23)
==================
- Fix issue with missing default value for ``strictinsert``.
3.6.0 (2011-08-20)
==================
- Replace StringIO stream class with a faster list-based implementation.
- Abstract out the template engine and program interfaces and allow
implementation replacement via a utility registration.
- Remove ancient copyright from test files (LP: #607228)
3.5.2 (2010-07-08)
==================
- Fix ``PTRuntimeError`` exception messages to be consistent across Python
  versions, and compatible with the output under Python 2.4. (More
readable than the previous output under Python 2.6 as well.)
3.5.1 (2010-04-30)
==================
- Remove use of ``zope.testing.doctestunit`` in favor of stdlib's doctest.
- Add dependency on "zope.security [untrustedpython]" because the ``engine``
module uses it.
3.5.0 (2009-05-25)
==================
- Add test coverage reporting support.
- Move 'engine' module and related test scaffolding here from
``zope.app.pagetemplate`` package.
3.4.2 (2009-03-17)
==================
- Remove old zpkg-related DEPENDENCIES.cfg file.
- Change package's mailing list address to zope-dev at zope.org, as
zope3-dev at zope.org is now retired.
- Change `cheeseshop` to `pypi` in the packages' homepage url.
3.4.1 (2009-01-27)
==================
- Fix test due to recent changes in zope.tal.
3.4.0 (2007-10-02)
==================
- Initial release independent of the Zope 3 tree.
3.2.0 (2006-01-05)
==================
- Corresponds to the version of the zope.pagetemplate package shipped
as part of the Zope 3.2.0 release.
- ZPTPage macro expansion: changed label text to match the corresponding
label in Zope 2 and activated the name spaces for macro expansion
in 'read'. See http://www.zope.org/Collectors/Zope3-dev/199
- Coding style cleanups.
3.1.0 (2005-10-03)
==================
- Corresponds to the version of the zope.pagetemplate package shipped
as part of the Zope 3.1.0 release.
- Fixed apidoc and Cookie, which were using wrong descriptor class
(changed to 'property'). See http://www.zope.org/Collectors/Zope3-dev/387
- Documentation / style / testing cleanups.
3.0.0 (2004-11-07)
==================
- Corresponds to the version of the zope.pagetemplate package shipped
as part of the Zope X3.0.0 release.
| zope.pagetemplate | /zope.pagetemplate-5.0.tar.gz/zope.pagetemplate-5.0/CHANGES.rst | CHANGES.rst |
===================
zope.pagetemplate
===================
.. image:: https://img.shields.io/pypi/v/zope.pagetemplate.svg
:target: https://pypi.python.org/pypi/zope.pagetemplate/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/zope.pagetemplate.svg
:target: https://pypi.org/project/zope.pagetemplate/
:alt: Supported Python versions
.. image:: https://github.com/zopefoundation/zope.pagetemplate/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zope.pagetemplate/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/zope.pagetemplate/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zope.pagetemplate?branch=master
.. image:: https://readthedocs.org/projects/zopepagetemplate/badge/?version=latest
:target: https://zopepagetemplate.readthedocs.org/en/latest/
:alt: Documentation Status
Page Templates provide an elegant templating mechanism that achieves a
clean separation of presentation and application logic while allowing
for designers to work with templates in their visual editing tools
(FrontPage, Dreamweaver, GoLive, etc.).
Page Templates are based on `a Template Attribute Language
<https://pypi.python.org/pypi/zope.tal>`_ with expressions provided by
`TALES <https://pypi.python.org/pypi/zope.tales>`_. For a description
of their syntax, see `the reference documentation
<https://pagetemplates.readthedocs.io/en/latest/>`_.
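A minimal, illustrative use of the package looks roughly like this (keyword
arguments passed when calling a template are exposed to TALES as the
top-level ``options`` variable)::

    from zope.pagetemplate.pagetemplate import PageTemplate

    template = PageTemplate()
    template.write('<p tal:content="options/name">name goes here</p>')
    print(template(name='World'))   # prints something like <p>World</p>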
For detailed documentation on the usage of this package, see
https://zopepagetemplate.readthedocs.io
| zope.pagetemplate | /zope.pagetemplate-5.0.tar.gz/zope.pagetemplate-5.0/README.rst | README.rst |
"""Interface that describes the 'macros' attribute of a PageTemplate.
"""
from zope.interface import Attribute
from zope.interface import Interface
class IPageTemplate(Interface):
"""Objects that can render page templates
"""
def __call__(*args, **kw):
"""Render a page template
The argument handling is specific to particular
implementations. Normally, however, positional arguments are
bound to the top-level ``args`` variable and keyword arguments
are bound to the top-level ``options`` variable.
"""
def pt_edit(source, content_type):
"""Set the source and content type
"""
def pt_errors(namespace):
"""Return a sequence of strings that describe errors in the template.
The errors may occur when the template is compiled or
rendered.
*namespace* is the set of names passed to the TALES expression
evaluator, similar to what's returned by pt_getContext().
This can be used to let a template author know what went wrong
when an attempt was made to render the template.
"""
def read():
"""Get the template source
"""
macros = Attribute(
"An object that implements the ``__getitem__`` "
"protocol (e.g., a :class:`dict`), containing page template macros.")
class IPageTemplateSubclassing(IPageTemplate):
"""Behavior that may be overridden or used by subclasses
"""
def pt_getContext(**kw):
"""Compute a dictionary of top-level template names
Responsible for returning the set of
top-level names supported in path expressions
"""
def pt_getEngine():
"""Returns the TALES expression evaluator.
"""
def pt_getEngineContext(namespace):
"""Return an execution context from the expression engine."""
def __call__(*args, **kw):
"""Render a page template
This is sometimes overridden to provide additional argument
binding.
"""
def pt_source_file():
"""return some text describing where a bit of ZPT code came from.
This could be a file path, a object path, etc.
"""
def _cook():
"""Compile the source
Results are saved in the variables: ``_v_errors``, ``_v_warnings``,
``_v_program``, and ``_v_macros``, and the flag ``_v_cooked`` is set.
"""
def _cook_check():
"""Compiles the source if necessary
Subclasses might override this to influence the decision about
whether compilation is necessary.
"""
content_type = Attribute("The content-type of the generated output")
expand = Attribute(
"Flag indicating whether the read method should expand macros")
class IPageTemplateEngine(Interface):
"""Template engine implementation.
The engine must provide a ``cook`` method to return a cooked
template program from a source input.
"""
def cook(source_file, text, engine, content_type):
"""Parse text and return prepared template program and macros.
Note that while *source_file* is provided to name the source
of the input *text*, it should not be relied on to be an
actual filename (it may be an application-specific, virtual
path).
The return type is a tuple ``(program, macros)``.
"""
class IPageTemplateProgram(Interface):
"""Cooked template program."""
def __call__(
context, macros, debug=0, wrap=60, metal=1, tal=1, showtal=-1,
strictinsert=1, stackLimit=100, i18nInterpolate=1,
sourceAnnotations=0):
"""
Render template in the provided template *context*.
Optional arguments:
:keyword bool debug: enable debugging output to sys.stderr (off by
default).
:keyword int wrap: try to wrap attributes on opening tags to this
number of column (default: 60).
:keyword bool metal: enable METAL macro processing (on by default).
:keyword bool tal: enable TAL processing (on by default).
:keyword int showtal: do not strip away TAL directives. A special
value of -1 (which is the default setting) enables showtal when TAL
processing is disabled, and disables showtal when TAL processing is
enabled. Note that you must use 0, 1, or -1; true boolean values
are not supported (for historical reasons).
:keyword bool strictinsert: enable TAL processing and stricter HTML/XML
checking on text produced by structure inserts (on by default).
Note that Zope turns this value off by default.
:keyword int stackLimit: set macro nesting limit (default: 100).
:keyword bool i18nInterpolate: enable i18n translations (default: on).
:keyword bool sourceAnnotations: enable source annotations with HTML
comments (default: off).
""" | zope.pagetemplate | /zope.pagetemplate-5.0.tar.gz/zope.pagetemplate-5.0/src/zope/pagetemplate/interfaces.py | interfaces.py |
__docformat__ = 'restructuredtext'
import sys
from zope.i18n import translate
from zope.interface import implementer
from zope.interface.interfaces import ComponentLookupError
from zope.proxy import isProxy
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
from zope.traversing.adapters import traversePathElement
from zope.traversing.interfaces import IPathAdapter
from zope.traversing.interfaces import ITraversable
from zope.traversing.interfaces import TraversalError
from zope import component
try:
# The ``untrusted`` extra is needed to have zope.untrustedpython:
from zope.untrustedpython import rcompile
from zope.untrustedpython.builtins import SafeBuiltins
def guarded_getitem(ob, index):
"""getitem access which gets guarded in the next line."""
return ob[index]
guarded_getitem = ProxyFactory(guarded_getitem)
HAVE_UNTRUSTED = True
except ImportError: # pragma: no cover
HAVE_UNTRUSTED = False
# PyPy doesn't support assigning to '__builtins__', even when using eval()
# (http://pypy.readthedocs.org/en/latest/cpython_differences.html), so don't
# try to use it. It won't work.
if HAVE_UNTRUSTED: # pragma: no cover
import platform
if platform.python_implementation() == 'PyPy':
HAVE_UNTRUSTED = False
del rcompile
del SafeBuiltins
from zope.tales.expressions import DeferExpr
from zope.tales.expressions import NotExpr
from zope.tales.expressions import PathExpr
from zope.tales.expressions import SimpleModuleImporter
from zope.tales.expressions import StringExpr
from zope.tales.pythonexpr import PythonExpr
from zope.tales.tales import Context
from zope.tales.tales import ExpressionEngine
from zope.pagetemplate.i18n import ZopeMessageFactory as _
class InlineCodeError(Exception):
pass
class ZopeTraverser:
def __init__(self, proxify=None):
if proxify is None:
self.proxify = lambda x: x
else:
self.proxify = proxify
def __call__(self, object, path_items, econtext):
"""Traverses a sequence of names, first trying attributes then items.
"""
request = getattr(econtext, 'request', None)
path_items = list(path_items)
path_items.reverse()
while path_items:
name = path_items.pop()
# special-case dicts for performance reasons
if getattr(object, '__class__', None) == dict:
object = object[name]
elif isinstance(object, dict) and not isProxy(object):
object = object[name]
else:
object = traversePathElement(object, name, path_items,
request=request)
object = self.proxify(object)
return object
zopeTraverser = ZopeTraverser(ProxyFactory)
class ZopePathExpr(PathExpr):
def __init__(self, name, expr, engine):
super().__init__(name, expr, engine, zopeTraverser)
trustedZopeTraverser = ZopeTraverser()
class TrustedZopePathExpr(PathExpr):
def __init__(self, name, expr, engine):
super().__init__(name, expr, engine, trustedZopeTraverser)
# Create a version of the restricted built-ins that uses a safe
# version of getattr() that wraps values in security proxies where
# appropriate:
class ZopePythonExpr(PythonExpr):
if HAVE_UNTRUSTED:
def __call__(self, econtext):
__traceback_info__ = self.text
vars = self._bind_used_names(econtext, SafeBuiltins)
vars['_getattr_'] = SafeBuiltins.getattr
vars['_getitem_'] = guarded_getitem
return eval(self._code, vars)
def _compile(self, text, filename):
return rcompile.compile(text, filename, 'eval')
def _get_iinterpreter():
from zope.app.interpreter.interfaces import IInterpreter
return IInterpreter # pragma: no cover
class ZopeContextBase(Context):
"""Base class for both trusted and untrusted evaluation contexts."""
request = None
def translate(self, msgid, domain=None, mapping=None, default=None):
return translate(msgid, domain, mapping,
context=self.request, default=default)
evaluateInlineCode = False
def evaluateCode(self, lang, code):
if not self.evaluateInlineCode:
raise InlineCodeError(
_('Inline Code Evaluation is deactivated, which means that '
'you cannot have inline code snippets in your Page '
'Template. Activate Inline Code Evaluation and try again.'))
# TODO This is only needed when self.evaluateInlineCode is true,
# so should only be needed for zope.app.pythonpage.
IInterpreter = _get_iinterpreter()
interpreter = component.queryUtility(IInterpreter, lang)
if interpreter is None:
error = _('No interpreter named "${lang_name}" was found.',
mapping={'lang_name': lang})
raise InlineCodeError(error)
globs = self.vars.copy()
result = interpreter.evaluateRawCode(code, globs)
# Add possibly new global variables.
old_names = self.vars.keys()
for name, value in globs.items():
if name not in old_names:
self.setGlobal(name, value)
return result
class ZopeContext(ZopeContextBase):
"""Evaluation context for untrusted programs."""
def evaluateMacro(self, expr):
"""evaluateMacro gets security-proxied macro programs when this
is run with the zopeTraverser, and in other untrusted
situations. This will cause evaluation to fail in
zope.tal.talinterpreter, which knows nothing of security proxies.
Therefore, this method removes any proxy from the evaluated
expression.
>>> from zope.pagetemplate.engine import ZopeContext
>>> from zope.tales.tales import ExpressionEngine
>>> from zope.security.proxy import ProxyFactory
>>> output = [
... ('version', 'xxx'),
... ('mode', 'html'),
... ('other', 'things')]
>>> def expression(context):
... return ProxyFactory(output)
...
>>> zc = ZopeContext(ExpressionEngine, {})
>>> out = zc.evaluateMacro(expression)
>>> type(out) is list
True
The method does some trivial checking to make sure we are getting
back a macro like we expect: it must be a sequence of sequences, in
which the first sequence must start with 'version', and the second
must start with 'mode'.
>>> del output[0]
>>> zc.evaluateMacro(expression) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ('unexpected result from macro evaluation.', ...)
>>> del output[:]
>>> zc.evaluateMacro(expression) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ('unexpected result from macro evaluation.', ...)
>>> output = None
>>> zc.evaluateMacro(expression) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ('unexpected result from macro evaluation.', ...)
"""
macro = removeSecurityProxy(Context.evaluateMacro(self, expr))
# we'll do some basic checks that it is the sort of thing we expect
problem = False
try:
problem = macro[0][0] != 'version' or macro[1][0] != 'mode'
except (TypeError, IndexError):
problem = True
if problem:
raise ValueError('unexpected result from macro evaluation.', macro)
return macro
def setContext(self, name, value):
# Hook to allow subclasses to do things like adding security proxies
Context.setContext(self, name, ProxyFactory(value))
class TrustedZopeContext(ZopeContextBase):
"""Evaluation context for trusted programs."""
class AdapterNamespaces:
"""Simulate tales function namespaces with adapter lookup.
When we are asked for a namespace, we return an object that
actually computes an adapter when called:
To demonstrate this, we need to register an adapter:
>>> from zope.component.testing import setUp, tearDown
>>> setUp()
>>> from zope.component import provideAdapter
>>> def adapter1(ob):
... return 1
>>> adapter1.__component_adapts__ = (None,)
>>> from zope.traversing.interfaces import IPathAdapter
>>> provideAdapter(adapter1, None, IPathAdapter, 'a1')
Now, with this adapter in place, we can try out the namespaces:
>>> ob = object()
>>> from zope.pagetemplate.engine import AdapterNamespaces
>>> namespaces = AdapterNamespaces()
>>> namespace = namespaces['a1']
>>> namespace(ob)
1
>>> namespace = namespaces['a2']
>>> namespace(ob)
Traceback (most recent call last):
...
KeyError: 'a2'
Cleanup:
>>> tearDown()
"""
def __init__(self):
self.namespaces = {}
def __getitem__(self, name):
namespace = self.namespaces.get(name)
if namespace is None:
def namespace(object):
try:
return component.getAdapter(object, IPathAdapter, name)
except ComponentLookupError:
raise KeyError(name)
self.namespaces[name] = namespace
return namespace
class ZopeBaseEngine(ExpressionEngine):
_create_context = ZopeContext
def __init__(self):
ExpressionEngine.__init__(self)
self.namespaces = AdapterNamespaces()
def getContext(self, __namespace=None, **namespace):
if __namespace:
if namespace:
namespace.update(__namespace)
else:
namespace = __namespace
context = self._create_context(self, namespace)
# Put request into context so path traversal can find it
if 'request' in namespace:
context.request = namespace['request']
# Put context into context so path traversal can find it
if 'context' in namespace:
context.context = namespace['context']
return context
class ZopeEngine(ZopeBaseEngine):
"""
Untrusted expression engine.
This engine does not allow modules to be imported; only modules
already available may be accessed::
>>> from zope.pagetemplate.engine import _Engine
>>> modname = 'zope.pagetemplate.tests.trusted'
>>> engine = _Engine()
>>> context = engine.getContext(engine.getBaseNames())
>>> modname in sys.modules
False
>>> context.evaluate('modules/' + modname)
Traceback (most recent call last):
...
KeyError: 'zope.pagetemplate.tests.trusted'
(The use of ``KeyError`` is an unfortunate implementation detail; I
think this should be a ``TraversalError``.)
Modules which have already been imported by trusted code are
available, wrapped in security proxies::
>>> m = context.evaluate('modules/sys')
>>> m.__name__
'sys'
>>> m._getframe
Traceback (most recent call last):
...
zope.security.interfaces.ForbiddenAttribute: ('_getframe', <module 'sys' (built-in)>)
The results of Python expressions evaluated by this engine are
wrapped in security proxies if the 'untrusted' extra is installed::
>>> r = context.evaluate('python: {12: object()}.values')
>>> str(type(r).__name__) if HAVE_UNTRUSTED else '_Proxy'
'_Proxy'
>>> ((str(type(r).__name__) in ('method', 'instancemethod'))
... if not HAVE_UNTRUSTED else True)
True
>>> r = context.evaluate('python: {12: (1, 2, 3)}[12]')
>>> str(type(r).__name__) if HAVE_UNTRUSTED else '_Proxy'
'_Proxy'
General path expressions provide objects that are wrapped in
security proxies as well::
>>> from zope.component.testing import setUp, tearDown
>>> from zope.security.checker import NamesChecker, defineChecker
>>> @implementer(ITraversable)
... class Container(dict):
... def traverse(self, name, further_path):
... return self[name]
>>> setUp()
>>> defineChecker(Container, NamesChecker(['traverse']))
>>> d = engine.getBaseNames()
>>> foo = Container()
>>> foo.__name__ = 'foo'
>>> d['foo'] = ProxyFactory(foo)
>>> foo['bar'] = bar = Container()
>>> bar.__name__ = 'bar'
>>> bar.__parent__ = foo
>>> bar['baz'] = baz = Container()
>>> baz.__name__ = 'baz'
>>> baz.__parent__ = bar
>>> context = engine.getContext(d)
>>> o1 = context.evaluate('foo/bar')
>>> o1.__name__
'bar'
>>> type(o1)
<class 'zope.security._proxy._Proxy'>
>>> o2 = context.evaluate('foo/bar/baz')
>>> o2.__name__
'baz'
>>> type(o2)
<class 'zope.security._proxy._Proxy'>
>>> o3 = o2.__parent__
>>> type(o3)
<class 'zope.security._proxy._Proxy'>
>>> o1 == o3
True
>>> o1 is o2
False
Note that this engine special-cases dicts during path traversal:
for performance reasons it traverses only to their items, not to their
attributes (e.g. methods on dicts)::
>>> d = engine.getBaseNames()
>>> d['adict'] = {'items': 123}
>>> d['anotherdict'] = {}
>>> context = engine.getContext(d)
>>> context.evaluate('adict/items')
123
>>> context.evaluate('anotherdict/keys')
Traceback (most recent call last):
...
KeyError: 'keys'
This special-casing also applies to non-proxied dict subclasses::
>>> class TraverserDict(dict):
... def __init__(self):
... self.item_requested = None
... def __getitem__(self, item):
... self.item_requested = item
... return dict.__getitem__(self, item)
>>> d = engine.getBaseNames()
>>> foo = TraverserDict()
>>> d['foo'] = foo
>>> foo['bar'] = 'baz'
>>> context = engine.getContext(d)
>>> context.evaluate('foo/bar')
'baz'
>>> foo.item_requested
'bar'
>>> tearDown()
""" # noqa: E501 line too long
def getFunctionNamespace(self, namespacename):
""" Returns the function namespace """
return ProxyFactory(
super().getFunctionNamespace(namespacename))
class TrustedZopeEngine(ZopeBaseEngine):
"""
Trusted expression engine.
This engine allows modules to be imported::
>>> from zope.pagetemplate.engine import _TrustedEngine
>>> modname = 'zope.pagetemplate.tests.trusted'
>>> engine = _TrustedEngine()
>>> context = engine.getContext(engine.getBaseNames())
>>> modname in sys.modules
False
>>> m = context.evaluate('modules/' + modname)
>>> m.__name__ == modname
True
>>> modname in sys.modules
True
Since this is trusted code, we can look at whatever is in the
module, not just ``__name__`` or what's declared in a security
assertion::
>>> m.x
42
Clean up after ourselves::
>>> del sys.modules[modname]
"""
_create_context = TrustedZopeContext
@implementer(ITraversable)
class TraversableModuleImporter(SimpleModuleImporter):
def traverse(self, name, further_path):
try:
return self[name]
except ImportError:
raise TraversalError(self, name)
def _Engine(engine=None):
if engine is None:
engine = ZopeEngine()
engine = _create_base_engine(engine, ZopePathExpr)
engine.registerType('python', ZopePythonExpr)
# Using a proxy around sys.modules allows page templates to use
# modules for which security declarations have been made, but
# disallows execution of any import-time code for modules, which
# should not be allowed to happen during rendering.
engine.registerBaseName('modules', ProxyFactory(sys.modules))
return engine
def _TrustedEngine(engine=None):
if engine is None:
engine = TrustedZopeEngine()
engine = _create_base_engine(engine, TrustedZopePathExpr)
engine.registerType('python', PythonExpr)
engine.registerBaseName('modules', TraversableModuleImporter())
return engine
def _create_base_engine(engine, pathtype):
for pt in pathtype._default_type_names:
engine.registerType(pt, pathtype)
engine.registerType('string', StringExpr)
engine.registerType('not', NotExpr)
engine.registerType('defer', DeferExpr)
return engine
Engine = _Engine()
TrustedEngine = _TrustedEngine()
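# A minimal usage sketch (not part of the public API contract): evaluating a
# simple TALES expression with the untrusted ``Engine`` defined above.
#
#   context = Engine.getContext(Engine.getBaseNames())
#   context.evaluate('string:hello')  # should yield 'hello'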
class AppPT:
def pt_getEngine(self):
return Engine
class TrustedAppPT:
def pt_getEngine(self):
return TrustedEngine | zope.pagetemplate | /zope.pagetemplate-5.0.tar.gz/zope.pagetemplate-5.0/src/zope/pagetemplate/engine.py | engine.py |
__all__ = ("PageTemplateFile",)
import logging
import os
import re
import sys
from zope.pagetemplate.pagetemplate import PageTemplate
logger = logging.getLogger(__name__)
DEFAULT_ENCODING = "utf-8"
meta_pattern = re.compile(
br'\s*<meta\s+http-equiv=["\']?Content-Type["\']?'
br'\s+content=["\']?([^;]+);\s*charset=([^"\']+)["\']?\s*/?\s*>\s*',
re.IGNORECASE)
def package_home(gdict):
filename = gdict["__file__"]
return os.path.dirname(filename)
class PageTemplateFile(PageTemplate):
"Zope wrapper for filesystem Page Template using TAL, TALES, and METAL"
_v_last_read = 0
_v_debug = __debug__
def __init__(self, filename, _prefix=None):
path = self.get_path_from_prefix(_prefix)
self.filename = os.path.join(path, filename)
if not os.path.isfile(self.filename):
raise ValueError("No such file", self.filename)
def get_path_from_prefix(self, _prefix):
if isinstance(_prefix, str):
path = _prefix
else:
if _prefix is None:
_prefix = sys._getframe(2).f_globals
path = package_home(_prefix)
return path
def _prepare_html(self, text):
match = meta_pattern.search(text)
if match is not None:
type_, encoding = (x.decode('utf-8') for x in match.groups())
# TODO: Shouldn't <meta>/<?xml?> stripping
# be in PageTemplate.__call__()?
text = meta_pattern.sub(b"", text)
else:
type_ = None
encoding = DEFAULT_ENCODING
text = text.decode(encoding)
return text, type_
def _read_file(self):
__traceback_info__ = self.filename
with open(self.filename, "rb") as f:
text = f.read(XML_PREFIX_MAX_LENGTH)
type_ = sniff_type(text)
text += f.read()
if type_ != "text/xml":
text, type_ = self._prepare_html(text)
return text, type_
def _cook_check(self):
if self._v_last_read and not self._v_debug:
return
__traceback_info__ = self.filename
try:
mtime = os.path.getmtime(self.filename)
except OSError:
mtime = 0
if self._v_program is not None and mtime == self._v_last_read:
return
text, type_ = self._read_file()
self.pt_edit(text, type_)
assert self._v_cooked
if self._v_errors:
logger.error('PageTemplateFile: Error in template %s: %s',
self.filename, '\n'.join(self._v_errors))
return
self._v_last_read = mtime
def pt_source_file(self):
return self.filename
def __getstate__(self):
raise TypeError("non-picklable object")
XML_PREFIXES = [
b"<?xml", # ascii, utf-8
b"\xef\xbb\xbf<?xml", # utf-8 w/ byte order mark
b"\0<\0?\0x\0m\0l", # utf-16 big endian
b"<\0?\0x\0m\0l\0", # utf-16 little endian
b"\xfe\xff\0<\0?\0x\0m\0l", # utf-16 big endian w/ byte order mark
b"\xff\xfe<\0?\0x\0m\0l\0", # utf-16 little endian w/ byte order mark
]
XML_PREFIX_MAX_LENGTH = max(map(len, XML_PREFIXES))
def sniff_type(text):
"""Return 'text/xml' if text appears to be XML, otherwise return None."""
for prefix in XML_PREFIXES:
if text.startswith(prefix):
return "text/xml"
return None | zope.pagetemplate | /zope.pagetemplate-5.0.tar.gz/zope.pagetemplate-5.0/src/zope/pagetemplate/pagetemplatefile.py | pagetemplatefile.py |
import sys
from zope.component import queryUtility
from zope.interface import implementer
from zope.interface import provider
from zope.tal.htmltalparser import HTMLTALParser
from zope.tal.talgenerator import TALGenerator
from zope.tal.talinterpreter import TALInterpreter
from zope.tal.talparser import TALParser
from zope.tales.engine import Engine
from zope.pagetemplate.interfaces import IPageTemplateEngine
from zope.pagetemplate.interfaces import IPageTemplateProgram
from zope.pagetemplate.interfaces import IPageTemplateSubclassing
_default_options = {}
class StringIO(list):
# Unicode aware append-only version of StringIO.
write = list.append
def __init__(self, value=None):
list.__init__(self)
if value is not None:
self.append(value)
def getvalue(self):
return ''.join(self)
@implementer(IPageTemplateSubclassing)
class PageTemplate:
"""
Page Templates using TAL, TALES, and METAL.
**Subclassing**
This class implements :class:`~zope.pagetemplate.interfaces.IPageTemplateSubclassing`.
The following methods have certain internal responsibilities.
``pt_getContext(**keywords)``
Should ignore keyword arguments that it doesn't care about,
and construct the namespace passed to the TALES expression
engine. This method is free to use the keyword arguments it
receives.
``pt_render(namespace, source=False, sourceAnnotations=False, showtal=False)``
Responsible for invoking the TAL interpreter to perform the rendering. The
namespace argument is a mapping which defines the top-level
namespaces passed to the TALES expression engine.
``__call__(*args, **keywords)``
Calls pt_getContext() to construct the top-level namespace
passed to the TALES expression engine, then calls pt_render()
to perform the rendering.
""" # noqa: E501 line too long
_error_start = '<!-- Page Template Diagnostics'
_error_end = '-->'
_newline = '\n'
content_type = 'text/html'
expand = 1
_v_errors = ()
_v_cooked = 0
_v_macros = None
_v_program = None
_text = ''
@property
def macros(self):
self._cook_check()
return self._v_macros
def pt_edit(self, text, content_type):
if content_type:
self.content_type = str(content_type)
if hasattr(text, 'read'):
text = text.read()
self.write(text)
def pt_getContext(self, args=(), options=_default_options, **ignored):
rval = {'template': self,
'options': options,
'args': args,
'nothing': None,
}
rval.update(self.pt_getEngine().getBaseNames())
return rval
def __call__(self, *args, **kwargs):
return self.pt_render(self.pt_getContext(args, kwargs))
def pt_getEngineContext(self, namespace):
return self.pt_getEngine().getContext(namespace)
def pt_getEngine(self):
return Engine
def pt_render(self, namespace, source=False, sourceAnnotations=False,
showtal=False):
"""Render this Page Template"""
self._cook_check()
__traceback_supplement__ = (
PageTemplateTracebackSupplement, self, namespace
)
if self._v_errors:
raise PTRuntimeError(str(self._v_errors))
context = self.pt_getEngineContext(namespace)
return self._v_program(
context, self._v_macros, tal=not source, showtal=showtal,
strictinsert=0, sourceAnnotations=sourceAnnotations
)
def pt_errors(self, namespace, check_macro_expansion=True):
self._cook_check()
err = self._v_errors
if err:
return err
if check_macro_expansion:
try:
self.pt_render(namespace, source=1)
except Exception:
return (
'Macro expansion failed', '%s: %s' % sys.exc_info()[:2])
def _convert(self, string, text):
"""Adjust the string type to the type of text"""
if isinstance(text, bytes) and not isinstance(string, bytes):
return string.encode('utf-8')
if isinstance(text, str) and not isinstance(string, str):
return string.decode('utf-8')
return string
def write(self, text):
# We accept both, since the text can either come from a file (and the
# parser will take care of the encoding) or from a TTW template, in
# which case we already have unicode.
assert isinstance(text, (str, bytes))
def bs(s):
"""Bytes or str"""
return self._convert(s, text)
if text.startswith(bs(self._error_start)):
errend = text.find(bs(self._error_end))
if errend >= 0:
text = text[errend + 3:]
if text[:1] == bs(self._newline):
text = text[1:]
if self._text != text:
self._text = text
# Always cook on an update, even if the source is the same;
# the content-type might have changed.
self._cook()
def read(self, request=None):
"""Gets the source, sometimes with macros expanded."""
self._cook_check()
def bs(s):
"""Bytes or str"""
return self._convert(s, self._text)
if not self._v_errors:
if not self.expand:
return self._text
try:
# This gets called, if macro expansion is turned on.
# Note that an empty dictionary is fine for the context at
# this point, since we are not evaluating the template.
context = self.pt_getContext(self, request)
return self.pt_render(context, source=1)
except BaseException:
return (bs('%s\n Macro expansion failed\n %s\n-->\n' %
(self._error_start, "%s: %s" %
sys.exc_info()[:2])) + self._text)
return bs('{}\n {}\n-->\n{}'.format(
self._error_start, '\n'.join(self._v_errors), self._text))
def pt_source_file(self):
"""To be overridden."""
return None
def _cook_check(self):
if not self._v_cooked:
self._cook()
def _cook(self):
"""Compile the TAL and METAL statments.
Cooking must not fail due to compilation errors in templates.
"""
pt_engine = self.pt_getEngine()
source_file = self.pt_source_file()
self._v_errors = ()
try:
engine = queryUtility(
IPageTemplateEngine, default=PageTemplateEngine
)
self._v_program, self._v_macros = engine.cook(
source_file, self._text, pt_engine, self.content_type)
except BaseException:
etype, e = sys.exc_info()[:2]
try:
self._v_errors = [
"Compilation failed",
"{}.{}: {}".format(etype.__module__, etype.__name__, e)
]
finally:
del e
self._v_cooked = 1
class PTRuntimeError(RuntimeError):
'''The Page Template has template errors that prevent it from rendering.'''
pass
@implementer(IPageTemplateProgram)
@provider(IPageTemplateEngine)
class PageTemplateEngine:
"""
Page template engine that uses the TAL interpreter to render.
This class implements
:class:`zope.pagetemplate.interfaces.IPageTemplateProgram`.
"""
def __init__(self, program):
self.program = program
def __call__(self, context, macros, **options):
output = StringIO('')
interpreter = TALInterpreter(
self.program, macros, context,
stream=output, **options
)
interpreter()
return output.getvalue()
@classmethod
def cook(cls, source_file, text, engine, content_type):
if content_type == 'text/html':
gen = TALGenerator(engine, xml=0, source_file=source_file)
parser = HTMLTALParser(gen)
else:
gen = TALGenerator(engine, source_file=source_file)
parser = TALParser(gen)
parser.parseString(text)
program, macros = parser.getCode()
return cls(program), macros
# @implementer(ITracebackSupplement)
class PageTemplateTracebackSupplement:
def __init__(self, pt, namespace):
self.manageable_object = pt
self.warnings = []
try:
e = pt.pt_errors(namespace, check_macro_expansion=False)
except TypeError:
# Old page template.
e = pt.pt_errors(namespace)
if e:
self.warnings.extend(e) | zope.pagetemplate | /zope.pagetemplate-5.0.tar.gz/zope.pagetemplate-5.0/src/zope/pagetemplate/pagetemplate.py | pagetemplate.py |
===========
ZPT Usage
===========
This document focuses on usage of the Page Templates API.
For information about writing Page Templates documents and using TAL
and TALES, refer to the `reference documentation
<https://pagetemplates.readthedocs.io/en/latest/>`_ or the
`Chameleon documentation
<https://chameleon.readthedocs.io/en/latest/reference.html>`_ if you
are using z3c.ptcompat.
.. testsetup::
__file__ = 'docs/using.rst'
Simple Usage
============
Using Page Templates is easy and straightforward. Let's look at
a quick example. Suppose we have a file called ``hello_world.pt`` with
these contents:
.. literalinclude:: hello_world.pt
.. doctest::
>>> from zope.pagetemplate.pagetemplatefile import PageTemplateFile
>>> my_pt = PageTemplateFile('hello_world.pt')
>>> print(my_pt().strip())
<html><body>Hello World</body></html>
Subclassing PageTemplates
=========================
Let's say we want to alter page templates so that keyword arguments
appear as top level items in the namespace. We can subclass
:class:`~.PageTemplate` and alter the default behavior of
:meth:`~.pt_getContext()` to add them in:
.. testcode::
from zope.pagetemplate.pagetemplate import PageTemplate
class mypt(PageTemplate):
def pt_getContext(self, args=(), options={}, **kw):
rval = PageTemplate.pt_getContext(self, args=args)
options.update(rval)
return options
class foo(object):
def getContents(self): return 'hi'
Now we can bind objects under arbitrary names, as in the
following:
.. testcode::
template = """
<html>
<body>
<b tal:replace="das_object/getContents">Good Stuff Here</b>
</body>
</html>
"""
pt = mypt()
pt.write(template)
print(pt(das_object=foo()).strip())
.. testoutput::
<html>
<body>
hi
</body>
</html>
| zope.pagetemplate | /zope.pagetemplate-5.0.tar.gz/zope.pagetemplate-5.0/docs/using.rst | using.rst |
=======================================
ZPT (Zope Page-Template) Architecture
=======================================
There are a number of major components that make up the page-template
architecture:
- The TAL *compiler* and *interpreter*. This is responsible for
compiling source files and for executing compiled templates. See
the zope.tal_ package for more information.
- An *expression engine* is responsible for compiling expressions and
for creating expression execution contexts. It is common for
applications to override expression engines to provide custom
expression support or to change the way expressions are implemented.
The :mod:`zope.pagetemplate.engine` module uses this to implement trusted
and untrusted evaluation; a different engine is used for each, with
different implementations of the same type of expressions.
The z3c.ptcompat_ package extends these engines to use the
Chameleon_ templating system for increased speed.
Expression contexts support execution of expressions and provide
APIs for setting up variable scopes and setting variables. The
expression contexts are passed to the TAL interpreter at execution
time.
The most commonly used expression implementation is that found in
zope.tales_.
- Page templates tie everything together. They assemble an expression
engine with the TAL interpreter and orchestrate management of source
and compiled template data. See :mod:`zope.pagetemplate.interfaces`.
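Putting the pieces above together, the following is a minimal sketch of the
flow using only the ``PageTemplate`` class from this package (the expression
engine and the TAL interpreter are wired up internally; exact output
whitespace may differ):

.. code-block:: python

   from zope.pagetemplate.pagetemplate import PageTemplate

   pt = PageTemplate()
   pt.write('<p tal:content="options/name">placeholder</p>')
   # Keyword arguments are exposed through the ``options`` built-in name.
   print(pt(name='World'))   # roughly: <p>World</p>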
.. _z3c.ptcompat: https://pypi.python.org/pypi/z3c.ptcompat
.. _zope.tal: https://pypi.python.org/pypi/zope.tal
.. _zope.tales: https://pypi.python.org/pypi/zope.tales
.. _Chameleon: https://chameleon.readthedocs.io/en/latest/
| zope.pagetemplate | /zope.pagetemplate-5.0.tar.gz/zope.pagetemplate-5.0/docs/architecture.rst | architecture.rst |
=========
Changes
=========
4.4 (2022-09-01)
================
- Add support for Python 3.7, 3.8, 3.9, 3.10.
- Drop support for Python 3.4.
4.3.1 (2017-09-01)
==================
- Fix running ``configure.zcml`` when ``zope.security`` is installed.
See `issue 15
<https://github.com/zopefoundation/zope.password/issues/15>`_.
4.3.0 (2017-08-31)
==================
- Added a ``bcrypt``-based password manager (available only if the
`bcrypt <https://pypi.python.org/pypi/bcrypt>`_ library is
importable). This manager can also check passwords that were encoded
with `z3c.bcrypt <https://pypi.python.org/pypi/z3c.bcrypt>`_. If
that package is *not* installed, then ``configure.zcml`` will
install this manager as a utility with both the ``BCRYPT``
(preferred) and ``bcrypt`` names for compatibility with it. (See
https://github.com/zopefoundation/zope.password/issues/10)
- Add a ``bcrypt_kdf`` password manager. This allows tunable numbers
of rounds. See https://github.com/zopefoundation/zope.password/issues/9
- Fix the ``zpasswd`` console script on Python 3.
- Update the ``zpasswd`` script to use ``argparse`` instead of ``optparse``.
- Use ``hmac.compare_digest`` when checking passwords to
prevent timing analysis. This requires Python 2.7.7 or above.
- Add support for Python 3.6.
- Drop support for Python 3.3 and Python 2.7.6 and below.
- Drop support for ``python setup.py test``.
4.2.0 (2016-07-07)
==================
- Drop support for Python 2.6.
- Converted documentation to Sphinx, including testing doctest snippets
under ``tox``.
- Add support for Python 3.5.
4.1.0 (2014-12-27)
==================
- Add support for PyPy. (PyPy3 is pending release of a fix for:
https://bitbucket.org/pypy/pypy/issue/1946)
- Add support for Python 3.4.
- Add support for testing on Travis.
4.0.2 (2013-03-11)
==================
- Fix some final resource warnings.
4.0.1 (2013-03-10)
==================
- Fix test failures under Python 3.3 when warnings are enabled.
4.0.0 (2013-02-21)
==================
- Make ``zpasswd`` a proper console script entry point.
- Add ``tox.ini`` and ``MANIFEST.in``.
- Add support for Python 3.3
- Replace deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Drop support for Python 2.4 and 2.5.
- Add a new ``IMatchingPasswordManager`` interface with a 'match' method,
which returns True if a given password hash was encdoded with the scheme
implemented by the specific manager. All managers in this package implement
this interface.
- Use "{SHA}" as the prefix for SHA1-encoded passwords to be compatible with
RFC 2307, but support matching against "{SHA1}" for backwards compatibility.
- Add a crypt password manager to fully support all methods named in RFC 2307.
It is contained in the ``legacy`` module however, to flag crypt's status.
- Add a SMD5 (salted MD5) password manager to fully support all encoding
schemes implemented by OpenLDAP.
- Add a MySQL ``PASSWORD()`` (versions before 4.1) password manager, as also
found in Zope2's ``AccessControl.AuthEncoding`` module.
- Remove the useless, cosmetic salt from the MD5 and SHA1 password managers,
and use base64 encoding instead of hexdigests. This makes the output of
these managers compatible with other MD5 and SHA1 hash implementations such
as RFC 2307 but doesn't lower its security in any way. Checking passwords
against old, still 'salted' password hashes with hexdigests is still
supported.
- Use the ``standard_base64encode`` method instead of ``url_base64encode``
to maintain compatibility with LDAP.
3.6.1 (2010-05-27)
==================
- The SSHAPasswordManager.checkPassword() would not handle unicode input
(even if the string would only contain ascii characters). Now, the
``encoded_password`` input will be encoded to ascii, which is deemed safe
as it should not contain non-ascii characters anyway.
3.6.0 (2010-05-07)
==================
- Remove ``zope.testing`` dependency for tests.
- Update some copyright headers to comply to repository policy.
- Add ``zpasswd`` script formerly held in zope.app.server. Unlike the
former zpasswd script, which used "Plain Text" as the default password
manager, SSHA is now used as the default.
3.5.1 (2009-03-14)
==================
- Make security protection directives in ``configure.zcml`` execute only
if ``zope.security`` is installed. This will allow reuse of the
``configure.zcml`` file in environments without ``zope.security``,
for example with ``repoze.zcml``.
- Add "Password Manager Names" vocabulary for use with ``zope.schema``
and ``zope.component``, like it was in ``zope.app.authentication``.
It's an optional feature, so it doesn't add a hard dependency. We use the
"vocabulary" extra to list the dependencies needed for vocabulary functionality.
3.5.0 (2009-03-06)
==================
First release. This package was split off from ``zope.app.authentication``
to separate out password manager functionality that is broadly reusable without
any of ``zope.app.authentication``, and to reduce its dependencies.
| zope.password | /zope.password-4.4.tar.gz/zope.password-4.4/CHANGES.rst | CHANGES.rst |
===================
``zope.password``
===================
.. image:: https://img.shields.io/pypi/v/zope.password.svg
:target: https://pypi.python.org/pypi/zope.password/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/zope.password.svg
:target: https://pypi.org/project/zope.password/
:alt: Supported Python versions
.. image:: https://github.com/zopefoundation/zope.password/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zope.password/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/zope.password/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zope.password?branch=master
.. image:: https://readthedocs.org/projects/zopepassword/badge/?version=latest
:target: https://zopepassword.readthedocs.io/en/latest/
:alt: Documentation Status
This package provides a password manager mechanism. A password manager
is a utility object that can encode passwords and check encoded
passwords.
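A minimal sketch of typical use, assuming the ``SSHAPasswordManager`` from
``zope.password.password``::

    from zope.password.password import SSHAPasswordManager

    manager = SSHAPasswordManager()
    hashed = manager.encodePassword('secret')      # bytes, '{SSHA}...'
    assert manager.checkPassword(hashed, 'secret')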
Documentation is hosted at https://zopepassword.readthedocs.io/
| zope.password | /zope.password-4.4.tar.gz/zope.password-4.4/README.rst | README.rst |
"""Password managers
"""
__docformat__ = 'restructuredtext'
import re
from base64 import standard_b64decode
from base64 import standard_b64encode
from base64 import urlsafe_b64decode
from base64 import urlsafe_b64encode
from binascii import a2b_hex
from codecs import getencoder
from hashlib import md5
from hashlib import sha1
from hmac import compare_digest as _timing_safe_compare
from os import urandom
try:
import bcrypt
except ImportError: # pragma: no cover
bcrypt = None
from zope.interface import implementer
from zope.password.interfaces import IMatchingPasswordManager
_enc = getencoder("utf-8")
def _encoder(s):
if isinstance(s, bytes):
return s
return _enc(s)[0]
@implementer(IMatchingPasswordManager)
class PlainTextPasswordManager(object):
"""Plain text password manager.
>>> from zope.interface.verify import verifyObject
>>> from zope.password.interfaces import IMatchingPasswordManager
>>> from zope.password.password import PlainTextPasswordManager
>>> manager = PlainTextPasswordManager()
>>> verifyObject(IMatchingPasswordManager, manager)
True
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = manager.encodePassword(password)
>>> encoded == password.encode('utf-8')
True
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
The plain text password manager *never* claims to implement the scheme,
because this would open a security hole, where a hash from a different
scheme could be used as-is as a plain-text password. Authentication code
that needs to support plain-text passwords must explicitly check for
plain-text password matches after all other options have been tested:
>>> manager.match(encoded)
False
"""
def encodePassword(self, password):
password = _encoder(password)
return password
def checkPassword(self, encoded_password, password):
return encoded_password == self.encodePassword(password)
def match(self, encoded_password):
# We always return False for PlainText because (a) the password was not
# encrypted, and (b) matching against actual encryption schemes would make
# it possible to authenticate with the un-encrypted hash as a password.
# For example, you should not be able to authenticate with a literal
# SSHA hash.
return False
class _PrefixedPasswordManager(PlainTextPasswordManager):
# The bytes prefix this object uses.
_prefix = None
def match(self, encoded_password):
return _encoder(encoded_password).startswith(self._prefix)
class SSHAPasswordManager(_PrefixedPasswordManager):
"""SSHA password manager.
SSHA is basically SHA1-encoding which also incorporates a salt
into the encoded string. This way, stored passwords are more
robust against dictionary attacks of attackers that could get
access to lists of encoded passwords.
SSHA is regularly used in LDAP databases and we should be
compatible with passwords used there.
>>> from zope.interface.verify import verifyObject
>>> from zope.password.interfaces import IMatchingPasswordManager
>>> from zope.password.password import SSHAPasswordManager
>>> manager = SSHAPasswordManager()
>>> verifyObject(IMatchingPasswordManager, manager)
True
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = manager.encodePassword(password, salt="")
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{SSHA}BLTuxxVMXzouxtKVb7gLgNxzdAI=
>>> manager.match(encoded)
True
>>> manager.match(encoded.decode())
True
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
Using the `slappasswd` utility to encode ``secret``, we get
``{SSHA}x3HIoiF9y6YRi/I4W1fkptbzTDiNr+9l`` as seeded hash.
Our password manager generates the same value when seeded with the
same salt, so we can be sure, our output is compatible with
standard LDAP tools that also use SSHA:
>>> from base64 import standard_b64decode
>>> salt = standard_b64decode('ja/vZQ==')
>>> password = 'secret'
>>> encoded = manager.encodePassword(password, salt)
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{SSHA}x3HIoiF9y6YRi/I4W1fkptbzTDiNr+9l
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
We can also pass a salt that is a text string:
>>> salt = u'salt'
>>> password = 'secret'
>>> encoded = manager.encodePassword(password, salt)
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{SSHA}gVK8WC9YyFT1gMsQHTGCgT3sSv5zYWx0
Because a random salt is generated, the output of encodePassword is
different every time you call it.
>>> manager.encodePassword(password) != manager.encodePassword(password)
True
The password manager should be able to cope with unicode strings for input:
>>> passwd = u'foobar\u2211' # sigma-sign.
>>> manager.checkPassword(manager.encodePassword(passwd), passwd)
True
>>> manager.checkPassword(manager.encodePassword(passwd).decode(), passwd)
True
The manager only claims to implement SSHA encodings; anything not starting
with the string {SSHA} returns False:
>>> manager.match('{MD5}someotherhash')
False
An older version of this manager used the urlsafe variant of the base64
encoding (replacing / and + characters with _ and - respectively). Hashes
encoded with the old manager are still supported:
>>> encoded = '{SSHA}x3HIoiF9y6YRi_I4W1fkptbzTDiNr-9l'
>>> manager.checkPassword(encoded, 'secret')
True
"""
_prefix = b'{SSHA}'
def encodePassword(self, password, salt=None):
if salt is None:
salt = urandom(4)
elif not isinstance(salt, bytes):
salt = _encoder(salt)
hash = sha1(_encoder(password))
hash.update(salt)
return self._prefix + standard_b64encode(hash.digest() + salt)
def checkPassword(self, encoded_password, password):
# standard_b64decode() cannot handle unicode input string.
encoded_password = _encoder(encoded_password)
encoded_password = encoded_password[6:]
if b'_' in encoded_password or b'-' in encoded_password:
# Encoded using old urlsafe_b64encode, re-encode
byte_string = urlsafe_b64decode(encoded_password)
encoded_password = standard_b64encode(byte_string)
else:
byte_string = standard_b64decode(encoded_password)
salt = byte_string[20:]
return _timing_safe_compare(encoded_password,
self.encodePassword(password, salt)[6:])
class SMD5PasswordManager(_PrefixedPasswordManager):
"""SMD5 password manager.
SMD5 is basically MD5-encoding which also incorporates a salt
into the encoded string. This way, stored passwords are more
robust against dictionary attacks of attackers that could get
access to lists of encoded passwords:
>>> from zope.interface.verify import verifyObject
>>> from zope.password.interfaces import IMatchingPasswordManager
>>> from zope.password.password import SMD5PasswordManager
>>> manager = SMD5PasswordManager()
>>> verifyObject(IMatchingPasswordManager, manager)
True
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = manager.encodePassword(password, salt="")
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{SMD5}ht3czsRdtFmfGsAAGOVBOQ==
>>> manager.match(encoded)
True
>>> manager.match(encoded.decode())
True
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
Using the ``slappasswd`` utility to encode ``secret``, we get
``{SMD5}zChC6x0tl2zr9fjvjZzKePV5KWA=`` as seeded hash.
Our password manager generates the same value when seeded with the
same salt, so we can be sure, our output is compatible with
standard LDAP tools that also use SMD5:
>>> from base64 import standard_b64decode
>>> salt = standard_b64decode('9XkpYA==')
>>> password = 'secret'
>>> encoded = manager.encodePassword(password, salt)
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{SMD5}zChC6x0tl2zr9fjvjZzKePV5KWA=
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
We can also pass a salt that is a text string:
>>> salt = u'salt'
>>> password = 'secret'
>>> encoded = manager.encodePassword(password, salt)
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{SMD5}mc0uWpXVVe5747A4pKhGJXNhbHQ=
Because a random salt is generated, the output of encodePassword is
different every time you call it.
>>> manager.encodePassword(password) != manager.encodePassword(password)
True
The password manager should be able to cope with unicode strings for
input:
>>> passwd = u'foobar\u2211' # sigma-sign.
>>> manager.checkPassword(manager.encodePassword(passwd), passwd)
True
>>> manager.checkPassword(manager.encodePassword(passwd).decode(), passwd)
True
The manager only claims to implement SMD5 encodings; anything not starting
with the string {SMD5} returns False:
>>> manager.match('{MD5}someotherhash')
False
"""
_prefix = b'{SMD5}'
def encodePassword(self, password, salt=None):
if salt is None:
salt = urandom(4)
elif not isinstance(salt, bytes):
salt = salt.encode('utf-8')
hash = md5(_encoder(password))
hash.update(salt)
return self._prefix + standard_b64encode(hash.digest() + salt)
def checkPassword(self, encoded_password, password):
encoded_password = _encoder(encoded_password)
byte_string = standard_b64decode(encoded_password[6:])
salt = byte_string[16:]
return _timing_safe_compare(encoded_password,
self.encodePassword(password, salt))
class MD5PasswordManager(_PrefixedPasswordManager):
"""MD5 password manager.
>>> from zope.interface.verify import verifyObject
>>> from zope.password.interfaces import IMatchingPasswordManager
>>> from zope.password.password import MD5PasswordManager
>>> manager = MD5PasswordManager()
>>> verifyObject(IMatchingPasswordManager, manager)
True
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = manager.encodePassword(password)
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{MD5}ht3czsRdtFmfGsAAGOVBOQ==
>>> manager.match(encoded)
True
>>> manager.match(encoded.decode())
True
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
This password manager is compatible with other RFC 2307 MD5
implementations. For example the output of the slappasswd command for
a MD5 hashing of ``secret`` is ``{MD5}Xr4ilOzQ4PCOq3aQ0qbuaQ==``,
and our implementation returns the same hash:
>>> print(manager.encodePassword('secret').decode())
{MD5}Xr4ilOzQ4PCOq3aQ0qbuaQ==
The password manager should be able to cope with unicode strings for input:
>>> passwd = u'foobar\u2211' # sigma-sign.
>>> manager.checkPassword(manager.encodePassword(passwd), passwd)
True
>>> manager.checkPassword(manager.encodePassword(passwd).decode(), passwd)
True
A previous version of this manager also created a cosmetic salt, added
to the start of the hash, but otherwise not used in creating the hash
itself. Moreover, it generated the MD5 hash as a hex digest, not a base64
encoded value and did not include the {MD5} prefix. Such hashed values are
still supported too:
>>> encoded = 'salt86dddccec45db4599f1ac00018e54139'
>>> manager.checkPassword(encoded, password)
True
However, because the prefix is missing, the password manager cannot claim
to implement the scheme:
>>> manager.match(encoded)
False
"""
_prefix = b'{MD5}'
def encodePassword(self, password, salt=None):
# The salt argument only exists for backwards compatibility and is
# ignored on purpose.
return self._prefix + standard_b64encode(
md5(_encoder(password)).digest())
def checkPassword(self, encoded_password, password):
encoded_password = _encoder(encoded_password)
encoded = encoded_password[encoded_password.find(b'}') + 1:]
if len(encoded) > 24:
# Backwards compatible, hexencoded md5 and bogus salt
encoded = standard_b64encode(a2b_hex(encoded[-32:]))
return _timing_safe_compare(encoded, self.encodePassword(password)[5:])
class SHA1PasswordManager(_PrefixedPasswordManager):
"""SHA1 password manager.
>>> from zope.interface.verify import verifyObject
>>> from zope.password.interfaces import IMatchingPasswordManager
>>> from zope.password.password import SHA1PasswordManager
>>> manager = SHA1PasswordManager()
>>> verifyObject(IMatchingPasswordManager, manager)
True
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = manager.encodePassword(password)
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{SHA}BLTuxxVMXzouxtKVb7gLgNxzdAI=
>>> manager.match(encoded)
True
>>> manager.match(encoded.decode())
True
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
This password manager is compatible with other RFC 2307 SHA
implementations. For example the output of the slappasswd command for
a SHA hashing of ``secret`` is ``{SHA}5en6G6MezRroT3XKqkdPOmY/BfQ=``,
and our implementation returns the same hash:
>>> print(manager.encodePassword('secret').decode())
{SHA}5en6G6MezRroT3XKqkdPOmY/BfQ=
The password manager should be able to cope with unicode strings for input:
>>> passwd = u'foobar\u2211' # sigma-sign.
>>> manager.checkPassword(manager.encodePassword(passwd), passwd)
True
>>> manager.checkPassword(manager.encodePassword(passwd).decode(), passwd)
True
A previous version of this manager also created a cosmetic salt, added
to the start of the hash, but otherwise not used in creating the hash
itself. Moreover, it generated the SHA hash as a hex digest, not a base64
encoded value and did not include the {SHA} prefix. Such hashed values are
still supported too:
>>> encoded = 'salt04b4eec7154c5f3a2ec6d2956fb80b80dc737402'
>>> manager.checkPassword(encoded, password)
True
However, because the prefix is missing, the password manager cannot claim
to implement the scheme:
>>> manager.match(encoded)
False
Previously, this password manager used {SHA1} as a prefix, but this was
changed to be compatible with LDAP (RFC 2307). The old prefix is still
supported (note the hexdigest encoding as well):
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = '{SHA1}04b4eec7154c5f3a2ec6d2956fb80b80dc737402'
>>> manager.match(encoded)
True
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
"""
_prefix = b'{SHA}'
def encodePassword(self, password, salt=None):
# The salt argument only exists for backwards compatibility and is
# ignored on purpose.
return self._prefix + standard_b64encode(
sha1(_encoder(password)).digest())
def checkPassword(self, encoded_password, password):
encoded_password = _encoder(encoded_password)
if self.match(encoded_password):
encoded = encoded_password[encoded_password.find(b'}') + 1:]
if len(encoded) > 28:
# Backwards compatible, hexencoded sha1 and bogus salt
encoded = standard_b64encode(a2b_hex(encoded[-40:]))
return encoded == self.encodePassword(password)[5:]
# Backwards compatible, hexdigest and no prefix
encoded_password = standard_b64encode(a2b_hex(encoded_password[-40:]))
return _timing_safe_compare(
encoded_password, self.encodePassword(password)[5:])
def match(self, encoded_password):
encoded_password = _encoder(encoded_password)
return encoded_password.startswith((self._prefix, b'{SHA1}'))
class BCRYPTPasswordManager(_PrefixedPasswordManager):
"""
BCRYPT password manager.
In addition to the passwords encoded by this class,
this class can also recognize passwords encoded by :mod:`z3c.bcrypt`
and properly match and check them.
.. note:: This uses the :mod:`bcrypt` library in its
implementation, which `only uses the first 72 characters
<https://pypi.python.org/pypi/bcrypt/3.1.3#maximum-password-length>`_
of the password when computing the hash.
"""
_prefix = b'{BCRYPT}'
# This is the same regex that z3c.bcrypt uses, via way of cryptacular
# The $2a$ is a prefix.
_z3c_bcrypt_syntax = re.compile(br'\$2a\$[0-9]{2}\$[./A-Za-z0-9]{53}')
_clean_clear = staticmethod(_encoder)
_clean_hashed = staticmethod(_encoder)
def checkPassword(self, hashed_password, clear_password):
"""Check a *hashed_password* against a *clear_password*.
>>> from zope.password.password import BCRYPTPasswordManager
>>> manager = BCRYPTPasswordManager()
>>> manager.checkPassword(b'not from here', None)
False
:param bytes hashed_password: The encoded password.
:param unicode clear_password: The password to check.
:returns: True if and only if the password matches the hash.
:rtype: bool
"""
if not self.match(hashed_password):
return False
pw_bytes = self._clean_clear(clear_password)
pw_hash = hashed_password
if hashed_password.startswith(self._prefix):
pw_hash = hashed_password[len(self._prefix):]
try:
ok = bcrypt.checkpw(pw_bytes, pw_hash)
except ValueError: # pragma: no cover
# invalid salt
ok = False
return ok
def encodePassword(self, password, salt=None):
"""Encode a `password`, with an optional `salt`.
If `salt` is not provided, a unique hash will be generated
for each invocation.
:param password: The clear-text password.
:type password: unicode
:param salt: The salt to be used to hash the password.
:rtype: str
:returns: The encoded password as a byte string.
"""
if salt is None:
salt = bcrypt.gensalt()
salt = self._clean_hashed(salt)
pw = self._clean_clear(password)
return self._prefix + bcrypt.hashpw(pw, salt=salt)
def match(self, hashed_password):
"""Was the password hashed with this password manager?
:param bytes hashed_password: The encoded password.
:rtype: bool
:returns: True if and only if the password was hashed with this manager.
"""
hashed_password = _encoder(hashed_password)
return (hashed_password.startswith(self._prefix)
or self._z3c_bcrypt_syntax.match(hashed_password) is not None)
class BCRYPTKDFPasswordManager(_PrefixedPasswordManager):
"""
BCRYPT KDF password manager.
This manager converts a plain text password into a byte array.
The password and salt values (randomly generated when the password
is encoded) are combined and repeatedly hashed *rounds* times. The
repeated hashing is designed to thwart discovery of the key via
password guessing attacks. The higher the number of rounds, the
slower each attempt will be.
Compared to the :class:`BCRYPTPasswordManager`, this has the
advantage of allowing tunable rounds, so as computing devices get
more powerful making brute force attacks faster, the difficulty
level can be raised (for newly encoded passwords).
>>> from zope.password.password import BCRYPTKDFPasswordManager
>>> manager = BCRYPTKDFPasswordManager()
>>> manager.checkPassword(b'not from here', None)
False
Let's encode a password. We'll use the minimum acceptable number
of rounds so that the tests run fast:
>>> manager.rounds = 51
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = manager.encodePassword(password)
>>> print(encoded.decode())
{BCRYPTKDF}33...
It checks out:
>>> manager.checkPassword(encoded, password)
True
We can change the number of rounds for future encodings:
>>> manager.rounds = 100
>>> encoded2 = manager.encodePassword(password)
>>> print(encoded2.decode())
{BCRYPTKDF}64...
>>> manager.checkPassword(encoded2, password)
True
And the old password still checks out:
>>> manager.checkPassword(encoded, password)
True
"""
#: The number of rounds of hashing that should be applied.
#: The higher the number, the slower it is. It should be at least
#: 50.
rounds = 1024
#: The number of bytes long the encoded password will be. It must be
#: at least 1 and no more than 512.
keylen = 32
_prefix = b'{BCRYPTKDF}'
def _encode(self, password, salt, rounds, keylen):
password = _encoder(password)
key = bcrypt.kdf(password, salt=salt,
desired_key_bytes=keylen,
rounds=rounds)
rounds_bytes = _encoder('%x' % rounds)
result = (self._prefix
+ rounds_bytes
+ b'$'
+ urlsafe_b64encode(salt)
+ b'$'
+ urlsafe_b64encode(key))
return result
def encodePassword(self, password):
salt = bcrypt.gensalt()
return self._encode(password, salt, self.rounds, self.keylen)
def checkPassword(self, hashed_password, clear_password):
hashed_password = _encoder(hashed_password)
if not self.match(hashed_password):
return False
rounds, salt, key = hashed_password[len(self._prefix):].split(b'$')
rounds = int(rounds, 16)
salt = urlsafe_b64decode(salt)
keylen = len(urlsafe_b64decode(key))
encoded_password = self._encode(clear_password, salt, rounds, keylen)
return _timing_safe_compare(hashed_password, encoded_password)
# Simple registry
managers = [
('Plain Text', PlainTextPasswordManager()),
('MD5', MD5PasswordManager()),
('SMD5', SMD5PasswordManager()),
('SHA1', SHA1PasswordManager()),
('SSHA', SSHAPasswordManager()),
]
if bcrypt is not None:
managers.append(('BCRYPT', BCRYPTPasswordManager()))
managers.append(('BCRYPTKDF', BCRYPTKDFPasswordManager())) | zope.password | /zope.password-4.4.tar.gz/zope.password-4.4/src/zope/password/password.py | password.py |
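# Example (a sketch, not part of the public API): picking a manager from this
# simple registry by name and round-tripping a password.
#
#   manager = dict(managers)['SSHA']
#   hashed = manager.encodePassword('secret')
#   assert manager.checkPassword(hashed, 'secret')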
"""Legacy password managers, using now-outdated, insecure methods for hashing
"""
__docformat__ = 'restructuredtext'
import sys
from codecs import getencoder
try:
from crypt import crypt
from random import choice
except ImportError: # pragma: no cover
# The crypt module is not universally available, apparently
crypt = None
from zope.interface import implementer
from zope.password.interfaces import IMatchingPasswordManager
_encoder = getencoder("utf-8")
PY2 = sys.version_info[0] == 2
if crypt is not None:
@implementer(IMatchingPasswordManager)
class CryptPasswordManager(object):
"""Crypt password manager.
Implements a UNIX crypt(3) hashing scheme. Note that crypt is
considered far inferior to more modern schemes such as SSHA hashing,
and only uses the first 8 characters of a password.
>>> from zope.interface.verify import verifyObject
>>> from zope.password.interfaces import IMatchingPasswordManager
>>> from zope.password.legacy import CryptPasswordManager
>>> manager = CryptPasswordManager()
>>> verifyObject(IMatchingPasswordManager, manager)
True
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = manager.encodePassword(password, salt="..")
>>> encoded
'{CRYPT}..I1I8wps4Na2'
>>> manager.match(encoded)
True
>>> manager.checkPassword(encoded, password)
True
Note that this object fails to return bytes from the ``encodePassword``
function on Python 3:
>>> isinstance(encoded, str)
True
Unfortunately, crypt only looks at the first 8 characters, so matching
against an 8 character password plus suffix always matches. Our test
password (including utf-8 encoding) is exactly 8 characters long, and
thus affixing 'wrong' to it tests as a correct password:
>>> manager.checkPassword(encoded, password + u"wrong")
True
Using a completely different password is rejected as expected:
>>> manager.checkPassword(encoded, 'completely wrong')
False
Using the `openssl passwd` command-line utility to encode ``secret``,
we get ``erz50QD3gv4Dw`` as seeded hash.
Our password manager generates the same value when seeded with the
same salt, so we can be sure, our output is compatible with
standard LDAP tools that also use crypt:
>>> salt = 'er'
>>> password = 'secret'
>>> encoded = manager.encodePassword(password, salt)
>>> encoded
'{CRYPT}erz50QD3gv4Dw'
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
>>> manager.encodePassword(password) != manager.encodePassword(
... password)
True
The manager only claims to implement CRYPT encodings; anything not
starting with the string {CRYPT} returns False:
>>> manager.match('{MD5}someotherhash')
False
"""
def encodePassword(self, password, salt=None):
if salt is None:
choices = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789./")
salt = choice(choices) + choice(choices)
if PY2:
# Py3: Python 2 can only handle ASCII for crypt.
password = _encoder(password)[0]
return '{CRYPT}%s' % crypt(password, salt)
def checkPassword(self, encoded_password, password):
return encoded_password == self.encodePassword(
password, encoded_password[7:9])
def match(self, encoded_password):
return encoded_password.startswith('{CRYPT}')
@implementer(IMatchingPasswordManager)
class MySQLPasswordManager(object):
"""A MySQL digest manager.
This Password Manager implements the digest scheme used by the
MySQL PASSWORD function in MySQL versions before 4.1. Note that this method
results in a very weak 16-byte hash.
>>> from zope.interface.verify import verifyObject
>>> from zope.password.interfaces import IMatchingPasswordManager
>>> from zope.password.legacy import MySQLPasswordManager
>>> manager = MySQLPasswordManager()
>>> verifyObject(IMatchingPasswordManager, manager)
True
>>> password = u"right \N{CYRILLIC CAPITAL LETTER A}"
>>> encoded = manager.encodePassword(password)
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{MYSQL}0ecd752c5097d395
>>> manager.match(encoded)
True
>>> manager.match(encoded.decode())
True
>>> manager.checkPassword(encoded.decode(), password)
True
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
Using the password 'PHP & Information Security' should result in the hash
``379693e271cd3bd6``, according to
http://phpsec.org/articles/2005/password-hashing.html
Our password manager generates the same value when seeded with the same
seed, so we can be sure, our output is compatible with MySQL versions
before 4.1:
>>> password = 'PHP & Information Security'
>>> encoded = manager.encodePassword(password)
>>> isinstance(encoded, bytes)
True
>>> print(encoded.decode())
{MYSQL}379693e271cd3bd6
>>> manager.checkPassword(encoded, password)
True
>>> manager.checkPassword(encoded, password + u"wrong")
False
The manager only claims to implement MYSQL encodings; anything not starting
with the string {MYSQL} returns False:
>>> manager.match('{MD5}someotherhash')
False
Spaces and tabs are ignored:
>>> encoded = manager.encodePassword('\tign or ed')
>>> print(encoded.decode())
{MYSQL}75818366052c6a78
>>> encoded = manager.encodePassword('ignored')
>>> print(encoded.decode())
{MYSQL}75818366052c6a78
"""
def encodePassword(self, password):
nr = 1345345333
add = 7
nr2 = 0x12345671
for i in _encoder(password)[0]:
if PY2:
# In Python 2 bytes iterate over single-char strings.
i = ord(i)
if i == ord(b' ') or i == ord(b'\t'):
continue # pragma: no cover (this is actually hit, but ...
# coverage isn't reporting it)
nr ^= (((nr & 63) + add) * i) + (nr << 8)
nr2 += (nr2 << 8) ^ nr
add += i
r0 = nr & ((1 << 31) - 1)
r1 = nr2 & ((1 << 31) - 1)
return ("{MYSQL}%08lx%08lx" % (r0, r1)).encode()
def checkPassword(self, encoded_password, password):
if not isinstance(encoded_password, bytes):
encoded_password = encoded_password.encode('ascii')
return encoded_password == self.encodePassword(password)
def match(self, encoded_password):
if not isinstance(encoded_password, bytes):
encoded_password = encoded_password.encode('ascii')
return encoded_password.startswith(b'{MYSQL}') | zope.password | /zope.password-4.4.tar.gz/zope.password-4.4/src/zope/password/legacy.py | legacy.py |
"""Implementation of the zpasswd script.
"""
from __future__ import print_function
import argparse
import os
import sys
from xml.sax.saxutils import quoteattr
import pkg_resources
VERSION = pkg_resources.get_distribution('zope.password').version
def main(argv=None, app_factory=None):
"""Top-level script function to create a new principals."""
argv = sys.argv if argv is None else argv
try:
options = parse_args(argv)
except SystemExit as e:
if e.code:
return 2
return 0
return run_app_with_options(options, app_factory)
def run_app_with_options(options, app_factory=None):
app = Application if app_factory is None else app_factory
app = app(options)
try:
return app.process()
except KeyboardInterrupt:
return 1
except SystemExit as e:
return e.code
class Principal(object):
"""Principal.
>>> principal = Principal("id", u"title", u"login", b"password")
>>> print(principal)
<principal
id="id"
title="title"
login="login"
password="password"
/>
>>> principal = Principal("id", u"title", u"login", b"password",
... u"description", "SHA1")
>>> print(principal)
<principal
id="id"
title="title"
login="login"
password="password"
description="description"
password_manager="SHA1"
/>
"""
def __init__(self, id, title, login, password,
description="", password_manager_name="Plain Text"):
self.id = id
self.login = login
self.password = password
self.title = title
self.description = description
self.password_manager_name = password_manager_name
def getLines(self):
lines = [
' <principal',
' id=%s' % quoteattr(self.id),
' title=%s' % quoteattr(self.title),
' login=%s' % quoteattr(self.login),
' password=%s' % quoteattr(self.password.decode())
]
if self.description:
lines.append(' description=%s' % quoteattr(self.description))
if self.password_manager_name != "Plain Text":
lines.append(' password_manager=%s'
% quoteattr(self.password_manager_name))
lines.append(' />')
return lines
def __str__(self):
return "\n".join(self.getLines())
TITLE = """
============================================
Principal information for inclusion in ZCML:
"""
ID_TITLE = """
Please choose an id for the principal.
"""
TITLE_TITLE = """
Please choose a title for the principal.
"""
LOGIN_TITLE = """
Please choose a login for the principal.
"""
PASSWORD_TITLE = """
Please provide a password for the principal.
"""
DESCRIPTION_TITLE = """
Please provide an optional description for the principal.
"""
class Application(object):
title = TITLE
id_title = ID_TITLE
title_title = TITLE_TITLE
login_title = LOGIN_TITLE
password_title = PASSWORD_TITLE
description_title = DESCRIPTION_TITLE
def __init__(self, options):
self.options = options
self.need_blank_line = False
def read_input_line(self, prompt):
# The tests replace this to make sure the right things happen.
read = raw_input if bytes is str else input # noqa: F821 undefined PY2
return read(prompt)
def read_password(self, prompt):
# The tests replace this to make sure the right things happen.
import getpass
try:
return getpass.getpass(prompt)
except KeyboardInterrupt:
# The cursor was left on the same line as the prompt,
# which we don't like. Print a blank line.
print()
raise
def process(self):
options = self.options
destination = options.destination
try:
principal = self.get_principal()
if destination is sys.stdout:
print(self.title)
print(principal, file=destination)
print()
finally:
if destination is not sys.stdout:
destination.close()
return 0
def get_principal(self):
id = self.get_value(self.id_title, "Id: ", "Id may not be empty")
title = self.get_value(self.title_title, "Title: ",
"Title may not be empty")
login = self.get_value(self.login_title, "Login: ",
"Login may not be empty")
password_manager_name, password_manager = self.get_password_manager()
password = self.get_password()
description = self.get_value(self.description_title, "Description: ",)
password = password_manager.encodePassword(password)
return Principal(id, title, login, password, description,
password_manager_name)
def get_value(self, title, prompt, error=""):
self.print_message(title)
self.need_blank_line = True
while True:
value = self.read_input_line(prompt).strip()
if not value and error:
print(error, file=sys.stderr)
continue
return value
def get_password_manager(self):
default = 0
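        # Prefer BCRYPT as the default selection; fall back to SSHA when
        # bcrypt is not among the registered managers.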
self.print_message("Password manager:")
print()
managers = self.options.managers
for i, (name, manager) in enumerate(managers):
print("% i. %s" % (i + 1, name))
if name == 'BCRYPT':
default = i
elif name == 'SSHA' and not default:
default = i
print()
self.need_blank_line = True
while True:
password_manager = self.read_input_line(
"Password Manager Number [%s]: " % (default + 1))
if not password_manager:
index = default
break
elif password_manager.isdigit():
index = int(password_manager)
if index > 0 and index <= len(managers):
index -= 1
break
print("You must select a password manager", file=sys.stderr)
print("%s password manager selected" % managers[index][0])
return managers[index]
def get_password(self):
self.print_message(self.password_title)
while True:
password = self.read_password("Password: ")
if not password:
print("Password may not be empty", file=sys.stderr)
continue
if password != password.strip() or password.split() != [password]:
print("Password may not contain spaces", file=sys.stderr)
continue
break
again = self.read_password("Verify password: ")
if again != password:
print("Password not verified!", file=sys.stderr)
sys.exit(1)
return password
def print_message(self, message):
if self.need_blank_line:
print()
self.need_blank_line = False
print(message)
def get_password_managers(config_path=None):
from zope.password.password import managers as default_managers
managers = default_managers
if config_path:
from zope.component import getUtilitiesFor
from zope.configuration import xmlconfig
from zope.password.interfaces import IPasswordManager
print("Loading configuration...")
xmlconfig.file(config_path)
managers = []
for name, manager in getUtilitiesFor(IPasswordManager):
if name == "Plain Text":
managers.insert(0, (name, manager))
else:
managers.append((name, manager))
return managers or default_managers
def parse_args(argv):
"""Parse the command line, returning an object representing the input."""
prog = os.path.split(os.path.realpath(argv[0]))[1]
p = argparse.ArgumentParser(prog=prog)
p.add_argument(
"-c",
"--config",
dest="config",
metavar="FILE",
help=("path to the site.zcml configuration file"
" (more accurate but slow password managers registry creation)"))
p.add_argument("-o", "--output", dest="destination", metavar="FILE",
help=("the file in which the output will be saved"
" (STDOUT by default)"),
default=sys.stdout,
type=argparse.FileType('w'))
p.add_argument("--version", action="version", version=VERSION)
options = p.parse_args(argv[1:])
options.managers = get_password_managers(options.config)
options.program = prog
return options | zope.password | /zope.password-4.4.tar.gz/zope.password-4.4/src/zope/password/zpasswd.py | zpasswd.py |
==========================
:mod:`zope.password` API
==========================
Interfaces
==========
.. automodule:: zope.password.interfaces
:members:
:member-order: bysource
Password Manager Implementations
================================
.. automodule:: zope.password.password
:members:
:member-order: bysource
Deprecated Implementations
--------------------------
.. warning::
The following password managers are deprecated, because they produce
unacceptably-weak password hashes. They are only included to allow
apps which previously used them to migrate smoothly to a supported
implementation.
.. automodule:: zope.password.legacy
:members:
:member-order: bysource
Vocabulary
==========
.. automodule:: zope.password.vocabulary
:members:
:member-order: bysource
| zope.password | /zope.password-4.4.tar.gz/zope.password-4.4/docs/api.rst | api.rst |
Using :mod:`zope.password`
==========================
This package provides a password manager mechanism. A password manager
is a utility object that can encode and check encoded
passwords. Beyond the generic interface, this package also provides
nine implementations:
:class:`zope.password.password.PlainTextPasswordManager`
  The simplest and least secure one. It does not encode the password at
  all and simply checks passwords by string equality. It's useful in
  tests or as a base class for more secure implementations.
:class:`zope.password.password.MD5PasswordManager`
A password manager that uses MD5 algorithm to encode passwords. It's
generally weak against dictionary attacks due to a lack of a salt.
:class:`zope.password.password.SMD5PasswordManager`
  A password manager that uses the MD5 algorithm together with a salt to
  encode passwords. It's better protected against dictionary
attacks, but the MD5 hashing algorithm is not as strong as the SHA1
algorithm.
:class:`zope.password.password.SHA1PasswordManager`
A password manager that uses SHA1 algorithm to encode passwords. It has
the same weakness as the MD5PasswordManager.
:class:`zope.password.password.SSHAPasswordManager`
A password manager that is strong against dictionary attacks. It's
basically SHA1-encoding password manager which also incorporates a
salt into the password when encoding it.
:class:`zope.password.password.CryptPasswordManager`
A manager implementing the crypt(3) hashing scheme. Only available if
the python crypt module is installed. This is a legacy manager, only
present to ensure that zope.password can be used for all schemes defined
in RFC 2307 (LDAP).
:class:`zope.password.password.MySQLPasswordManager`
A manager implementing the digest scheme as implemented in the MySQL
PASSWORD function in MySQL versions before 4.1. Note that this method
results in a very weak 16-byte hash.
:class:`zope.password.password.BCRYPTPasswordManager`
A manager implementing the bcrypt hashing scheme. Only available if
the bcrypt_ module is installed. This manager is considered
one of the most secure.
:class:`zope.password.password.BCRYPTKDFPasswordManager`
A manager implementing the bcrypt_kdf hashing scheme. Only available if
the bcrypt_ module is installed. This manager is considered
one of the most secure.
The ``Crypt``, ``MD5``, ``SMD5``, ``SHA`` and ``SSHA`` password managers
are all compatible with RFC 2307 LDAP implementations of the same password
encoding schemes.
.. note::
It is strongly recommended to use the BCRYPTPasswordManager or
BCRYPTKDFPasswordManager, as they are the
most secure.
The package also provides a script, :command:`zpasswd`, to generate
principal entries in typical ``site.zcml`` files.
Password Manager Interfaces
---------------------------
The :class:`zope.password.interfaces.IPasswordManager` interface defines only
two methods:
.. literalinclude:: ../src/zope/password/interfaces.py
:pyobject: IPasswordManager.encodePassword
.. literalinclude:: ../src/zope/password/interfaces.py
:pyobject: IPasswordManager.checkPassword
An extended interface,
:class:`zope.password.interfaces.IMatchingPasswordManager`,
adds one additional method:
.. literalinclude:: ../src/zope/password/interfaces.py
:pyobject: IMatchingPasswordManager.match
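To see how these methods fit together, here is a minimal sketch using the
``SSHAPasswordManager`` (any of the managers listed above behaves the same
way; the encoded value is a fresh salted hash on every call, so the exact
bytes will differ):
.. code-block:: python
    from zope.password.password import SSHAPasswordManager
    manager = SSHAPasswordManager()
    encoded = manager.encodePassword('right password')  # e.g. b'{SSHA}...'
    manager.checkPassword(encoded, 'right password')    # True
    manager.checkPassword(encoded, 'wrong password')    # False
    manager.match(encoded)                              # True: recognizes its own format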
Looking Up Password Managers via a Vocabulary
---------------------------------------------
The :mod:`zope.password.vocabulary` module provides a vocabulary of
registered password manager utility names. It is typically registered
as an :class:`zope.schema.interfaces.IVocabularyFactory` utility named
"Password Manager Names".
It's intended to be used with :mod:`zope.component` and :mod:`zope.schema`,
so you need to have them installed and the utility registrations needs
to be done properly. The ``configure.zcml`` file contained in
:mod:`zope.password` does the registrations, as well as in
:func:`zope.password.testing.setUpPasswordManagers`.
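For example, once the registrations are in place (here via the testing
helper), the vocabulary can be looked up like any other named utility; a
minimal sketch, where the exact term values depend on which password
managers are registered:
.. code-block:: python
    from zope.component import getUtility
    from zope.schema.interfaces import IVocabularyFactory
    from zope.password.testing import setUpPasswordManagers
    setUpPasswordManagers()  # registers the managers and the vocabulary factory
    factory = getUtility(IVocabularyFactory, name='Password Manager Names')
    vocabulary = factory(None)  # any context (or None) works for global registrations
    print(sorted(term.value for term in vocabulary))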
Encrypting Passwords with :command:`zpasswd`
--------------------------------------------
:command:`zpasswd` is a script to generate principal entries in typical
``site.zcml`` files.
You can create a :command:`zpasswd` script in your buildout by adding a
section like this to your ``buildout.cfg``:
.. code-block:: ini
[zpasswd]
recipe = z3c.recipe.dev:script
eggs = zope.password
module = zope.password.zpasswd
method = main
This will generate a script :command:`zpasswd` next time you run
:command:`buildout`.
When run, the script will ask you for all parameters needed to create
a typical principal entry, including the encrypted password.
Use:
.. code-block:: sh
$ bin/zpasswd --help
to get a list of options.
Using
.. code-block:: sh
$ bin/zpasswd -c some/site.zcml
the script will try to look up any password manager you defined and
registered in your environment. This lookup is not necessary if you
go with the standard password managers defined in :mod:`zope.password`.
A typical :command:`zpasswd` session might look like:
.. code-block:: sh
$ ./bin/zpasswd
Please choose an id for the principal.
Id: foo
Please choose a title for the principal.
Title: The Foo
Please choose a login for the principal.
Login: foo
Password manager:
1. Plain Text
2. MD5
3. SMD5
4. SHA1
5. SSHA
6. BCRYPT
Password Manager Number [6]:
BCRYPT password manager selected
Please provide a password for the principal.
Password:
Verify password:
Please provide an optional description for the principal.
Description: The main foo
============================================
Principal information for inclusion in ZCML:
<principal
id="foo"
title="The Foo"
login="foo"
password="{BCRYPT}$2b$12$ez4eHl6W1PfAWix5bPIbe.drdnyqjpuT1Cp0N.xcdxkAEbA7K6AHK"
description="The main foo"
password_manager="BCRYPT"
/>
.. _bcrypt: https://pypi.python.org/pypi/bcrypt
| zope.password | /zope.password-4.4.tar.gz/zope.password-4.4/docs/narrative.rst | narrative.rst |
==================================
Pluggable-Authentication Utility
==================================
The Pluggable-Authentication Utility (PAU) provides a framework for
authenticating principals and associating information with them. It uses
plugins and subscribers to get its work done.
For a pluggable-authentication utility to be used, it should be
registered as a utility providing the
`zope.authentication.interfaces.IAuthentication` interface.
Authentication
==============
The primary job of PAU is to authenticate principals. It uses two types of
plug-ins in its work:
- Credentials Plugins
- Authenticator Plugins
Credentials plugins are responsible for extracting user credentials from a
request. A credentials plugin may in some cases issue a 'challenge' to obtain
credentials. For example, a 'session' credentials plugin reads credentials
from a session (the "extraction"). If it cannot find credentials, it will
redirect the user to a login form in order to provide them (the "challenge").
Authenticator plugins are responsible for authenticating the credentials
extracted by a credentials plugin. They are also typically able to create
principal objects for credentials they successfully authenticate.
Given a request object, the PAU returns a principal object, if it can. The PAU
does this by first iterating through its credentials plugins to obtain a
set of credentials. If it gets credentials, it iterates through its
authenticator plugins to authenticate them.
If an authenticator succeeds in authenticating a set of credentials, the PAU
uses the authenticator to create a principal corresponding to the credentials.
The authenticator notifies subscribers if an authenticated principal is
created. Subscribers are responsible for adding data, especially groups, to
the principal. Typically, if a subscriber adds data, it should also add
corresponding interface declarations.
Simple Credentials Plugin
-------------------------
To illustrate, we'll create a simple credentials plugin:
>>> from zope import interface
>>> from zope.pluggableauth.authentication import interfaces
>>> @interface.implementer(interfaces.ICredentialsPlugin)
... class MyCredentialsPlugin(object):
...
... def extractCredentials(self, request):
... return request.get('credentials')
...
... def challenge(self, request):
... pass # challenge is a no-op for this plugin
...
... def logout(self, request):
... pass # logout is a no-op for this plugin
As a plugin, MyCredentialsPlugin needs to be registered as a named utility:
>>> myCredentialsPlugin = MyCredentialsPlugin()
>>> provideUtility(myCredentialsPlugin, name='My Credentials Plugin')
Simple Authenticator Plugin
---------------------------
Next we'll create a simple authenticator plugin. For our plugin, we'll need
an implementation of IPrincipalInfo:
>>> @interface.implementer(interfaces.IPrincipalInfo)
... class PrincipalInfo(object):
...
... def __init__(self, id, title, description):
... self.id = id
... self.title = title
... self.description = description
...
... def __repr__(self):
... return 'PrincipalInfo(%r)' % self.id
Our authenticator uses this type when it creates a principal info:
>>> @interface.implementer(interfaces.IAuthenticatorPlugin)
... class MyAuthenticatorPlugin(object):
...
... def authenticateCredentials(self, credentials):
... if credentials == 'secretcode':
... return PrincipalInfo('bob', 'Bob', '')
...
... def principalInfo(self, id):
... pass # plugin not currently supporting search
As with the credentials plugin, the authenticator plugin must be registered
as a named utility:
>>> myAuthenticatorPlugin = MyAuthenticatorPlugin()
>>> provideUtility(myAuthenticatorPlugin, name='My Authenticator Plugin')
Configuring a PAU
-----------------
Finally, we'll create the PAU itself:
>>> from zope.pluggableauth import authentication
>>> pau = authentication.PluggableAuthentication('xyz_')
and configure it with the two plugins:
>>> pau.credentialsPlugins = ('My Credentials Plugin', )
>>> pau.authenticatorPlugins = ('My Authenticator Plugin', )
Using the PAU to Authenticate
-----------------------------
>>> from zope.pluggableauth.factories import AuthenticatedPrincipalFactory
>>> provideAdapter(AuthenticatedPrincipalFactory)
We can now use the PAU to authenticate a sample request:
>>> from zope.publisher.browser import TestRequest
>>> print(pau.authenticate(TestRequest()))
None
In this case, we cannot authenticate an empty request. In the same way, we
will not be able to authenticate a request with the wrong credentials:
>>> print(pau.authenticate(TestRequest(credentials='let me in!')))
None
However, if we provide the proper credentials:
>>> request = TestRequest(credentials='secretcode')
>>> principal = pau.authenticate(request)
>>> principal
Principal('xyz_bob')
we get an authenticated principal.
Multiple Authenticator Plugins
------------------------------
The PAU works with multiple authenticator plugins. It uses each plugin, in the
order specified in the PAU's authenticatorPlugins attribute, to authenticate
a set of credentials.
To illustrate, we'll create another authenticator:
>>> class MyAuthenticatorPlugin2(MyAuthenticatorPlugin):
...
... def authenticateCredentials(self, credentials):
... if credentials == 'secretcode':
... return PrincipalInfo('black', 'Black Spy', '')
... elif credentials == 'hiddenkey':
... return PrincipalInfo('white', 'White Spy', '')
>>> provideUtility(MyAuthenticatorPlugin2(), name='My Authenticator Plugin 2')
If we put it before the original authenticator:
>>> pau.authenticatorPlugins = (
... 'My Authenticator Plugin 2',
... 'My Authenticator Plugin')
Then it will be given the first opportunity to authenticate a request:
>>> pau.authenticate(TestRequest(credentials='secretcode'))
Principal('xyz_black')
If neither plugins can authenticate, pau returns None:
>>> print(pau.authenticate(TestRequest(credentials='let me in!!')))
None
When we change the order of the authenticator plugins:
>>> pau.authenticatorPlugins = (
... 'My Authenticator Plugin',
... 'My Authenticator Plugin 2')
we see that our original plugin is now acting first:
>>> pau.authenticate(TestRequest(credentials='secretcode'))
Principal('xyz_bob')
The second plugin, however, gets a chance to authenticate if first does not:
>>> pau.authenticate(TestRequest(credentials='hiddenkey'))
Principal('xyz_white')
Multiple Credentials Plugins
----------------------------
As with authenticators, we can specify multiple credentials plugins. To
illustrate, we'll create a credentials plugin that extracts credentials from
a request form:
>>> @interface.implementer(interfaces.ICredentialsPlugin)
... class FormCredentialsPlugin:
...
... def extractCredentials(self, request):
... return request.form.get('my_credentials')
...
... def challenge(self, request):
... pass
...
... def logout(request):
... pass
>>> provideUtility(FormCredentialsPlugin(),
... name='Form Credentials Plugin')
and insert the new credentials plugin before the existing plugin:
>>> pau.credentialsPlugins = (
... 'Form Credentials Plugin',
... 'My Credentials Plugin')
The PAU will try each plugin, in order, to obtain credentials from a
request:
>>> pau.authenticate(TestRequest(credentials='secretcode',
... form={'my_credentials': 'hiddenkey'}))
Principal('xyz_white')
In this case, the first credentials plugin succeeded in getting credentials
from the form and the second authenticator was able to authenticate the
credentials. Specifically, the PAU went through these steps:
- Get credentials using 'Form Credentials Plugin'
- Got 'hiddenkey' credentials using 'Form Credentials Plugin', try to
authenticate using 'My Authenticator Plugin'
- Failed to authenticate 'hiddenkey' with 'My Authenticator Plugin', try
'My Authenticator Plugin 2'
- Succeeded in authenticating with 'My Authenticator Plugin 2'
Let's try a different scenario:
>>> pau.authenticate(TestRequest(credentials='secretcode'))
Principal('xyz_bob')
In this case, the PAU went through these steps:
- Get credentials using 'Form Credentials Plugin'
- Failed to get credentials using 'Form Credentials Plugin', try
'My Credentials Plugin'
- Got 'secretcode' credentials using 'My Credentials Plugin', try to
authenticate using 'My Authenticator Plugin'
- Succeeded in authenticating with 'My Authenticator Plugin'
Let's try a slightly more complex scenario:
>>> pau.authenticate(TestRequest(credentials='hiddenkey',
... form={'my_credentials': 'bogusvalue'}))
Principal('xyz_white')
This highlights PAU's ability to use multiple plugins for authentication:
- Get credentials using 'Form Credentials Plugin'
- Got 'bogusvalue' credentials using 'Form Credentials Plugin', try to
authenticate using 'My Authenticator Plugin'
- Failed to authenticate 'bogusvalue' with 'My Authenticator Plugin', try
  'My Authenticator Plugin 2'
- Failed to authenticate 'bogusvalue' with 'My Authenticator Plugin 2' --
there are no more authenticators to try, so lets try the next credentials
plugin for some new credentials
- Get credentials using 'My Credentials Plugin'
- Got 'hiddenkey' credentials using 'My Credentials Plugin', try to
authenticate using 'My Authenticator Plugin'
- Failed to authenticate 'hiddenkey' using 'My Authenticator Plugin', try
'My Authenticator Plugin 2'
- Succeeded in authenticating with 'My Authenticator Plugin 2' (shouts and
cheers!)
Finding Principals with Multiple Authenticator Plugins
------------------------------------------------------
As with the other operations we've seen, the PAU uses multiple plugins to
find a principal. If the first authenticator plugin can't find the requested
principal, the next plugin is used, and so on.
>>> @interface.implementer(interfaces.IAuthenticatorPlugin)
... class AnotherAuthenticatorPlugin:
...
... def __init__(self):
... self.infos = {}
... self.ids = {}
...
... def principalInfo(self, id):
... return self.infos.get(id)
...
... def authenticateCredentials(self, credentials):
... id = self.ids.get(credentials)
... if id is not None:
... return self.infos[id]
...
... def add(self, id, title, description, credentials):
... self.infos[id] = PrincipalInfo(id, title, description)
... self.ids[credentials] = id
To illustrate, we'll create and register two authenticators:
>>> authenticator1 = AnotherAuthenticatorPlugin()
>>> provideUtility(authenticator1, name='Authentication Plugin 1')
>>> authenticator2 = AnotherAuthenticatorPlugin()
>>> provideUtility(authenticator2, name='Authentication Plugin 2')
and add a principal to them:
>>> authenticator1.add('bob', 'Bob', 'A nice guy', 'b0b')
>>> authenticator1.add('white', 'White Spy', 'Sneaky', 'deathtoblack')
>>> authenticator2.add('black', 'Black Spy', 'Also sneaky', 'deathtowhite')
When we configure the PAU to use both searchable authenticators (note the
order):
>>> pau.authenticatorPlugins = (
... 'Authentication Plugin 2',
... 'Authentication Plugin 1')
we register the factories for our principals:
>>> from zope.pluggableauth.factories import FoundPrincipalFactory
>>> provideAdapter(FoundPrincipalFactory)
we see how the PAU uses both plugins:
>>> pau.getPrincipal('xyz_white')
Principal('xyz_white')
>>> pau.getPrincipal('xyz_black')
Principal('xyz_black')
If more than one plugin knows about the same principal ID, the first plugin is
used and the remaining are not delegated to. To illustrate, we'll add
another principal with the same ID as an existing principal:
>>> authenticator2.add('white', 'White Rider', '', 'r1der')
>>> pau.getPrincipal('xyz_white').title
'White Rider'
If we change the order of the plugins:
>>> pau.authenticatorPlugins = (
... 'Authentication Plugin 1',
... 'Authentication Plugin 2')
we get a different principal for ID 'white':
>>> pau.getPrincipal('xyz_white').title
'White Spy'
Issuing a Challenge
===================
Part of PAU's IAuthentication contract is to challenge the user for
credentials when its 'unauthorized' method is called. The need for this
functionality is driven by the following use case:
- A user attempts to perform an operation he is not authorized to perform.
- A handler responds to the unauthorized error by calling IAuthentication
'unauthorized'.
- The authentication component (in our case, a PAU) issues a challenge to
the user to collect new credentials (typically in the form of logging in
as a new user).
The PAU handles the credentials challenge by delegating to its credentials
plugins.
Currently, the PAU is configured with credentials plugins that don't
perform any action when asked to challenge (see the 'challenge' methods above).
To illustrate challenges, we'll subclass an existing credentials plugin and
do something in its 'challenge':
>>> class LoginFormCredentialsPlugin(FormCredentialsPlugin):
...
... def __init__(self, loginForm):
... self.loginForm = loginForm
...
... def challenge(self, request):
... request.response.redirect(self.loginForm)
... return True
This plugin handles a challenge by redirecting the response to a login form.
It returns True to signal to the PAU that it handled the challenge.
We will now create and register a couple of these plugins:
>>> provideUtility(LoginFormCredentialsPlugin('simplelogin.html'),
... name='Simple Login Form Plugin')
>>> provideUtility(LoginFormCredentialsPlugin('advancedlogin.html'),
... name='Advanced Login Form Plugin')
and configure the PAU to use them:
>>> pau.credentialsPlugins = (
... 'Simple Login Form Plugin',
... 'Advanced Login Form Plugin')
Now when we call 'unauthorized' on the PAU:
>>> request = TestRequest()
>>> pau.unauthorized(id=None, request=request)
we see that the user is redirected to the simple login form:
>>> request.response.getStatus()
302
>>> request.response.getHeader('location')
'simplelogin.html'
We can change the challenge policy by reordering the plugins:
>>> pau.credentialsPlugins = (
... 'Advanced Login Form Plugin',
... 'Simple Login Form Plugin')
Now when we call 'unauthorized':
>>> request = TestRequest()
>>> pau.unauthorized(id=None, request=request)
the advanced plugin is used because it's first:
>>> request.response.getStatus()
302
>>> request.response.getHeader('location')
'advancedlogin.html'
Challenge Protocols
-------------------
Sometimes, we want multiple challengers to work together. For example, the
HTTP specification allows multiple challenges to be issued in a response. A
challenge plugin can provide a `challengeProtocol` attribute that effectively
groups related plugins together for challenging. If a plugin returns `True`
from its challenge and provides a non-None challengeProtocol, subsequent
plugins in the credentialsPlugins list that have the same challenge protocol
will also be used to challenge.
Without a challengeProtocol, only the first plugin to succeed in a challenge
will be used.
Let's look at an example. We'll define a new plugin that specifies an
'X-Challenge' protocol:
>>> class XChallengeCredentialsPlugin(FormCredentialsPlugin):
...
... challengeProtocol = 'X-Challenge'
...
... def __init__(self, challengeValue):
... self.challengeValue = challengeValue
...
... def challenge(self, request):
... value = self.challengeValue
... existing = request.response.getHeader('X-Challenge', '')
... if existing:
... value += ' ' + existing
... request.response.setHeader('X-Challenge', value)
... return True
and register a couple instances as utilities:
>>> provideUtility(XChallengeCredentialsPlugin('basic'),
... name='Basic X-Challenge Plugin')
>>> provideUtility(XChallengeCredentialsPlugin('advanced'),
... name='Advanced X-Challenge Plugin')
When we use both plugins with the PAU:
>>> pau.credentialsPlugins = (
... 'Basic X-Challenge Plugin',
... 'Advanced X-Challenge Plugin')
and call 'unauthorized':
>>> request = TestRequest()
>>> pau.unauthorized(None, request)
we see that both plugins participate in the challenge, rather than just the
first plugin:
>>> request.response.getHeader('X-Challenge')
'advanced basic'
Pluggable-Authentication Prefixes
=================================
Principal ids are required to be unique system wide. Plugins will often provide
options for providing id prefixes, so that different sets of plugins provide
unique ids within a PAU. If there are multiple pluggable-authentication
utilities in a system, it's a good idea to give each PAU a unique prefix, so
that principal ids from different PAUs don't conflict. We can provide a prefix
when a PAU is created:
>>> pau = authentication.PluggableAuthentication('mypau_')
>>> pau.credentialsPlugins = ('My Credentials Plugin', )
>>> pau.authenticatorPlugins = ('My Authenticator Plugin', )
When we create a request and try to authenticate:
>>> pau.authenticate(TestRequest(credentials='secretcode'))
Principal('mypau_bob')
Note that now, our principal's id has the pluggable-authentication
utility prefix.
We can still look up a principal, as long as we supply the prefix::
  >> pau.getPrincipal('mypau_bob')
  Principal('mypau_bob')
| zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/README.rst | README.rst |
"""Pluggable Authentication Utility Interfaces
"""
__docformat__ = "reStructuredText"
import zope.interface
import zope.schema
import zope.security.interfaces
from zope.authentication.interfaces import ILogout
from zope.container.constraints import containers
from zope.container.constraints import contains
from zope.container.interfaces import IContainer
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zope')
class IPlugin(zope.interface.Interface):
"""A plugin for a pluggable authentication component."""
class IPluggableAuthentication(ILogout, IContainer):
"""Provides authentication services with the help of various plugins.
IPluggableAuthentication implementations will also implement
zope.authentication.interfaces.IAuthentication. The `authenticate` method
of this interface in an IPluggableAuthentication should annotate the
IPrincipalInfo with the credentials plugin and authentication plugin used.
The `getPrincipal` method should annotate the IPrincipalInfo with the
authentication plugin used.
"""
contains(IPlugin)
credentialsPlugins = zope.schema.List(
title=_('Credentials Plugins'),
description=_("""Used for extracting credentials.
Names may be of ids of non-utility ICredentialsPlugins contained in
the IPluggableAuthentication, or names of registered
ICredentialsPlugins utilities. Contained non-utility ids mask
utility names."""),
value_type=zope.schema.Choice(vocabulary='CredentialsPlugins'),
default=[],
)
authenticatorPlugins = zope.schema.List(
title=_('Authenticator Plugins'),
description=_("""Used for converting credentials to principals.
Names may be of ids of non-utility IAuthenticatorPlugins contained in
the IPluggableAuthentication, or names of registered
IAuthenticatorPlugins utilities. Contained non-utility ids mask
utility names."""),
value_type=zope.schema.Choice(vocabulary='AuthenticatorPlugins'),
default=[],
)
def getCredentialsPlugins():
"""Return iterable of (plugin name, actual credentials plugin) pairs.
Looks up names in credentialsPlugins as contained ids of non-utility
ICredentialsPlugins first, then as registered ICredentialsPlugin
utilities. Names that do not resolve are ignored."""
def getAuthenticatorPlugins():
"""Return iterable of (plugin name, actual authenticator plugin) pairs.
Looks up names in authenticatorPlugins as contained ids of non-utility
IAuthenticatorPlugins first, then as registered IAuthenticatorPlugin
utilities. Names that do not resolve are ignored."""
prefix = zope.schema.TextLine(
title=_('Prefix'),
default='',
required=True,
readonly=True,
)
def logout(request):
"""Performs a logout by delegating to its authenticator plugins."""
class ICredentialsPlugin(IPlugin):
"""Handles credentials extraction and challenges per request."""
containers(IPluggableAuthentication)
challengeProtocol = zope.interface.Attribute(
"""A challenge protocol used by the plugin.
If a credentials plugin works with other credentials pluggins, it
and the other cooperating plugins should specify a common (non-None)
protocol. If a plugin returns True from its challenge method, then
other credentials plugins will be called only if they have the same
protocol.
""")
def extractCredentials(request):
"""Ties to extract credentials from a request.
A return value of None indicates that no credentials could be found.
Any other return value is treated as valid credentials.
"""
def challenge(request):
"""Possibly issues a challenge.
This is typically done in a protocol-specific way.
If a challenge was issued, return True, otherwise return False.
"""
def logout(request):
"""Possibly logout.
If a logout was performed, return True, otherwise return False.
"""
class IAuthenticatorPlugin(IPlugin):
"""Authenticates a principal using credentials.
An authenticator may also be responsible for providing information
about and creating principals.
"""
containers(IPluggableAuthentication)
def authenticateCredentials(credentials):
"""Authenticates credentials.
If the credentials can be authenticated, return an object that provides
IPrincipalInfo. If the plugin cannot authenticate the credentials,
returns None.
"""
def principalInfo(id):
"""Returns an IPrincipalInfo object for the specified principal id.
If the plugin cannot find information for the id, returns None.
"""
class IPrincipalInfo(zope.interface.Interface):
"""Minimal information about a principal."""
id = zope.interface.Attribute("The principal id.")
title = zope.interface.Attribute("The principal title.")
description = zope.interface.Attribute("A description of the principal.")
credentialsPlugin = zope.interface.Attribute(
"""Plugin used to generate the credentials for this principal info.
Optional. Should be set in IPluggableAuthentication.authenticate.
""")
authenticatorPlugin = zope.interface.Attribute(
"""Plugin used to authenticate the credentials for this principal info.
Optional. Should be set in IPluggableAuthentication.authenticate and
IPluggableAuthentication.getPrincipal.
""")
class IPrincipalFactory(zope.interface.Interface):
"""A principal factory."""
def __call__(authentication):
"""Creates a principal.
The authentication utility that called the factory is passed
and should be included in the principal-created event.
"""
class IFoundPrincipalFactory(IPrincipalFactory):
"""A found principal factory."""
class IAuthenticatedPrincipalFactory(IPrincipalFactory):
"""An authenticated principal factory."""
class IPrincipalCreated(zope.interface.Interface):
"""A principal has been created."""
principal = zope.interface.Attribute("The principal that was created")
authentication = zope.interface.Attribute(
"The authentication utility that created the principal")
info = zope.interface.Attribute("An object providing IPrincipalInfo.")
class IAuthenticatedPrincipalCreated(IPrincipalCreated):
"""A principal has been created by way of an authentication operation."""
request = zope.interface.Attribute(
"The request the user was authenticated against")
@zope.interface.implementer(IAuthenticatedPrincipalCreated)
class AuthenticatedPrincipalCreated:
"""
>>> from zope.interface.verify import verifyObject
>>> event = AuthenticatedPrincipalCreated("authentication", "principal",
... "info", "request")
>>> verifyObject(IAuthenticatedPrincipalCreated, event)
True
"""
def __init__(self, authentication, principal, info, request):
self.authentication = authentication
self.principal = principal
self.info = info
self.request = request
class IFoundPrincipalCreated(IPrincipalCreated):
"""A principal has been created by way of a search operation."""
@zope.interface.implementer(IFoundPrincipalCreated)
class FoundPrincipalCreated:
"""
>>> from zope.interface.verify import verifyObject
>>> event = FoundPrincipalCreated("authentication", "principal",
... "info")
>>> verifyObject(IFoundPrincipalCreated, event)
True
"""
def __init__(self, authentication, principal, info):
self.authentication = authentication
self.principal = principal
self.info = info
class IQueriableAuthenticator(zope.interface.Interface):
"""Indicates the authenticator provides a search UI for principals."""
class IPrincipal(zope.security.interfaces.IGroupClosureAwarePrincipal):
groups = zope.schema.List(
title=_("Groups"),
description=_(
"""ids of groups to which the principal directly belongs.
Plugins may append to this list. Mutating the list only affects
the life of the principal object, and does not persist (so
persistently adding groups to a principal should be done by working
with a plugin that mutates this list every time the principal is
created, like the group folder in this package.)
"""),
value_type=zope.schema.TextLine(),
required=False)
class IQuerySchemaSearch(zope.interface.Interface):
"""An interface for searching using schema-constrained input."""
schema = zope.interface.Attribute("""
The schema that constrains the input provided to the search method.
A mapping of name/value pairs for each field in this schema is used
as the query argument in the search method.
""")
def search(query, start=None, batch_size=None):
"""Returns an iteration of principal IDs matching the query.
query is a mapping of name/value pairs for fields specified by the
schema.
If the start argument is provided, then it should be an
integer and the given number of initial items should be
skipped.
If the batch_size argument is provided, then it should be an
integer and no more than the given number of items should be
returned.
"""
class IGroupAdded(zope.interface.Interface):
"""A group has been added."""
group = zope.interface.Attribute("""The group that was defined""")
class IPrincipalsAddedToGroup(zope.interface.Interface):
group_id = zope.interface.Attribute(
'the id of the group to which the principal was added')
principal_ids = zope.interface.Attribute(
'an iterable of one or more ids of principals added')
class IPrincipalsRemovedFromGroup(zope.interface.Interface):
group_id = zope.interface.Attribute(
'the id of the group from which the principal was removed')
principal_ids = zope.interface.Attribute(
'an iterable of one or more ids of principals removed') | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/interfaces.py | interfaces.py |
"""Pluggable Authentication Utility implementation
"""
from zope.authentication.interfaces import IAuthentication
from zope.authentication.interfaces import PrincipalLookupError
from zope.component import queryNextUtility
from zope.container.btree import BTreeContainer
from zope.interface import implementer
from zope.schema.interfaces import ISourceQueriables
from zope import component
from zope.pluggableauth import interfaces
@implementer(
IAuthentication,
interfaces.IPluggableAuthentication,
ISourceQueriables)
class PluggableAuthentication(BTreeContainer):
authenticatorPlugins = ()
credentialsPlugins = ()
def __init__(self, prefix=''):
super().__init__()
self.prefix = prefix
def _plugins(self, names, interface):
for name in names:
plugin = self.get(name)
if not interface.providedBy(plugin):
plugin = component.queryUtility(interface, name, context=self)
if plugin is not None:
yield name, plugin
def getAuthenticatorPlugins(self):
return self._plugins(
self.authenticatorPlugins, interfaces.IAuthenticatorPlugin)
def getCredentialsPlugins(self):
return self._plugins(
self.credentialsPlugins, interfaces.ICredentialsPlugin)
def authenticate(self, request):
authenticatorPlugins = [p for n, p in self.getAuthenticatorPlugins()]
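        # Try each credentials plugin in the configured order; for every set
        # of credentials extracted, give each authenticator plugin a chance
        # to turn the credentials into a principal.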
for name, credplugin in self.getCredentialsPlugins():
credentials = credplugin.extractCredentials(request)
for authplugin in authenticatorPlugins:
if authplugin is None:
continue
info = authplugin.authenticateCredentials(credentials)
if info is None:
continue
info.credentialsPlugin = credplugin
info.authenticatorPlugin = authplugin
principal = component.getMultiAdapter(
(info, request),
interfaces.IAuthenticatedPrincipalFactory)(self)
principal.id = self.prefix + info.id
return principal
return None
def getPrincipal(self, id):
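        # An id that does not carry our prefix belongs to another
        # authentication utility; delegate the lookup up the chain.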
if not id.startswith(self.prefix):
next = queryNextUtility(self, IAuthentication)
if next is None:
raise PrincipalLookupError(id)
return next.getPrincipal(id)
id = id[len(self.prefix):]
for name, authplugin in self.getAuthenticatorPlugins():
info = authplugin.principalInfo(id)
if info is None:
continue
info.credentialsPlugin = None
info.authenticatorPlugin = authplugin
principal = interfaces.IFoundPrincipalFactory(info)(self)
principal.id = self.prefix + info.id
return principal
next = queryNextUtility(self, IAuthentication)
if next is not None:
return next.getPrincipal(self.prefix + id)
raise PrincipalLookupError(id)
def getQueriables(self):
for name, authplugin in self.getAuthenticatorPlugins():
queriable = component.queryMultiAdapter(
(authplugin, self), interfaces.IQueriableAuthenticator)
if queriable is not None:
yield name, queriable
def unauthenticatedPrincipal(self):
return None
def unauthorized(self, id, request):
challengeProtocol = None
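        # A plugin that issues a challenge and declares a challengeProtocol
        # lets later plugins with the same protocol challenge as well; a
        # successful plugin without a protocol ends the challenge here.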
for name, credplugin in self.getCredentialsPlugins():
protocol = getattr(credplugin, 'challengeProtocol', None)
if challengeProtocol is None or protocol == challengeProtocol:
if credplugin.challenge(request):
if protocol is None:
return
elif challengeProtocol is None:
challengeProtocol = protocol
if challengeProtocol is None:
next = queryNextUtility(self, IAuthentication)
if next is not None:
next.unauthorized(id, request)
def logout(self, request):
challengeProtocol = None
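        # Logout follows the same protocol-grouping rules as unauthorized().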
for name, credplugin in self.getCredentialsPlugins():
protocol = getattr(credplugin, 'challengeProtocol', None)
if challengeProtocol is None or protocol == challengeProtocol:
if credplugin.logout(request):
if protocol is None:
return
elif challengeProtocol is None:
challengeProtocol = protocol
if challengeProtocol is None:
next = queryNextUtility(self, IAuthentication)
if next is not None:
next.logout(request) | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/authentication.py | authentication.py |
"""Principals related factories
"""
__docformat__ = "reStructuredText"
from zope.authentication.interfaces import IAuthentication
from zope.event import notify
from zope.publisher.interfaces import IRequest
from zope.security.interfaces import IGroupClosureAwarePrincipal as IPrincipal
from zope import component
from zope import interface
from zope.pluggableauth import interfaces
@interface.implementer(interfaces.IPrincipalInfo)
class PrincipalInfo:
"""An implementation of IPrincipalInfo used by the principal folder.
A principal info is created with id, login, title, and description:
>>> info = PrincipalInfo('users.foo', 'foo', 'Foo', 'An over-used term.')
>>> info
PrincipalInfo('users.foo')
>>> info.id
'users.foo'
>>> info.login
'foo'
>>> info.title
'Foo'
>>> info.description
'An over-used term.'
"""
def __init__(self, id, login, title, description):
self.id = id
self.login = login
self.title = title
self.description = description
def __repr__(self):
return 'PrincipalInfo(%r)' % self.id
@interface.implementer(IPrincipal)
class Principal:
"""A group-aware implementation of zope.security.interfaces.IPrincipal.
A principal is created with an ID:
>>> p = Principal(1)
>>> p
Principal(1)
>>> p.id
1
title and description may also be provided:
>>> p = Principal('george', 'George', 'A site member.')
>>> p
Principal('george')
>>> p.id
'george'
>>> p.title
'George'
>>> p.description
'A site member.'
The `groups` is a simple list, filled in by plugins.
>>> p.groups
[]
The `allGroups` attribute is a readonly iterable of the full closure of the
groups in the `groups` attribute--that is, if the principal is a direct
member of the 'Administrators' group, and the 'Administrators' group is
a member of the 'Reviewers' group, then p.groups would be
['Administrators'] and list(p.allGroups) would be
['Administrators', 'Reviewers'].
To illustrate this, we'll need to set up a dummy authentication utility,
and a few principals. Our main principal will also gain some groups, as if
plugins had added the groups to the list. This is all setup--skip to the
next block to actually see `allGroups` in action.
>>> p.groups.extend(
... ['content_administrators', 'zope_3_project',
... 'list_administrators', 'zpug'])
>>> editor = Principal('editors', 'Content Editors')
>>> creator = Principal('creators', 'Content Creators')
>>> reviewer = Principal('reviewers', 'Content Reviewers')
>>> reviewer.groups.extend(['editors', 'creators'])
>>> usermanager = Principal('user_managers', 'User Managers')
>>> contentAdmin = Principal(
... 'content_administrators', 'Content Administrators')
>>> contentAdmin.groups.extend(['reviewers', 'user_managers'])
>>> zope3Dev = Principal('zope_3_project', 'Zope 3 Developer')
>>> zope3ListAdmin = Principal(
... 'zope_3_list_admin', 'Zope 3 List Administrators')
>>> zope3ListAdmin.groups.append('zope_3_project') # duplicate, but
... # should only appear in allGroups once
>>> listAdmin = Principal('list_administrators', 'List Administrators')
>>> listAdmin.groups.append('zope_3_list_admin')
>>> zpugMember = Principal('zpug', 'ZPUG Member')
>>> martians = Principal('martians', 'Martians') # not in p's allGroups
>>> group_data = dict((p.id, p) for p in (
... editor, creator, reviewer, usermanager, contentAdmin,
... zope3Dev, zope3ListAdmin, listAdmin, zpugMember, martians))
>>> @interface.implementer(IAuthentication)
... class DemoAuth(object):
... def getPrincipal(self, id):
... return group_data[id]
...
>>> demoAuth = DemoAuth()
>>> component.provideUtility(demoAuth)
Now, we have a user with the following groups (lowest level are p's direct
groups, and lines show membership):
editors creators
\\-----//
|| zope_3_project (duplicate)
reviewers user_managers ||
\\--------// zope_3_list_admin
|| ||
content_administrators zope_3_project list_administrators zpug
The allGroups value includes all of the shown groups, and with
'zope_3_project' only appearing once.
>>> p.groups # doctest: +NORMALIZE_WHITESPACE
['content_administrators', 'zope_3_project', 'list_administrators',
'zpug']
>>> list(p.allGroups) # doctest: +NORMALIZE_WHITESPACE
['content_administrators', 'reviewers', 'editors', 'creators',
'user_managers', 'zope_3_project', 'list_administrators',
'zope_3_list_admin', 'zpug']
"""
def __init__(self, id, title='', description=''):
self.id = id
self.title = title
self.description = description
self.groups = []
def __repr__(self):
return 'Principal(%r)' % self.id
@property
def allGroups(self):
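        # Depth-first walk over the group graph using an explicit stack of
        # iterators; each group id is yielded only once, even when it is
        # reachable through several membership paths.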
if self.groups:
seen = set()
principals = component.getUtility(IAuthentication)
stack = [iter(self.groups)]
while stack:
try:
group_id = next(stack[-1])
except StopIteration:
stack.pop()
else:
if group_id not in seen:
yield group_id
seen.add(group_id)
group = principals.getPrincipal(group_id)
stack.append(iter(group.groups))
@component.adapter(interfaces.IPrincipalInfo, IRequest)
@interface.implementer(interfaces.IAuthenticatedPrincipalFactory)
class AuthenticatedPrincipalFactory:
"""Creates 'authenticated' principals.
An authenticated principal is created as a result of an authentication
operation.
To use the factory, create it with the info (interfaces.IPrincipalInfo) of
the principal to create and a request:
>>> info = PrincipalInfo('users.mary', 'mary', 'Mary', 'The site admin.')
>>> from zope.publisher.base import TestRequest
>>> request = TestRequest('/')
>>> factory = AuthenticatedPrincipalFactory(info, request)
The factory must be called with a pluggable-authentication object:
>>> class Auth:
... prefix = 'auth.'
>>> auth = Auth()
>>> principal = factory(auth)
The factory uses the pluggable authentication and the info to
create a principal with the same ID, title, and description:
>>> principal.id
'auth.users.mary'
>>> principal.title
'Mary'
>>> principal.description
'The site admin.'
It also fires an AuthenticatedPrincipalCreatedEvent:
>>> from zope.component.eventtesting import getEvents
>>> [event] = getEvents(interfaces.IAuthenticatedPrincipalCreated)
>>> event.principal is principal, event.authentication is auth
(True, True)
>>> event.info
PrincipalInfo('users.mary')
>>> event.request is request
True
Listeners can subscribe to this event to perform additional operations
when the authenticated principal is created.
For information on how factories are used in the authentication process,
see README.txt.
"""
def __init__(self, info, request):
self.info = info
self.request = request
def __call__(self, authentication):
principal = Principal(authentication.prefix + self.info.id,
self.info.title,
self.info.description)
notify(interfaces.AuthenticatedPrincipalCreated(
authentication, principal, self.info, self.request))
return principal
@component.adapter(interfaces.IPrincipalInfo)
@interface.implementer(interfaces.IFoundPrincipalFactory)
class FoundPrincipalFactory:
"""Creates 'found' principals.
A 'found' principal is created as a result of a principal lookup.
To use the factory, create it with the info (interfaces.IPrincipalInfo) of
the principal to create:
>>> info = PrincipalInfo('users.sam', 'sam', 'Sam', 'A site user.')
>>> factory = FoundPrincipalFactory(info)
The factory must be called with a pluggable-authentication object:
>>> class Auth:
... prefix = 'auth.'
>>> auth = Auth()
>>> principal = factory(auth)
The factory uses the pluggable-authentication object and the info
to create a principal with the same ID, title, and description:
>>> principal.id
'auth.users.sam'
>>> principal.title
'Sam'
>>> principal.description
'A site user.'
It also fires a FoundPrincipalCreatedEvent:
>>> from zope.component.eventtesting import getEvents
>>> [event] = getEvents(interfaces.IFoundPrincipalCreated)
>>> event.principal is principal, event.authentication is auth
(True, True)
>>> event.info
PrincipalInfo('users.sam')
Listeners can subscribe to this event to perform additional operations
when the 'found' principal is created.
For information on how factories are used in the authentication process,
see README.txt.
"""
def __init__(self, info):
self.info = info
def __call__(self, authentication):
principal = Principal(authentication.prefix + self.info.id,
self.info.title,
self.info.description)
notify(interfaces.FoundPrincipalCreated(authentication,
principal, self.info))
return principal | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/factories.py | factories.py |
__docformat__ = "reStructuredText"
from persistent import Persistent
from zope.component import getUtility
from zope.container.btree import BTreeContainer
from zope.container.constraints import containers
from zope.container.constraints import contains
from zope.container.contained import Contained
from zope.container.interfaces import DuplicateIDError
from zope.i18nmessageid import MessageFactory
from zope.interface import Interface
from zope.interface import implementer
from zope.password.interfaces import IPasswordManager
from zope.schema import Choice
from zope.schema import Password
from zope.schema import Text
from zope.schema import TextLine
from zope.pluggableauth.factories import PrincipalInfo
from zope.pluggableauth.interfaces import IAuthenticatorPlugin
from zope.pluggableauth.interfaces import IQuerySchemaSearch
_ = MessageFactory('zope')
class IInternalPrincipal(Interface):
"""Principal information"""
login = TextLine(
title=_("Login"),
description=_("The Login/Username of the principal. "
"This value can change."))
def setPassword(password, passwordManagerName=None):
pass
password = Password(
title=_("Password"),
description=_("The password for the principal."))
passwordManagerName = Choice(
title=_("Password Manager"),
vocabulary="Password Manager Names",
description=_("The password manager will be used"
" for encode/check the password"),
default="SSHA",
# TODO: The password manager name may be changed only
# if the password changed
readonly=True
)
title = TextLine(
title=_("Title"),
description=_("Provides a title for the principal."))
description = Text(
title=_("Description"),
description=_("Provides a description for the principal."),
required=False,
missing_value='',
default='')
class IInternalPrincipalContainer(Interface):
"""A container that contains internal principals."""
prefix = TextLine(
title=_("Prefix"),
description=_(
"Prefix to be added to all principal ids to assure "
"that all ids are unique within the authentication service"),
missing_value="",
default='',
readonly=True)
def getIdByLogin(login):
"""Return the principal id currently associated with login.
The return value includes the container prefix, but does not
include the PAU prefix.
KeyError is raised if no principal is associated with login.
"""
contains(IInternalPrincipal)
class IInternalPrincipalContained(Interface):
"""Principal information"""
containers(IInternalPrincipalContainer)
class ISearchSchema(Interface):
"""Search Interface for this Principal Provider"""
search = TextLine(
title=_("Search String"),
description=_("A Search String"),
required=False,
default='',
missing_value='')
@implementer(IInternalPrincipal, IInternalPrincipalContained)
class InternalPrincipal(Persistent, Contained):
"""An internal principal for Persistent Principal Folder."""
# If you're searching for self._passwordManagerName, or self._password
# probably you just need to evolve the database to new generation
# at /++etc++process/@@generations.html
# NOTE: All changes needs to be synchronized with the evolver at
# zope.app.zopeappgenerations.evolve2
def __init__(self, login, password, title, description='',
passwordManagerName="SSHA"):
self._login = login
self._passwordManagerName = passwordManagerName
self.password = password
self.title = title
self.description = description
def getPasswordManagerName(self):
return self._passwordManagerName
passwordManagerName = property(getPasswordManagerName)
def _getPasswordManager(self):
return getUtility(IPasswordManager, self.passwordManagerName)
def getPassword(self):
return self._password
def setPassword(self, password, passwordManagerName=None):
if passwordManagerName is not None:
self._passwordManagerName = passwordManagerName
passwordManager = self._getPasswordManager()
self._password = passwordManager.encodePassword(password)
password = property(getPassword, setPassword)
def checkPassword(self, password):
passwordManager = self._getPasswordManager()
return passwordManager.checkPassword(self.password, password)
def getLogin(self):
return self._login
def setLogin(self, login):
oldLogin = self._login
self._login = login
if self.__parent__ is not None:
try:
self.__parent__.notifyLoginChanged(oldLogin, self)
except ValueError:
self._login = oldLogin
raise
login = property(getLogin, setLogin)
@implementer(IAuthenticatorPlugin,
IQuerySchemaSearch,
IInternalPrincipalContainer)
class PrincipalFolder(BTreeContainer):
"""A Persistent Principal Folder and Authentication plugin.
See principalfolder.txt for details.
"""
schema = ISearchSchema
def __init__(self, prefix=''):
self.prefix = prefix
super().__init__()
self.__id_by_login = self._newContainerData()
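        # Secondary index mapping login -> contained item id, kept in sync
        # by __setitem__, __delitem__ and notifyLoginChanged.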
def notifyLoginChanged(self, oldLogin, principal):
"""Notify the Container about changed login of a principal.
We need this, so that our second tree can be kept up-to-date.
"""
# A user with the new login already exists
if principal.login in self.__id_by_login:
raise ValueError('Principal Login already taken!')
del self.__id_by_login[oldLogin]
self.__id_by_login[principal.login] = principal.__name__
def __setitem__(self, id, principal):
"""Add principal information.
Create a Principal Folder
>>> pf = PrincipalFolder()
Create a principal with 1 as id
Add a login attr since __setitem__ is in need of one
>>> from zope.pluggableauth.factories import Principal
>>> principal = Principal(1)
>>> principal.login = 1
Add the principal within the Principal Folder
>>> pf.__setitem__(u'1', principal)
Try to add another principal with the same id.
It should raise a DuplicateIDError
>>> try:
... pf.__setitem__(u'1', principal)
... except DuplicateIDError:
... pass
>>>
"""
# A user with the new login already exists
if principal.login in self.__id_by_login:
raise DuplicateIDError('Principal Login already taken!')
super().__setitem__(id, principal)
self.__id_by_login[principal.login] = id
def __delitem__(self, id):
"""Remove principal information."""
principal = self[id]
super().__delitem__(id)
del self.__id_by_login[principal.login]
def authenticateCredentials(self, credentials):
"""Return principal info if credentials can be authenticated
"""
if not isinstance(credentials, dict):
return None
if not ('login' in credentials and 'password' in credentials):
return None
id = self.__id_by_login.get(credentials['login'])
if id is None:
return None
internal = self[id]
if not internal.checkPassword(credentials["password"]):
return None
return PrincipalInfo(self.prefix + id, internal.login, internal.title,
internal.description)
def principalInfo(self, id):
if id.startswith(self.prefix):
internal = self.get(id[len(self.prefix):])
if internal is not None:
return PrincipalInfo(id, internal.login, internal.title,
internal.description)
def getIdByLogin(self, login):
return self.prefix + self.__id_by_login[login]
def search(self, query, start=None, batch_size=None):
"""Search through this principal provider."""
search = query.get('search')
if search is None:
return
search = search.lower()
n = 1
for i, value in enumerate(self.values()):
if (search in value.title.lower() or
search in value.description.lower() or
search in value.login.lower()):
if not ((start is not None and i < start)
or (batch_size is not None and n > batch_size)):
n += 1
yield self.prefix + value.__name__ | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/plugins/principalfolder.py | principalfolder.py |
===============
Group Folders
===============
Group folders provide support for groups information stored in the ZODB. They
are persistent, and must be contained within the PAUs that use them.
Like other principals, groups are created when they are needed.
Group folders contain group-information objects that contain group information.
We create group information using the `GroupInformation` class:
>>> import zope.pluggableauth.plugins.groupfolder
>>> g1 = zope.pluggableauth.plugins.groupfolder.GroupInformation("Group 1")
>>> groups = zope.pluggableauth.plugins.groupfolder.GroupFolder('group.')
>>> groups['g1'] = g1
Note that when group-info is added, a GroupAdded event is generated:
>>> from zope.pluggableauth import interfaces
>>> from zope.component.eventtesting import getEvents
>>> getEvents(interfaces.IGroupAdded)
[<GroupAdded 'group.g1'>]
Groups are defined with respect to an authentication service. Both the
groups themselves and the principals they contain must be accessible via
an authentication service.
To illustrate the group interaction with the authentication service,
we'll create a sample authentication service:
>>> from zope import interface
>>> from zope.authentication.interfaces import IAuthentication
>>> from zope.authentication.interfaces import PrincipalLookupError
>>> from zope.security.interfaces import IGroupAwarePrincipal
>>> from zope.pluggableauth.plugins.groupfolder import setGroupsForPrincipal
>>> @interface.implementer(IGroupAwarePrincipal)
... class Principal:
... def __init__(self, id, title='', description=''):
... self.id, self.title, self.description = id, title, description
... self.groups = []
>>> class PrincipalCreatedEvent:
... def __init__(self, authentication, principal):
... self.authentication = authentication
... self.principal = principal
>>> from zope.pluggableauth.plugins import principalfolder
>>> @interface.implementer(IAuthentication)
... class Principals:
... def __init__(self, groups, prefix='auth.'):
... self.prefix = prefix
... self.principals = {
... 'p1': principalfolder.PrincipalInfo('p1', '', '', ''),
... 'p2': principalfolder.PrincipalInfo('p2', '', '', ''),
... 'p3': principalfolder.PrincipalInfo('p3', '', '', ''),
... 'p4': principalfolder.PrincipalInfo('p4', '', '', ''),
... }
... self.groups = groups
... groups.__parent__ = self
...
... def getAuthenticatorPlugins(self):
... return [('principals', self.principals), ('groups', self.groups)]
...
... def getPrincipal(self, id):
... if not id.startswith(self.prefix):
... raise PrincipalLookupError(id)
... id = id[len(self.prefix):]
... info = self.principals.get(id)
... if info is None:
... info = self.groups.principalInfo(id)
... if info is None:
... raise PrincipalLookupError(id)
... principal = Principal(self.prefix+info.id,
... info.title, info.description)
... setGroupsForPrincipal(PrincipalCreatedEvent(self, principal))
... return principal
This class doesn't really implement the full `IAuthentication` interface, but
it implements the `getPrincipal` method used by groups. It works very much
like the pluggable authentication utility: it creates principals on demand and
calls `setGroupsForPrincipal`, which is normally called as an event subscriber,
when principals are created. In order for `setGroupsForPrincipal` to find our
group folder, we have to register it as a utility:
>>> from zope.pluggableauth.interfaces import IAuthenticatorPlugin
>>> from zope.component import provideUtility
>>> provideUtility(groups, IAuthenticatorPlugin)
We will create and register a new principals utility:
>>> principals = Principals(groups)
>>> provideUtility(principals, IAuthentication)
Now we can set the principals on the group:
>>> g1.principals = ['auth.p1', 'auth.p2']
>>> g1.principals
('auth.p1', 'auth.p2')
Adding principals fires an event.
>>> getEvents(interfaces.IPrincipalsAddedToGroup)[-1]
<PrincipalsAddedToGroup ['auth.p1', 'auth.p2'] 'auth.group.g1'>
We can now look up groups for the principals:
>>> groups.getGroupsForPrincipal('auth.p1')
('group.g1',)
Note that the group id is a concatenation of the group-folder prefix
and the name of the group-information object within the folder.
If we delete a group:
>>> del groups['g1']
then the groups folder loses the group information for that group's
principals:
>>> groups.getGroupsForPrincipal('auth.p1')
()
but the principal information on the group is unchanged:
>>> g1.principals
('auth.p1', 'auth.p2')
It also fires an event showing that the principals are removed from the group
(g1 is group information, not a zope.security.interfaces.IGroup).
>>> getEvents(interfaces.IPrincipalsRemovedFromGroup)[-1]
<PrincipalsRemovedFromGroup ['auth.p1', 'auth.p2'] 'auth.group.g1'>
Adding the group sets the folder principal information. Let's use a
different group name:
>>> groups['G1'] = g1
>>> groups.getGroupsForPrincipal('auth.p1')
('group.G1',)
Here we see that the new name is reflected in the group information.
An event is fired, as usual.
>>> getEvents(interfaces.IPrincipalsAddedToGroup)[-1]
<PrincipalsAddedToGroup ['auth.p1', 'auth.p2'] 'auth.group.G1'>
In terms of member events (principals added and removed from groups), we have
now seen that events are fired when a group information object is added and
when it is removed from a group folder; and we have seen that events are fired
when a principal is added to an already-registered group. Events are also
fired when a principal is removed from an already-registered group. Let's
quickly see some more examples.
>>> g1.principals = ('auth.p1', 'auth.p3', 'auth.p4')
>>> getEvents(interfaces.IPrincipalsAddedToGroup)[-1]
<PrincipalsAddedToGroup ['auth.p3', 'auth.p4'] 'auth.group.G1'>
>>> getEvents(interfaces.IPrincipalsRemovedFromGroup)[-1]
<PrincipalsRemovedFromGroup ['auth.p2'] 'auth.group.G1'>
>>> g1.principals = ('auth.p1', 'auth.p2')
>>> getEvents(interfaces.IPrincipalsAddedToGroup)[-1]
<PrincipalsAddedToGroup ['auth.p2'] 'auth.group.G1'>
>>> getEvents(interfaces.IPrincipalsRemovedFromGroup)[-1]
<PrincipalsRemovedFromGroup ['auth.p3', 'auth.p4'] 'auth.group.G1'>
Groups can contain groups:
>>> g2 = zope.pluggableauth.plugins.groupfolder.GroupInformation("Group Two")
>>> groups['G2'] = g2
>>> g2.principals = ['auth.group.G1']
>>> groups.getGroupsForPrincipal('auth.group.G1')
('group.G2',)
>>> old = getEvents(interfaces.IPrincipalsAddedToGroup)[-1]
>>> old
<PrincipalsAddedToGroup ['auth.group.G1'] 'auth.group.G2'>
Groups cannot contain cycles:
>>> g1.principals = ('auth.p1', 'auth.p2', 'auth.group.G2')
... # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
zope.pluggableauth.plugins.groupfolder.GroupCycle: ('auth.group.G2', ['auth.group.G2', 'auth.group.G1'])
Trying to do so does not fire an event.
>>> getEvents(interfaces.IPrincipalsAddedToGroup)[-1] is old
True
They need not be hierarchical:
>>> ga = zope.pluggableauth.plugins.groupfolder.GroupInformation("Group A")
>>> groups['GA'] = ga
>>> gb = zope.pluggableauth.plugins.groupfolder.GroupInformation("Group B")
>>> groups['GB'] = gb
>>> gb.principals = ['auth.group.GA']
>>> gc = zope.pluggableauth.plugins.groupfolder.GroupInformation("Group C")
>>> groups['GC'] = gc
>>> gc.principals = ['auth.group.GA']
>>> gd = zope.pluggableauth.plugins.groupfolder.GroupInformation("Group D")
>>> groups['GD'] = gd
>>> gd.principals = ['auth.group.GA', 'auth.group.GB']
>>> ga.principals = ['auth.p1']
Group folders provide a very simple search interface. They perform
simple string searches on group titles and descriptions.
>>> list(groups.search({'search': 'gro'})) # doctest: +NORMALIZE_WHITESPACE
['group.G1', 'group.G2',
'group.GA', 'group.GB', 'group.GC', 'group.GD']
>>> list(groups.search({'search': 'two'}))
['group.G2']
They also support batching:
>>> list(groups.search({'search': 'gro'}, 2, 3))
['group.GA', 'group.GB', 'group.GC']
If you don't supply a search key, no results will be returned:
>>> list(groups.search({}))
[]
Identifying groups
==================
The function, `setGroupsForPrincipal`, is a subscriber to
principal-creation events. It adds any group-folder-defined groups to
users in those groups:
>>> principal = principals.getPrincipal('auth.p1')
>>> principal.groups
['auth.group.G1', 'auth.group.GA']
Of course, this applies to groups too:
>>> principal = principals.getPrincipal('auth.group.G1')
>>> principal.id
'auth.group.G1'
>>> principal.groups
['auth.group.G2']
In addition to setting principal groups, the `setGroupsForPrincipal`
function also declares the `IGroup` interface on groups:
>>> [iface.__name__ for iface in interface.providedBy(principal)]
['IGroup', 'IGroupAwarePrincipal']
>>> [iface.__name__
... for iface in interface.providedBy(principals.getPrincipal('auth.p1'))]
['IGroupAwarePrincipal']
Special groups
==============
Two special groups, Authenticated and Everyone, may apply to users
created by the pluggable-authentication utility. There is a
subscriber, specialGroups, that will set these groups on any non-group
principal if IAuthenticatedGroup or IEveryoneGroup utilities are
provided.
Let's define a group-aware principal:
>>> import zope.security.interfaces
>>> @interface.implementer(zope.security.interfaces.IGroupAwarePrincipal)
... class GroupAwarePrincipal(Principal):
... def __init__(self, id):
... Principal.__init__(self, id)
... self.groups = []
If we notify the subscriber with this principal, nothing will happen
because the groups haven't been defined:
>>> prin = GroupAwarePrincipal('x')
>>> event = interfaces.FoundPrincipalCreated(42, prin, {})
>>> zope.pluggableauth.plugins.groupfolder.specialGroups(event)
>>> prin.groups
[]
Now, if we define the Everybody group:
>>> import zope.authentication.interfaces
>>> @interface.implementer(zope.authentication.interfaces.IEveryoneGroup)
... class EverybodyGroup(Principal):
... pass
>>> everybody = EverybodyGroup('all')
>>> provideUtility(everybody, zope.authentication.interfaces.IEveryoneGroup)
Then the group will be added to the principal:
>>> zope.pluggableauth.plugins.groupfolder.specialGroups(event)
>>> prin.groups
['all']
Similarly for the authenticated group:
>>> @interface.implementer(
... zope.authentication.interfaces.IAuthenticatedGroup)
... class AuthenticatedGroup(Principal):
... pass
>>> authenticated = AuthenticatedGroup('auth')
>>> provideUtility(authenticated, zope.authentication.interfaces.IAuthenticatedGroup)
Then the group will be added to the principal:
>>> prin.groups = []
>>> zope.pluggableauth.plugins.groupfolder.specialGroups(event)
>>> prin.groups.sort()
>>> prin.groups
['all', 'auth']
These groups are only added to non-group principals:
>>> prin.groups = []
>>> interface.directlyProvides(prin, zope.security.interfaces.IGroup)
>>> zope.pluggableauth.plugins.groupfolder.specialGroups(event)
>>> prin.groups
[]
And they are only added to group-aware principals:
>>> @interface.implementer(zope.security.interfaces.IPrincipal)
... class SolitaryPrincipal:
... id = title = description = ''
>>> event = interfaces.FoundPrincipalCreated(42, SolitaryPrincipal(), {})
>>> zope.pluggableauth.plugins.groupfolder.specialGroups(event)
>>> prin.groups
[]
Member-aware groups
===================
The groupfolder includes a subscriber that gives group principals the
zope.security.interfaces.IMemberAwareGroup interface and an implementation
thereof. This allows groups to get and set their members.
Given an info object and a group...
>>> @interface.implementer(
... zope.pluggableauth.plugins.groupfolder.IGroupInformation)
... class DemoGroupInformation(object):
... def __init__(self, title, description, principals):
... self.title = title
... self.description = description
... self.principals = principals
...
>>> i = DemoGroupInformation(
... 'Managers', 'Taskmasters', ('joe', 'jane'))
...
>>> info = zope.pluggableauth.plugins.groupfolder.GroupInfo(
... 'groups.managers', i)
>>> @interface.implementer(IGroupAwarePrincipal)
... class DummyGroup(object):
... def __init__(self, id, title='', description=''):
... self.id = id
... self.title = title
... self.description = description
... self.groups = []
...
>>> principal = DummyGroup('foo')
>>> zope.security.interfaces.IMemberAwareGroup.providedBy(principal)
False
...when you call the subscriber, it adds the two pseudo-methods to the
principal and makes the principal provide the IMemberAwareGroup interface.
>>> zope.pluggableauth.plugins.groupfolder.setMemberSubscriber(
... interfaces.FoundPrincipalCreated(
... 'dummy auth (ignored)', principal, info))
>>> principal.getMembers()
('joe', 'jane')
>>> principal.setMembers(('joe', 'jane', 'jaimie'))
>>> principal.getMembers()
('joe', 'jane', 'jaimie')
>>> zope.security.interfaces.IMemberAwareGroup.providedBy(principal)
True
The two methods work with the value on the IGroupInformation object.
>>> i.principals == principal.getMembers()
True
Limitation
----------
The current group-folder design has an important limitation!
There is no point in assigning principals to a group
from a group folder unless the principal is from the same pluggable
authentication utility.
* If a principal is from a higher authentication utility, the user
  will not get the group definition. Why? Because the principal's
  group assignments are set when the principal is authenticated. At
  that point, the current site is the site containing the principal
  definition. Groups defined in lower sites will not be consulted
  (see the snippet below).
* It is impossible to assign users from lower authentication
  utilities because they can't be seen when managing the group from
  the site containing the group.
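The first point follows from how the ``setGroupsForPrincipal`` subscriber
works: it only iterates over the authenticator plugins of the authentication
utility that created the principal, so group folders registered in other
sites are never consulted. The relevant part of the subscriber looks like
this::

    for name, plugin in authentication.getAuthenticatorPlugins():
        if not IGroupFolder.providedBy(plugin):
            continue
        groupfolder = plugin
        principal.groups.extend(
            [authentication.prefix + id
             for id in groupfolder.getGroupsForPrincipal(principal.id)])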
A better design might be to store principal-group assignments independently
of the group definitions and to look for assignments during (URL)
traversal. This could get quite complex, though.
While it is possible to have multiple authentication utilities along a
URL path, it is generally better to stick to a simpler model in which
there is only one authentication utility along a URL path (in addition
to the global utility, which is used for bootstrapping purposes).
| zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/plugins/groupfolder.rst | groupfolder.rst |
"""PAS plugins related to HTTP
"""
__docformat__ = "reStructuredText"
import base64
from zope.interface import Interface
from zope.interface import implementer
from zope.publisher.interfaces.http import IHTTPRequest
from zope.schema import TextLine
from zope.pluggableauth import interfaces
class IHTTPBasicAuthRealm(Interface):
"""HTTP Basic Auth Realm
Represents the realm string that is used during basic HTTP authentication
"""
realm = TextLine(title='Realm',
description='HTTP Basic Authentication Realm',
required=True,
default='Zope')
@implementer(interfaces.ICredentialsPlugin, IHTTPBasicAuthRealm)
class HTTPBasicAuthCredentialsPlugin:
realm = 'Zope'
protocol = 'http auth'
def extractCredentials(self, request):
"""Extracts HTTP basic auth credentials from a request.
First we need to create a request that contains some credentials.
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest(
... environ={'HTTP_AUTHORIZATION': 'Basic bWdyOm1ncnB3'})
Now create the plugin and get the credentials.
>>> plugin = HTTPBasicAuthCredentialsPlugin()
>>> from pprint import pprint
>>> pprint(plugin.extractCredentials(request))
{'login': 'mgr', 'password': 'mgrpw'}
Make sure we return `None`, if no authentication header has been
specified.
>>> print(plugin.extractCredentials(TestRequest()))
None
Also, this plugin can *only* handle basic authentication.
>>> request = TestRequest(environ={'HTTP_AUTHORIZATION': 'foo bar'})
        >>> print(plugin.extractCredentials(request))
None
This plugin only works with HTTP requests.
>>> from zope.publisher.base import TestRequest
>>> print(plugin.extractCredentials(TestRequest('/')))
None
According to RFC 2617, password can contain one or more colons; user ID
can't contain any colon.
>>> from zope.publisher.browser import TestRequest as BrowserRequest
>>> request = BrowserRequest('/',
... environ={'HTTP_AUTHORIZATION': 'Basic bWdyOm1ncnB3OndpdGg6Y29sb24='})
>>> pprint(plugin.extractCredentials(request))
{'login': 'mgr', 'password': 'mgrpw:with:colon'}
""" # noqa: E501 line too long
if not IHTTPRequest.providedBy(request):
return None
if request._auth:
if request._auth.lower().startswith('basic '):
credentials = request._auth.split()[-1]
if isinstance(credentials, str):
# No encoding needed, should be base64 string anyways.
credentials = credentials.encode()
login, password = base64.b64decode(credentials).split(b':', 1)
return {'login': login.decode('utf-8'),
'password': password.decode('utf-8')}
return None
def challenge(self, request):
"""Issues an HTTP basic auth challenge for credentials.
The challenge is issued by setting the appropriate response headers.
To illustrate, we'll create a plugin:
>>> plugin = HTTPBasicAuthCredentialsPlugin()
The plugin adds its challenge to the HTTP response.
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> response = request.response
>>> plugin.challenge(request)
True
>>> response._status
401
>>> response.getHeader('WWW-Authenticate', literal=True)
'basic realm="Zope"'
Notice that the realm is quoted, as per RFC 2617.
The plugin only works with HTTP requests.
>>> from zope.publisher.base import TestRequest
>>> request = TestRequest('/')
>>> response = request.response
>>> print(plugin.challenge(request))
False
"""
if not IHTTPRequest.providedBy(request):
return False
request.response.setHeader("WWW-Authenticate",
'basic realm="%s"' % self.realm,
literal=True)
request.response.setStatus(401)
return True
def logout(self, request):
"""Always returns False as logout is not supported by basic auth.
>>> plugin = HTTPBasicAuthCredentialsPlugin()
>>> from zope.publisher.browser import TestRequest
>>> plugin.logout(TestRequest())
False
"""
return False | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/plugins/httpplugins.py | httpplugins.py |
"""Zope Groups Folder implementation."""
import BTrees.OOBTree
import persistent
import zope.authentication.principal
import zope.container.constraints
import zope.container.interfaces
import zope.location.interfaces
from zope.authentication.interfaces import IAuthenticatedGroup
from zope.authentication.interfaces import IAuthentication
from zope.authentication.interfaces import IEveryoneGroup
from zope.container.btree import BTreeContainer
from zope.i18nmessageid import MessageFactory
from zope.interface import alsoProvides
from zope.interface import implementer
from zope.security.interfaces import IGroup
from zope.security.interfaces import IGroupAwarePrincipal
from zope.security.interfaces import IMemberAwareGroup
from zope import component
from zope import event
from zope import interface
from zope import schema
from zope.pluggableauth import factories
from zope.pluggableauth.interfaces import IAuthenticatorPlugin
from zope.pluggableauth.interfaces import IFoundPrincipalCreated
from zope.pluggableauth.interfaces import IGroupAdded
from zope.pluggableauth.interfaces import IPrincipalInfo
from zope.pluggableauth.interfaces import IPrincipalsAddedToGroup
from zope.pluggableauth.interfaces import IPrincipalsRemovedFromGroup
from zope.pluggableauth.interfaces import IQuerySchemaSearch
_ = MessageFactory('zope')
class IGroupInformation(interface.Interface):
title = schema.TextLine(
title=_("Title"),
description=_("Provides a title for the permission."),
required=True)
description = schema.Text(
title=_("Description"),
description=_("Provides a description for the permission."),
required=False)
principals = schema.List(
title=_("Principals"),
value_type=schema.Choice(
source=zope.authentication.principal.PrincipalSource()),
description=_(
"List of ids of principals which belong to the group"),
required=False)
class IGroupFolder(zope.container.interfaces.IContainer):
zope.container.constraints.contains(IGroupInformation)
prefix = schema.TextLine(
title=_("Group ID prefix"),
description=_("Prefix added to IDs of groups in this folder"),
readonly=True,
)
def getGroupsForPrincipal(principalid):
"""Get groups the given principal belongs to"""
def getPrincipalsForGroup(groupid):
"""Get principals which belong to the group"""
class IGroupContained(zope.location.interfaces.IContained):
zope.container.constraints.containers(IGroupFolder)
class IGroupSearchCriteria(interface.Interface):
search = schema.TextLine(
title=_("Group Search String"),
required=False,
missing_value='',
)
class IGroupPrincipalInfo(IPrincipalInfo):
members = interface.Attribute('an iterable of members of the group')
@interface.implementer(IGroupPrincipalInfo)
class GroupInfo:
"""An implementation of IPrincipalInfo used by the group folder.
A group info is created with id, title, and description:
>>> @interface.implementer(IGroupInformation)
... class DemoGroupInformation(object):
... def __init__(self, title, description, principals):
... self.title = title
... self.description = description
... self.principals = principals
...
>>> i = DemoGroupInformation(
... 'Managers', 'Taskmasters', ('joe', 'jane'))
...
>>> info = GroupInfo('groups.managers', i)
>>> info
GroupInfo('groups.managers')
>>> info.id
'groups.managers'
>>> info.title
'Managers'
>>> info.description
'Taskmasters'
>>> info.members
('joe', 'jane')
>>> info.members = ('joe', 'jane', 'jaime')
>>> info.members
('joe', 'jane', 'jaime')
"""
def __init__(self, id, information):
self.id = id
self._information = information
@property
def title(self):
return self._information.title
@property
def description(self):
return self._information.description
@property
def members(self):
return self._information.principals
@members.setter
def members(self, value):
self._information.principals = value
def __repr__(self):
return 'GroupInfo(%r)' % self.id
@interface.implementer(IAuthenticatorPlugin, IQuerySchemaSearch, IGroupFolder)
class GroupFolder(BTreeContainer):
schema = IGroupSearchCriteria
def __init__(self, prefix=''):
super().__init__()
self.prefix = prefix
        # __inverseMapping is used to map principals to groups
self.__inverseMapping = BTrees.OOBTree.OOBTree()
def __setitem__(self, name, value):
BTreeContainer.__setitem__(self, name, value)
group_id = self._groupid(value)
self._addPrincipalsToGroup(value.principals, group_id)
if value.principals:
event.notify(
PrincipalsAddedToGroup(
value.principals, self.__parent__.prefix + group_id))
group = factories.Principal(self.prefix + name)
event.notify(GroupAdded(group))
def __delitem__(self, name):
value = self[name]
group_id = self._groupid(value)
self._removePrincipalsFromGroup(value.principals, group_id)
if value.principals:
event.notify(
PrincipalsRemovedFromGroup(
value.principals, self.__parent__.prefix + group_id))
BTreeContainer.__delitem__(self, name)
def _groupid(self, group):
return self.prefix + group.__name__
def _addPrincipalsToGroup(self, principal_ids, group_id):
for principal_id in principal_ids:
self.__inverseMapping[principal_id] = (
self.__inverseMapping.get(principal_id, ())
+ (group_id,))
def _removePrincipalsFromGroup(self, principal_ids, group_id):
for principal_id in principal_ids:
groups = self.__inverseMapping.get(principal_id)
if groups is None:
return
new = tuple([id for id in groups if id != group_id])
if new:
self.__inverseMapping[principal_id] = new
else:
del self.__inverseMapping[principal_id]
def getGroupsForPrincipal(self, principalid):
"""Get groups the given principal belongs to"""
return self.__inverseMapping.get(principalid, ())
def getPrincipalsForGroup(self, groupid):
"""Get principals which belong to the group"""
return self[groupid].principals
def search(self, query, start=None, batch_size=None):
""" Search for groups"""
search = query.get('search')
if search is not None:
n = 0
search = search.lower()
for i, (id, groupinfo) in enumerate(self.items()):
if (search in groupinfo.title.lower() or
(groupinfo.description and
search in groupinfo.description.lower())):
if not ((start is not None and i < start)
or
(batch_size is not None and n >= batch_size)):
n += 1
yield self.prefix + id
def authenticateCredentials(self, credentials):
        # group folders do not authenticate
pass
def principalInfo(self, id):
if id.startswith(self.prefix):
id = id[len(self.prefix):]
info = self.get(id)
if info is not None:
return GroupInfo(
self.prefix + id, info)
class GroupCycle(Exception):
"""There is a cyclic relationship among groups
"""
class InvalidPrincipalIds(Exception):
"""A user has a group id for a group that can't be found
"""
class InvalidGroupId(Exception):
"""A user has a group id for a group that can't be found
"""
def nocycles(principal_ids, seen, getPrincipal):
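    """Raise GroupCycle if following group membership from ``principal_ids``
    revisits an id already on the current path (``seen``).
    """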
for principal_id in principal_ids:
if principal_id in seen:
raise GroupCycle(principal_id, seen)
seen.append(principal_id)
principal = getPrincipal(principal_id)
nocycles(principal.groups, seen, getPrincipal)
seen.pop()
@interface.implementer(IGroupInformation, IGroupContained)
class GroupInformation(persistent.Persistent):
__parent__ = __name__ = None
_principals = ()
def __init__(self, title='', description=''):
self.title = title
self.description = description
def setPrincipals(self, prinlist, check=True):
# method is not a part of the interface
parent = self.__parent__
old = self._principals
self._principals = tuple(prinlist)
if parent is not None:
oldset = set(old)
new = set(prinlist)
group_id = parent._groupid(self)
removed = oldset - new
added = new - oldset
try:
parent._removePrincipalsFromGroup(removed, group_id)
except AttributeError:
removed = None
try:
parent._addPrincipalsToGroup(added, group_id)
except AttributeError:
added = None
if check:
try:
principalsUtility = component.getUtility(IAuthentication)
nocycles(sorted(new), [], principalsUtility.getPrincipal)
except GroupCycle:
# abort
self.setPrincipals(old, False)
raise
# now that we've gotten past the checks, fire the events.
if removed:
event.notify(
PrincipalsRemovedFromGroup(
removed, self.__parent__.__parent__.prefix + group_id))
if added:
event.notify(
PrincipalsAddedToGroup(
added, self.__parent__.__parent__.prefix + group_id))
principals = property(lambda self: self._principals, setPrincipals)
def specialGroups(event):
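    """Add the Everyone and Authenticated group ids to newly created
    group-aware, non-group principals, if those utilities are registered.
    """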
principal = event.principal
if (IGroup.providedBy(principal) or
not IGroupAwarePrincipal.providedBy(principal)):
return
everyone = component.queryUtility(IEveryoneGroup)
if everyone is not None:
principal.groups.append(everyone.id)
auth = component.queryUtility(IAuthenticatedGroup)
if auth is not None:
principal.groups.append(auth.id)
def setGroupsForPrincipal(event):
"""Set group information when a principal is created"""
principal = event.principal
if not IGroupAwarePrincipal.providedBy(principal):
return
authentication = event.authentication
for name, plugin in authentication.getAuthenticatorPlugins():
if not IGroupFolder.providedBy(plugin):
continue
groupfolder = plugin
principal.groups.extend(
[authentication.prefix + id
for id in groupfolder.getGroupsForPrincipal(principal.id)
])
id = principal.id
prefix = authentication.prefix + groupfolder.prefix
if id.startswith(prefix) and id[len(prefix):] in groupfolder:
alsoProvides(principal, IGroup)
@component.adapter(IFoundPrincipalCreated)
def setMemberSubscriber(event):
"""adds `getMembers`, `setMembers` to groups made from IGroupPrincipalInfo.
"""
info = event.info
if IGroupPrincipalInfo.providedBy(info):
principal = event.principal
principal.getMembers = lambda: info.members
def setMembers(value):
info.members = value
principal.setMembers = setMembers
alsoProvides(principal, IMemberAwareGroup)
@zope.interface.implementer(IGroupAdded)
class GroupAdded:
"""
>>> from zope.interface.verify import verifyObject
>>> event = GroupAdded("group")
>>> verifyObject(IGroupAdded, event)
True
"""
def __init__(self, group):
self.group = group
def __repr__(self):
return "<GroupAdded %r>" % self.group.id
class AbstractMembersChanged:
def __init__(self, principal_ids, group_id):
self.principal_ids = principal_ids
self.group_id = group_id
def __repr__(self):
return "<{} {!r} {!r}>".format(
self.__class__.__name__, sorted(self.principal_ids), self.group_id)
@implementer(IPrincipalsAddedToGroup)
class PrincipalsAddedToGroup(AbstractMembersChanged):
pass
@implementer(IPrincipalsRemovedFromGroup)
class PrincipalsRemovedFromGroup(AbstractMembersChanged):
pass | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/plugins/groupfolder.py | groupfolder.py |
""" Session-based and cookie-based extractor and challenge plugins.
"""
__docformat__ = 'restructuredtext'
from urllib.parse import urlencode
import persistent
import transaction
import zope.container.contained
from zope.component import hooks
from zope.interface import Interface
from zope.interface import implementer
from zope.publisher.interfaces.http import IHTTPRequest
from zope.schema import TextLine
from zope.session.interfaces import ISession
from zope.traversing.browser.absoluteurl import absoluteURL
from zope.pluggableauth.interfaces import ICredentialsPlugin
class ISessionCredentials(Interface):
"""Interface for storing and accessing credentials in a session.
We use a real class with interface here to prevent unauthorized
access to the credentials.
"""
def __init__(login, password):
pass
def getLogin():
"""Return login name."""
def getPassword():
"""Return password."""
@implementer(ISessionCredentials)
class SessionCredentials:
"""Credentials class for use with sessions.
A session credential is created with a login and a password:
>>> cred = SessionCredentials('scott', 'tiger')
Logins are read using getLogin:
>>> cred.getLogin()
'scott'
and passwords with getPassword:
>>> cred.getPassword()
'tiger'
"""
def __init__(self, login, password):
self.login = login
self.password = password
def getLogin(self):
return self.login
def getPassword(self):
return self.password
def __str__(self):
return self.getLogin() + ':' + self.getPassword()
class IBrowserFormChallenger(Interface):
"""A challenger that uses a browser form to collect user credentials."""
loginpagename = TextLine(
title='Loginpagename',
description="""Name of the login form used by challenger.
The form must provide 'login' and 'password' input fields.
""",
default='loginForm.html')
loginfield = TextLine(
title='Loginfield',
description="Field of the login page in which is looked for the"
" login user name.",
default="login")
passwordfield = TextLine(
title='Passwordfield',
description="Field of the login page in which is looked for the"
" password.",
default="password")
@implementer(ICredentialsPlugin, IBrowserFormChallenger)
class SessionCredentialsPlugin(persistent.Persistent,
zope.container.contained.Contained):
"""A credentials plugin that uses Zope sessions to get/store credentials.
To illustrate how a session plugin works, we'll first setup some session
machinery:
>>> from zope.session.session import RAMSessionDataContainer
>>> from zope.pluggableauth.tests import sessionSetUp
>>> sessionSetUp(RAMSessionDataContainer)
This lets us retrieve the same session info from any test request, which
simulates what happens when a user submits a session ID as a cookie.
We also need a session plugin:
>>> plugin = SessionCredentialsPlugin()
A session plugin uses an ISession component to store the last set of
credentials it gets from a request. Credentials can be retrieved from
subsequent requests using the session-stored credentials.
Our test environment is initially configured without credentials:
>>> from zope.pluggableauth.tests import sessionSetUp
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> print(plugin.extractCredentials(request))
None
We must explicitly provide credentials once so the plugin can store
them in a session:
>>> request = TestRequest(login='scott', password='tiger')
>>> from pprint import pprint
>>> pprint(plugin.extractCredentials(request))
{'login': 'scott', 'password': 'tiger'}
Subsequent requests now have access to the credentials even if they're
not explicitly in the request:
>>> pprint(plugin.extractCredentials(TestRequest()))
{'login': 'scott', 'password': 'tiger'}
We can always provide new credentials explicitly in the request:
>>> pprint(plugin.extractCredentials(TestRequest(
... login='harry', password='hirsch')))
{'login': 'harry', 'password': 'hirsch'}
and these will be used on subsequent requests:
>>> pprint(plugin.extractCredentials(TestRequest()))
{'login': 'harry', 'password': 'hirsch'}
We can also change the fields from which the credentials are extracted:
>>> plugin.loginfield = "my_new_login_field"
>>> plugin.passwordfield = "my_new_password_field"
Now we build a request that uses the new fields:
>>> request = TestRequest(my_new_login_field='luke',
... my_new_password_field='the_force')
The plugin now extracts the credentials information from these new fields:
>>> pprint(plugin.extractCredentials(request))
{'login': 'luke', 'password': 'the_force'}
Finally, we clear the session credentials using the logout method:
>>> plugin.logout(TestRequest())
True
>>> print(plugin.extractCredentials(TestRequest()))
None
Instances are persistent:
>>> import persistent.interfaces
>>> persistent.interfaces.IPersistent.providedBy(plugin)
True
>>> isinstance(plugin, persistent.Persistent)
True
Instances provide IContained:
>>> import zope.location.interfaces
>>> zope.location.interfaces.IContained.providedBy(plugin)
True
"""
loginpagename = 'loginForm.html'
loginfield = 'login'
passwordfield = 'password'
def extractCredentials(self, request):
"""Extracts credentials from a session if they exist."""
if not IHTTPRequest.providedBy(request):
return None
session = ISession(request)
sessionData = session.get(
'zope.pluggableauth.browserplugins')
login = request.get(self.loginfield, None)
password = request.get(self.passwordfield, None)
credentials = None
if login and password:
credentials = self._makeCredentials(login, password)
elif not sessionData:
return None
sessionData = session[
'zope.pluggableauth.browserplugins']
if credentials:
sessionData['credentials'] = credentials
else:
credentials = sessionData.get('credentials', None)
if not credentials:
return None
return {'login': credentials.getLogin(),
'password': credentials.getPassword()}
def _makeCredentials(self, login, password):
"""Create an ISessionCredentials.
You can override this if you desire a different implementation, e.g.
one that encrypts the password, so it's not stored in plain text in
the ZODB.
"""
return SessionCredentials(login, password)
def challenge(self, request):
"""Challenges by redirecting to a login form.
To illustrate, we'll create a test request:
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
and confirm its response's initial status and 'location' header:
>>> request.response.getStatus()
599
>>> request.response.getHeader('location')
When we issue a challenge using a session plugin:
>>> plugin = SessionCredentialsPlugin()
>>> plugin.challenge(request)
True
we get a redirect:
>>> request.response.getStatus()
302
>>> request.response.getHeader('location')
'http://127.0.0.1/@@loginForm.html?camefrom=http%3A%2F%2F127.0.0.1'
The plugin redirects to the page defined by the loginpagename
attribute:
>>> plugin.loginpagename = 'mylogin.html'
>>> plugin.challenge(request)
True
>>> request.response.getHeader('location')
'http://127.0.0.1/@@mylogin.html?camefrom=http%3A%2F%2F127.0.0.1'
It also provides the request URL as a 'camefrom' GET style parameter.
To illustrate, we'll pretend we've traversed a couple names:
>>> env = {
... 'REQUEST_URI': '/foo/bar/folder/page%201.html?q=value',
... 'QUERY_STRING': 'q=value'
... }
>>> request = TestRequest(environ=env)
>>> request._traversed_names = [u'foo', u'bar']
>>> request._traversal_stack = [u'page 1.html', u'folder']
>>> request['REQUEST_URI']
'/foo/bar/folder/page%201.html?q=value'
When we challenge:
>>> plugin.challenge(request)
True
We see the 'camefrom' points to the requested URL:
>>> request.response.getHeader('location')
'http://127.0.0.1/@@mylogin.html?camefrom=http%3A%2F%2F127.0.0.1%2Ffoo%2Fbar%2Ffolder%2Fpage+1.html%3Fq%3Dvalue'
This can be used by the login form to redirect the user back to the
originating URL upon successful authentication.
Now that the 'camefrom' is an absolute URL, quickly demonstrate that
'camefrom' information that inadvertently points to a different host,
will by default not be trusted in a redirect:
>>> camefrom = request.response.getHeader('location')
>>> request.response.redirect(camefrom)
'http://127.0.0.1/@@mylogin.html?camefrom=http%3A%2F%2F127.0.0.1%2Ffoo%2Fbar%2Ffolder%2Fpage+1.html%3Fq%3Dvalue'
>>> suspicious_camefrom = 'http://example.com/foobar'
>>> request.response.redirect(suspicious_camefrom) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Untrusted redirect to host 'example.com:80' not allowed.
""" # noqa: E501 line too long
if not IHTTPRequest.providedBy(request):
return False
site = hooks.getSite()
redirectWithComeFrom(request, '{}/@@{}'.format(
absoluteURL(site, request), self.loginpagename))
return True
def logout(self, request):
"""Performs logout by clearing session data credentials."""
if not IHTTPRequest.providedBy(request):
return False
sessionData = ISession(request)[
'zope.pluggableauth.browserplugins']
sessionData['credentials'] = None
transaction.commit()
return True
def redirectWithComeFrom(request, location):
"""Redirect to a new location adding the current URL as ?comefrom=...
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> redirectWithComeFrom(request, 'http://127.0.0.1/login')
>>> request.response.getStatus()
302
>>> request.response.getHeader('location')
'http://127.0.0.1/login?camefrom=http%3A%2F%2F127.0.0.1'
We'll fake up a more interesting request
>>> env = {
... 'REQUEST_URI': '/foo/bar/folder/page%201.html?q=value',
... 'QUERY_STRING': 'q=value'
... }
>>> request = TestRequest(environ=env)
>>> request._traversed_names = [u'foo', u'bar']
>>> request._traversal_stack = [u'page 1.html', u'folder']
>>> request['REQUEST_URI']
'/foo/bar/folder/page%201.html?q=value'
>>> redirectWithComeFrom(request, 'http://127.0.0.1/login')
>>> request.response.getHeader('location')
'http://127.0.0.1/login?camefrom=http%3A%2F%2F127.0.0.1%2Ffoo%2Fbar%2Ffolder%2Fpage+1.html%3Fq%3Dvalue'
"""
# We need the traversal stack to complete the 'camefrom' parameter
    # (since this function must work during traversal as well as after it)
stack = request.getTraversalStack()
stack.reverse()
# Better to add the query string, if present
query = request.get('QUERY_STRING')
camefrom = '/'.join([request.getURL()] + stack)
if query:
camefrom = camefrom + '?' + query
# We assume location doesn't have query parameters
url = '{}?{}'.format(location, urlencode({'camefrom': camefrom}))
request.response.redirect(url) | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/plugins/session.py | session.py |
==================
Principal Folder
==================
Principal folders contain principal-information objects that hold each
principal's login, password, title, and description. We create an internal
principal using the `InternalPrincipal` class:
>>> from zope.pluggableauth.plugins.principalfolder import InternalPrincipal
>>> p1 = InternalPrincipal('login1', '123', "Principal 1",
... passwordManagerName="SHA1")
>>> p2 = InternalPrincipal('login2', '456', "The Other One")
and add them to a principal folder:
>>> from zope.pluggableauth.plugins.principalfolder import PrincipalFolder
>>> principals = PrincipalFolder('principal.')
>>> principals['p1'] = p1
>>> principals['p2'] = p2
Authentication
==============
Principal folders provide the `IAuthenticatorPlugin` interface. When we
provide suitable credentials:
>>> from pprint import pprint
>>> principals.authenticateCredentials({'login': 'login1', 'password': '123'})
PrincipalInfo('principal.p1')
We get back a principal id and supplementary information, including the
principal title and description. Note that the principal id is a concatenation
of the principal-folder prefix and the name of the principal-information object
within the folder.
None is returned if the credentials are invalid:
>>> principals.authenticateCredentials({'login': 'login1',
... 'password': '1234'})
>>> principals.authenticateCredentials(42)
Search
======
Principal folders also provide the IQuerySchemaSearch interface. This
supports both finding principal information based on their ids:
>>> principals.principalInfo('principal.p1')
PrincipalInfo('principal.p1')
>>> principals.principalInfo('p1')
and searching for principals based on a search string:
>>> list(principals.search({'search': 'other'}))
['principal.p2']
>>> list(principals.search({'search': 'OTHER'}))
['principal.p2']
>>> list(principals.search({'search': ''}))
['principal.p1', 'principal.p2']
>>> list(principals.search({'search': 'eek'}))
[]
>>> list(principals.search({}))
[]
If there are a large number of matches:
>>> for i in range(20):
... i = str(i)
... p = InternalPrincipal('l'+i, i, "Dude "+i)
... principals[i] = p
>>> pprint(list(principals.search({'search': 'D'})), width=25)
['principal.0',
'principal.1',
'principal.10',
'principal.11',
'principal.12',
'principal.13',
'principal.14',
'principal.15',
'principal.16',
'principal.17',
'principal.18',
'principal.19',
'principal.2',
'principal.3',
'principal.4',
'principal.5',
'principal.6',
'principal.7',
'principal.8',
'principal.9']
We can use batching parameters to specify a subset of results:
>>> pprint(list(principals.search({'search': 'D'}, start=17)))
['principal.7', 'principal.8', 'principal.9']
>>> pprint(list(principals.search({'search': 'D'}, batch_size=5)), width=60)
['principal.0',
'principal.1',
'principal.10',
'principal.11',
'principal.12']
>>> pprint(list(principals.search({'search': 'D'}, start=5, batch_size=5)),
... width=25)
['principal.13',
'principal.14',
'principal.15',
'principal.16',
'principal.17']
There is an additional method that allows requesting the principal id
associated with a login id. The method raises KeyError when there is
no associated principal:
>>> principals.getIdByLogin("not-there")
Traceback (most recent call last):
KeyError: 'not-there'
If there is a matching principal, the id is returned:
>>> principals.getIdByLogin("login1")
'principal.p1'
Changing credentials
====================
Credentials can be changed by modifying principal-information objects:
>>> p1.login = 'bob'
>>> p1.password = 'eek'
>>> principals.authenticateCredentials({'login': 'bob', 'password': 'eek'})
PrincipalInfo('principal.p1')
>>> principals.authenticateCredentials({'login': 'login1',
... 'password': 'eek'})
>>> principals.authenticateCredentials({'login': 'bob',
... 'password': '123'})
It is an error to try to pick a login name that is already taken:
>>> p1.login = 'login2'
Traceback (most recent call last):
...
ValueError: Principal Login already taken!
If such an attempt is made, the data are unchanged:
>>> principals.authenticateCredentials({'login': 'bob', 'password': 'eek'})
PrincipalInfo('principal.p1')
Removing principals
===================
Of course, if a principal is removed, we can no longer authenticate it:
>>> del principals['p1']
>>> principals.authenticateCredentials({'login': 'bob',
... 'password': 'eek'})
| zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/plugins/principalfolder.rst | principalfolder.rst |
__docformat__ = 'restructuredtext'
import re
from zope.container.contained import NameChooser
from zope.exceptions.interfaces import UserError
from zope.i18nmessageid import MessageFactory
try:
text_type = unicode
except NameError: # py3
text_type = str
_ = MessageFactory('zope')
ok = re.compile('[!-~]+$').match
class IdPicker(NameChooser):
"""Helper base class that picks principal ids.
Add numbers to ids given by users to make them unique.
The Id picker is a variation on the name chooser that picks numeric
ids when no name is given.
>>> from zope.pluggableauth.plugins.idpicker import IdPicker
>>> IdPicker({}).chooseName('', None)
'1'
>>> IdPicker({'1': 1}).chooseName('', None)
'2'
>>> IdPicker({'2': 1}).chooseName('', None)
'1'
>>> IdPicker({'1': 1}).chooseName('bob', None)
'bob'
>>> IdPicker({'bob': 1}).chooseName('bob', None)
'bob1'
"""
def chooseName(self, name, object):
i = 0
name = text_type(name)
orig = name
while (not name) or (name in self.context):
i += 1
name = orig + str(i)
self.checkName(name, object)
return name
def checkName(self, name, object):
"""Limit ids
Ids can only contain printable, non-space, 7-bit ASCII strings:
>>> from zope.pluggableauth.plugins.idpicker import IdPicker
>>> IdPicker({}).checkName('1', None)
True
>>> IdPicker({}).checkName('bob', None)
True
>>> try:
... IdPicker({}).checkName('bob\xfa', None)
... except UserError as e:
... print(e)
... # doctest: +NORMALIZE_WHITESPACE
Ids must contain only printable 7-bit non-space ASCII characters
>>> try:
... IdPicker({}).checkName('big bob', None)
... except UserError as e:
... print(e)
... # doctest: +NORMALIZE_WHITESPACE
Ids must contain only printable 7-bit non-space ASCII characters
Ids also can't be over 100 characters long:
>>> IdPicker({}).checkName('x' * 100, None)
True
>>> IdPicker({}).checkName('x' * 101, None)
Traceback (most recent call last):
...
zope.exceptions.interfaces.UserError: Ids can't be more than 100 characters long.
""" # noqa: E501 line too long
NameChooser.checkName(self, name, object)
if not ok(name):
raise UserError(
_("Ids must contain only printable 7-bit non-space"
" ASCII characters")
)
if len(name) > 100:
raise UserError(
_("Ids can't be more than 100 characters long.")
)
return True | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/plugins/idpicker.py | idpicker.py |
"""Generic PAS Plugins
"""
__docformat__ = "reStructuredText"
from zope.authentication.interfaces import IUnauthenticatedPrincipal
from zope.interface import implementer
from zope.pluggableauth import interfaces
@implementer(interfaces.ICredentialsPlugin)
class NoChallengeCredentialsPlugin:
"""A plugin that doesn't challenge if the principal is authenticated.
There are two reasonable ways to handle an unauthorized error for an
authenticated principal:
- Inform the user of the unauthorized error
- Let the user login with a different set of credentials
Since either approach is reasonable, we need to give the site manager
some way of specifying one of the two policies.
By default, a user will be challenged for a new set of credentials if
unauthorized. A site manager can insert this plugin in the front of the
plugin list to prevent that challenge from occurring. This will
typically result in an 'Unauthorized' message to the user.
The 'challenge' behavior of the plugin is simple. To illustrate, we'll
create a plugin:
>>> challenger = NoChallengeCredentialsPlugin()
and a test request with an authenticated principal:
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> IUnauthenticatedPrincipal.providedBy(request.principal)
False
When we challenge using the plugin:
>>> challenger.challenge(request)
True
we get a value that signals the PAU that this plugin successfully
challenged the user (even though it actually did nothing). The PAU
will stop trying to challenge and the user will not get a chance to
provide different credentials. The result is typically an error message.
On the other hand, if the user is unauthenticated:
>>> @implementer(IUnauthenticatedPrincipal)
... class Principal(object):
... pass
>>> request.setPrincipal(Principal())
>>> IUnauthenticatedPrincipal.providedBy(request.principal)
True
the plugin challenge will return None:
>>> print(challenger.challenge(request))
None
signaling the PAU that it should try the next plugin for a challenge. If
the PAU is configured properly, the user will receive a challenge and be
allowed to provide different credentials.
"""
def extractCredentials(self, request):
return None
def challenge(self, request):
if not IUnauthenticatedPrincipal.providedBy(request.principal):
return True
return None
def logout(self, request):
return False | zope.pluggableauth | /zope.pluggableauth-3.0-py3-none-any.whl/zope/pluggableauth/plugins/generic.py | generic.py |
=========
CHANGES
=========
5.0 (2023-02-10)
================
- Drop support for Python 2.7, 3.4, 3.5, 3.6.
- Add support for Python 3.8, 3.9, 3.10, 3.11.
4.1.0 (2018-09-27)
==================
- Support newer zope.configuration and persistent. See `issue 2
<https://github.com/zopefoundation/zope.preference/issues/2>`_.
- Add support for Python 3.7 and PyPy3.
- Drop support for Python 3.3.
4.0.0 (2017-05-09)
==================
- Add support for Python 3.4, 3.5 and 3.6.
- Add support for PyPy.
- Drop support for Python 2.6.
4.0.0a1 (2013-02-24)
====================
- Added support for Python 3.3.
- Replaced deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Dropped support for Python 2.4 and 2.5.
- Refactored tests not to rely on ``zope.app.testing`` anymore.
- Fixed a bug while accessing the parent of a preference group.
3.8.0 (2010-06-12)
==================
- Split out from `zope.app.preference`.
- Removed dependency on `zope.app.component.hooks` by using
`zope.component.hooks`.
- Removed dependency on `zope.app.container` by using
`zope.container`.
| zope.preference | /zope.preference-5.0.tar.gz/zope.preference-5.0/CHANGES.rst | CHANGES.rst |
"""Default Preferences Provider
"""
import persistent
import zope.component
import zope.interface
from BTrees.OOBTree import OOBTree
from zope.container.contained import Contained
from zope.location import locate
from zope.security.checker import defineChecker
from zope.traversing.interfaces import IContainmentRoot
from zope.preference import interfaces
from zope.preference import preference
@zope.interface.implementer(interfaces.IDefaultPreferenceProvider)
class DefaultPreferenceProvider(persistent.Persistent, Contained):
def __init__(self):
self.data = OOBTree()
def getDefaultPreferenceGroup(self, id=''):
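        """Return a DefaultPreferenceGroup wrapping the IPreferenceGroup
        utility registered under ``id``, bound to this provider.
        """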
group = zope.component.getUtility(interfaces.IPreferenceGroup, name=id)
group = group.__bind__(self)
default = DefaultPreferenceGroup(group, self)
zope.interface.alsoProvides(default, IContainmentRoot)
locate(default, self, 'preferences')
return default
preferences = property(getDefaultPreferenceGroup)
def DefaultPreferences(context, request):
return context.preferences
class DefaultPreferenceGroup(preference.PreferenceGroup):
"""A preference group representing the site-wide default values."""
def __init__(self, group, provider):
self.provider = provider
super().__init__(
group.__id__, group.__schema__,
group.__title__, group.__description__)
# Make sure that we also mark the default group as category if the
# actual group is one; this is important for the UI.
if interfaces.IPreferenceCategory.providedBy(group):
zope.interface.alsoProvides(self, interfaces.IPreferenceCategory)
def get(self, key, default=None):
group = super().get(key, default)
if group is default:
return default
return DefaultPreferenceGroup(group, self.provider).__bind__(self)
def items(self):
return [
(id, DefaultPreferenceGroup(group, self.provider).__bind__(self))
for id, group in super(DefaultPreferenceGroup, self).items()]
def __getattr__(self, key):
# Try to find a sub-group of the given id
group = self.get(key)
if group is not None:
return group
# Try to find a preference of the given name
if self.__schema__ and key in self.__schema__:
marker = object()
value = self.data.get(key, marker)
if value is not marker:
return value
# There is currently no local entry, so let's go to the next
# provider and lookup the group and value there.
nextProvider = zope.component.queryNextUtility(
self.provider, interfaces.IDefaultPreferenceProvider)
# No more providers found, so return the schema's default
if nextProvider is None:
return self.__schema__[key].default
nextGroup = nextProvider.getDefaultPreferenceGroup(self.__id__)
return getattr(nextGroup, key, self.__schema__[key].default)
# Nothing found, raise an attribute error
raise AttributeError("'%s' is not a preference or sub-group." % key)
@property
def data(self):
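        """Return the OOBTree storing this group's default values in the
        provider, creating it on first access.
        """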
if self.__id__ not in self.provider.data:
self.provider.data[self.__id__] = OOBTree()
return self.provider.data[self.__id__]
defineChecker(DefaultPreferenceGroup, preference.PreferenceGroupChecker) | zope.preference | /zope.preference-5.0.tar.gz/zope.preference-5.0/src/zope/preference/default.py | default.py |
==================
User Preferences
==================
Implementing user preferences is usually a painful task, since it requires a
lot of custom coding, and constantly changing preferences make it hard to
maintain the data and UI. The `preference` package
>>> from zope.preference import preference
eases this pain by providing a generic user preferences framework that uses
schemas to categorize and describe the preferences.
Preference Groups
=================
Preferences are grouped in preference groups and the preferences inside a
group are specified via the preferences group schema:
>>> import zope.interface
>>> import zope.schema
>>> class IZMIUserSettings(zope.interface.Interface):
... """Basic User Preferences"""
...
... email = zope.schema.TextLine(
... title=u"E-mail Address",
... description=u"E-mail Address used to send notifications")
...
... skin = zope.schema.Choice(
... title=u"Skin",
... description=u"The skin that should be used for the ZMI.",
... values=['Rotterdam', 'ZopeTop', 'Basic'],
... default='Rotterdam')
...
... showZopeLogo = zope.schema.Bool(
... title=u"Show Zope Logo",
... description=u"Specifies whether Zope logo should be displayed "
... u"at the top of the screen.",
... default=True)
Now we can instantiate the preference group. Each preference group must have an
ID by which it can be accessed and optional title and description fields for UI
purposes:
>>> settings = preference.PreferenceGroup(
... "ZMISettings",
... schema=IZMIUserSettings,
... title=u"ZMI User Settings",
... description=u"")
Note that the preferences group provides the interface it is representing:
>>> IZMIUserSettings.providedBy(settings)
True
and the id, schema and title of the group are directly available:
>>> settings.__id__
'ZMISettings'
>>> settings.__schema__
<InterfaceClass zope.preference.README.IZMIUserSettings>
>>> settings.__title__
'ZMI User Settings'
So let's ask the preference group for the `skin` setting:
>>> settings.skin #doctest:+ELLIPSIS
Traceback (most recent call last):
...
zope.security.interfaces.NoInteraction
So why did the lookup fail? Because we have not yet specified the principal
for which we want to look up the preferences. To do that, we have to create a
new interaction:
>>> class Principal:
... def __init__(self, id):
... self.id = id
>>> principal = Principal('zope.user')
>>> class Participation:
... interaction = None
... def __init__(self, principal):
... self.principal = principal
>>> participation = Participation(principal)
>>> import zope.security.management
>>> zope.security.management.newInteraction(participation)
We also need an IAnnotations adapter for principals, so we can store the
settings:
>>> from zope.annotation.interfaces import IAnnotations
>>> @zope.interface.implementer(IAnnotations)
... class PrincipalAnnotations(dict):
... data = {}
... def __new__(class_, principal, context):
... try:
... annotations = class_.data[principal.id]
... except KeyError:
... annotations = dict.__new__(class_)
... class_.data[principal.id] = annotations
... return annotations
... def __init__(self, principal, context):
... pass
>>> from zope.component import provideAdapter
>>> provideAdapter(PrincipalAnnotations,
... (Principal, zope.interface.Interface), IAnnotations)
Let's now try to access the settings again:
>>> settings.skin
'Rotterdam'
which is the default value, since we have not set it yet. We can now reassign
the value:
>>> settings.skin = 'Basic'
>>> settings.skin
'Basic'
However, you cannot just enter any value, since it is validated before the
assignment:
>>> settings.skin = 'MySkin'
Traceback (most recent call last):
...
ConstraintNotSatisfied: MySkin
Preference Group Trees
======================
The preferences would not be very powerful if you could not create a full
tree of preferences. So let's create a sub-group for our ZMI user settings,
where we can adjust the look and feel of the folder contents view:
>>> class IFolderSettings(zope.interface.Interface):
... """Basic User Preferences"""
...
... shownFields = zope.schema.Set(
... title=u"Shown Fields",
... description=u"Fields shown in the table.",
... value_type=zope.schema.Choice(['name', 'size', 'creator']),
... default=set(['name', 'size']))
...
... sortedBy = zope.schema.Choice(
... title=u"Sorted By",
... description=u"Data field to sort by.",
... values=['name', 'size', 'creator'],
... default='name')
>>> folderSettings = preference.PreferenceGroup(
... "ZMISettings.Folder",
... schema=IFolderSettings,
... title=u"Folder Content View Settings")
Note that the id was chosen so that the parent id is the prefix of the child's
id. Our new preference sub-group should now be available as an attribute or an
item on the parent group ...
>>> settings.Folder
Traceback (most recent call last):
...
AttributeError: 'Folder' is not a preference or sub-group.
>>> settings['Folder']
Traceback (most recent call last):
...
KeyError: 'Folder'
but not before we register the groups as utilities:
>>> from zope.preference import interfaces
>>> from zope.component import provideUtility
>>> provideUtility(settings, interfaces.IPreferenceGroup,
... name='ZMISettings')
>>> provideUtility(folderSettings, interfaces.IPreferenceGroup,
... name='ZMISettings.Folder')
If we now try to lookup the sub-group again, we should be successful:
>>> settings.Folder #doctest:+ELLIPSIS
<zope.preference.preference.PreferenceGroup object at ...>
>>> settings['Folder'] #doctest:+ELLIPSIS
<zope.preference.preference.PreferenceGroup object at ...>
>>> 'Folder' in settings
True
>>> list(settings)
[<zope.preference.preference.PreferenceGroup object at ...>]
While the registry of the preference groups is flat, the careful naming of the
ids allows us to have a tree of preferences. Note that this pattern is very
similar to the way modules are handled in Python; they are stored in a flat
dictionary in ``sys.modules``, but due to the naming they appear to be in a
namespace tree.
While we are at it, there are also preference categories that can be compared
to Python packages. They basically are just a higher level grouping concept
that is used by the UI to better organize the preferences. A preference group
can be converted to a category by simply providing an additional interface:
>>> zope.interface.alsoProvides(folderSettings, interfaces.IPreferenceCategory)
>>> interfaces.IPreferenceCategory.providedBy(folderSettings)
True
Preference group objects can also hold arbitrary attributes, but since
they're not persistent this must be used with care:
>>> settings.not_in_schema = 1
>>> settings.not_in_schema
1
>>> del settings.not_in_schema
>>> settings.not_in_schema
Traceback (most recent call last):
...
AttributeError: 'not_in_schema' is not a preference or sub-group.
Default Preferences
===================
It is sometimes desirable to define default settings on a site-by-site
basis, instead of just using the default value from the schema. The
preferences package provides a module that implements a default
preferences provider that can be added as an unnamed utility for each
site:
>>> from zope.preference import default
We'll begin by creating a new root site:
>>> from zope.site.folder import rootFolder
>>> root = rootFolder()
>>> from zope.site.site import LocalSiteManager
>>> rsm = LocalSiteManager(root)
>>> root.setSiteManager(rsm)
And we'll make the new site the current site:
>>> zope.component.hooks.setSite(root)
Now we can register the default preference provider with the root site:
>>> provider = addUtility(
... rsm, default.DefaultPreferenceProvider(),
... interfaces.IDefaultPreferenceProvider)
So before we set an explicit default value for a preference, the schema field
default is used:
>>> settings.Folder.sortedBy
'name'
But if we now set a new default value with the provider,
>>> defaultFolder = provider.getDefaultPreferenceGroup('ZMISettings.Folder')
>>> defaultFolder.sortedBy = 'size'
then the default of the setting changes:
>>> settings.Folder.sortedBy
'size'
Because the ``ZMISettings.Folder`` was declared as a preference
category, the default implementation is too:
>>> interfaces.IPreferenceCategory.providedBy(defaultFolder)
True
The default preference providers also implicitly acquire default
values from parent sites. So if we add a new child folder called
``folder1``, make it a site and set it as the active site:
>>> from zope.site.folder import Folder
>>> root['folder1'] = Folder()
>>> folder1 = root['folder1']
>>> from zope.site.site import LocalSiteManager
>>> sm1 = LocalSiteManager(folder1)
>>> folder1.setSiteManager(sm1)
>>> zope.component.hooks.setSite(folder1)
and add a default provider there,
>>> provider1 = addUtility(
... sm1, default.DefaultPreferenceProvider(),
... interfaces.IDefaultPreferenceProvider)
then we still get the root's default values, because we have not defined any
in the higher default provider:
>>> settings.Folder.sortedBy
'size'
But if we provide the new provider with a default value for `sortedBy`,
>>> defaultFolder1 = provider1.getDefaultPreferenceGroup('ZMISettings.Folder')
>>> defaultFolder1.sortedBy = 'creator'
then it is used instead:
>>> settings.Folder.sortedBy
'creator'
Of course, once the root site becomes our active site again
>>> zope.component.hooks.setSite(root)
the default value of the root provider is used:
>>> settings.Folder.sortedBy
'size'
Of course, all the defaults in the world are not relevant anymore as soon as
the user actually provides a value:
>>> settings.Folder.sortedBy = 'name'
>>> settings.Folder.sortedBy
'name'
Oh, and have I mentioned that entered values are always validated? So you
cannot just assign any old value:
>>> settings.Folder.sortedBy = 'foo'
Traceback (most recent call last):
...
ConstraintNotSatisfied: foo
Finally, if the user deletes his/her explicit setting, we are back to the
default value:
>>> del settings.Folder.sortedBy
>>> settings.Folder.sortedBy
'size'
Just as with regular preference groups, the default preference groups
are arranged in a matching hierarchy:
>>> defaultSettings = provider.getDefaultPreferenceGroup('ZMISettings')
>>> defaultSettings.get('Folder')
<zope.preference.default.DefaultPreferenceGroup object at ...>
>>> defaultSettings.Folder
<zope.preference.default.DefaultPreferenceGroup object at ...>
They also report useful AttributeErrors for bad accesses:
>>> defaultSettings.not_in_schema
Traceback (most recent call last):
...
AttributeError: 'not_in_schema' is not a preference or sub-group.
Creating Preference Groups Using ZCML
=====================================
If you are using the user preference system in Zope 3, you will not have to
manually set up the preference groups as we did above (of course). We will use
ZCML instead. First, we need to register the directives:
>>> from zope.configuration import xmlconfig
>>> import zope.preference
>>> context = xmlconfig.file('meta.zcml', zope.preference)
Then the system sets up a root preference group:
>>> context = xmlconfig.string('''
... <configure
... xmlns="http://namespaces.zope.org/zope"
... i18n_domain="test">
...
... <preferenceGroup
... id=""
... title="User Preferences"
... />
...
... </configure>''', context)
Now we can use the preference system in its intended way. We access the folder
settings as follows:
>>> import zope.component
>>> prefs = zope.component.getUtility(interfaces.IPreferenceGroup)
>>> prefs.ZMISettings.Folder.sortedBy
'size'
Let's register the ZMI settings again under a new name via ZCML:
>>> context = xmlconfig.string('''
... <configure
... xmlns="http://namespaces.zope.org/zope"
... i18n_domain="test">
...
... <preferenceGroup
... id="ZMISettings2"
... title="ZMI Settings NG"
... schema="zope.preference.README.IZMIUserSettings"
... category="true"
... />
...
... </configure>''', context)
>>> prefs.ZMISettings2 #doctest:+ELLIPSIS
<zope.preference.preference.PreferenceGroup object at ...>
>>> prefs.ZMISettings2.__title__
'ZMI Settings NG'
>>> IZMIUserSettings.providedBy(prefs.ZMISettings2)
True
>>> interfaces.IPreferenceCategory.providedBy(prefs.ZMISettings2)
True
And the tree can be built again by carefully constructing the id:
>>> context = xmlconfig.string('''
... <configure
... xmlns="http://namespaces.zope.org/zope"
... i18n_domain="test">
...
... <preferenceGroup
... id="ZMISettings2.Folder"
... title="Folder Settings"
... schema="zope.preference.README.IFolderSettings"
... />
...
... </configure>''', context)
>>> prefs.ZMISettings2 #doctest:+ELLIPSIS
<zope.preference.preference.PreferenceGroup object at ...>
>>> prefs.ZMISettings2.Folder.__title__
'Folder Settings'
>>> IFolderSettings.providedBy(prefs.ZMISettings2.Folder)
True
>>> interfaces.IPreferenceCategory.providedBy(prefs.ZMISettings2.Folder)
False
Simple Python-Level Access
==========================
If a site is set, getting the user preferences is very simple:
>>> from zope.preference import UserPreferences
>>> prefs2 = UserPreferences()
>>> prefs2.ZMISettings.Folder.sortedBy
'size'
This function is also commonly registered as an adapter,
>>> from zope.location.interfaces import ILocation
>>> provideAdapter(UserPreferences, [ILocation], interfaces.IUserPreferences)
so that you can adapt any location to the user preferences:
>>> prefs3 = interfaces.IUserPreferences(folder1)
>>> prefs3.ZMISettings.Folder.sortedBy
'creator'
Traversal
=========
Okay, so all these objects are nice, but they do not make it any easier to
access the preferences in page templates. Thus, a special traversal namespace
has been created that makes it very simple to access the preferences via a
traversal path. But before we can use the path expressions, we have to
register all necessary traversal components and the special `preferences`
namespace:
>>> import zope.traversing.interfaces
>>> provideAdapter(preference.preferencesNamespace, [None],
... zope.traversing.interfaces.ITraversable,
... 'preferences')
We can now access the preferences as follows:
>>> from zope.traversing.api import traverse
>>> traverse(None, '++preferences++ZMISettings/skin')
'Basic'
>>> traverse(None, '++preferences++/ZMISettings/skin')
'Basic'
Security
========
You might already wonder under which permissions the preferences are
available. They are actually available publicly (`CheckerPublic`), but that
is not a problem, since the available values are looked up specifically for
the current user. And why should a user not have full access to his/her
preferences?
Let's create a checker using the function that the security machinery is
actually using:
>>> checker = preference.PreferenceGroupChecker(settings)
>>> checker.permission_id('skin')
Global(CheckerPublic,zope.security.checker)
>>> checker.setattr_permission_id('skin')
Global(CheckerPublic,zope.security.checker)
The id, title, description, and schema are publicly available for access,
but are not available for mutation at all:
>>> checker.permission_id('__id__')
Global(CheckerPublic,zope.security.checker)
>>> checker.setattr_permission_id('__id__') is None
True
The only way security could be compromised is if one could override the
annotations property. However, this property is not available for public
consumption at all, including read access:
>>> checker.permission_id('annotation') is None
True
>>> checker.setattr_permission_id('annotation') is None
True
| zope.preference | /zope.preference-5.0.tar.gz/zope.preference-5.0/src/zope/preference/README.rst | README.rst |
"""User Preferences Interfaces
"""
__docformat__ = "reStructuredText"
import zope.interface
import zope.schema
from zope.configuration.fields import MessageID
from zope.location.interfaces import ILocation
class IPreferenceGroup(ILocation):
"""A group of preferences.
This component represents a logical group of preferences. The preferences
contained by this group are defined through the schema. The group also
has a name by which it can be accessed.
The fields specified in the schema *must* be available as attributes and
items of the group instance. It is up to the implementation how this is
realized, however, most often one will implement __setattr__ and
__getattr__ as well as the common mapping API.
The reason all the API fields are doubly underlined is to avoid name
clashes.
"""
__id__ = zope.schema.TextLine(
title="Id",
description="The id of the group.",
required=True)
__schema__ = zope.schema.InterfaceField(
title="Schema",
description="Schema describing the preferences of the group.",
required=False)
__title__ = MessageID(
title="Title",
description="The title of the group used in the UI.",
required=True)
__description__ = MessageID(
title="Description",
description="The description of the group used in the UI.",
required=False)
class IPreferenceCategory(zope.interface.Interface):
"""A collection of preference groups.
Objects providing this interface serve as groups of preference
groups. This allows UIs to distinguish between high- and low-level
preference groups.
"""
class IUserPreferences(zope.interface.Interface):
"""Objects providing this interface have to provide the root preference
group API as well."""
class IDefaultPreferenceProvider(zope.interface.Interface):
"""A root object providing default values for the entire preferences tree.
Default preference providers are responsible for providing default values
for all preferences. The way they get these values is up to the
implementation.
"""
preferences = zope.schema.Field(
title="Default Preferences Root",
description="Link to the default preferences") | zope.preference | /zope.preference-5.0.tar.gz/zope.preference-5.0/src/zope/preference/interfaces.py | interfaces.py |
__docformat__ = "reStructuredText"
import zope.component
import zope.component.hooks
import zope.interface
from BTrees.OOBTree import OOBTree
from zope.annotation.interfaces import IAnnotations
from zope.container.interfaces import IReadContainer
from zope.location import Location
from zope.schema import getFields
from zope.security.checker import Checker
from zope.security.checker import CheckerPublic
from zope.security.management import getInteraction
from zope.traversing.interfaces import IContainmentRoot
from zope.preference.interfaces import IDefaultPreferenceProvider
from zope.preference.interfaces import IPreferenceCategory
from zope.preference.interfaces import IPreferenceGroup
pref_key = 'zope.app.user.UserPreferences'
@zope.interface.implementer(IPreferenceGroup, IReadContainer)
class PreferenceGroup(Location):
"""A feature-rich ``IPreferenceGroup`` implementation.
This class implements the ``IPreferenceGroup`` and ``IReadContainer`` interfaces.
"""
# Declare attributes here, so that they are always available.
__id__ = ''
__schema__ = None
__title__ = None
__description__ = None
def __init__(self, id, schema=None, title='', description='',
isCategory=False):
self.__id__ = id
self.__schema__ = schema
self.__title__ = title
self.__description__ = description
# The last part of the id is the name.
self.__name__ = id.split('.')[-1]
# Make sure this group provides all important interfaces.
directlyProvided = ()
if isCategory:
directlyProvided += (IPreferenceCategory,)
if schema:
directlyProvided += (schema,)
zope.interface.directlyProvides(self, directlyProvided)
# Store the actual parent in ``__parent``. Usually we would just override
# the property to an actual value during binding, but because we overrode
# ``__setattr__`` this is not possible anymore.
__parent = None
@property
def __parent__(self):
return self.__parent if self.__parent is not None \
else zope.component.hooks.getSite()
def __bind__(self, parent):
clone = self.__class__.__new__(self.__class__)
clone.__dict__.update(self.__dict__)
clone.__parent = parent
return clone
def get(self, key, default=None):
id = self.__id__ and self.__id__ + '.' + key or key
group = zope.component.queryUtility(IPreferenceGroup, id, default)
if group is default:
return default
return group.__bind__(self)
def items(self):
cutoff = self.__id__ and len(self.__id__) + 1 or 0
utilities = zope.component.getUtilitiesFor(IPreferenceGroup)
return [(id[cutoff:], group.__bind__(self))
for id, group in utilities
if (id != self.__id__ and
id.startswith(self.__id__) and
id[cutoff:].find('.') == -1)]
def __getitem__(self, key):
"""See zope.container.interfaces.IReadContainer"""
default = object()
obj = self.get(key, default)
if obj is default:
raise KeyError(key)
return obj
def __contains__(self, key):
"""See zope.container.interfaces.IReadContainer"""
return self.get(key) is not None
def keys(self):
"""See zope.container.interfaces.IReadContainer"""
return [id for id, group in self.items()]
def __iter__(self):
"""See zope.container.interfaces.IReadContainer"""
return iter(self.values())
def values(self):
"""See zope.container.interfaces.IReadContainer"""
return [group for _id, group in self.items()]
def __len__(self):
"""See zope.container.interfaces.IReadContainer"""
return len(self.items())
def __getattr__(self, key):
# Try to find a sub-group of the given id
group = self.get(key)
if group is not None:
return group
# Try to find a preference of the given name
if self.__schema__ and key in self.__schema__:
marker = object()
value = self.data.get(key, marker)
if value is marker:
# Try to find a default preference provider
provider = zope.component.queryUtility(
IDefaultPreferenceProvider,
context=self
)
if provider is None:
return self.__schema__[key].default
defaultGroup = provider.getDefaultPreferenceGroup(self.__id__)
return getattr(defaultGroup, key)
return value
# Nothing found, raise an attribute error
raise AttributeError("'%s' is not a preference or sub-group." % key)
def __setattr__(self, key, value):
if self.__schema__ and key in self.__schema__:
# Validate the value
bound = self.__schema__[key].bind(self)
bound.validate(value)
# Assign value
self.data[key] = value
else:
self.__dict__[key] = value
# If the schema changed, we really need to change the security
# checker as well.
if key == '__schema__':
checker = PreferenceGroupChecker(self)
self.__dict__['__Security_checker__'] = checker
def __delattr__(self, key):
if self.__schema__ and key in self.__schema__:
del self.data[key]
else:
del self.__dict__[key]
@property
def data(self):
# TODO: what if we have multiple participations?
principal = getInteraction().participations[0].principal
ann = zope.component.getMultiAdapter((principal, self), IAnnotations)
# If no preferences exist, create the root preferences object.
if ann.get(pref_key) is None:
ann[pref_key] = OOBTree()
prefs = ann[pref_key]
# If no entry for the group exists, create a new entry.
if self.__id__ not in prefs.keys():
prefs[self.__id__] = OOBTree()
return prefs[self.__id__]
def PreferenceGroupChecker(instance):
"""A function that generates a custom security checker.
The attributes available in a preference group are dynamically generated
based on the group schema and the available sub-groups. Thus, the
permission dictionaries have to be generated at runtime and are unique for
each preference group instance.
"""
read_perm_dict = {}
write_perm_dict = {}
# Make sure that the attributes from IPreferenceGroup and IReadContainer
# are public.
for attrName in ('__id__', '__schema__', '__title__', '__description__',
'get', 'items', 'keys', 'values',
'__getitem__', '__contains__', '__iter__', '__len__'):
read_perm_dict[attrName] = CheckerPublic
# Make the attributes generated from the schema available as well.
if instance.__schema__ is not None:
for name in getFields(instance.__schema__):
read_perm_dict[name] = CheckerPublic
write_perm_dict[name] = CheckerPublic
# Make all sub-groups available as well.
for name in instance.keys():
read_perm_dict[name] = CheckerPublic
write_perm_dict[name] = CheckerPublic
return Checker(read_perm_dict, write_perm_dict)
def UserPreferences(context=None):
"""Adapts an ``ILocation`` object to the ``IUserPreferences`` interface."""
if context is None:
context = zope.component.getSiteManager()
rootGroup = zope.component.getUtility(IPreferenceGroup)
rootGroup = rootGroup.__bind__(context)
rootGroup.__name__ = '++preferences++'
zope.interface.alsoProvides(rootGroup, IContainmentRoot)
return rootGroup
class preferencesNamespace:
"""Used to traverse to the root preferences group."""
def __init__(self, ob, request=None):
self.context = ob
def traverse(self, name, ignore):
rootGroup = zope.component.getUtility(IPreferenceGroup)
rootGroup = rootGroup.__bind__(self.context)
rootGroup.__name__ = '++preferences++'
zope.interface.alsoProvides(rootGroup, IContainmentRoot)
return name and rootGroup[name] or rootGroup | zope.preference | /zope.preference-5.0.tar.gz/zope.preference-5.0/src/zope/preference/preference.py | preference.py |
=======================
Principal Annotations
=======================
This package implements annotations for :mod:`zope.security` principals.
To make it clear, the *principal* here is the object that provides the
:class:`zope.security.interfaces.IPrincipal` interface and *annotations* is
the object providing :class:`zope.annotation.interfaces.IAnnotations`.
The problem is that principals are dynamic, non-persistent objects created
on the fly for every security participation (a request or the like), so
common annotation techniques, like AttributeAnnotations, cannot be applied
to them.
This package provides a persistent storage of principal annotations, keyed
by principal ID, as well as an adapter from IPrincipal to IAnnotations.
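In short, once the utility described below is registered, application code
usually just adapts a principal (a sketch only -- ``principal`` and the
annotation key are placeholders, and this is not part of the doctests)::

  from zope.annotation.interfaces import IAnnotations

  annotations = IAnnotations(principal)       # adapt the principal
  annotations['my.app.settings'] = 'a value'  # stored under principal.id
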
PrincipalAnnotationUtility
==========================
The core of this package is the :class:`~.PrincipalAnnotationUtility` class
that stores annotations for principals and lets you retrieve them easily.
It provides the :class:`~zope.principalannotation.interfaces.IPrincipalAnnotationUtility` interface::
>>> from zope.principalannotation.interfaces import IPrincipalAnnotationUtility
>>> from zope.principalannotation.utility import PrincipalAnnotationUtility
>>> from zope.interface.verify import verifyObject
>>> util = PrincipalAnnotationUtility()
>>> verifyObject(IPrincipalAnnotationUtility, util)
True
It provides three methods: ``getAnnotations``, ``getAnnotationsById``
and ``hasAnnotations``. Let's create a testing principal and check out
these methods::
>>> from zope.security.testing import Principal
>>> nadako = Principal('nadako')
>>> nadako.id
'nadako'
We can check if our principal has any annotations. Of course, it
currently doesn't have any::
>>> util.hasAnnotations(nadako)
False
We can get ``IAnnotations`` object using principal object itself::
>>> util.getAnnotations(nadako)
<...Annotations object at 0x...>
Or using principal id::
>>> util.getAnnotationsById(nadako.id)
<...Annotations object at 0x...>
Let's get the ``IAnnotations`` object for our principal and play with it::
>>> annots = util.getAnnotations(nadako)
>>> from zope.interface.verify import verifyObject
>>> from zope.annotation.interfaces import IAnnotations
>>> verifyObject(IAnnotations, annots)
True
Let's check the ``IAnnotation`` contract::
>>> bool(annots)
False
>>> annots['not.here']
Traceback (most recent call last):
...
KeyError: 'not.here'
>>> annots.get('not.here') is None
True
>>> annots.get('not.here', 42)
42
Note that the ``IAnnotations`` object gets stored in the utility only
when we set a key for it. This is a simple optimization that allows
us not to store any data when all we do is simply check for the presence
of annotations. The ``hasAnnotations`` method will return ``True`` after
storing a key in the annotations::
>>> util.hasAnnotations(nadako)
False
>>> annots['its.here'] = 'some info'
>>> util.hasAnnotations(nadako)
True
We can also delete the existing key::
>>> del annots['its.here']
But we can't delete a key that does not exist (any more)::
>>> del annots['its.here']
Traceback (most recent call last):
...
KeyError: 'its.here'
Multiple annotation utilities
=============================
Imagine that your application has a root ``site`` object with its
component registry (a.k.a. site manager) and that object has a sub-site
object with its own component registry, and that component registry
has the root's component registry as its base.
In that case, we want the ``IAnnotations`` object to be available to
retrieve annotations from higher-level utilities.
Let's register our utility in the root site and create a sub-site
with its own IPrincipalAnnotationUtility::
>>> root['util'] = util
>>> rootsm = root.getSiteManager()
>>> rootsm.registerUtility(util, IPrincipalAnnotationUtility)
>>> from zope.site.folder import Folder
>>> from zope.site.site import LocalSiteManager
>>> subsite = Folder()
>>> root['subsite'] = subsite
>>> subsm = LocalSiteManager(subsite)
>>> subsm.__bases__ = (rootsm,)
>>> subsite.setSiteManager(subsm)
>>> util2 = PrincipalAnnotationUtility()
>>> subsite['util2'] = util2
>>> subsm.registerUtility(util2, IPrincipalAnnotationUtility)
Now, let's create a key in the IAnnotations, provided by root utility::
>>> annots = util.getAnnotations(nadako)
>>> annots['root.number'] = 42
>>> sorted(annots.items())
[('root.number', 42)]
The subsite utility should get the annotation successfully::
>>> annots2 = util2.getAnnotations(nadako)
>>> bool(annots2)
True
>>> annots2['root.number']
42
>>> del annots['root.number']
>>> bool(annots2)
False
>>> annots['root.number'] = 42
If we have the key both in the higher-level annotations and the lower-level
ones, the lower-level value will take priority, but the higher-level one
won't be deleted or overridden::
>>> annots['another.number'] = 1
>>> annots2['another.number'] = 42
>>> annots['another.number']
1
>>> annots2['another.number']
42
>>> sorted(iter(annots))
['another.number', 'root.number']
>>> sorted(iter(annots2))
['another.number']
If we delete the key from the lower level, it will not be deleted from the
higher-level utility::
>>> del annots2['another.number']
>>> annots['another.number']
1
>>> annots2['another.number']
1
>>> sorted(iter(annots))
['another.number', 'root.number']
This is somewhat confusing given the way that ``in`` and boolean tests
work::
>>> 'another.number' in annots
True
>>> 'another.number' in annots2
False
>>> annots2['another.number']
1
>>> list(iter(annots2))
[]
>>> bool(annots2)
True
IPrincipal -> IAnnotations adapter
==================================
Of course, the nicest feature is that we can simply adapt our
principal object to IAnnotations and get those annotations the standard
way documented in the ``zope.annotation`` package.
>>> annots = IAnnotations(nadako)
>>> annots
<...Annotations object at 0x...>
>>> annots['root.number']
42
By default, the IAnnotations adapter uses the current site's utility::
>>> IAnnotations(nadako) is util.getAnnotations(nadako)
True
>>> from zope.component.hooks import setSite
>>> setSite(subsite)
>>> IAnnotations(nadako) is util2.getAnnotations(nadako)
True
However, we can use a binary multi-adapter to IAnnotations to specify
some context object from which to get the annotations utility::
>>> from zope.component import getMultiAdapter
>>> annots = getMultiAdapter((nadako, root), IAnnotations)
>>> annots is util.getAnnotations(nadako)
True
>>> annots = getMultiAdapter((nadako, subsite), IAnnotations)
>>> annots is util2.getAnnotations(nadako)
True
| zope.principalannotation | /zope.principalannotation-5.0-py3-none-any.whl/zope/principalannotation/README.rst | README.rst |
"""Implementation of IPrincipalAnnotationUtility
"""
__docformat__ = 'restructuredtext'
from BTrees.OOBTree import OOBTree
from persistent import Persistent
from persistent.dict import PersistentDict
from zope.annotation.interfaces import IAnnotations
from zope.component import queryNextUtility
from zope.location import Location
from zope.location.interfaces import IContained
from zope.security.interfaces import IPrincipal
from zope import component
from zope import interface
from zope.principalannotation.interfaces import IPrincipalAnnotationUtility
# TODO: register utility as adapter for IAnnotations on utility activation.
@interface.implementer(IPrincipalAnnotationUtility, IContained)
class PrincipalAnnotationUtility(Persistent):
"""
Stores :class:`zope.annotation.interfaces.IAnnotations` for
:class:`zope.security.interfaces.IPrincipal` objects.
"""
__parent__ = None
__name__ = None
def __init__(self):
self.annotations = OOBTree()
def getAnnotations(self, principal):
"""
See :meth:`.IPrincipalAnnotationUtility.getAnnotations`.
"""
return self.getAnnotationsById(principal.id)
def getAnnotationsById(self, principalId):
"""
See :meth:`.IPrincipalAnnotationUtility.getAnnotationsById`.
"""
annotations = self.annotations.get(principalId)
if annotations is None:
annotations = Annotations(principalId, store=self.annotations)
annotations.__parent__ = self
annotations.__name__ = principalId
return annotations
def hasAnnotations(self, principal):
"""
See :meth:`.IPrincipalAnnotationUtility.hasAnnotations`.
"""
return principal.id in self.annotations
@interface.implementer(IAnnotations)
class Annotations(Persistent, Location):
"""
Stores annotations for a single principal in a :class:`~.PersistentDict`.
Implements the dict-like API of
:class:`zope.annotation.interfaces.IAnnotations`.
Cooperates with the site hierarchy to find annotations in parent sites.
"""
def __init__(self, principalId, store=None):
self.principalId = principalId
self.data = PersistentDict() # We don't really expect that many
# _v_store is used to remember a mapping object that we should
# be saved in if we ever change
self._v_store = store
def __bool__(self):
nz = bool(self.data)
if not nz:
# maybe higher-level utility's annotations will be non-zero
next = queryNextUtility(self, IPrincipalAnnotationUtility)
if next is not None:
annotations = next.getAnnotationsById(self.principalId)
return bool(annotations)
return nz
__nonzero__ = __bool__
def __getitem__(self, key):
try:
return self.data[key]
except KeyError:
# We failed locally: delegate to a higher-level utility.
next = queryNextUtility(self, IPrincipalAnnotationUtility)
if next is not None:
annotations = next.getAnnotationsById(self.principalId)
return annotations[key]
raise
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __setitem__(self, key, value):
if getattr(self, '_v_store', None) is not None:
# _v_store is used to remember a mapping object that we should
# be saved in if we ever change
self._v_store[self.principalId] = self
del self._v_store
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __contains__(self, key):
return key in self.data
def items(self):
return self.data.items()
@component.adapter(IPrincipal)
@interface.implementer(IAnnotations)
def annotations(principal, context=None):
utility = component.getUtility(
IPrincipalAnnotationUtility,
context=context)
return utility.getAnnotations(principal) | zope.principalannotation | /zope.principalannotation-5.0-py3-none-any.whl/zope/principalannotation/utility.py | utility.py |
=============================
Global principal definition
=============================
Global principals are defined via ZCML and are placed in
:data:`zope.principalregistry.principalregistry.principalRegistry`.
There are several kinds of principals that can be defined.
When you use ZCML to configure this package (load its
``configure.zcml``) that registry becomes a global utility
implementing :class:`zope.authentication.interfaces.IAuthentication`.
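That registry can then be obtained like any other utility (a sketch,
assuming this package's ``configure.zcml`` has been loaded and a principal
such as the ``zope.manager`` defined below exists)::

  from zope.component import getUtility
  from zope.authentication.interfaces import IAuthentication

  auth = getUtility(IAuthentication)          # the global principal registry
  manager = auth.getPrincipal('zope.manager')
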
Authenticated Users
===================
There are principals that can log in:
>>> zcml("""
... <configure xmlns="http://namespaces.zope.org/zope">
...
... <principal
... id="zope.manager"
... title="Manager"
... description="System Manager"
... login="admin"
... password_manager="SHA1"
... password="40bd001563085fc35165329ea1ff5c5ecbdbbeef"
... />
...
... </configure>
... """)
>>> from zope.principalregistry.principalregistry import principalRegistry
>>> [p] = principalRegistry.getPrincipals('')
>>> p.id, p.title, p.description, p.getLogin(), p.validate('123')
('zope.manager', 'Manager', 'System Manager', 'admin', True)
We can verify that it conforms to the
:class:`zope.security.interfaces.IPrincipal` interface:
>>> from zope.security.interfaces import IPrincipal
>>> from zope.interface.verify import verifyObject
>>> from zope.schema import getValidationErrors
>>> verifyObject(IPrincipal, p)
True
>>> getValidationErrors(IPrincipal, p)
[]
In fact, it's actually a
:class:`zope.security.interfaces.IGroupAwarePrincipal`:
>>> from zope.security.interfaces import IGroupAwarePrincipal
>>> verifyObject(IGroupAwarePrincipal, p)
True
>>> getValidationErrors(IGroupAwarePrincipal, p)
[]
The unauthenticated principal
=============================
There is the unauthenticated principal:
>>> zcml("""
... <configure
... xmlns="http://namespaces.zope.org/zope"
... >
...
... <unauthenticatedPrincipal
... id="zope.unknown"
... title="Anonymous user"
... description="A person we don't know"
... />
...
... </configure>
... """)
>>> p = principalRegistry.unauthenticatedPrincipal()
>>> p.id, p.title, p.description
('zope.unknown', 'Anonymous user', "A person we don't know")
It implements :class:`zope.authentication.interfaces.IUnauthenticatedPrincipal`:
>>> from zope.authentication import interfaces
>>> verifyObject(interfaces.IUnauthenticatedPrincipal, p)
True
>>> getValidationErrors(interfaces.IUnauthenticatedPrincipal, p)
[]
The unauthenticated principal will also be registered as a utility.
This is to provide easy access to the data defined for the principal so
that other (more featureful) principal objects can be created for the
same principal.
>>> from zope import component
>>> p = component.getUtility(interfaces.IUnauthenticatedPrincipal)
>>> p.id, p.title, p.description
('zope.unknown', 'Anonymous user', "A person we don't know")
The unauthenticated group
=========================
An unauthenticated group can also be defined in ZCML:
>>> zcml("""
... <configure
... xmlns="http://namespaces.zope.org/zope"
... >
...
... <unauthenticatedGroup
... id="zope.unknowngroup"
... title="Anonymous users"
... description="People we don't know"
... />
...
... </configure>
... """)
This directive creates a group and registers it as a utility providing
IUnauthenticatedGroup:
>>> g = component.getUtility(interfaces.IUnauthenticatedGroup)
>>> g.id, g.title, g.description
('zope.unknowngroup', 'Anonymous users', "People we don't know")
It implements :class:`zope.authentication.interfaces.IUnauthenticatedGroup`:
>>> verifyObject(interfaces.IUnauthenticatedGroup, g)
True
>>> getValidationErrors(interfaces.IUnauthenticatedGroup, g)
[]
The unauthenticatedGroup directive also updates the group of the
unauthenticated principal:
>>> p = principalRegistry.unauthenticatedPrincipal()
>>> g.id in p.groups
True
>>> p = component.getUtility(interfaces.IUnauthenticatedPrincipal)
>>> g.id in p.groups
True
If the unauthenticated principal is defined after the unauthenticated
group, it will likewise have the group added to it:
>>> reset()
>>> zcml("""
... <configure xmlns="http://namespaces.zope.org/zope">
...
... <unauthenticatedGroup
... id="zope.unknowngroup2"
... title="Anonymous users"
... description="People we don't know"
... />
... <unauthenticatedPrincipal
... id="zope.unknown2"
... title="Anonymous user"
... description="A person we don't know"
... />
...
... </configure>
... """)
>>> g = component.getUtility(interfaces.IUnauthenticatedGroup)
>>> g.id, g.title, g.description
('zope.unknowngroup2', 'Anonymous users', "People we don't know")
>>> p = principalRegistry.unauthenticatedPrincipal()
>>> p.id, g.id in p.groups
('zope.unknown2', True)
>>> p = component.getUtility(interfaces.IUnauthenticatedPrincipal)
>>> p.id, g.id in p.groups
('zope.unknown2', True)
The unauthenticated group shows up as a principal in the principal
registry:
>>> principalRegistry.getPrincipal(g.id) == g
True
>>> list(principalRegistry.getPrincipals("Anonymous")) == [g]
True
The authenticated group
=======================
There is an authenticated group:
>>> reset()
>>> zcml("""
... <configure xmlns="http://namespaces.zope.org/zope">
...
... <unauthenticatedPrincipal
... id="zope.unknown3"
... title="Anonymous user"
... description="A person we don't know"
... />
... <principal
... id="zope.manager2"
... title="Manager"
... description="System Manager"
... login="admin"
... password="123"
... />
... <authenticatedGroup
... id="zope.authenticated"
... title="Authenticated users"
... description="People we know"
... />
... <principal
... id="zope.manager3"
... title="Manager 3"
... login="admin3"
... password="123"
... />
...
... </configure>
... """)
It defines an IAuthenticatedGroup utility:
>>> g = component.getUtility(interfaces.IAuthenticatedGroup)
>>> g.id, g.title, g.description
('zope.authenticated', 'Authenticated users', 'People we know')
It implements :class:`zope.authentication.interfaces.IAuthenticatedGroup`:
>>> verifyObject(interfaces.IAuthenticatedGroup, g)
True
>>> getValidationErrors(interfaces.IAuthenticatedGroup, g)
[]
It also adds itself to the groups of any non-group principals already
defined, and, when non-group principals are defined, they put
themselves in the group if it's defined:
>>> principals = sorted(principalRegistry.getPrincipals(''),
... key=lambda p: p.id)
>>> for p in principals:
... print(p.id, p.groups == [g.id])
zope.authenticated False
zope.manager2 True
zope.manager3 True
Excluding unauthenticated principals, of course:
>>> p = principalRegistry.unauthenticatedPrincipal()
>>> p.id, g.id in p.groups
('zope.unknown3', False)
>>> p = component.getUtility(interfaces.IUnauthenticatedPrincipal)
>>> p.id, g.id in p.groups
('zope.unknown3', False)
The everybody group
===================
Finally, there is an everybody group:
>>> reset()
>>> zcml("""
... <configure xmlns="http://namespaces.zope.org/zope">
...
... <unauthenticatedPrincipal
... id="zope.unknown4"
... title="Anonymous user"
... description="A person we don't know"
... />
... <principal
... id="zope.manager4"
... title="Manager"
... description="System Manager"
... login="admin"
... password="123"
... />
... <everybodyGroup
... id="zope.everybody"
... title="Everybody"
... description="All People"
... />
... <principal
... id="zope.manager5"
... title="Manager 5"
... login="admin5"
... password="123"
... />
...
... </configure>
... """)
The everybodyGroup directive defines an IEveryoneGroup utility:
>>> g = component.getUtility(interfaces.IEveryoneGroup)
>>> g.id, g.title, g.description
('zope.everybody', 'Everybody', 'All People')
It implements :class:`zope.authentication.interfaces.IEveryoneGroup`:
>>> verifyObject(interfaces.IEveryoneGroup, g)
True
>>> getValidationErrors(interfaces.IEveryoneGroup, g)
[]
It also adds itself to the groups of any non-group principals already
defined, and, when non-group principals are defined, they put
themselves in the group if it's defined:
>>> principals = sorted(principalRegistry.getPrincipals(''),
... key=lambda p: p.id)
>>> for p in principals:
... print(p.id, p.groups == [g.id])
zope.everybody False
zope.manager4 True
zope.manager5 True
Including unauthenticated principals, of course:
>>> p = principalRegistry.unauthenticatedPrincipal()
>>> p.id, g.id in p.groups
('zope.unknown4', True)
>>> p = component.getUtility(interfaces.IUnauthenticatedPrincipal)
>>> p.id, g.id in p.groups
('zope.unknown4', True)
Note that it is up to IAuthentication implementations to associate
these groups with their principals, as appropriate.
In our case, if we define an unauthenticated principal after having
defined the everybody group, the principal will be automatically
added:
>>> zcml("""
... <configure xmlns="http://namespaces.zope.org/zope">
...
... <unauthenticatedPrincipal
... id="zope.unknown5"
... title="Anonymous user"
... description="A person we don't know"
... />
...
... </configure>
... """)
>>> p = component.getUtility(interfaces.IUnauthenticatedPrincipal)
>>> p.id, g.id in p.groups
('zope.unknown5', True)
The system_user
===============
There is also a system_user that is defined in the code. It will be returned
from the getPrincipal method of the registry.
>>> import zope.security.management
>>> import zope.principalregistry.principalregistry
>>> auth = zope.principalregistry.principalregistry.PrincipalRegistry()
>>> system_user = auth.getPrincipal('zope.security.management.system_user')
>>> system_user is zope.security.management.system_user
True
| zope.principalregistry | /zope.principalregistry-5.0-py3-none-any.whl/zope/principalregistry/README.rst | README.rst |
"""Global Authentication Utility or Principal Registry
"""
import zope.security.management
from zope.authentication.interfaces import IAuthenticatedGroup
from zope.authentication.interfaces import IAuthentication
from zope.authentication.interfaces import IEveryoneGroup
from zope.authentication.interfaces import ILoginPassword
from zope.authentication.interfaces import ILogout
from zope.authentication.interfaces import IUnauthenticatedGroup
from zope.authentication.interfaces import IUnauthenticatedPrincipal
from zope.authentication.interfaces import PrincipalLookupError
from zope.component import getUtility
from zope.interface import implementer
from zope.password.interfaces import IPasswordManager
from zope.security.interfaces import IGroupAwarePrincipal
def _as_text(s):
return s.decode('utf-8') if isinstance(s, bytes) else s
class DuplicateLogin(Exception):
pass
class DuplicateId(Exception):
pass
@implementer(IAuthentication, ILogout)
class PrincipalRegistry:
"""
An in-memory implementation of
:class:`zope.authentication.interfaces.IAuthentication`
and :class:`zope.authentication.interfaces.ILogout`.
"""
# Methods implementing IAuthentication
def authenticate(self, request):
a = ILoginPassword(request, None)
if a is not None:
login = a.getLogin()
if login is not None:
# The login will be in bytes, but the registry stores them
# using strings.
p = self.__principalsByLogin.get(_as_text(login), None)
if p is not None:
password = a.getPassword()
if p.validate(password):
return p
return None
__defaultid = None
__defaultObject = None
def defineDefaultPrincipal(self, id, title, description='',
principal=None):
id = _as_text(id)
title = _as_text(title)
description = _as_text(description)
if id in self.__principalsById:
raise DuplicateId(id)
self.__defaultid = id
if principal is None:
principal = UnauthenticatedPrincipal(id, title, description)
principal.__name__ = id
principal.__parent__ = self
self.__defaultObject = principal
return principal
def unauthenticatedPrincipal(self):
return self.__defaultObject
def unauthorized(self, id, request):
if id is None or id == self.__defaultid:
a = ILoginPassword(request)
a.needLogin(realm="Zope")
def getPrincipal(self, id):
r = self.__principalsById.get(id)
if r is None:
if id == self.__defaultid:
return self.__defaultObject
if id == zope.security.management.system_user.id:
return zope.security.management.system_user
raise PrincipalLookupError(id)
return r
def getPrincipalByLogin(self, login):
return self.__principalsByLogin[login]
def getPrincipals(self, name):
name = name.lower()
return [p for p in self.__principalsById.values()
if (p.title.lower().startswith(name) or
p.getLogin().lower().startswith(name))]
def logout(self, request):
# not supporting basic auth logout -- no such thing
pass
# Management methods
def __init__(self):
self.__principalsById = {}
self.__principalsByLogin = {}
def definePrincipal(
self,
principal,
title,
description='',
login='',
password=b'',
passwordManagerName='Plain Text'):
id = _as_text(principal)
title = _as_text(title)
description = _as_text(description)
login = _as_text(login)
if login in self.__principalsByLogin:
raise DuplicateLogin(login)
if id in self.__principalsById or id == self.__defaultid:
raise DuplicateId(id)
p = Principal(id, title, description,
login, password, passwordManagerName)
p.__name__ = id
p.__parent__ = self
self.__principalsByLogin[login] = p
self.__principalsById[id] = p
return p
def registerGroup(self, group):
id = _as_text(group.id)
if id in self.__principalsById or id == self.__defaultid:
raise DuplicateId(id)
self.__principalsById[group.id] = group
def _clear(self):
self.__init__()
self.__defaultid = None
self.__defaultObject = None
#: The global registry that the ZCML directives will
#: modify.
principalRegistry = PrincipalRegistry()
# Register our cleanup with Testing.CleanUp to make writing unit tests
# simpler.
try:
from zope.testing.cleanup import addCleanUp
except ImportError: # pragma: no cover
pass
else:
addCleanUp(principalRegistry._clear)
del addCleanUp
class PrincipalBase:
__name__ = __parent__ = None
def __init__(self, id, title, description):
self.id = _as_text(id)
self.title = _as_text(title)
self.description = _as_text(description)
self.groups = []
class Group(PrincipalBase):
def getLogin(self):
return '' # to make registry search happy
@implementer(IGroupAwarePrincipal)
class Principal(PrincipalBase):
"""
The default implementation of
:class:`zope.security.interfaces.IGroupAwarePrincipal`
that :class:`PrincipalRegistry` will create.
"""
def __init__(self, id, title, description, login,
pw, pwManagerName="Plain Text"):
super().__init__(id, title, description)
self.__login = _as_text(login)
self.__pwManagerName = pwManagerName
self.__pw = pw
def __getPasswordManager(self):
return getUtility(IPasswordManager, self.__pwManagerName)
def getLogin(self):
return self.__login
def validate(self, pw):
pwManager = self.__getPasswordManager()
return pwManager.checkPassword(self.__pw, pw)
@implementer(IUnauthenticatedPrincipal)
class UnauthenticatedPrincipal(PrincipalBase):
"""An implementation of :class:`zope.authentication.interfaces.IUnauthenticatedPrincipal`.""" # noqa: E501 line too long
fallback_unauthenticated_principal = (
UnauthenticatedPrincipal(
__name__ + '.fallback_unauthenticated_principal',
'Fallback unauthenticated principal',
'The default unauthenticated principal. Used as a fallback to '
'allow challenging for a user even if the IAuthentication returned '
'None as the unauthenticated principal.'))
@implementer(IUnauthenticatedGroup)
class UnauthenticatedGroup(Group):
"""An implementation of :class:`zope.authentication.interfaces.IUnauthenticatedGroup`.""" # noqa: E501 line too long
@implementer(IAuthenticatedGroup)
class AuthenticatedGroup(Group):
"""An implementation of :class:`zope.authentication.interfaces.IAuthenticatedGroup`.""" # noqa: E501 line too long
@implementer(IEveryoneGroup)
class EverybodyGroup(Group):
"""An implementation of :class:`zope.authentication.interfaces.IEverybodyGroup`.""" # noqa: E501 line too long | zope.principalregistry | /zope.principalregistry-5.0-py3-none-any.whl/zope/principalregistry/principalregistry.py | principalregistry.py |
"""Directives for defining principals and groups
"""
from zope.authentication import interfaces
from zope.component.zcml import utility
from zope import component
from zope.principalregistry import principalregistry
def _principal():
group = component.queryUtility(interfaces.IAuthenticatedGroup)
if group is not None:
_authenticatedGroup(group.id)
group = component.queryUtility(interfaces.IEveryoneGroup)
if group is not None:
_everybodyGroup(group.id)
def principal(_context, id, title, login,
password, description='', password_manager="Plain Text"):
"""Implementation of :class:`zope.principalregistry.metadirectives.IDefinePrincipalDirective`.""" # noqa: E501 line too long
# Make sure password is encoded to bytes, which is required by the
# principal registry.
password = password.encode('utf-8')
_context.action(
discriminator=('principal', id),
callable=principalregistry.principalRegistry.definePrincipal,
args=(id, title, description, login, password, password_manager))
_context.action(discriminator=None, callable=_principal, args=())
def _unauthenticatedPrincipal():
group = component.queryUtility(interfaces.IUnauthenticatedGroup)
if group is not None:
_unauthenticatedGroup(group.id)
group = component.queryUtility(interfaces.IEveryoneGroup)
if group is not None:
_everybodyGroup(group.id)
def unauthenticatedPrincipal(_context, id, title, description=''):
"""Implementation of :class:`zope.principalregistry.metadirectives.IDefineUnauthenticatedPrincipalDirective`.""" # noqa: E501 line too long
principal = principalregistry.UnauthenticatedPrincipal(
id, title, description)
_context.action(
discriminator='unauthenticatedPrincipal',
callable=principalregistry.principalRegistry.defineDefaultPrincipal,
args=(id, title, description, principal))
utility(_context, interfaces.IUnauthenticatedPrincipal, principal)
_context.action(
discriminator=None,
callable=_unauthenticatedPrincipal,
args=(),
)
def _unauthenticatedGroup(group):
p = principalregistry.principalRegistry.unauthenticatedPrincipal()
if p is not None:
p.groups.append(group)
def unauthenticatedGroup(_context, id, title, description=''):
"""Implementation of :class:`zope.principalregistry.metadirectives.IDefineUnauthenticatedGroupDirective`.""" # noqa: E501 line too long
principal = principalregistry.UnauthenticatedGroup(
id, title, description)
utility(_context, interfaces.IUnauthenticatedGroup, principal)
_context.action(
discriminator=None,
callable=_unauthenticatedGroup,
args=(principal.id, ),
)
_context.action(
discriminator=None,
callable=principalregistry.principalRegistry.registerGroup,
args=(principal, ),
)
def _authenticatedGroup(group):
for p in principalregistry.principalRegistry.getPrincipals(''):
if not isinstance(p, principalregistry.Principal):
continue
if group not in p.groups:
p.groups.append(group)
def authenticatedGroup(_context, id, title, description=''):
"""Implementation of :class:`zope.principalregistry.metadirectives.IDefineAuthenticatedGroupDirective`.""" # noqa: E501 line too long
principal = principalregistry.AuthenticatedGroup(
id, title, description)
utility(_context, interfaces.IAuthenticatedGroup, principal)
_context.action(
discriminator=None,
callable=_authenticatedGroup,
args=(principal.id, ),
)
_context.action(
discriminator=None,
callable=principalregistry.principalRegistry.registerGroup,
args=(principal, ),
)
def _everybodyGroup(group):
for p in principalregistry.principalRegistry.getPrincipals(''):
if not isinstance(p, principalregistry.Principal):
continue
if group not in p.groups:
p.groups.append(group)
p = principalregistry.principalRegistry.unauthenticatedPrincipal()
if p is not None:
p.groups.append(group)
def everybodyGroup(_context, id, title, description=''):
"""Implementation of :class:`zope.principalregistry.metadirectives.IDefineEverybodyGroupDirective`.""" # noqa: E501 line too long
principal = principalregistry.EverybodyGroup(
id, title, description)
utility(_context, interfaces.IEveryoneGroup, principal)
_context.action(
discriminator=None,
callable=_everybodyGroup,
args=(principal.id, ),
)
_context.action(
discriminator=None,
callable=principalregistry.principalRegistry.registerGroup,
args=(principal, ),
) | zope.principalregistry | /zope.principalregistry-5.0-py3-none-any.whl/zope/principalregistry/metaconfigure.py | metaconfigure.py |
=========
Changes
=========
3.0 (2023-03-27)
================
- Add support for Python 3.11.
- Drop support for Python 2.7, 3.5, 3.6.
- Drop support for deprecated ``python setup.py test``.
2.4 (2022-08-26)
================
- Drop support for Python 3.4.
- Add support for Python 3.8, 3.9, 3.10.
2.3.0 (2018-10-05)
==================
- Add support for Python 3.7.
2.2.0 (2017-09-01)
==================
- Add support for Python 3.5 and 3.6.
- Drop support for Python 2.6, 3.2 and 3.3.
- Host documentation at https://zopeprocesslifetime.readthedocs.io/en/latest/
2.1.0 (2014-12-27)
==================
- Add support for PyPy and PyPy3.
- Add support for Python 3.4.
- Add support for testing on Travis.
2.0.0 (2013-02-22)
==================
- Replace deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Add support for Python 3.2 and 3.3
- Drop support for Python 2.4 and 2.5.
1.0 (2009-05-13)
================
- Split out event interfaces / implementations from ``zope.app.appsetup``
version 3.10.2.
| zope.processlifetime | /zope.processlifetime-3.0.tar.gz/zope.processlifetime-3.0/CHANGES.rst | CHANGES.rst |
==========================
``zope.processlifetime``
==========================
.. image:: https://img.shields.io/pypi/v/zope.processlifetime.svg
:target: https://pypi.python.org/pypi/zope.processlifetime/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/zope.processlifetime.svg
:target: https://pypi.org/project/zope.processlifetime/
:alt: Supported Python versions
.. image:: https://github.com/zopefoundation/zope.processlifetime/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zope.processlifetime/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/zope.processlifetime/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zope.processlifetime?branch=master
.. image:: https://readthedocs.org/projects/zopeprocesslifetime/badge/?version=latest
:target: https://zopeprocesslifetime.readthedocs.io/en/latest/
:alt: Documentation Status
This package provides interfaces / implementations for events relative
to the lifetime of a server process (startup, database opening, etc.)
These events are usually used with `zope.event
<http://zopeevent.readthedocs.io/en/latest/>`_.
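For example, a minimal sketch of how application code might react to these
events (``on_lifetime_event`` is just an illustrative subscriber, not part
of this package)::

    import zope.event
    from zope.processlifetime import DatabaseOpened, ProcessStarting

    def on_lifetime_event(event):
        # React to whichever process-lifetime events we care about.
        if isinstance(event, ProcessStarting):
            print("server process is starting")
        elif isinstance(event, DatabaseOpened):
            print("database opened:", event.database)

    zope.event.subscribers.append(on_lifetime_event)

    # A server announces the corresponding moment like this:
    zope.event.notify(ProcessStarting())
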
Documentation is hosted at https://zopeprocesslifetime.readthedocs.io/en/latest/
| zope.processlifetime | /zope.processlifetime-3.0.tar.gz/zope.processlifetime-3.0/README.rst | README.rst |
.. include:: ../README.rst
This is a simple package consisting of just a few interfaces and
implementations.
.. automodule:: zope.processlifetime
.. toctree::
:maxdepth: 2
changelog
Development
===========
zope.processlifetime is hosted at GitHub:
https://github.com/zopefoundation/zope.processlifetime/
Project URLs
============
* https://pypi.python.org/pypi/zope.processlifetime (PyPI entry and downloads)
====================
Indices and tables
====================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zope.processlifetime | /zope.processlifetime-3.0.tar.gz/zope.processlifetime-3.0/docs/index.rst | index.rst |
"""More convenience functions for dealing with proxies.
"""
import operator
import os
import pickle
from zope.interface import moduleProvides
from zope.proxy.interfaces import IProxyIntrospection
moduleProvides(IProxyIntrospection)
__all__ = tuple(IProxyIntrospection)
def ProxyIterator(p):
yield p
while isProxy(p):
p = getProxiedObject(p)
yield p
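# Illustrative usage of ProxyIterator (a sketch only, not executed here):
#
#     from zope.proxy import ProxyBase, ProxyIterator
#     bare = object()
#     nested = ProxyBase(ProxyBase(bare))
#     list(ProxyIterator(nested))   # -> [outer proxy, inner proxy, bare]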
_MARKER = object()
def _WrapperType_Lookup(type_, name):
"""
Looks up information in class dictionaries in MRO
order, ignoring the proxy type itself.
Returns the first found object, or _MARKER
"""
for base in type_.mro():
if base is AbstractPyProxyBase:
continue
res = base.__dict__.get(name, _MARKER)
if res is not _MARKER:
return res
return _MARKER
def _get_wrapped(self):
"""
Helper method to access the wrapped object.
"""
return super(AbstractPyProxyBase, self).__getattribute__('_wrapped')
class _EmptyInterfaceDescriptor:
"""A descriptor for the attributes used on the class by the
Python implementation of `zope.interface`.
When wrapping builtin types, these descriptors prevent the objects
we find in the AbstractPyProxyBase from being used.
"""
def __get__(self, inst, klass):
raise AttributeError()
def __set__(self, inst, value):
raise TypeError()
def __delete__(self, inst):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration()
next = __next__
class _ProxyMetaclass(type):
# The metaclass is applied after the class definition
# for Py2/Py3 compatibility.
__implemented__ = _EmptyInterfaceDescriptor()
class AbstractPyProxyBase:
"""
A reference implementation that cannot be instantiated. Most users
will want to use :class:`PyProxyBase`.
This type is intended to be used in multiple-inheritance
scenarios, where another super class already has defined
``__slots__``. In order to subclass both that class and this
class, you must include the ``_wrapped`` value in your own
``__slots__`` definition (or else you will get the infamous
TypeError: "multiple bases have instance lay-out conflicts")
"""
__slots__ = ()
def __new__(cls, value=None):
# Some subclasses (zope.security.proxy) fail to pass the object
inst = super(AbstractPyProxyBase, cls).__new__(cls)
inst._wrapped = value
return inst
def __init__(self, obj):
self._wrapped = obj
def __call__(self, *args, **kw):
return self._wrapped(*args, **kw)
def __repr__(self):
return repr(self._wrapped)
def __str__(self):
return str(self._wrapped)
def __reduce__(self): # pragma: no cover (__reduce_ex__ prevents normal)
raise pickle.PicklingError
def __reduce_ex__(self, proto):
raise pickle.PicklingError
# Rich comparison protocol
def __lt__(self, other):
return self._wrapped < other
def __le__(self, other):
return self._wrapped <= other
def __eq__(self, other):
return self._wrapped == other
def __ne__(self, other):
return self._wrapped != other
def __gt__(self, other):
return self._wrapped > other
def __ge__(self, other):
return self._wrapped >= other
def __bool__(self):
return bool(self._wrapped)
def __hash__(self):
return hash(self._wrapped)
# Attribute protocol
def __getattribute__(self, name):
# Try to avoid accessing the _wrapped value until we need to.
# We don't know how subclasses may be storing it
# (e.g., persistent subclasses)
if name == '_wrapped':
return _get_wrapped(self)
if name in ('__class__', '__module__'):
# __class__ and __module__ are special cased in the C
# implementation, because we will always find them on the
# type of this object if we are being subclassed
return getattr(_get_wrapped(self), name)
if name in ('__reduce__', '__reduce_ex__'):
# These things we specifically override and no one
# can stop us, not even a subclass
return object.__getattribute__(self, name)
# First, look for descriptors in this object's type
type_self = type(self)
descriptor = _WrapperType_Lookup(type_self, name)
if descriptor is _MARKER:
# Nothing in the class, go straight to the wrapped object
return getattr(_get_wrapped(self), name)
if hasattr(descriptor, '__get__'):
if not hasattr(descriptor, '__set__'):
# Non-data-descriptor: call through to the wrapped object
# to see if it's there
try:
return getattr(_get_wrapped(self), name)
except AttributeError:
pass
# Data-descriptor on this type. Call it
return descriptor.__get__(self, type_self)
return descriptor
def __getattr__(self, name):
return getattr(self._wrapped, name)
def __setattr__(self, name, value):
if name == '_wrapped':
return super(AbstractPyProxyBase, self).__setattr__(name, value)
# First, look for descriptors in this object's type
type_self = type(self)
descriptor = _WrapperType_Lookup(type_self, name)
if descriptor is _MARKER or not hasattr(descriptor, '__set__'):
# Nothing in the class that's a descriptor,
# go straight to the wrapped object
return setattr(self._wrapped, name, value)
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name == '_wrapped':
raise AttributeError()
delattr(self._wrapped, name)
# Container protocols
def __len__(self):
return len(self._wrapped)
def __getitem__(self, key):
return self._wrapped[key]
def __setitem__(self, key, value):
self._wrapped[key] = value
def __delitem__(self, key):
del self._wrapped[key]
def __iter__(self):
# This handles a custom __iter__ and generator support at the same
# time.
return iter(self._wrapped)
def next(self):
# Called when we wrap an iterator itself.
return next(self._wrapped)
def __next__(self):
return self._wrapped.__next__()
# Python 2.7 won't let the C wrapper support __reversed__
# Uncomment this when the supported Python versions do
# def __reversed__(self):
# return reversed(self._wrapped)
def __contains__(self, item):
return item in self._wrapped
# Numeric protocol: unary operators
def __neg__(self):
return -self._wrapped
def __pos__(self):
return +self._wrapped
def __abs__(self):
return abs(self._wrapped)
def __invert__(self):
return ~self._wrapped
# Numeric protocol: unary conversions
def __complex__(self):
return complex(self._wrapped)
def __int__(self):
return int(self._wrapped)
def __float__(self):
return float(self._wrapped)
def __index__(self):
return operator.index(self._wrapped)
# Numeric protocol: binary arithmetic operators
def __add__(self, other):
return self._wrapped + other
def __sub__(self, other):
return self._wrapped - other
def __mul__(self, other):
return self._wrapped * other
def __floordiv__(self, other):
return self._wrapped // other
def __truediv__(self, other):
return self._wrapped / other
def __mod__(self, other):
return self._wrapped % other
def __divmod__(self, other):
return divmod(self._wrapped, other)
def __pow__(self, other, modulus=None):
if modulus is None:
return pow(self._wrapped, other)
return pow(self._wrapped, other, modulus)
def __radd__(self, other):
return other + self._wrapped
def __rsub__(self, other):
return other - self._wrapped
def __rmul__(self, other):
return other * self._wrapped
def __rfloordiv__(self, other):
return other // self._wrapped
def __rtruediv__(self, other):
return other / self._wrapped
def __rmod__(self, other):
return other % self._wrapped
def __rdivmod__(self, other):
return divmod(other, self._wrapped)
def __rpow__(self, other, modulus=None):
if modulus is None:
return pow(other, self._wrapped)
# We can't actually get here, because we can't lie about our type()
return pow(other, self._wrapped, modulus) # pragma: no cover
# Numeric protocol: binary bitwise operators
def __lshift__(self, other):
return self._wrapped << other
def __rshift__(self, other):
return self._wrapped >> other
def __and__(self, other):
return self._wrapped & other
def __xor__(self, other):
return self._wrapped ^ other
def __or__(self, other):
return self._wrapped | other
def __rlshift__(self, other):
return other << self._wrapped
def __rrshift__(self, other):
return other >> self._wrapped
def __rand__(self, other):
return other & self._wrapped
def __rxor__(self, other):
return other ^ self._wrapped
def __ror__(self, other):
return other | self._wrapped
# Numeric protocol: binary in-place operators
def __iadd__(self, other):
self._wrapped += other
return self
def __isub__(self, other):
self._wrapped -= other
return self
def __imul__(self, other):
self._wrapped *= other
return self
def __itruediv__(self, other):
self._wrapped /= other
return self
def __ifloordiv__(self, other):
self._wrapped //= other
return self
def __imod__(self, other):
self._wrapped %= other
return self
def __ilshift__(self, other):
self._wrapped <<= other
return self
def __irshift__(self, other):
self._wrapped >>= other
return self
def __iand__(self, other):
self._wrapped &= other
return self
def __ixor__(self, other):
self._wrapped ^= other
return self
def __ior__(self, other):
self._wrapped |= other
return self
def __ipow__(self, other, modulus=None):
if modulus is None:
self._wrapped **= other
else: # pragma: no cover
# There is no syntax which triggers in-place pow w/ modulus
self._wrapped = pow(self._wrapped, other, modulus)
return self
AbstractPyProxyBase = _ProxyMetaclass('AbstractPyProxyBase', (),
dict(AbstractPyProxyBase.__dict__))
class PyProxyBase(AbstractPyProxyBase):
"""Reference implementation.
"""
__slots__ = ('_wrapped', )
def py_getProxiedObject(obj):
if isinstance(obj, PyProxyBase):
return obj._wrapped
return obj
def py_setProxiedObject(obj, new_value):
if not isinstance(obj, PyProxyBase):
raise TypeError('Not a proxy')
old, obj._wrapped = obj._wrapped, new_value
return old
def py_isProxy(obj, klass=None):
if klass is None:
klass = PyProxyBase
return isinstance(obj, klass)
def py_sameProxiedObjects(lhs, rhs):
while isinstance(lhs, PyProxyBase):
lhs = super(PyProxyBase, lhs).__getattribute__('_wrapped')
while isinstance(rhs, PyProxyBase):
rhs = super(PyProxyBase, rhs).__getattribute__('_wrapped')
return lhs is rhs
def py_queryProxy(obj, klass=None, default=None):
if klass is None:
klass = PyProxyBase
while obj is not None and not isinstance(obj, klass):
obj = getattr(obj, '_wrapped', None)
if obj is not None:
return obj
return default
def py_queryInnerProxy(obj, klass=None, default=None):
if klass is None:
klass = PyProxyBase
found = []
while obj is not None:
if isinstance(obj, klass):
found.append(obj) # stack
obj = getattr(obj, '_wrapped', None)
if found:
return found[-1]
return default
def py_removeAllProxies(obj):
while isinstance(obj, PyProxyBase):
obj = super(PyProxyBase, obj).__getattribute__('_wrapped')
return obj
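# A minimal illustrative sketch (not part of the original module's docs):
# nested pure-Python proxies unwrap back down to the original object.
#
#   >>> inner = PyProxyBase([1, 2])
#   >>> outer = PyProxyBase(inner)
#   >>> py_removeAllProxies(outer)
#   [1, 2]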
_c_available = False
if 'PURE_PYTHON' not in os.environ:
try: # pragma: no cover
from zope.proxy._zope_proxy_proxy import ProxyBase as _c_available
except ImportError:
pass
class PyNonOverridable:
"Deprecated, only for BWC."
def __init__(self, method_desc): # pragma: no cover PyPy
self.desc = method_desc
if _c_available: # pragma: no cover
# Python API: not used in this module
# API for proxy-using C extensions.
from zope.proxy._zope_proxy_proxy import _CAPI # noqa: F401 unused
from zope.proxy._zope_proxy_proxy import ProxyBase
from zope.proxy._zope_proxy_proxy import getProxiedObject
from zope.proxy._zope_proxy_proxy import isProxy
from zope.proxy._zope_proxy_proxy import queryInnerProxy
from zope.proxy._zope_proxy_proxy import queryProxy
from zope.proxy._zope_proxy_proxy import removeAllProxies
from zope.proxy._zope_proxy_proxy import sameProxiedObjects
from zope.proxy._zope_proxy_proxy import setProxiedObject
else:
# no C extension available, fall back
ProxyBase = PyProxyBase
getProxiedObject = py_getProxiedObject
setProxiedObject = py_setProxiedObject
isProxy = py_isProxy
sameProxiedObjects = py_sameProxiedObjects
queryProxy = py_queryProxy
queryInnerProxy = py_queryInnerProxy
removeAllProxies = py_removeAllProxies
def non_overridable(func):
return property(lambda self: func.__get__(self)) | zope.proxy | /zope.proxy-5.0.0-cp310-cp310-macosx_10_9_x86_64.whl/zope/proxy/__init__.py | __init__.py |
=========
psycopgda
=========
This file outlines the basics of using Zope3 with PostgreSQL via PsycopgDA.
Installing PsycopgDA
--------------------
1. Check out the psycopgda package into a directory in your
PYTHONPATH. INSTANCE_HOME/lib/python or Zope3/src is usually the
most convenient place:
svn co svn://svn.zope.org/repos/main/psycopgda/trunk/psycopgda psycopgda
2. Copy `psycopg-configure.zcml` to the `package-includes` directory
of your Zope instance.
You can also use the eggified version, by installing it sitewide:
easy_install -U psycopgda
If you're using buildout, just list psycopgda in the 'eggs' key of your
buildout.cfg; buildout should then be able to find and fetch it.
Creating Database Connections
------------------------------
It is time to add some connections. A connection in Zope 3 is
registered as a utility.
3. Open a web browser on your Zope root folder (http://localhost:8080/
if you use the default settings in zope.conf.in).
4. Click on the 'Manage Site' action on the right side of the
screen. You should see a screen which reads 'Common Site Management
Tasks'
5. Around the middle of that page, you should see a link named 'Add
Utility'. Click on it.
6. Select 'Psycopg DA' and type in a name at the bottom of the page.
7. Enter the database connection string. It looks like this:
dbi://username:password@host:port/databasename
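   For example, a connection to a local database might use a DSN like the
   following (the user name, password and database name here are placeholders):

     dbi://zope:secret@localhost:5432/zopedb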
8. Click on the 'Add' button.
9. You should be on a page which reads 'Add Database Connection
Registration'. There you can configure the permission needed to use
the database connection, the name of the registration and the
    registration status. You can use any name for the 'Register As' field,
as long as it doesn't clash with an existing one. Choose a
permission. Choose between 'Registered' and 'Active' for the
'Registration Status'. Only one component of a kind can be 'Active'
at a time, so be careful.
10. You should be redirected to the 'Edit' screen of the connection
utility.
11. If you want to, you can go to the Test page and execute arbitrary
SQL queries to see whether the connection is working as expected.
Using SQL Scripts
-----------------
You can create SQL Scripts in the content space. For example:
12. Go to Zope root.
13. Add an SQL script (you can use the Common Tasks box on the left,
or the Add action on the right).
14. Click on the name of your new SQL script.
15. Choose a connection name (the one you registered earlier) from the
drop-down.
16. Enter your query and click on the 'Save Changes' button.
17. You can test the script in the -- surprise! -- Test page.
| zope.psycopgda | /zope.psycopgda-1.1.1.tar.gz/zope.psycopgda-1.1.1/README.txt | README.txt |
import datetime
import psycopg
import re
import sys
import zope.interface
import zope.rdb
from zope.datetime import tzinfo
from zope.interface import implements
from zope.rdb.interfaces import DatabaseException, IZopeConnection
from zope.publisher.interfaces import Retry
# These OIDs are taken from include/server/pg_type.h from PostgreSQL headers.
# Unfortunatelly psycopg does not export them as constants, and
# we cannot use psycopg.FOO.values because they overlap.
DATE_OID = 1082
TIME_OID = 1083
TIMETZ_OID = 1266
TIMESTAMP_OID = 1114
TIMESTAMPTZ_OID = 1184
INTERVAL_OID = 1186
CHAR_OID = 18
TEXT_OID = 25
BPCHAR_OID = 1042
VARCHAR_OID = 1043
# The following ones are obsolete and we don't handle them
#ABSTIME_OID = 702
#RELTIME_OID = 703
#TINTERVAL_OID = 704
# Date/time parsing functions
_dateFmt = re.compile(r"^(\d\d\d\d)-?([01]\d)-?([0-3]\d)$")
def parse_date(s):
"""Parses ISO-8601 compliant dates and returns a tuple (year, month,
day).
The following formats are accepted:
YYYY-MM-DD (extended format)
YYYYMMDD (basic format)
"""
m = _dateFmt.match(s)
if m is None:
raise ValueError, 'invalid date string: %s' % s
year, month, day = m.groups()
return int(year), int(month), int(day)
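# Illustrative examples for parse_date (assumed, not from the original
# module):
#
#   parse_date('2005-06-17')  ->  (2005, 6, 17)
#   parse_date('20050617')    ->  (2005, 6, 17)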
_timeFmt = re.compile(
r"^([0-2]\d)(?::?([0-5]\d)(?::?([0-5]\d)(?:[.,](\d+))?)?)?$")
def parse_time(s):
"""Parses ISO-8601 compliant times and returns a tuple (hour, minute,
second).
The following formats are accepted:
HH:MM:SS.ssss or HHMMSS.ssss
HH:MM:SS,ssss or HHMMSS,ssss
HH:MM:SS or HHMMSS
HH:MM or HHMM
HH
"""
m = _timeFmt.match(s)
if m is None:
raise ValueError, 'invalid time string: %s' % s
hr, mn, sc, msc = m.groups(0)
if msc != 0:
sc = float("%s.%s" % (sc, msc))
else:
sc = int(sc)
return int(hr), int(mn), sc
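# Illustrative examples for parse_time (assumed, not from the original
# module):
#
#   parse_time('13:30:05')  ->  (13, 30, 5)
#   parse_time('1330')      ->  (13, 30, 0)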
_tzFmt = re.compile(r"^([+-])([0-2]\d)(?::?([0-5]\d))?$")
def parse_tz(s):
"""Parses ISO-8601 timezones and returns the offset east of UTC in
minutes.
The following formats are accepted:
+/-HH:MM
+/-HHMM
+/-HH
Z (equivalent to +0000)
"""
if s == 'Z':
return 0
m = _tzFmt.match(s)
if m is None:
raise ValueError, 'invalid time zone: %s' % s
d, hoff, moff = m.groups(0)
if d == "-":
return - int(hoff) * 60 - int(moff)
return int(hoff) * 60 + int(moff)
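# Illustrative examples for parse_tz (assumed, not from the original module):
#
#   parse_tz('+02:00')  ->   120
#   parse_tz('-0530')   ->  -330
#   parse_tz('Z')       ->     0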
_tzPos = re.compile(r"[Z+-]")
def parse_timetz(s):
"""Parses ISO-8601 compliant times that may include timezone information
and returns a tuple (hour, minute, second, tzoffset).
tzoffset is the offset east of UTC in minutes. It will be None if s does
not include time zone information.
Formats accepted are those listed in the descriptions of parse_time() and
parse_tz(). Time zone should immediatelly follow time without intervening
spaces.
"""
m = _tzPos.search(s)
if m is None:
return parse_time(s) + (None,)
pos = m.start()
return parse_time(s[:pos]) + (parse_tz(s[pos:]),)
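# Illustrative examples for parse_timetz (assumed, not from the original
# module):
#
#   parse_timetz('13:30:05+02:00')  ->  (13, 30, 5, 120)
#   parse_timetz('13:30:05')        ->  (13, 30, 5, None)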
_datetimeFmt = re.compile(r"[T ]")
def _split_datetime(s):
"""Split date and time parts of ISO-8601 compliant timestamp and
return a tuple (date, time).
' ' or 'T' used to separate date and time parts.
"""
m = _datetimeFmt.search(s)
if m is None:
raise ValueError, 'time part of datetime missing: %s' % s
pos = m.start()
return s[:pos], s[pos + 1:]
def parse_datetime(s):
"""Parses ISO-8601 compliant timestamp and returns a tuple (year, month,
day, hour, minute, second).
Formats accepted are those listed in the descriptions of parse_date() and
parse_time() with ' ' or 'T' used to separate date and time parts.
"""
dt, tm = _split_datetime(s)
return parse_date(dt) + parse_time(tm)
def parse_datetimetz(s):
"""Parses ISO-8601 compliant timestamp that may include timezone
information and returns a tuple (year, month, day, hour, minute, second,
tzoffset).
tzoffset is the offset east of UTC in minutes. It will be None if s does
not include time zone information.
Formats accepted are those listed in the descriptions of parse_date() and
parse_timetz() with ' ' or 'T' used to separate date and time parts.
"""
dt, tm = _split_datetime(s)
return parse_date(dt) + parse_timetz(tm)
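# Illustrative examples (assumed, not from the original module):
#
#   parse_datetime('2005-06-17 13:30:05')     ->  (2005, 6, 17, 13, 30, 5)
#   parse_datetimetz('2005-06-17T13:30:05Z')  ->  (2005, 6, 17, 13, 30, 5, 0)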
def parse_interval(s):
"""Parses PostgreSQL interval notation and returns a tuple (years, months,
days, hours, minutes, seconds).
Values accepted:
interval ::= date
| time
| date time
date ::= date_comp
| date date_comp
date_comp ::= 1 'day'
| number 'days'
| 1 'month'
                | 1 'mon'
| number 'months'
| number 'mons'
| 1 'year'
| number 'years'
time ::= number ':' number
| number ':' number ':' number
| number ':' number ':' number '.' fraction
"""
years = months = days = 0
hours = minutes = seconds = 0
elements = s.split()
    # In tests with PostgreSQL 7.4.6 on Ubuntu 5.4, interval output returns
    # 'mon' and 'mons' rather than the expected 'month' or 'months'.  I've
    # fixed this and left the original matches there too in case this is
    # dependent on the OS or PostgreSQL release.
for i in range(0, len(elements) - 1, 2):
count, unit = elements[i:i+2]
if unit == 'day' and count == '1':
days += 1
elif unit == 'days':
days += int(count)
elif unit == 'month' and count == '1':
months += 1
elif unit == 'mon' and count == '1':
months += 1
elif unit == 'months':
months += int(count)
elif unit == 'mons':
months += int(count)
elif unit == 'year' and count == '1':
years += 1
elif unit == 'years':
years += int(count)
else:
raise ValueError, 'unknown time interval %s %s' % (count, unit)
if len(elements) % 2 == 1:
hours, minutes, seconds = parse_time(elements[-1])
return (years, months, days, hours, minutes, seconds)
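# Illustrative examples for parse_interval (assumed, not from the original
# module):
#
#   parse_interval('1 day 02:30:00')  ->  (0, 0, 1, 2, 30, 0)
#   parse_interval('2 years 3 mons')  ->  (2, 3, 0, 0, 0, 0)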
# Type conversions
def _conv_date(s):
if s:
return datetime.date(*parse_date(s))
def _conv_time(s):
if s:
hr, mn, sc = parse_time(s)
sc, micro = divmod(sc, 1.0)
micro = round(micro * 1000000)
return datetime.time(hr, mn, int(sc), int(micro))
def _conv_timetz(s):
if s:
hr, mn, sc, tz = parse_timetz(s)
sc, micro = divmod(sc, 1.0)
micro = round(micro * 1000000)
if tz:
tz = tzinfo(tz)
return datetime.time(hr, mn, int(sc), int(micro), tz)
def _conv_timestamp(s):
if s:
y, m, d, hr, mn, sc = parse_datetime(s)
sc, micro = divmod(sc, 1.0)
micro = round(micro * 1000000)
return datetime.datetime(y, m, d, hr, mn, int(sc), int(micro))
def _conv_timestamptz(s):
if s:
y, m, d, hr, mn, sc, tz = parse_datetimetz(s)
sc, micro = divmod(sc, 1.0)
micro = round(micro * 1000000)
if tz:
tz = tzinfo(tz)
return datetime.datetime(y, m, d, hr, mn, int(sc), int(micro), tz)
def _conv_interval(s):
if s:
y, m, d, hr, mn, sc = parse_interval(s)
if (y, m) != (0, 0):
# XXX: Currently there's no way to represent years and months as
# timedeltas
return s
else:
return datetime.timedelta(days=d, hours=hr, minutes=mn, seconds=sc)
def _get_string_conv(encoding):
def _conv_string(s):
if s is not None:
s = s.decode(encoding)
return s
return _conv_string
# User-defined types
DATE = psycopg.new_type((DATE_OID,), "ZDATE", _conv_date)
TIME = psycopg.new_type((TIME_OID,), "ZTIME", _conv_time)
TIMETZ = psycopg.new_type((TIMETZ_OID,), "ZTIMETZ", _conv_timetz)
TIMESTAMP = psycopg.new_type((TIMESTAMP_OID,), "ZTIMESTAMP", _conv_timestamp)
TIMESTAMPTZ = psycopg.new_type((TIMESTAMPTZ_OID,), "ZTIMESTAMPTZ",
_conv_timestamptz)
INTERVAL = psycopg.new_type((INTERVAL_OID,), "ZINTERVAL", _conv_interval)
dsn2option_mapping = {'host': 'host',
'port': 'port',
'dbname': 'dbname',
'username': 'user',
'password': 'password'}
def registerTypes(encoding):
"""Register type conversions for psycopg"""
psycopg.register_type(DATE)
psycopg.register_type(TIME)
psycopg.register_type(TIMETZ)
psycopg.register_type(TIMESTAMP)
psycopg.register_type(TIMESTAMPTZ)
psycopg.register_type(INTERVAL)
STRING = psycopg.new_type((CHAR_OID, TEXT_OID, BPCHAR_OID, VARCHAR_OID),
"ZSTRING", _get_string_conv(encoding))
psycopg.register_type(STRING)
class PsycopgAdapter(zope.rdb.ZopeDatabaseAdapter):
"""A PsycoPG adapter for Zope3.
The following type conversions are performed:
DATE -> datetime.date
TIME -> datetime.time
TIMETZ -> datetime.time
TIMESTAMP -> datetime.datetime
TIMESTAMPTZ -> datetime.datetime
XXX: INTERVAL cannot be represented exactly as datetime.timedelta since
it might be something like '1 month', which is a variable number of days.
"""
def connect(self):
if not self.isConnected():
try:
self._v_connection = PsycopgConnection(
self._connection_factory(), self
)
except psycopg.Error, error:
raise DatabaseException, str(error)
def registerTypes(self):
registerTypes(self.getEncoding())
def _connection_factory(self):
"""Create a Psycopg DBI connection based on the DSN"""
self.registerTypes()
conn_info = zope.rdb.parseDSN(self.dsn)
conn_list = []
for dsnname, optname in dsn2option_mapping.iteritems():
if conn_info[dsnname]:
conn_list.append('%s=%s' % (optname, conn_info[dsnname]))
conn_str = ' '.join(conn_list)
connection = psycopg.connect(conn_str)
# Ensure we are in SERIALIZABLE transaction isolation level.
# This is the default under psycopg1, but changed to READ COMMITTED
# under psycopg2. This should become an option if anyone wants
# different isolation levels.
connection.set_isolation_level(3)
return connection
def _handle_psycopg_exception(error):
"""Called from a exception handler for psycopg.Error.
If we have a serialization exception or a deadlock, we should retry the
transaction by raising a Retry exception. Otherwise, we reraise.
"""
if not error.args:
raise
msg = error.args[0]
# These messages are from PostgreSQL 8.0. They may change between
# PostgreSQL releases - if so, the different messages should be added
# rather than the existing ones changed so this logic works with
# different versions.
if msg.startswith(
'ERROR: could not serialize access due to concurrent update'
):
raise Retry(sys.exc_info())
if msg.startswith('ERROR: deadlock detected'):
raise Retry(sys.exc_info())
raise
class IPsycopgZopeConnection(IZopeConnection):
"""A marker interface stating that this connection uses PostgreSQL."""
class PsycopgConnection(zope.rdb.ZopeConnection):
zope.interface.implements(IPsycopgZopeConnection)
def cursor(self):
"""See IZopeConnection"""
return PsycopgCursor(self.conn.cursor(), self)
def commit(self):
try:
zope.rdb.ZopeConnection.commit(self)
except psycopg.Error, error:
_handle_psycopg_exception(error)
class PsycopgCursor(zope.rdb.ZopeCursor):
def execute(self, operation, parameters=None):
"""See IZopeCursor"""
try:
return zope.rdb.ZopeCursor.execute(self, operation, parameters)
except psycopg.Error, error:
_handle_psycopg_exception(error)
    def executemany(self, operation, seq_of_parameters=None):
        """See IZopeCursor"""
try:
return zope.rdb.ZopeCursor.execute(
self, operation, seq_of_parameters)
except psycopg.Error, error:
_handle_psycopg_exception(error) | zope.psycopgda | /zope.psycopgda-1.1.1.tar.gz/zope.psycopgda-1.1.1/src/zope/psycopgda/adapter.py | adapter.py |
=========
Changes
=========
5.0 (2023-03-27)
================
- Add support for Python 3.11.
- Drop support for Python 2.7, 3.5, 3.6.
- Drop support for deprecated ``python setup.py test``.
4.3.0 (2021-12-15)
==================
- Add support for Python 3.8, 3.9 and 3.10.
- Drop support for Python 3.4.
4.2.0 (2018-10-05)
==================
- Add support for Python 3.7.
4.1.0 (2017-08-31)
==================
- Add support for Python 3.5 and 3.6.
- Drop support for Python 2.6 and 3.3.
4.0.0 (2014-12-24)
==================
- Add support for PyPy and PyPy3.
- Add support for Python 3.4.
- Add support for testing on Travis.
4.0.0a1 (2013-02-25)
====================
- Add support for Python 3.3.
- Replace deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Drop support for Python 2.4 and 2.5.
3.9.0 (2009-08-27)
==================
Initial release. This package was split off from zope.app.publisher as part
of a refactoring process. It's now a plugin for another package that was
also refactored out of zope.app.publisher, zope.browserresource. See its
documentation for more details.
Other changes:
* Don't render PageTemplateResource when called, as the IResource interface
  requires that the __call__ method return an absolute URL. When accessed
  by a browser, it will still be rendered, because the "browserDefault"
  method now returns a callable that renders the template to the browser.
| zope.ptresource | /zope.ptresource-5.0.tar.gz/zope.ptresource-5.0/CHANGES.rst | CHANGES.rst |
===============
zope.ptresource
===============
.. image:: https://img.shields.io/pypi/v/zope.ptresource.svg
:target: https://pypi.python.org/pypi/zope.ptresource/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/zope.ptresource.svg
:target: https://pypi.org/project/zope.ptresource/
:alt: Supported Python versions
.. image:: https://github.com/zopefoundation/zope.ptresource/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zope.ptresource/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/zope.ptresource/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zope.ptresource?branch=master
.. note::
This package is at present not reusable without depending on a large
chunk of the Zope Toolkit and its assumptions. It is maintained by the
`Zope Toolkit project <http://docs.zope.org/zopetoolkit/>`_.
This package provides a "page template" `resource class
<https://pypi.python.org/pypi/zope.browserresource>`_, a resource
whose content is processed with the `Zope Page Templates
<https://pypi.python.org/pypi/zope.pagetemplate>`_ engine before
being returned to client.
The resource factory class is registered for "pt", "zpt" and "html" file
extensions in the package's ``configure.zcml`` file.
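A page template resource can then be registered like any other file-based
resource. For example (an illustrative sketch, assuming the
``browser:resource`` directive from ``zope.browserresource``; the resource
and file names are placeholders)::

    <browser:resource
        name="greeting.html"
        file="greeting.pt"
        />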
| zope.ptresource | /zope.ptresource-5.0.tar.gz/zope.ptresource-5.0/README.rst | README.rst |
from zope.browserresource.interfaces import IResourceFactory
from zope.browserresource.interfaces import IResourceFactoryFactory
from zope.browserresource.resource import Resource
from zope.interface import implementer
from zope.interface import provider
from zope.pagetemplate.engine import TrustedAppPT
from zope.pagetemplate.pagetemplatefile import PageTemplateFile
from zope.publisher.browser import BrowserView
from zope.publisher.interfaces import NotFound
from zope.publisher.interfaces.browser import IBrowserPublisher
class PageTemplate(TrustedAppPT, PageTemplateFile):
"""
Resource that is a page template
"""
def __init__(self, filename, _prefix=None, content_type=None):
_prefix = self.get_path_from_prefix(_prefix)
super().__init__(filename, _prefix)
if content_type is not None:
self.content_type = content_type
def pt_getContext(self, request, **kw):
namespace = super().pt_getContext(**kw)
namespace['context'] = None
namespace['request'] = request
return namespace
def __call__(self, request, **keywords):
namespace = self.pt_getContext(
request=request,
options=keywords
)
return self.pt_render(namespace)
@implementer(IBrowserPublisher)
class PageTemplateResource(BrowserView, Resource):
def publishTraverse(self, request, name):
'''See interface IBrowserPublisher'''
raise NotFound(None, name)
def browserDefault(self, request):
'''See interface IBrowserPublisher'''
return getattr(self, request.method), ()
def HEAD(self):
pt = self.context
response = self.request.response
if not response.getHeader("Content-Type"):
response.setHeader("Content-Type", pt.content_type)
return ''
def GET(self):
pt = self.context
response = self.request.response
if not response.getHeader("Content-Type"):
response.setHeader("Content-Type", pt.content_type)
return pt(self.request)
@implementer(IResourceFactory)
@provider(IResourceFactoryFactory)
class PageTemplateResourceFactory:
def __init__(self, path, checker, name):
self.__pt = PageTemplate(path)
self.__checker = checker
self.__name = name
def __call__(self, request):
resource = PageTemplateResource(self.__pt, request)
resource.__Security_checker__ = self.__checker
resource.__name__ = self.__name
return resource | zope.ptresource | /zope.ptresource-5.0.tar.gz/zope.ptresource-5.0/src/zope/ptresource/ptresource.py | ptresource.py |
import datetime
import sys
import xmlrpc.client as xmlrpclib
from io import BytesIO
import zope.component
import zope.interface
from zope.interface import implementer
from zope.security.proxy import isinstance
from zope.publisher.http import DirectResult
from zope.publisher.http import HTTPRequest
from zope.publisher.http import HTTPResponse
from zope.publisher.interfaces.xmlrpc import IXMLRPCPremarshaller
from zope.publisher.interfaces.xmlrpc import IXMLRPCRequest
from zope.publisher.interfaces.xmlrpc import IXMLRPCView
@implementer(IXMLRPCRequest)
class XMLRPCRequest(HTTPRequest):
_args = ()
def _createResponse(self):
"""Create a specific XML-RPC response object."""
return XMLRPCResponse()
def processInputs(self):
"""See IPublisherRequest."""
# Parse the request XML structure
# Using lines() does not work as Twisted's BufferedStream sends back
# an empty stream here for read() (bug). Using readlines() does not
# work with paster.httpserver. However, readline() works fine.
lines = b''
while True:
line = self._body_instream.readline()
if not line:
break
lines += line
self._args, function = xmlrpclib.loads(lines)
# Translate '.' to '/' in function to represent object traversal.
function = function.split('.')
if function:
self.setPathSuffix(function)
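    # For example (illustrative, not from the original module): an XML-RPC
    # call to the method name 'folder.hello' leaves the marshalled call
    # arguments in self._args and registers the extra traversal steps
    # ['folder', 'hello'] via setPathSuffix().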
class TestRequest(XMLRPCRequest):
def __init__(self, body_instream=None, environ=None, response=None, **kw):
_testEnv = {
'SERVER_URL': 'http://127.0.0.1',
'HTTP_HOST': '127.0.0.1',
'CONTENT_LENGTH': '0',
'GATEWAY_INTERFACE': 'TestFooInterface/1.0',
}
if environ:
_testEnv.update(environ)
if kw:
_testEnv.update(kw)
if body_instream is None:
body_instream = BytesIO(b'')
super().__init__(body_instream, _testEnv, response)
class XMLRPCResponse(HTTPResponse):
"""XMLRPC response.
This object is responsible for converting all output to valid XML-RPC.
"""
def setResult(self, result):
"""Set the result of the response
        The result is marshalled as an XML-RPC response body, and the
        "content-type" and "content-length" headers are updated accordingly.
        If the result is an xmlrpclib.Fault, it is serialized as an XML-RPC
        fault response instead.
"""
body = premarshal(result)
if isinstance(body, xmlrpclib.Fault):
# Convert Fault object to XML-RPC response.
body = xmlrpclib.dumps(body, methodresponse=True)
else:
# Marshall our body as an XML-RPC response. Strings will be sent
# as strings, integers as integers, etc. We do *not* convert
# everything to a string first.
try:
body = xmlrpclib.dumps((body,), methodresponse=True,
allow_none=True)
except: # noqa: E722 do not use bare 'except'
# We really want to catch all exceptions at this point!
self.handleException(sys.exc_info())
return
# HTTP response payloads are byte strings, and methods like
# consumeBody rely on that, but xmlrpc.client.dumps produces
# str, which is incorrect.
if not isinstance(body, bytes):
body = body.encode('utf-8')
headers = [('content-type', 'text/xml;charset=utf-8'),
('content-length', str(len(body)))]
self._headers.update({k: [v] for (k, v) in headers})
super().setResult(DirectResult((body,)))
def handleException(self, exc_info):
"""Handle Errors during publsihing and wrap it in XML-RPC XML
>>> import sys
>>> resp = XMLRPCResponse()
>>> try:
... raise AttributeError('xyz')
... except:
... exc_info = sys.exc_info()
... resp.handleException(exc_info)
>>> resp.getStatusString()
'200 OK'
>>> resp.getHeader('content-type')
'text/xml;charset=utf-8'
        >>> body = resp.consumeBody()
        >>> b'Unexpected Zope exception: AttributeError: xyz' in body
        True
"""
t, value = exc_info[:2]
s = '{}: {}'.format(getattr(t, '__name__', t), value)
# Create an appropriate Fault object. Unfortunately, we throw away
# most of the debugging information. More useful error reporting is
# left as an exercise for the reader.
Fault = xmlrpclib.Fault
fault_text = None
try:
if isinstance(value, Fault):
fault_text = value
elif isinstance(value, Exception):
fault_text = Fault(-1, "Unexpected Zope exception: " + s)
else:
fault_text = Fault(-2, "Unexpected Zope error value: " + s)
except: # noqa: E722 do not use bare 'except'
fault_text = Fault(-3, "Unknown Zope fault type")
# Do the damage.
self.setResult(fault_text)
# XML-RPC prefers a status of 200 ("ok") even when reporting errors.
self.setStatus(200)
@implementer(IXMLRPCView)
class XMLRPCView:
"""A base XML-RPC view that can be used as mix-in for XML-RPC views."""
def __init__(self, context, request):
self.context = context
self.request = request
@implementer(IXMLRPCPremarshaller)
class PreMarshallerBase:
"""Abstract base class for pre-marshallers."""
def __init__(self, data):
self.data = data
def __call__(self):
raise Exception("Not implemented")
@zope.component.adapter(dict)
class DictPreMarshaller(PreMarshallerBase):
"""Pre-marshaller for dicts"""
def __call__(self):
return {premarshal(k): premarshal(v)
for (k, v) in self.data.items()}
@zope.component.adapter(list)
class ListPreMarshaller(PreMarshallerBase):
"""Pre-marshaller for list"""
def __call__(self):
return [premarshal(x) for x in self.data]
@zope.component.adapter(tuple)
class TuplePreMarshaller(ListPreMarshaller):
pass
@zope.component.adapter(xmlrpclib.Binary)
class BinaryPreMarshaller(PreMarshallerBase):
"""Pre-marshaller for xmlrpc.Binary"""
def __call__(self):
return xmlrpclib.Binary(self.data.data)
@zope.component.adapter(xmlrpclib.Fault)
class FaultPreMarshaller(PreMarshallerBase):
"""Pre-marshaller for xmlrpc.Fault"""
def __call__(self):
return xmlrpclib.Fault(
premarshal(self.data.faultCode),
premarshal(self.data.faultString),
)
@zope.component.adapter(xmlrpclib.DateTime)
class DateTimePreMarshaller(PreMarshallerBase):
"""Pre-marshaller for xmlrpc.DateTime"""
def __call__(self):
return xmlrpclib.DateTime(self.data.value)
@zope.component.adapter(datetime.datetime)
class PythonDateTimePreMarshaller(PreMarshallerBase):
"""Pre-marshaller for datetime.datetime"""
def __call__(self):
return xmlrpclib.DateTime(self.data.isoformat())
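# A minimal illustrative note (not from the original module): given the
# adapter registrations that ship with this package, the premarshal() helper
# below turns datetime.datetime(2020, 1, 1) into an xmlrpclib.DateTime built
# from its isoformat(), i.e. '2020-01-01T00:00:00'.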
def premarshal(data):
"""Premarshal data before handing it to xmlrpclib for marhalling
The initial purpose of this function is to remove security proxies
without resorting to removeSecurityProxy. This way, we can avoid
inadvertently providing access to data that should be protected.
"""
premarshaller = IXMLRPCPremarshaller(data, alternate=None)
if premarshaller is not None:
return premarshaller()
return data | zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/xmlrpc.py | xmlrpc.py |
"""Default view name API
"""
import zope.interface
from zope.component import getSiteManager
from zope.interface.interfaces import ComponentLookupError
from zope.publisher.interfaces import IDefaultViewName
class IDefaultViewNameAPI(zope.interface.Interface):
def getDefaultViewName(object, request, context=None):
"""Get the name of the default view for the object and request.
If a matching default view name cannot be found, raises
ComponentLookupError.
If context is not specified, attempts to use
object to specify a context.
"""
def queryDefaultViewName(object, request, default=None, context=None):
"""Look for the name of the default view for the object and request.
If a matching default view name cannot be found, returns the default.
If context is not specified, attempts to use object to specify
a context.
"""
def getDefaultViewName(object, request, context=None):
name = queryDefaultViewName(object, request, context=context)
if name is not None:
return name
raise ComponentLookupError("Couldn't find default view name",
context, request)
def queryDefaultViewName(object, request, default=None, context=None):
"""
query the default view for a given object and request.
>>> from zope.publisher.defaultview import queryDefaultViewName
    Let's create an object for which we will register a default view.
>>> import zope.interface
>>> class IMyObject(zope.interface.Interface):
... pass
>>> @zope.interface.implementer(IMyObject)
... class MyObject(object):
... pass
>>> queryDefaultViewName(MyObject(), object()) is None
True
    Now we will set a default view.
>>> import zope.component
>>> import zope.publisher.interfaces
>>> zope.component.provideAdapter('name',
... adapts=(IMyObject, zope.interface.Interface),
... provides=zope.publisher.interfaces.IDefaultViewName)
>>> queryDefaultViewName(MyObject(), object())
'name'
This also works if the name is empty
>>> zope.component.provideAdapter('',
... adapts=(IMyObject, zope.interface.Interface),
... provides=zope.publisher.interfaces.IDefaultViewName)
>>> queryDefaultViewName(MyObject(), object())
''
"""
name = getSiteManager(context).adapters.lookup(
map(zope.interface.providedBy, (object, request)), IDefaultViewName)
if name is None:
return default
return name | zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/defaultview.py | defaultview.py |
"""Default view and default skin ZCML configuration feature.
"""
from zope.component.interface import provideInterface
from zope.component.zcml import handler
from zope.configuration.fields import GlobalInterface
from zope.configuration.fields import GlobalObject
from zope.interface import Interface
from zope.schema import TextLine
from zope import component
from zope.publisher.interfaces import IDefaultViewName
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IBrowserSkinType
from zope.publisher.interfaces.browser import IDefaultSkin
class IDefaultSkinDirective(Interface):
"""Sets the default browser skin"""
name = TextLine(
title="Default skin name",
description="Default skin name",
required=True
)
class IDefaultViewDirective(Interface):
"""
The name of the view that should be the default.
This name refers to view that should be the
view used by default (if no view name is supplied
explicitly).
"""
name = TextLine(
title="The name of the view that should be the default.",
description="""
This name refers to view that should be the view used by
default (if no view name is supplied explicitly).""",
required=True
)
for_ = GlobalObject(
title="The interface this view is the default for.",
description="""
Specifies the interface for which the view is registered.
All objects implementing this interface can make use of
this view. If this attribute is not specified, the view is available
for all objects.""",
required=False
)
layer = GlobalInterface(
title="The layer the default view is declared for",
description="The default layer for which the default view is "
"applicable. By default it is applied to all layers.",
required=False
)
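# An illustrative ZCML sketch (assuming the directives are registered under
# the usual ``browser`` namespace, as they historically have been; the
# interface, view name and skin name below are placeholders):
#
#   <browser:defaultView
#       for="zope.container.interfaces.IContainer"
#       name="index.html"
#       />
#
#   <browser:defaultSkin name="Rotterdam" />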
def setDefaultSkin(name, info=''):
"""Set the default skin.
>>> from zope.interface import directlyProvides
>>> from zope.app.testing import ztapi
>>> class Skin1: pass
>>> directlyProvides(Skin1, IBrowserSkinType)
>>> ztapi.provideUtility(IBrowserSkinType, Skin1, 'Skin1')
>>> setDefaultSkin('Skin1')
>>> adapters = component.getSiteManager().adapters
Look up the default skin for a request that has the
>>> adapters.lookup((IBrowserRequest,), IDefaultSkin, '') is Skin1
True
"""
skin = component.getUtility(IBrowserSkinType, name)
handler('registerAdapter',
skin, (IBrowserRequest,), IDefaultSkin, '', info)
def defaultSkin(_context, name):
_context.action(
discriminator='defaultSkin',
callable=setDefaultSkin,
args=(name, _context.info)
)
def defaultView(_context, name, for_=None, layer=IBrowserRequest):
_context.action(
discriminator=('defaultViewName', for_, layer, name),
callable=handler,
args=('registerAdapter',
name, (for_, layer), IDefaultViewName, '', _context.info)
)
if for_ is not None:
_context.action(
discriminator=None,
callable=provideInterface,
args=('', for_)
) | zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/zcml.py | zcml.py |
"""HTTP Publisher
"""
import base64
import http.cookies as cookies
import logging
import re
import tempfile
from html import escape
from io import BytesIO
from urllib.parse import quote
from urllib.parse import urlsplit
import zope.component
import zope.contenttype.parse
import zope.event
import zope.interface
from zope.i18n.interfaces import IUserPreferredCharsets
from zope.i18n.interfaces import IUserPreferredLanguages
from zope.i18n.locales import LoadLocaleError
from zope.i18n.locales import locales
from zope.publisher.base import BaseRequest
from zope.publisher.base import BaseResponse
from zope.publisher.base import RequestDataGetter
from zope.publisher.base import RequestDataMapper
from zope.publisher.base import RequestDataProperty
from zope.publisher.interfaces import ISkinnable
from zope.publisher.interfaces import Redirect
from zope.publisher.interfaces.http import IHTTPApplicationRequest
from zope.publisher.interfaces.http import IHTTPApplicationResponse
from zope.publisher.interfaces.http import IHTTPCredentials
from zope.publisher.interfaces.http import IHTTPRequest
from zope.publisher.interfaces.http import IHTTPResponse
from zope.publisher.interfaces.http import IHTTPVirtualHostChangedEvent
from zope.publisher.interfaces.http import IResult
from zope.publisher.interfaces.logginginfo import ILoggingInfo
from zope.publisher.skinnable import setDefaultSkin
# Default Encoding
ENCODING = 'UTF-8'
# not just text/* but RFC 3023 and */*+xml
unicode_mimetypes_re = re.compile(
r"^text\/.*$|^.*\/xml.*$|^.*\+xml$|^application/json$")
eventlog = logging.getLogger('eventlog')
class CookieMapper(RequestDataMapper):
_mapname = '_cookies'
class HeaderGetter(RequestDataGetter):
_gettrname = 'getHeader'
host_port_re = re.compile(
r"^(.*):([0-9]*)$", re.DOTALL)
def splitport(host):
"""Split port number off the hostname.
>>> splitport('example.com:80')
('example.com', '80')
>>> splitport('localhost')
('localhost', None)
>>> splitport('[::1]')
('[::1]', None)
>>> splitport('[::1]:443')
('[::1]', '443')
>>> splitport('localhost:')
('localhost', None)
"""
match = host_port_re.match(host)
if match:
host, port = match.groups()
else:
port = None
return host, port or None
def sane_environment(env):
# return an environment mapping which has been cleaned of
# funny business such as REDIRECT_ prefixes added by Apache
# or HTTP_CGI_AUTHORIZATION hacks.
# It also makes sure PATH_INFO is a string.
dict = {}
for key, val in env.items():
while key.startswith('REDIRECT_'):
key = key[9:]
dict[key] = val
if 'HTTP_CGI_AUTHORIZATION' in dict:
dict['HTTP_AUTHORIZATION'] = dict.pop('HTTP_CGI_AUTHORIZATION')
if 'PATH_INFO' in dict:
# Recode PATH_INFO to UTF-8 from original latin1
pi = dict['PATH_INFO']
pi = pi if isinstance(pi, bytes) else pi.encode('latin1')
dict['PATH_INFO'] = pi.decode(ENCODING)
return dict
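# For example (illustrative): {'REDIRECT_REDIRECT_STATUS': '200',
# 'HTTP_CGI_AUTHORIZATION': 'Basic ...'} comes back as
# {'STATUS': '200', 'HTTP_AUTHORIZATION': 'Basic ...'}.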
@zope.interface.implementer(IHTTPVirtualHostChangedEvent)
class HTTPVirtualHostChangedEvent:
request = None
def __init__(self, request):
self.request = request
# Possible HTTP status responses
status_reasons = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
    200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi-Status',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Moved Temporarily',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Time-out',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Large',
415: 'Unsupported Media Type',
416: 'Requested range not satisfiable',
417: 'Expectation Failed',
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Time-out',
505: 'HTTP Version not supported',
507: 'Insufficient Storage',
}
status_codes = {}
def init_status_codes():
# Add mappings for builtin exceptions and
# provide text -> error code lookups.
for key, val in status_reasons.items():
status_codes[val.replace(' ', '').lower()] = key
status_codes[val.lower()] = key
status_codes[key] = key
status_codes[str(key)] = key
en = [n.lower() for n in dir(__builtins__) if n.endswith('Error')]
for name in en:
status_codes[name] = 500
init_status_codes()
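# For example (illustrative): after initialization, status_codes['not found'],
# status_codes['notfound'], status_codes[404] and status_codes['404'] all map
# to 404, so setStatus() accepts reason phrases and numeric strings as well
# as integers.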
class URLGetter:
__slots__ = "__request"
def __init__(self, request):
self.__request = request
def __str__(self):
return self.__request.getURL()
def __getitem__(self, name):
url = self.get(name, None)
if url is None:
raise KeyError(name)
return url
def get(self, name, default=None):
i = int(name)
try:
if i < 0:
i = -i
return self.__request.getURL(i)
else:
return self.__request.getApplicationURL(i)
except IndexError as v:
if v.args[0] == i:
return default
raise
class HTTPInputStream:
"""Special stream that supports caching the read data.
This is important, so that we can retry requests.
"""
def __init__(self, stream, environment):
self.stream = stream
size = environment.get('CONTENT_LENGTH')
# There can be no size in the environment (None) or the size
# can be an empty string, in which case we treat it as absent.
if not size:
size = environment.get('HTTP_CONTENT_LENGTH')
if not size or int(size) < 65536:
self.cacheStream = BytesIO()
else:
self.cacheStream = tempfile.TemporaryFile()
self.size = size and int(size) or -1
def getCacheStream(self):
self.read(self.size)
self.cacheStream.seek(0)
return self.cacheStream
def read(self, size=-1):
data = self.stream.read(size)
self.cacheStream.write(data)
return data
def readline(self, size=None):
# Previous versions of Twisted did not support the ``size`` argument
# See http://twistedmatrix.com/trac/ticket/1451
# https://bugs.launchpad.net/zope3/+bug/98284
# Note, however, that we cannot pass a size of None to cStringIO
# objects, or we'll get a TypeError: an integer is required
if size is not None:
data = self.stream.readline(size)
else:
data = self.stream.readline()
self.cacheStream.write(data)
return data
def readlines(self, hint=0):
data = self.stream.readlines(hint)
self.cacheStream.write(b''.join(data))
return data
DEFAULT_PORTS = {'http': '80', 'https': '443'}
@zope.interface.implementer(IHTTPCredentials,
IHTTPRequest,
IHTTPApplicationRequest)
class HTTPRequest(BaseRequest):
"""Model HTTP request data.
This object provides access to request data. This includes, the
input headers, form data, server data, and cookies.
Request objects are created by the object publisher and will be
passed to published objects through the argument name, REQUEST.
The request object is a mapping object that represents a
collection of variable to value mappings. In addition, variables
are divided into four categories:
- Environment variables
These variables include input headers, server data, and other
request-related data. The variable names are as <a
href="http://hoohoo.ncsa.uiuc.edu/cgi/env.html">specified</a>
in the <a
href="http://hoohoo.ncsa.uiuc.edu/cgi/interface.html">CGI
specification</a>
- Form data
These are data extracted from either a URL-encoded query
string or body, if present.
- Cookies
These are the cookie data, if present.
- Other
Data that may be set by an application object.
The form attribute of a request is actually a Field Storage
object. When file uploads are used, this provides a richer and
more complex interface than is provided by accessing form data as
items of the request. See the FieldStorage class documentation
for more details.
The request object may be used as a mapping object, in which case
values will be looked up in the order: environment variables,
other variables, form data, and then cookies.
"""
__slots__ = (
'__provides__', # Allow request to directly provide interfaces
'_auth', # The value of the HTTP_AUTHORIZATION header.
'_cookies', # The request cookies
'_path_suffix', # Extra traversal steps after normal traversal
'_retry_count', # How many times the request has been retried
'_app_names', # The application path as a sequence
'_app_server', # The server path of the application url
'_orig_env', # The original environment
'_endswithslash', # Does the given path end with /
'method', # The upper-cased request method (REQUEST_METHOD)
'_locale', # The locale for the request
'_vh_root', # Object at the root of the virtual host
)
retry_max_count = 3 # How many times we're willing to retry
def __init__(self, body_instream, environ, response=None):
super().__init__(
HTTPInputStream(body_instream, environ), environ, response)
self._orig_env = environ
environ = sane_environment(environ)
if 'HTTP_AUTHORIZATION' in environ:
self._auth = environ['HTTP_AUTHORIZATION']
del environ['HTTP_AUTHORIZATION']
else:
self._auth = None
self.method = environ.get("REQUEST_METHOD", 'GET').upper()
self._environ = environ
self.__setupCookies()
self.__setupPath()
self.__setupURLBase()
self._vh_root = None
self.setupLocale()
def setupLocale(self):
envadapter = IUserPreferredLanguages(self, None)
if envadapter is None:
self._locale = None
return
langs = envadapter.getPreferredLanguages()
for httplang in langs:
parts = (httplang.split('-') + [None, None])[:3]
try:
self._locale = locales.getLocale(*parts)
return
except LoadLocaleError:
# Just try the next combination
pass
else:
# No combination gave us an existing locale, so use the default,
# which is guaranteed to exist
self._locale = locales.getLocale(None, None, None)
def _getLocale(self):
return self._locale
locale = property(_getLocale)
def __setupURLBase(self):
get_env = self._environ.get
# Get base info first. This isn't likely to cause
# errors and might be useful to error handlers.
script = get_env('SCRIPT_NAME', '').strip()
# _script and the other _names are meant for URL construction
self._app_names = [f for f in script.split('/') if f]
# get server URL and store it too, since we are already looking it up
server_url = get_env('SERVER_URL', None)
if server_url is not None:
self._app_server = server_url = server_url.strip()
else:
server_url = self.__deduceServerURL()
if server_url.endswith('/'):
server_url = server_url[:-1]
# strip off leading /'s of script
while script.startswith('/'):
script = script[1:]
self._app_server = server_url
def __deduceServerURL(self):
environ = self._environ
if (environ.get('HTTPS', '').lower() == "on" or
environ.get('SERVER_PORT_SECURE') == "1"):
protocol = 'https'
else:
protocol = 'http'
if 'HTTP_HOST' in environ:
host = environ['HTTP_HOST'].strip()
hostname, port = splitport(host)
else:
hostname = environ.get('SERVER_NAME', '').strip()
port = environ.get('SERVER_PORT', '')
if port and port != DEFAULT_PORTS.get(protocol):
host = hostname + ':' + port
else:
host = hostname
return '{}://{}'.format(protocol, host)
def _parseCookies(self, text, result=None):
"""Parse 'text' and return found cookies as 'result' dictionary."""
if result is None:
result = {}
# ignore cookies on a CookieError
try:
c = cookies.SimpleCookie(text)
except cookies.CookieError as e:
eventlog.warning(e)
return result
for k, v in c.items():
# recode cookie value to ENCODING (UTF-8)
if not isinstance(k, bytes):
k = k.encode('latin1')
rk = k.decode(ENCODING)
v = v.value
if not isinstance(v, bytes):
v = v.encode('latin1')
rv = v.decode(ENCODING)
result[rk] = rv
return result
def __setupCookies(self):
# Cookie values should *not* be appended to existing form
# vars with the same name - they are more like default values
# for names not otherwise specified in the form.
self._cookies = {}
cookie_header = self._environ.get('HTTP_COOKIE', None)
if cookie_header is not None:
self._parseCookies(cookie_header, self._cookies)
def __setupPath(self):
# PATH_INFO is str here, so setupPath_helper sets up the
# traversal stack correctly.
self._setupPath_helper("PATH_INFO")
def supportsRetry(self):
"""See IPublisherRequest"""
count = getattr(self, '_retry_count', 0)
if count < self.retry_max_count:
return True
def retry(self):
"""See IPublisherRequest"""
count = getattr(self, '_retry_count', 0)
self._retry_count = count + 1
request = self.__class__(
# Use the cache stream as the new input stream.
body_instream=self._body_instream.getCacheStream(),
environ=self._orig_env,
response=self.response.retry(),
)
# restore the default skin
if ISkinnable.providedBy(self):
# only ISkinnable requests have skins
setDefaultSkin(request)
request.setPublication(self.publication)
request._retry_count = self._retry_count
return request
def traverse(self, obj):
"""See IPublisherRequest"""
ob = super().traverse(obj)
if self._path_suffix:
self._traversal_stack = self._path_suffix
ob = super().traverse(ob)
return ob
def getHeader(self, name, default=None, literal=False):
"""See IHTTPRequest"""
environ = self._environ
if not literal:
name = name.replace('-', '_').upper()
val = environ.get(name, None)
if val is not None:
return val
if not name.startswith('HTTP_'):
name = 'HTTP_%s' % name
return environ.get(name, default)
headers = RequestDataProperty(HeaderGetter)
def getCookies(self):
"""See IHTTPApplicationRequest"""
return self._cookies
cookies = RequestDataProperty(CookieMapper)
def setPathSuffix(self, steps):
"""See IHTTPRequest"""
steps = list(steps)
steps.reverse()
self._path_suffix = steps
def _authUserPW(self):
"""See IHTTPCredentials"""
if self._auth and self._auth.lower().startswith('basic '):
encoded = self._auth.split(None, 1)[-1]
decoded = base64.b64decode(encoded.encode('iso-8859-1'))
name, password = bytes.split(decoded, b':', 1)
return name, password
def unauthorized(self, challenge):
"""See IHTTPCredentials"""
self._response.setHeader("WWW-Authenticate", challenge, True)
self._response.setStatus(401)
def setPrincipal(self, principal):
"""See IPublicationRequest"""
super().setPrincipal(principal)
logging_info = ILoggingInfo(principal, None)
if logging_info is None:
message = '-'
else:
message = logging_info.getLogMessage()
self.response.authUser = message
def _createResponse(self):
# Should be overridden by subclasses
return HTTPResponse()
def getURL(self, level=0, path_only=False):
names = self._app_names + self._traversed_names
if level:
if level > len(names):
raise IndexError(level)
names = names[:-level]
# See: http://www.ietf.org/rfc/rfc2718.txt, Section 2.2.5
names = [quote(name.encode("utf-8"), safe='/+@') for name in names]
if path_only:
if not names:
return '/'
return '/' + '/'.join(names)
else:
if not names:
return self._app_server
return "{}/{}".format(self._app_server, '/'.join(names))
def getApplicationURL(self, depth=0, path_only=False):
"""See IHTTPApplicationRequest"""
if depth:
names = self._traversed_names
if depth > len(names):
raise IndexError(depth)
names = self._app_names + names[:depth]
else:
names = self._app_names
# See: http://www.ietf.org/rfc/rfc2718.txt, Section 2.2.5
names = [quote(name.encode("utf-8"), safe='/+@') for name in names]
if path_only:
return names and ('/' + '/'.join(names)) or '/'
else:
return (names
and ("{}/{}".format(self._app_server, '/'.join(names)))
or self._app_server)
def setApplicationServer(self, host, proto='http', port=None):
if port and str(port) != DEFAULT_PORTS.get(proto):
host = '{}:{}'.format(host, port)
self._app_server = '{}://{}'.format(proto, host)
zope.event.notify(HTTPVirtualHostChangedEvent(self))
def shiftNameToApplication(self):
"""Add the name being traversed to the application name
This is only allowed in the case where the name is the first name.
A Value error is raise if the shift can't be performed.
"""
if len(self._traversed_names) == 1:
self._app_names.append(self._traversed_names.pop())
zope.event.notify(HTTPVirtualHostChangedEvent(self))
return
raise ValueError("Can only shift leading traversal "
"names to application names")
def setVirtualHostRoot(self, names=()):
del self._traversed_names[:]
self._vh_root = self._last_obj_traversed
self._app_names = list(names)
zope.event.notify(HTTPVirtualHostChangedEvent(self))
def getVirtualHostRoot(self):
return self._vh_root
URL = RequestDataProperty(URLGetter)
def __repr__(self):
# Returns a *short* string.
return '<{}.{} instance URL={}>'.format(
self.__class__.__module__, self.__class__.__name__, str(self.URL))
def get(self, key, default=None):
"""See Interface.Common.Mapping.IReadMapping"""
marker = object()
result = self._cookies.get(key, marker)
if result is not marker:
return result
return super().get(key, default)
def keys(self):
"""See Interface.Common.Mapping.IEnumerableMapping"""
d = {}
d.update(self._environ)
d.update(self._cookies)
return d.keys()
@zope.interface.implementer(IHTTPResponse, IHTTPApplicationResponse)
class HTTPResponse(BaseResponse):
__slots__ = (
'authUser', # Authenticated user string
'_headers',
'_cookies',
'_status', # The response status (usually an integer)
'_reason', # The reason that goes with the status
'_status_set', # Boolean: status explicitly set
'_charset', # String: character set for the output
)
def __init__(self):
super().__init__()
self.reset()
def reset(self):
"""See IResponse"""
super().reset()
self._headers = {}
self._cookies = {}
self._status = 599
self._reason = 'No status set'
self._status_set = False
self._charset = None
self.authUser = '-'
def setStatus(self, status, reason=None):
"""See IHTTPResponse"""
if status is None:
status = 200
try:
status = int(status)
except ValueError:
if isinstance(status, (str, bytes)):
status = status.lower()
# Use a standard status code, falling back to 500 for
# nonstandard values (such as "valueerror")
status = status_codes.get(status, 500)
self._status = status
if reason is None:
reason = status_reasons.get(status, "Unknown")
self._reason = reason
self._status_set = True
def getStatus(self):
"""See IHTTPResponse"""
return self._status
def getStatusString(self):
"""See IHTTPResponse"""
return '%i %s' % (self._status, self._reason)
def setHeader(self, name, value, literal=False):
"""See IHTTPResponse"""
name = str(name)
value = str(value)
if not literal:
name = name.lower()
self._headers[name] = [value]
def addHeader(self, name, value):
"""See IHTTPResponse"""
values = self._headers.setdefault(name, [])
values.append(value)
def getHeader(self, name, default=None, literal=False):
"""See IHTTPResponse"""
key = name.lower()
name = literal and name or key
result = self._headers.get(name)
if result:
return result[0]
return default
def getHeaders(self):
"""See IHTTPResponse"""
result = []
headers = self._headers
result.append(
("X-Powered-By", "Zope (www.zope.org), Python (www.python.org)"))
for key, values in sorted(headers.items(), key=lambda x: x[0].lower()):
if key.lower() == key:
# only change non-literal header names
key = '-'.join([k.capitalize() for k in key.split('-')])
result.extend([(key, val) for val in values])
result.extend([tuple(cookie.split(': ', 1))
for cookie in self._cookie_list()])
return result
def appendToCookie(self, name, value):
"""See IHTTPResponse"""
cookies = self._cookies
if name in cookies:
cookie = cookies[name]
else:
cookie = cookies[name] = {}
if 'value' in cookie:
cookie['value'] = '{}:{}'.format(cookie['value'], value)
else:
cookie['value'] = value
def expireCookie(self, name, **kw):
"""See IHTTPResponse"""
dict = {'max_age': 0, 'expires': 'Wed, 31-Dec-97 23:59:59 GMT'}
for k, v in kw.items():
if v is not None:
dict[k] = v
cookies = self._cookies
if name in cookies:
# Cancel previous setCookie().
del cookies[name]
self.setCookie(name, 'deleted', **dict)
def setCookie(self, name, value, **kw):
"""See IHTTPResponse"""
cookies = self._cookies
cookie = cookies.setdefault(name, {})
for k, v in kw.items():
if v is not None:
cookie[k.lower()] = v
cookie['value'] = value
def getCookie(self, name, default=None):
"""See IHTTPResponse"""
return self._cookies.get(name, default)
def setResult(self, result):
"""See IHTTPResponse"""
if IResult.providedBy(result):
r = result
else:
r = zope.component.queryMultiAdapter(
(result, self._request), IResult)
if r is None:
if isinstance(result, (str, bytes)):
r = result
elif result is None:
# Default to bytes because str results require a
# corresponding Content-Type header.
r = b''
else:
raise TypeError(
'The result should be None, a string, or adaptable to '
'IResult.')
if isinstance(r, (str, bytes)):
r, headers = self._implicitResult(r)
self._headers.update({k: [v] for (k, v) in headers})
r = (r,) # chunking should be much larger than per character
self._result = r
if not self._status_set:
self.setStatus(200)
def consumeBody(self):
"""See IHTTPResponse"""
return b''.join(self._result)
def consumeBodyIter(self):
"""See IHTTPResponse"""
return self._result
def _implicitResult(self, body):
encoding = getCharsetUsingRequest(self._request) or 'utf-8'
content_type = self.getHeader('content-type') or ''
if isinstance(body, str):
ct = content_type
if not unicode_mimetypes_re.match(ct):
raise ValueError(
'str results must have a text, RFC 3023, RFC 4627,'
' or +xml content type.')
major, minor, params = zope.contenttype.parse.parse(ct)
if 'charset' in params:
encoding = params['charset']
try:
body = body.encode(encoding)
except (UnicodeEncodeError, LookupError):
# RFC 2616 section 10.4.7 allows us to return an
# unacceptable encoding instead of 406 Not Acceptable
# response.
encoding = 'utf-8'
body = body.encode(encoding)
if (major, minor) != ('application', 'json'):
# The RFC says this is UTF-8, and the type has no params.
params['charset'] = encoding
content_type = "{}/{}".format(major, minor)
if params:
content_type += ";"
content_type += ";".join(k + "=" + v
for k, v in params.items())
if content_type:
headers = [('content-type', content_type),
('content-length', str(len(body)))]
else:
headers = [('content-length', str(len(body)))]
return body, headers
def handleException(self, exc_info):
"""
Calls self.setBody() with an error response.
"""
t, v = exc_info[:2]
if isinstance(t, type):
if issubclass(t, Redirect):
self.redirect(v.getLocation(), trusted=v.getTrusted())
return
title = tname = t.__name__
else:
title = tname = t
# Throwing non-protocol-specific exceptions is a good way
# for apps to control the status code.
self.setStatus(tname)
body = self._html(title, "A server error occurred.")
self.setHeader("Content-Type", "text/html")
self.setResult(body)
def internalError(self):
'See IPublisherResponse'
self.setStatus(500, "The engines can't take any more, Jim!")
def _html(self, title, content):
t = escape(title)
return (
"<html><head><title>%s</title></head>\n"
"<body><h2>%s</h2>\n"
"%s\n"
"</body></html>\n"
) % (t, t, content)
def retry(self):
"""
Returns a response object to be used in a retry attempt
"""
return self.__class__()
def redirect(self, location, status=None, trusted=False):
"""Causes a redirection without raising an error"""
# Convert the location to a string, as it could be a non-string object
# that is convertible to a string, for example a URLGetter instance.
location = str(location)
__traceback_info__ = location
if not trusted:
target_host = extract_host(location)
if target_host:
app_host = extract_host(self._request.getApplicationURL())
if target_host != app_host:
raise ValueError(
"Untrusted redirect to host %r not allowed."
% target_host)
if status is None:
# parse the HTTP version and set default accordingly
if (self._request.get("SERVER_PROTOCOL", "HTTP/1.0") <
"HTTP/1.1"):
status = 302
else:
status = 303
self.setStatus(status)
self.setHeader('Location', location)
self.setResult(DirectResult(()))
return location
def _cookie_list(self):
try:
c = cookies.SimpleCookie()
except cookies.CookieError as e:
eventlog.warning(e)
return []
for name, attrs in self._cookies.items():
name = str(name)
# The Cookie module expects latin-1 unicode string.
cookieval = attrs['value'].encode(ENCODING)
c[name] = cookieval.decode('latin-1')
for k, v in attrs.items():
if k == 'value':
continue
if k == 'secure':
if v:
c[name]['secure'] = True
continue
if k == 'max_age':
k = 'max-age'
elif k == 'comment':
# Encode rather than throw an exception
v = quote(v.encode('utf-8'), safe="/?:@&+")
c[name][k] = str(v)
return str(c).splitlines()
def write(*_):
raise TypeError(
"The HTTP response write method is no longer supported. "
"See the file httpresults.txt in the zope.publisher package "
"for more information."
)
def sort_charsets(charset):
# Make utf-8 the last element of the sorted list
if charset[1] == 'utf-8':
return (1, charset)
# Otherwise, sort by charset
return (0, charset)
def extract_host(url):
scheme, host, path, query, fragment = urlsplit(url)
if ':' not in host:
port = DEFAULT_PORTS.get(scheme)
if port:
host = '{}:{}'.format(host, port)
return host
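# Illustrative sketch (not part of the original module): extract_host()
# appends the scheme's default port when none is given (assuming the
# DEFAULT_PORTS table defined earlier in this module maps 'http' to '80'), so
# that the trusted-redirect check in redirect() compares like with like:
#
#   >>> extract_host('http://example.com/some/page')
#   'example.com:80'
#   >>> extract_host('https://example.com:8443/')
#   'example.com:8443'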
@zope.interface.implementer(IUserPreferredCharsets)
@zope.component.adapter(IHTTPRequest)
class HTTPCharsets:
def __init__(self, request):
self.request = request
def getPreferredCharsets(self):
'''See interface IUserPreferredCharsets'''
charsets = []
sawstar = sawiso88591 = 0
header_present = bool(self.request.get('HTTP_ACCEPT_CHARSET'))
for charset in self.request.get('HTTP_ACCEPT_CHARSET', '').split(','):
charset = charset.strip().lower()
if charset:
if ';' in charset:
try:
charset, quality = charset.split(';')
except ValueError:
continue
if not quality.startswith('q='):
# not a quality parameter
quality = 1.0
else:
try:
quality = float(quality[2:])
except ValueError:
continue
else:
quality = 1.0
if quality == 0.0:
continue
if charset == '*':
sawstar = 1
if charset == 'iso-8859-1':
sawiso88591 = 1
charsets.append((quality, charset))
# Quoting RFC 2616, $14.2: If no "*" is present in an Accept-Charset
# field, then all character sets not explicitly mentioned get a
# quality value of 0, except for ISO-8859-1, which gets a quality
# value of 1 if not explicitly mentioned.
# And quoting RFC 2616, $14.2: "If no Accept-Charset header is
# present, the default is that any character set is acceptable."
if not sawstar and not sawiso88591 and header_present:
charsets.append((1.0, 'iso-8859-1'))
# UTF-8 is **always** preferred over anything else.
# Reason: UTF-8 is not language-specific and can encode the entire
# range of characters, unlike many other encodings. Since Zope
# applications can easily serve very different character ranges, such
# as a French-Chinese dictionary, UTF-8 is always a good choice.
charsets.sort(key=sort_charsets, reverse=True)
charsets = [charset for quality, charset in charsets]
if sawstar and 'utf-8' not in charsets:
charsets.insert(0, 'utf-8')
elif charsets == []:
charsets = ['utf-8']
return charsets
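# Illustrative sketch (not part of the original module): any object with a
# dict-like get() is enough to demonstrate the adapter.  Charsets are ordered
# by quality, the implicit ISO-8859-1 is added when the header is present,
# and UTF-8 always comes first:
#
#   >>> class FakeRequest(dict):
#   ...     """Stand-in providing only get()."""
#   >>> request = FakeRequest(
#   ...     HTTP_ACCEPT_CHARSET='iso-8859-5;q=0.9, unicode-1-1;q=0.8, utf-8')
#   >>> HTTPCharsets(request).getPreferredCharsets()
#   ['utf-8', 'iso-8859-1', 'iso-8859-5', 'unicode-1-1']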
def getCharsetUsingRequest(request):
'See IHTTPResponse'
envadapter = IUserPreferredCharsets(request, None)
if envadapter is None:
return
try:
charset = envadapter.getPreferredCharsets()[0]
except IndexError:
# Exception caused by empty list! This is okay though, since the
# browser just could have sent a '*', which means we can choose
# the encoding, which we do here now.
charset = 'utf-8'
return charset
@zope.interface.implementer(IResult)
class DirectResult:
"""A generic result object.
The result's body can be any iterable. It is the responsibility of the
application to specify all headers related to the content, such as the
content type and length.
"""
def __init__(self, body):
self.body = body
def __iter__(self):
if isinstance(self.body, bytes):
return iter([self.body])
return iter(self.body)
# BBB
try:
from zope.login.http import BasicAuthAdapter # noqa: F401 import unused
except ImportError:
pass
| zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/http.py | http.py
import re
from email.message import Message
from urllib.parse import parse_qsl
import multipart
import zope.component
import zope.interface
from zope.i18n.interfaces import IModifiableUserPreferredLanguages
from zope.i18n.interfaces import IUserPreferredCharsets
from zope.i18n.interfaces import IUserPreferredLanguages
from zope.interface import directlyProvides
from zope.interface import implementer
from zope.location import Location
from zope.publisher.http import HTTPRequest
from zope.publisher.http import HTTPResponse
from zope.publisher.http import getCharsetUsingRequest
# BBB imports, these components got moved from this module
from zope.publisher.interfaces import IHeld
from zope.publisher.interfaces import ISkinChangedEvent # noqa: F401
from zope.publisher.interfaces import ISkinType # noqa: F401 import unused
from zope.publisher.interfaces import NotFound
from zope.publisher.interfaces.browser import IBrowserApplicationRequest
from zope.publisher.interfaces.browser import IBrowserPage
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IBrowserView
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from zope.publisher.interfaces.http import IHTTPRequest
from zope.publisher.skinnable import SkinChangedEvent # noqa: F401
from zope.publisher.skinnable import applySkin # noqa: F401
from zope.publisher.skinnable import getDefaultSkin # noqa: F401
from zope.publisher.skinnable import setDefaultSkin # noqa: F401
__ArrayTypes = (list, tuple)
start_of_header_search = re.compile(b'(<head[^>]*>)', re.I).search
base_re_search = re.compile(b'(<base.*?>)', re.I).search
isRelative = re.compile("[-_.!~*a-zA-Z0-9'()@&=+$,]+(/|$)").match
newlines = re.compile('\r\n|\n\r|\r')
def is_text_html(content_type):
return content_type.startswith('text/html')
# Flag Constants
SEQUENCE = 1
DEFAULT = 2
RECORD = 4
RECORDS = 8
REC = RECORD | RECORDS
CONVERTED = 32
DEFAULTABLE_METHODS = 'GET', 'POST', 'HEAD'
def field2string(v):
if hasattr(v, 'read'):
return v.read()
return str(v)
def field2text(v, nl=newlines):
return nl.sub("\n", field2string(v))
def field2required(v):
v = field2string(v)
if not v.strip():
raise ValueError('No input for required field<p>')
return v
def field2int(v):
if isinstance(v, __ArrayTypes):
return list(map(field2int, v))
v = field2string(v)
if not v:
raise ValueError('Empty entry when <strong>integer</strong> expected')
try:
return int(v)
except ValueError:
raise ValueError("An integer was expected in the value '%s'" % v)
def field2float(v):
if isinstance(v, __ArrayTypes):
return list(map(field2float, v))
v = field2string(v)
if not v:
raise ValueError(
'Empty entry when <strong>floating-point number</strong> expected')
try:
return float(v)
except ValueError:
raise ValueError(
"A floating-point number was expected in the value '%s'" % v)
def field2long(v):
if isinstance(v, __ArrayTypes):
return list(map(field2long, v))
v = field2string(v)
# handle trailing 'L' if present.
if v and v[-1].upper() == 'L':
v = v[:-1]
if not v:
raise ValueError('Empty entry when <strong>integer</strong> expected')
try:
return int(v)
except ValueError:
raise ValueError("A long integer was expected in the value '%s'" % v)
def field2tokens(v):
return field2string(v).split()
def field2lines(v):
if isinstance(v, __ArrayTypes):
return [str(item) for item in v]
return field2text(v).splitlines()
def field2boolean(v):
return bool(v)
type_converters = {
'float': field2float,
'int': field2int,
'long': field2long,
'string': field2string,
'required': field2required,
'tokens': field2tokens,
'lines': field2lines,
'text': field2text,
'boolean': field2boolean,
}
get_converter = type_converters.get
def registerTypeConverter(field_type, converter, replace=False):
"""Add a custom type converter to the registry.
o If 'replace' is not true, raise a KeyError if a converter is
already registered for 'field_type'.
"""
existing = type_converters.get(field_type)
if existing is not None and not replace:
raise KeyError('Existing converter for field_type: %s' % field_type)
type_converters[field_type] = converter
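# Illustrative sketch (not part of the original module): the converters above
# are selected by the ":type" suffix of a form field name, and
# registerTypeConverter() lets applications add their own (the 'upper'
# converter below is hypothetical):
#
#   >>> def field2upper(v):
#   ...     return field2string(v).upper()
#   >>> registerTypeConverter('upper', field2upper)
#   >>> get_converter('upper')('abc')
#   'ABC'
#
# A form field named e.g. "code:upper" would then be upper-cased during
# BrowserRequest.processInputs().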
def isCGI_NAME(key):
return key in {
# These fields are placed in request.environ instead of request.form.
'SERVER_SOFTWARE',
'SERVER_NAME',
'GATEWAY_INTERFACE',
'SERVER_PROTOCOL',
'SERVER_PORT',
'REQUEST_METHOD',
'PATH_INFO',
'PATH_TRANSLATED',
'SCRIPT_NAME',
'QUERY_STRING',
'REMOTE_HOST',
'REMOTE_ADDR',
'AUTH_TYPE',
'REMOTE_USER',
'REMOTE_IDENT',
'CONTENT_TYPE',
'CONTENT_LENGTH',
'SERVER_URL',
}
def hide_key(key):
return key in {
'HTTP_AUTHORIZATION',
'HTTP_CGI_AUTHORIZATION',
}
class Record:
_attrs = frozenset(('get', 'keys', 'items', 'values', 'copy',
'has_key', '__contains__'))
def __getattr__(self, key, default=None):
if key in self._attrs:
return getattr(self.__dict__, key)
raise AttributeError(key)
def __getitem__(self, key):
return self.__dict__[key]
def __str__(self):
items = list(self.__dict__.items())
items.sort()
return "{" + ", ".join(["%s: %s" % item for item in items]) + "}"
def __repr__(self):
items = list(self.__dict__.items())
items.sort()
return ("{"
+ ", ".join(["{}: {!r}".format(key, value)
for key, value in items]) + "}")
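# Illustrative sketch (not part of the original module): Record instances are
# simple attribute bags produced for ":record" and ":records" form fields.
# Roughly:
#
#   >>> r = Record()
#   >>> r.name = 'bob'          # normally filled in by the form machinery
#   >>> r['name']
#   'bob'
#   >>> r.get('name')
#   'bob'
#   >>> str(r)
#   '{name: bob}'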
_get_or_head = 'GET', 'HEAD'
@implementer(IBrowserRequest, IBrowserApplicationRequest)
class BrowserRequest(HTTPRequest):
__slots__ = (
'__provides__', # Allow request to directly provide interfaces
'form', # Form data
'charsets', # helper attribute
'__meth',
'__tuple_items',
'__defaults',
'__annotations__',
)
# Set this to True in a subclass to redirect GET requests when the
# effective and actual URLs differ.
use_redirect = False
default_form_charset = 'UTF-8'
def __init__(self, body_instream, environ, response=None):
self.form = {}
self.charsets = None
super().__init__(body_instream, environ, response)
def _createResponse(self):
return BrowserResponse()
def _decode(self, text):
"""Try to decode the text using one of the available charsets."""
if self.charsets is None:
envadapter = IUserPreferredCharsets(self)
self.charsets = envadapter.getPreferredCharsets() or ['utf-8']
self.charsets = [c for c in self.charsets if c != '*']
# All text comes from parse_qsl or multipart.parse_form_data, and
# has normally already been decoded into Unicode according to a
# request-specified encoding. However, in the case of query strings
# for GET/HEAD requests we may not be sure of the encoding and must
# guess.
if isinstance(text, bytes):
for charset in self.charsets:
try:
text = text.decode(charset)
break
except UnicodeError:
pass
# XXX so when none of the provided charsets works we just return bytes
# and let the application crash???
return text
def processInputs(self):
'See IPublisherRequest'
items = []
# We could simply not parse QUERY_STRING if it's absent, but this
# provides slightly better doctest-compatibility with the old code
# based on cgi.FieldStorage.
self._environ.setdefault('QUERY_STRING', '')
if self.method in _get_or_head:
kwargs = {}
# For now, use an encoding that can decode any byte
# sequence. We'll do some guesswork later.
kwargs['encoding'] = 'ISO-8859-1'
kwargs['errors'] = 'replace'
query_items = parse_qsl(
self._environ['QUERY_STRING'], keep_blank_values=True,
**kwargs)
for key, value in query_items:
# Encode back to bytes for later guessing.
value = value.encode('ISO-8859-1')
items.append((key, value))
elif self.method not in _get_or_head:
env = self._environ.copy()
env['wsgi.input'] = self._body_instream
# cgi.FieldStorage used to set the default Content-Type for POST
# requests to a "traditional" value. Do that here for
# compatibility.
if env.get('REQUEST_METHOD') == 'POST':
env.setdefault(
'CONTENT_TYPE', 'application/x-www-form-urlencoded')
ctype = env.get('CONTENT_TYPE')
# Of course this isn't email, but email.message.Message has
# a handy Content-Type parser.
msg = Message()
msg['Content-Type'] = ctype
# cgi.FieldStorage treated any multipart/* Content-Type as
# multipart/form-data. This seems a bit dodgy, but for
# compatibility we emulate it for now.
if ctype is not None and msg.get_content_maintype() == 'multipart':
msg.set_type('multipart/form-data')
env['CONTENT_TYPE'] = msg['Content-Type']
# cgi.FieldStorage allowed any HTTP method, while
# multipart.parse_form_data only allows POST or PUT. However,
# it's helpful to support methods such as PATCH too, and
# multipart doesn't actually care beyond an initial check, so
# just pretend everything is POST from here on.
env['REQUEST_METHOD'] = 'POST'
# According to PEP 333 CONTENT_LENGTH may be empty or absent.
# An empty string here breaks multipart, because it's an invalid
# value according to RFC 2616 (HTTP/1.1).
if env.get('CONTENT_LENGTH') == '':
env.pop('CONTENT_LENGTH')
forms, files = multipart.parse_form_data(
env, charset=self.default_form_charset, memfile_limit=0)
items.extend(forms.iterallitems())
for key, item in files.iterallitems():
# multipart puts fields in 'files' even if no upload was
# made. We only consider fields to be file uploads if a
# filename was passed in and data was uploaded.
if item.file:
if item.filename:
# RFC 7578 section 4.2 says:
# Some commonly deployed systems use
# multipart/form-data with file names directly
# encoded including octets outside the US-ASCII
# range. The encoding used for the file names is
# typically UTF-8, although HTML forms will use
# the charset associated with the form.
# So we must decode the filename according to our
# usual rules.
item.filename = self._decode(item.filename)
item = FileUpload(item)
else:
value = item.value
item.file.close()
item = value
else:
item = item.value
self.hold(item)
items.append((key, item))
if items:
self.__meth = None
self.__tuple_items = {}
self.__defaults = {}
# process all entries in the field storage (form)
for key, item in items:
self.__processItem(key, item)
if self.__defaults:
self.__insertDefaults()
if self.__tuple_items:
self.__convertToTuples()
if self.__meth:
self.setPathSuffix((self.__meth,))
_typeFormat = re.compile('([a-zA-Z][a-zA-Z0-9_]+|\\.[xy])$')
def __processItem(self, key, item):
"""Process item in the field storage."""
flags = 0
converter = None
# Loop through the different types and set
# the appropriate flags
# Syntax: var_name:type_name
# We'll search from the back to the front.
# We'll do the search in two steps. First, we'll
# do a string search, and then we'll check it with
# a re search.
while key:
pos = key.rfind(":")
if pos < 0:
break
match = self._typeFormat.match(key, pos + 1)
if match is None:
break
key, type_name = key[:pos], key[pos + 1:]
# find the right type converter
c = get_converter(type_name, None)
if c is not None:
converter = c
flags |= CONVERTED
elif type_name == 'list':
flags |= SEQUENCE
elif type_name == 'tuple':
self.__tuple_items[key] = 1
flags |= SEQUENCE
elif (type_name == 'method' or type_name == 'action'):
if key:
self.__meth = key
else:
self.__meth = item
elif (type_name == 'default_method'
or type_name == 'default_action') and not self.__meth:
if key:
self.__meth = key
else:
self.__meth = item
elif type_name == 'default':
flags |= DEFAULT
elif type_name == 'record':
flags |= RECORD
elif type_name == 'records':
flags |= RECORDS
elif type_name == 'ignore_empty' and not item:
# skip over empty fields
return
if key is not None:
key = self._decode(key)
if isinstance(item, (str, bytes)):
item = self._decode(item)
if flags:
self.__setItemWithType(key, item, flags, converter)
else:
self.__setItemWithoutType(key, item)
def __setItemWithoutType(self, key, item):
"""Set item value without explicit type."""
form = self.form
if key not in form:
form[key] = item
else:
found = form[key]
if isinstance(found, list):
found.append(item)
else:
form[key] = [found, item]
def __setItemWithType(self, key, item, flags, converter):
"""Set item value with explicit type."""
# Split the key and its attribute
if flags & REC:
key, attr = self.__splitKey(key)
# defer conversion
if flags & CONVERTED:
try:
item = converter(item)
except: # noqa: E722 do not use bare 'except'
if item or flags & DEFAULT or key not in self.__defaults:
raise
item = self.__defaults[key]
if flags & RECORD:
item = getattr(item, attr)
elif flags & RECORDS:
item = getattr(item[-1], attr)
# Determine which dictionary to use
if flags & DEFAULT:
form = self.__defaults
else:
form = self.form
# Insert in dictionary
if key not in form:
if flags & SEQUENCE:
item = [item]
if flags & RECORD:
r = form[key] = Record()
setattr(r, attr, item)
elif flags & RECORDS:
r = Record()
setattr(r, attr, item)
form[key] = [r]
else:
form[key] = item
else:
r = form[key]
if flags & RECORD:
if not flags & SEQUENCE:
setattr(r, attr, item)
else:
if not hasattr(r, attr):
setattr(r, attr, [item])
else:
getattr(r, attr).append(item)
elif flags & RECORDS:
last = r[-1]
if not hasattr(last, attr):
if flags & SEQUENCE:
item = [item]
setattr(last, attr, item)
else:
if flags & SEQUENCE:
getattr(last, attr).append(item)
else:
new = Record()
setattr(new, attr, item)
r.append(new)
else:
if isinstance(r, list):
r.append(item)
else:
form[key] = [r, item]
def __splitKey(self, key):
"""Split the key and its attribute."""
i = key.rfind(".")
if i >= 0:
return key[:i], key[i + 1:]
return key, ""
def __convertToTuples(self):
"""Convert form values to tuples."""
form = self.form
for key in self.__tuple_items:
if key in form:
form[key] = tuple(form[key])
else:
k, attr = self.__splitKey(key)
# remove any type_names in the attr
i = attr.find(":")
if i >= 0:
attr = attr[:i]
if k in form:
item = form[k]
if isinstance(item, Record):
if hasattr(item, attr):
setattr(item, attr, tuple(getattr(item, attr)))
else:
for v in item:
if hasattr(v, attr):
setattr(v, attr, tuple(getattr(v, attr)))
def __insertDefaults(self):
"""Insert defaults into form dictionary."""
form = self.form
for keys, values in self.__defaults.items():
if keys not in form:
form[keys] = values
else:
item = form[keys]
if isinstance(values, Record):
for k, v in values.items():
if not hasattr(item, k):
setattr(item, k, v)
elif isinstance(values, list):
for val in values:
if isinstance(val, Record):
for k, v in val.items():
for r in item:
if not hasattr(r, k):
setattr(r, k, v)
elif val not in item:
item.append(val)
def traverse(self, obj):
"""See IPublisherRequest."""
ob = super().traverse(obj)
method = self.method
base_needed = 0
if self._path_suffix:
# We had a :method variable, so we need to set the base,
# but we don't look for default documents any more.
base_needed = 1
redirect = 0
elif method in DEFAULTABLE_METHODS:
# We need to check for default documents
publication = self.publication
nsteps = 0
ob, add_steps = publication.getDefaultTraversal(self, ob)
while add_steps:
nsteps += len(add_steps)
add_steps = list(add_steps)
add_steps.reverse()
self.setTraversalStack(add_steps)
ob = super().traverse(ob)
ob, add_steps = publication.getDefaultTraversal(self, ob)
if nsteps != self._endswithslash:
base_needed = 1
redirect = self.use_redirect and method == 'GET'
if base_needed:
url = self.getURL()
response = self.response
if redirect:
response.redirect(url)
return ''
elif not response.getBase():
response.setBase(url)
return ob
def keys(self):
'See Interface.Common.Mapping.IEnumerableMapping'
d = {}
d.update(self._environ)
d.update(self._cookies)
d.update(self.form)
return list(d.keys())
def get(self, key, default=None):
'See Interface.Common.Mapping.IReadMapping'
marker = object()
result = self.form.get(key, marker)
if result is not marker:
return result
return super().get(key, default)
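# Illustrative sketch (not part of the original module): the ":type" suffixes
# recognized by __processItem drive form marshalling.  For a GET request
# whose query string is
#
#     ages:list:int=3&ages:list:int=5&name=bob
#
# processInputs() would (roughly) leave
#
#     request.form == {'ages': [3, 5], 'name': 'bob'}
#
# that is, ":int" converts each value, ":list" collects the values into a
# sequence, and the suffixes themselves are stripped from the key.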
@implementer(IHeld)
class FileUpload:
'''File upload objects
File upload objects are used to represent file-uploaded data.
File upload objects can be used just like files.
In addition, they have a 'headers' attribute that is a dictionary
containing the file-upload headers, and a 'filename' attribute
containing the name of the uploaded file.
'''
def __init__(self, aFieldStorage):
file = aFieldStorage.file
if hasattr(file, '__methods__'):
methods = file.__methods__
else:
methods = ['close', 'fileno', 'flush', 'isatty',
'read', 'readline', 'readlines', 'seek',
'tell', 'truncate', 'write', 'writelines',
'seekable']
d = self.__dict__
for m in methods:
if hasattr(file, m):
d[m] = getattr(file, m)
self.headers = aFieldStorage.headers
filename = aFieldStorage.filename
if filename is not None:
if isinstance(filename, bytes):
filename = filename.decode('UTF-8')
# fix for IE full paths
filename = filename[filename.rfind('\\')+1:].strip()
self.filename = filename
def release(self):
self.close()
class RedirectingBrowserRequest(BrowserRequest):
"""Browser requests that redirect when the actual and effective URLs differ
"""
use_redirect = True
class TestRequest(BrowserRequest):
"""Browser request with a constructor convenient for testing
"""
def __init__(self, body_instream=None, environ=None, form=None,
skin=None, **kw):
_testEnv = {
'SERVER_URL': 'http://127.0.0.1',
'HTTP_HOST': '127.0.0.1',
'CONTENT_LENGTH': '0',
'GATEWAY_INTERFACE': 'TestFooInterface/1.0',
}
if environ is not None:
_testEnv.update(environ)
if kw:
_testEnv.update(kw)
if body_instream is None:
from io import BytesIO
body_instream = BytesIO()
super().__init__(body_instream, _testEnv)
if form:
self.form.update(form)
# Setup locale object
langs = BrowserLanguages(self).getPreferredLanguages()
from zope.i18n.locales import locales
if not langs or langs[0] == '':
self._locale = locales.getLocale(None, None, None)
else:
parts = (langs[0].split('-') + [None, None])[:3]
self._locale = locales.getLocale(*parts)
if skin is not None:
directlyProvides(self, skin)
else:
directlyProvides(self, IDefaultBrowserLayer)
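# Illustrative sketch (not part of the original module): TestRequest builds a
# self-contained browser request that is convenient in unit tests:
#
#   >>> request = TestRequest(form={'name': 'bob'})
#   >>> request.form['name']
#   'bob'
#   >>> IDefaultBrowserLayer.providedBy(request)
#   True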
class BrowserResponse(HTTPResponse):
"""Browser response
"""
__slots__ = (
'_base', # The base href
)
def _implicitResult(self, body):
content_type = self.getHeader('content-type')
if content_type is None and self._status != 304:
if isHTML(body):
content_type = 'text/html'
else:
content_type = 'text/plain'
self.setHeader('x-content-type-warning', 'guessed from content')
self.setHeader('content-type', content_type)
body, headers = super()._implicitResult(body)
body = self.__insertBase(body)
# Update the Content-Length header to account for the inserted
# <base> tag.
headers = [
(name, value) for name, value in headers
if name != 'content-length'
]
headers.append(('content-length', str(len(body))))
return body, headers
def __insertBase(self, body):
# Only insert a base tag if content appears to be html.
content_type = self.getHeader('content-type', '')
if content_type and not is_text_html(content_type):
return body
if self.getBase():
if body:
match = start_of_header_search(body)
if match is not None:
index = match.start(0) + len(match.group(0))
ibase = base_re_search(body)
if ibase is None:
# Make sure the base URL is not a unicode string.
base = self.getBase()
if not isinstance(base, bytes):
encoding = getCharsetUsingRequest(
self._request) or 'utf-8'
base = self.getBase().encode(encoding)
body = b''.join([body[:index],
b'\n<base href="',
base,
b'" />\n',
body[index:]])
return body
def getBase(self):
return getattr(self, '_base', '')
def setBase(self, base):
self._base = base
def redirect(self, location, status=None, trusted=False):
base = getattr(self, '_base', '')
if base and isRelative(str(location)):
pos = base.rfind('/')
if pos >= 0:
base = base[:pos + 1]
else:
base += '/'
location = base + location
# TODO: HTTP redirects must provide an absolute location, see
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.30
# So, what if location is relative and base is unknown?
# Uncomment the following and you'll see that it actually
# happens.
#
# if isRelative(str(location)):
# raise AssertionError('Cannot determine absolute location')
return super().redirect(location, status, trusted)
def reset(self):
super().reset()
self._base = ''
def isHTML(str):
"""Try to determine whether str is HTML or not."""
if isinstance(str, bytes):
try:
str = str.decode()
except UnicodeDecodeError:
return False
s = str.lstrip().lower()
if s.startswith('<!doctype html'):
return True
if s.startswith('<html') and (s[5:6] in ' >'):
return True
if s.startswith('<!--'):
idx = s.find('<html')
return idx > 0 and (s[idx+5:idx+6] in ' >')
else:
return False
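# Illustrative sketch (not part of the original module): isHTML() only
# inspects the beginning of the (possibly bytes) string:
#
#   >>> isHTML(b'<html><body>Hi!</body></html>')
#   True
#   >>> isHTML('<!DOCTYPE html><html>...</html>')
#   True
#   >>> isHTML('just some plain text')
#   False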
def normalize_lang(lang):
lang = lang.strip().lower()
lang = lang.replace('_', '-')
lang = lang.replace(' ', '')
return lang
@zope.component.adapter(IHTTPRequest)
@implementer(IUserPreferredLanguages)
class BrowserLanguages:
def __init__(self, request):
self.request = request
def getPreferredLanguages(self):
'''See interface IUserPreferredLanguages'''
accept_langs = self.request.get('HTTP_ACCEPT_LANGUAGE', '').split(',')
# Normalize lang strings
accept_langs = [normalize_lang(lang) for lang in accept_langs]
# Then filter out empty ones
accept_langs = [lang for lang in accept_langs if lang]
accepts = []
for index, lang in enumerate(accept_langs):
lang = lang.split(';', 2)
# If not supplied, quality defaults to 1...
quality = 1.0
if len(lang) == 2:
q = lang[1]
if q.startswith('q='):
q = q.split('=', 2)[1]
try:
quality = float(q)
except ValueError:
# malformed quality value, skip it.
continue
if quality == 1.0:
# ... but we use 1.9 - 0.001 * position to
# keep the ordering between all items with
# 1.0 quality, which may include items with no quality
# defined, and items with quality defined as 1.
quality = 1.9 - (0.001 * index)
accepts.append((quality, lang[0]))
# Filter langs with q=0, which means
# unwanted lang according to the spec
# See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
accepts = [acc for acc in accepts if acc[0]]
accepts.sort()
accepts.reverse()
return [lang for quality, lang in accepts]
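# Illustrative sketch (not part of the original module): any object with a
# dict-like get() is enough to demonstrate the adapter:
#
#   >>> class FakeRequest(dict):
#   ...     """Stand-in providing only get()."""
#   >>> request = FakeRequest(HTTP_ACCEPT_LANGUAGE='da, en-gb;q=0.8, en;q=0.7')
#   >>> BrowserLanguages(request).getPreferredLanguages()
#   ['da', 'en-gb', 'en']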
class NotCompatibleAdapterError(Exception):
"""Adapter not compatible with
zope.i18n.interfaces.IModifiableBrowserLanguages has been used.
"""
BROWSER_LANGUAGES_KEY = "zope.publisher.browser.IUserPreferredLanguages"
class CacheableBrowserLanguages(BrowserLanguages):
def getPreferredLanguages(self):
languages_data = self._getLanguagesData()
if "overridden" in languages_data:
return languages_data["overridden"]
elif "cached" not in languages_data:
languages_data["cached"] = super().getPreferredLanguages()
return languages_data["cached"]
def _getLanguagesData(self):
annotations = self.request.annotations
languages_data = annotations.get(BROWSER_LANGUAGES_KEY)
if languages_data is None:
annotations[BROWSER_LANGUAGES_KEY] = languages_data = {}
return languages_data
@implementer(IModifiableUserPreferredLanguages)
class ModifiableBrowserLanguages(CacheableBrowserLanguages):
def setPreferredLanguages(self, languages):
languages_data = self.request.annotations.get(BROWSER_LANGUAGES_KEY)
if languages_data is None:
# A better way to create an adapter compatible with
# IModifiableUserPreferredLanguages is to use
# CacheableBrowserLanguages as a base class or as an example.
raise NotCompatibleAdapterError(
"Adapter not compatible with"
" zope.i18n.interfaces.IModifiableBrowserLanguages"
" has been used.")
languages_data["overridden"] = languages
self.request.setupLocale()
@implementer(IBrowserView)
class BrowserView(Location):
"""Browser View.
>>> view = BrowserView("context", "request")
>>> view.context
'context'
>>> view.request
'request'
>>> view.__parent__
'context'
>>> view.__parent__ = "parent"
>>> view.__parent__
'parent'
"""
def __init__(self, context, request):
self.context = context
self.request = request
def __getParent(self):
return getattr(self, '_parent', self.context)
def __setParent(self, parent):
self._parent = parent
__parent__ = property(__getParent, __setParent)
@implementer(IBrowserPage)
class BrowserPage(BrowserView):
"""Browser page
To create a page, which is an object that is published as a page,
you need to provide an object that:
- has a __call__ method and that
- provides IBrowserPublisher, and
- if ZPT is going to be used, then your object should also provide
request and context attributes.
The BrowserPage base class provides a standard constructor and a
simple implementation of IBrowserPublisher:
>>> class MyPage(BrowserPage):
... pass
>>> request = TestRequest()
>>> context = object()
>>> page = MyPage(context, request)
>>> from zope.publisher.interfaces.browser import IBrowserPublisher
>>> IBrowserPublisher.providedBy(page)
True
>>> page.browserDefault(request) == (page, ())
True
>>> page.publishTraverse(request, 'bob') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
zope.publisher.interfaces.NotFound: Object: <zope.publisher.browser.MyPage object at ...>, name: 'bob'
>>> page.request is request
True
>>> page.context is context
True
But it doesn't supply a __call__ method:
>>> page()
Traceback (most recent call last):
...
NotImplementedError: Subclasses should override __call__ to provide a response body
It is the subclass' responsibility to do that.
""" # noqa: E501 line too long
def browserDefault(self, request):
return self, ()
def publishTraverse(self, request, name):
raise NotFound(self, name, request)
def __call__(self, *args, **kw):
raise NotImplementedError("Subclasses should override __call__ to "
"provide a response body")
| zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/browser.py | browser.py
from io import BytesIO
from io import StringIO
from zope.exceptions.exceptionformatter import print_exception
from zope.interface import implementer
from zope.interface.common.mapping import IEnumerableMapping
from zope.interface.common.mapping import IReadMapping
from zope.security.proxy import removeSecurityProxy
from zope.publisher.interfaces import DebugError
from zope.publisher.interfaces import IDebugFlags
from zope.publisher.interfaces import IHeld
from zope.publisher.interfaces import IPublication
from zope.publisher.interfaces import IRequest
from zope.publisher.interfaces import IResponse
from zope.publisher.interfaces import NotFound
from zope.publisher.interfaces import Unauthorized
from zope.publisher.publish import mapply
_marker = object()
@implementer(IResponse)
class BaseResponse:
"""Base Response Class
"""
__slots__ = (
'_result', # The result of the application call
'_request', # The associated request (if any)
)
def __init__(self):
self._request = None
def setResult(self, result):
'See IPublisherResponse'
self._result = result
def handleException(self, exc_info):
'See IPublisherResponse'
f = StringIO()
print_exception(exc_info[0], exc_info[1], exc_info[2], 100, f)
self.setResult(f.getvalue())
def internalError(self):
'See IPublisherResponse'
pass
def reset(self):
'See IPublisherResponse'
pass
def retry(self):
'See IPublisherResponse'
return self.__class__()
@implementer(IReadMapping)
class RequestDataGetter:
def __init__(self, request):
self.__get = getattr(request, self._gettrname)
def __getitem__(self, name):
return self.__get(name)
def get(self, name, default=None):
return self.__get(name, default)
def __contains__(self, key):
lookup = self.get(key, self)
return lookup is not self
has_key = __contains__
@implementer(IEnumerableMapping)
class RequestDataMapper:
def __init__(self, request):
self.__map = getattr(request, self._mapname)
def __getitem__(self, name):
return self.__map[name]
def get(self, name, default=None):
return self.__map.get(name, default)
def __contains__(self, key):
lookup = self.get(key, self)
return lookup is not self
has_key = __contains__
def keys(self):
return self.__map.keys()
def __iter__(self):
return iter(self.keys())
def items(self):
return self.__map.items()
def values(self):
return self.__map.values()
def __len__(self):
return len(self.__map)
class RequestDataProperty:
def __init__(self, gettr_class):
self.__gettr_class = gettr_class
def __get__(self, request, rclass=None):
if request is not None:
return self.__gettr_class(request)
def __set__(*args):
raise AttributeError('Unassignable attribute')
class RequestEnvironment(RequestDataMapper):
_mapname = '_environ'
@implementer(IDebugFlags)
class DebugFlags:
"""Debugging flags."""
sourceAnnotations = False
showTAL = False
@implementer(IRequest)
class BaseRequest:
"""Represents a publishing request.
This object provides access to request data. Request data may
vary depending on the protocol used.
Request objects are created by the object publisher and will be
passed to published objects through the argument name, REQUEST.
The request object is a mapping object that represents a
collection of variable to value mappings.
"""
__slots__ = (
'__provides__', # Allow request to directly provide interfaces
'_held', # Objects held until the request is closed
'_traversed_names', # The names that have been traversed
'_last_obj_traversed', # Object that was traversed last
'_traversal_stack', # Names to be traversed, in reverse order
'_environ', # The request environment variables
'_response', # The response
'_args', # positional arguments
'_body_instream', # input stream
'_body', # The request body as a string
'_publication', # publication object
'_principal', # request principal, set by publication
'interaction', # interaction, set by interaction
'debug', # debug flags
'annotations', # per-package annotations
)
environment = RequestDataProperty(RequestEnvironment)
def __init__(self, body_instream, environ, response=None,
positional=None):
self._traversal_stack = []
self._last_obj_traversed = None
self._traversed_names = []
self._environ = environ
self._args = positional or ()
if response is None:
self._response = self._createResponse()
else:
self._response = response
self._response._request = self
self._body_instream = body_instream
self._held = ()
self._principal = None
self.debug = DebugFlags()
self.interaction = None
self.annotations = {}
def setPrincipal(self, principal):
self._principal = principal
principal = property(lambda self: self._principal)
def _getPublication(self):
"""See IPublisherRequest."""
return getattr(self, '_publication', None)
publication = property(_getPublication)
def processInputs(self):
"""See IPublisherRequest."""
# Nothing to do here
def retry(self):
"""See IPublisherRequest."""
raise TypeError('Retry is not supported')
def setPublication(self, pub):
"""See IPublisherRequest."""
self._publication = pub
def supportsRetry(self):
"""See IPublisherRequest."""
return 0
def traverse(self, obj):
"""See IPublisherRequest."""
publication = self.publication
traversal_stack = self._traversal_stack
traversed_names = self._traversed_names
prev_object = None
while True:
self._last_obj_traversed = obj
if (removeSecurityProxy(obj)
is not removeSecurityProxy(prev_object)):
# Invoke hooks (but not more than once).
publication.callTraversalHooks(self, obj)
if not traversal_stack:
# Finished traversal.
break
prev_object = obj
# Traverse to the next step.
entry_name = traversal_stack.pop()
traversed_names.append(entry_name)
obj = publication.traverseName(self, obj, entry_name)
return obj
def close(self):
"""See IPublicationRequest."""
for held in self._held:
if IHeld.providedBy(held):
held.release()
self._held = None
self._body_instream = None
self._publication = None
def getPositionalArguments(self):
"""See IPublicationRequest."""
return self._args
def _getResponse(self):
return self._response
response = property(_getResponse)
def getTraversalStack(self):
"""See IPublicationRequest."""
return list(self._traversal_stack) # Return a copy
def hold(self, object):
"""See IPublicationRequest."""
self._held = self._held + (object,)
def setTraversalStack(self, stack):
"""See IPublicationRequest."""
self._traversal_stack[:] = list(stack)
def _getBodyStream(self):
"""See zope.publisher.interfaces.IApplicationRequest."""
return self._body_instream
bodyStream = property(_getBodyStream)
def __len__(self):
"""See Interface.Common.Mapping.IEnumerableMapping."""
return len(self.keys())
def items(self):
"""See Interface.Common.Mapping.IEnumerableMapping."""
result = []
get = self.get
for k in self.keys():
result.append((k, get(k)))
return result
def keys(self):
"""See Interface.Common.Mapping.IEnumerableMapping."""
return self._environ.keys()
def __iter__(self):
return iter(self.keys())
def values(self):
"""See Interface.Common.Mapping.IEnumerableMapping."""
result = []
get = self.get
for k in self.keys():
result.append(get(k))
return result
def __getitem__(self, key):
"""See Interface.Common.Mapping.IReadMapping."""
result = self.get(key, _marker)
if result is _marker:
raise KeyError(key)
else:
return result
def get(self, key, default=None):
"""See Interface.Common.Mapping.IReadMapping."""
result = self._environ.get(key, _marker)
if result is not _marker:
return result
return default
def __contains__(self, key):
"""See Interface.Common.Mapping.IReadMapping."""
lookup = self.get(key, self)
return lookup is not self
has_key = __contains__
def _createResponse(self):
# Should be overridden by subclasses
return BaseResponse()
def __bool__(self):
# This is here to avoid calling __len__ for boolean tests
return True
def __str__(self):
L1 = self.items()
L1.sort()
return "\n".join(map(lambda item: "%s:\t%s" % item, L1))
def _setupPath_helper(self, attr):
path = self.get(attr, "/")
if path.endswith('/'):
# Remove the trailing slash, so that we will not get an empty
# last entry when splitting the path.
path = path[:-1]
self._endswithslash = True
else:
self._endswithslash = False
clean = []
for item in path.split('/'):
if not item or item == '.':
continue
elif item == '..':
# try to remove the last name
try:
del clean[-1]
except IndexError:
# the list of names was empty, so do nothing and let the
# string '..' be placed on the list
pass
clean.append(item)
clean.reverse()
self.setTraversalStack(clean)
self._path_suffix = None
class TestRequest(BaseRequest):
__slots__ = ('_presentation_type', )
def __init__(self, path, body_instream=None, environ=None):
if environ is None:
environ = {}
environ['PATH_INFO'] = path
if body_instream is None:
body_instream = BytesIO(b'')
super().__init__(body_instream, environ)
@implementer(IPublication)
class DefaultPublication:
"""A stub publication.
This works just like Zope2's ZPublisher. It rejects any name
starting with an underscore and any object (specifically, any method)
that doesn't have a docstring.
"""
require_docstrings = True
def __init__(self, app):
self.app = app
def beforeTraversal(self, request):
# Lop off leading and trailing empty names
stack = request.getTraversalStack()
while stack and not stack[-1]:
stack.pop() # toss a trailing empty name
while stack and not stack[0]:
stack.pop(0) # toss a leading empty name
request.setTraversalStack(stack)
def getApplication(self, request):
return self.app
def callTraversalHooks(self, request, ob):
pass
def traverseName(self, request, ob, name, check_auth=1):
if name.startswith('_'):
raise Unauthorized(name)
if hasattr(ob, name):
subob = getattr(ob, name)
else:
try:
subob = ob[name]
except (KeyError, IndexError,
TypeError, AttributeError):
raise NotFound(ob, name, request)
if self.require_docstrings and not getattr(subob, '__doc__', None):
raise DebugError(subob, 'Missing or empty doc string')
return subob
def getDefaultTraversal(self, request, ob):
return ob, ()
def afterTraversal(self, request, ob):
pass
def callObject(self, request, ob):
return mapply(ob, request.getPositionalArguments(), request)
def afterCall(self, request, ob):
pass
def endRequest(self, request, ob):
pass
def handleException(self, object, request, exc_info, retry_allowed=1):
# Let the response handle it as best it can.
request.response.reset()
request.response.handleException(exc_info)
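# Illustrative sketch (not part of the original module): DefaultPublication
# publishes plain Python objects, refusing names that start with an
# underscore and, by default, anything without a docstring:
#
#   >>> class App:
#   ...     """A tiny demo application."""
#   ...     def hello(self):
#   ...         """Say hello."""
#   ...         return 'Hello!'
#   >>> app = App()
#   >>> pub = DefaultPublication(app)
#   >>> pub.getApplication(None) is app
#   True
#   >>> pub.traverseName(None, app, 'hello')()
#   'Hello!'
#
# Traversing to a name starting with '_' raises Unauthorized, and an
# attribute without a docstring raises DebugError.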
class TestPublication(DefaultPublication):
def traverseName(self, request, ob, name, check_auth=1):
if hasattr(ob, name):
subob = getattr(ob, name)
else:
try:
subob = ob[name]
except (KeyError, IndexError,
TypeError, AttributeError):
raise NotFound(ob, name, request)
return subob
| zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/base.py | base.py
__docformat__ = 'restructuredtext'
import zope.component
import zope.event
import zope.interface
import zope.interface.interfaces
from zope.publisher import interfaces
@zope.interface.implementer(interfaces.ISkinChangedEvent)
class SkinChangedEvent:
"""Skin changed event."""
def __init__(self, request):
self.request = request
def getDefaultSkin(request):
"""Returns the IDefaultSkin layer for IBrowserRequest."""
return interfaces.browser.IDefaultBrowserLayer
def setDefaultSkin(request):
"""Sets the default skin for a given request."""
adapters = zope.component.getSiteManager().adapters
skin = adapters.lookup((zope.interface.providedBy(request),),
interfaces.IDefaultSkin, '')
if skin is None:
# Find a named ``default`` adapter providing IDefaultSkin as fallback.
skin = adapters.lookup((zope.interface.providedBy(request),),
interfaces.IDefaultSkin, 'default')
if skin is None:
# Let's be nice and continue to work for IBrowserRequest's
# without relying on adapter registrations.
if interfaces.browser.IBrowserRequest.providedBy(request):
skin = getDefaultSkin
if skin is not None:
if not zope.interface.interfaces.IInterface.providedBy(skin):
# The default fallback skin is registered as a named adapter.
skin = skin(request)
else:
# The defaultSkin directive registers skins as interfaces and not
# as adapters. We will not try to adapt the request to an
# interface to produce an interface.
pass
if interfaces.ISkinType.providedBy(skin):
# Apply the skin; anything not providing ISkinType is rejected below.
zope.interface.directlyProvides(request, skin)
else:
raise TypeError("Skin interface %r doesn't provide ISkinType" %
skin)
def applySkin(request, skin):
"""Change the presentation skin for this request."""
# Remove all existing skin type declarations (commonly the default skin)
# based on the given skin type.
ifaces = [iface for iface in zope.interface.directlyProvidedBy(request)
if not interfaces.ISkinType.providedBy(iface)]
# Add the new skin.
ifaces.append(skin)
zope.interface.directlyProvides(request, *ifaces)
zope.event.notify(SkinChangedEvent(request))
| zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/skinnable.py | skinnable.py
import sys
from zope.interface import implementer
from zope.proxy import removeAllProxies
from zope import component
from zope.publisher.interfaces import IReRaiseException
from zope.publisher.interfaces import Retry
_marker = object() # Create a new marker object.
def unwrapMethod(obj):
"""obj -> (unwrapped, wrapperCount)
Unwrap 'obj' until we get to a real function, counting the number of
unwrappings.
Bail if we find a class or something we can't identify as callable.
"""
wrapperCount = 0
unwrapped = obj
for i in range(10):
bases = getattr(unwrapped, '__bases__', None)
if bases is not None:
raise TypeError("mapply() can not call class constructors")
im_func = getattr(unwrapped, '__func__', None)
if im_func is not None:
unwrapped = im_func
wrapperCount += 1
elif getattr(unwrapped, '__code__', None) is not None:
break
elif getattr(unwrapped, 'func_code', None) is not None:
break
else:
unwrapped = getattr(unwrapped, '__call__', None)
if unwrapped is None:
raise TypeError("mapply() can not call %s" % repr(obj))
else:
raise TypeError("couldn't find callable metadata, mapply() error on %s"
% repr(obj))
return unwrapped, wrapperCount
def mapply(obj, positional=(), request={}):
__traceback_info__ = obj
# we need deep access for introspection. Waaa.
unwrapped = removeAllProxies(obj)
unwrapped, wrapperCount = unwrapMethod(unwrapped)
code = getattr(unwrapped, '__code__', None)
if code is None:
code = unwrapped.func_code
defaults = getattr(unwrapped, '__defaults__', None)
if defaults is None:
defaults = getattr(unwrapped, 'func_defaults', None)
names = code.co_varnames[wrapperCount:code.co_argcount]
nargs = len(names)
if not positional:
args = []
else:
args = list(positional)
if len(args) > nargs:
given = len(args)
if wrapperCount:
given += wrapperCount
raise TypeError('%s() takes at most %d argument%s(%d given)' % (
getattr(unwrapped, '__name__', repr(obj)),
code.co_argcount,
(code.co_argcount > 1 and 's ' or ' '),
given))
get = request.get
nrequired = len(names)
if defaults:
nrequired -= len(defaults)
for index in range(len(args), nargs):
name = names[index]
v = get(name, _marker)
if v is _marker:
if name == 'REQUEST':
v = request
elif index < nrequired:
raise TypeError('Missing argument to {}(): {}'.format(
getattr(unwrapped, '__name__', repr(obj)), name))
else:
v = defaults[index - nrequired]
args.append(v)
args = tuple(args)
if __debug__:
return debug_call(obj, args)
return obj(*args)
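# Illustrative sketch (not part of the original module): mapply() matches the
# callable's argument names against the request mapping and falls back to the
# function's defaults:
#
#   >>> def greet(name, greeting='Hello'):
#   ...     """Build a greeting."""
#   ...     return '%s, %s!' % (greeting, name)
#   >>> mapply(greet, (), {'name': 'bob'})
#   'Hello, bob!'
#   >>> mapply(greet, ('alice',), {'greeting': 'Hi'})
#   'Hi, alice!'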
def debug_call(obj, args):
# The presence of this function allows us to set a pdb breakpoint
return obj(*args)
def publish(request, handle_errors=True):
try: # finally to clean up to_raise and close request
to_raise = None
while True:
publication = request.publication
try:
try:
obj = None
try:
try:
request.processInputs()
publication.beforeTraversal(request)
obj = publication.getApplication(request)
obj = request.traverse(obj)
publication.afterTraversal(request, obj)
result = publication.callObject(request, obj)
response = request.response
if result is not response:
response.setResult(result)
publication.afterCall(request, obj)
except: # noqa: E722 do not use bare 'except'
exc_info = sys.exc_info()
publication.handleException(
obj, request, exc_info, True)
if not handle_errors:
# Reraise only if there is no adapter
# indicating that we shouldn't
reraise = component.queryAdapter(
exc_info[1], IReRaiseException,
default=None)
if reraise is None or reraise():
raise
finally:
exc_info = None # Avoid circular reference.
publication.endRequest(request, obj)
break # Successful.
except Retry as retryException:
if request.supportsRetry():
# Create a copy of the request and use it.
newrequest = request.retry()
request.close()
request = newrequest
elif handle_errors:
# Output the original exception.
publication = request.publication
publication.handleException(
obj, request,
retryException.getOriginalException(), False)
break
else:
to_raise = retryException.getOriginalException()
if to_raise is None:
# There is no original exception inside
# the Retry, so just reraise it.
raise
break
except: # noqa: E722 do not use bare 'except'
# Bad exception handler or retry method.
# Re-raise after outputting the response.
if handle_errors:
request.response.internalError()
to_raise = sys.exc_info()
break
else:
raise
response = request.response
if to_raise is not None:
raise to_raise[1].with_traceback(to_raise[2])
finally:
to_raise = None # Avoid circ. ref.
request.close() # Close database connections, etc.
# Return the request, since it might be a different object than the one
# that was passed in.
return request
@implementer(IReRaiseException)
class DoNotReRaiseException:
"""Marker adapter for exceptions that should not be re-raised"""
def __init__(self, exc):
pass
def __call__(self):
return False
| zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/publish.py | publish.py
"""HTTP-related publisher interfaces.
"""
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.common.interfaces import IException
from zope.publisher.interfaces import IApplicationRequest
from zope.publisher.interfaces import IPublishTraverse
from zope.publisher.interfaces import IRequest
from zope.publisher.interfaces import IResponse
from zope.publisher.interfaces import IView
class IVirtualHostRequest(Interface):
"""The support for virtual hosts in Zope is very important.
In order to make virtual hosts work, we need to support several
methods in our Request object. This interface defines the required
methods.
"""
def setVirtualHostRoot(names):
"""Marks the currently traversed object as the root of a virtual host.
Any path elements traversed up to that point become the names which
compose the application path.
These are the path elements that appear in the beginning of
the generated URLs.
Should be called during traversal.
"""
def getVirtualHostRoot():
"""Returns the object which is the virtual host root for this request
Return None if setVirtualHostRoot hasn't been called.
"""
def setApplicationServer(host, proto='http', port=None):
"""Override the host, protocol and port parts of generated URLs.
This affects automatically inserted <base> tags and URL getters
in the request, but not things like @@absolute_url views.
"""
def shiftNameToApplication():
"""Add the name being traversed to the application name
This is only allowed in the case where the name is the first name.
A ValueError is raised if the shift can't be performed.
"""
class IHTTPApplicationRequest(IApplicationRequest, IVirtualHostRequest):
"""HTTP request data.
This object provides access to request data. This includes, the
input headers, server data, and cookies.
Request objects are created by the object publisher and will be
passed to published objects through the argument name, REQUEST.
The request object is a mapping object that represents a
collection of variable to value mappings. In addition, variables
are divided into four categories:
- Environment variables
These variables include input headers, server data, and other
request-related data. The variable names are as <a
href="http://hoohoo.ncsa.uiuc.edu/cgi/env.html">specified</a>
in the <a
href="http://hoohoo.ncsa.uiuc.edu/cgi/interface.html">CGI
specification</a>
- Cookies
These are the cookie data, if present.
- Other
Data that may be set by an application object.
The request object may be used as a mapping object, in which case
values will be looked up in the order: environment variables,
other variables, cookies, and special.
"""
def __getitem__(key):
"""Return HTTP request data
Request data are retrieved from one of:
- Environment variables
These variables include input headers, server data, and other
request-related data. The variable names are as <a
href="http://hoohoo.ncsa.uiuc.edu/cgi/env.html">specified</a>
in the <a
href="http://hoohoo.ncsa.uiuc.edu/cgi/interface.html">CGI
specification</a>
- Cookies
These are the cookie data, if present.
Cookies are searched before environmental data.
"""
def getCookies():
"""Return the cookie data
Data are returned as a mapping object, mapping cookie name to value.
"""
cookies = Attribute(
"""Request cookie data
This is a read-only mapping from variable name to value.
""")
def getHeader(name, default=None, literal=False):
"""Get a header value
Return the named HTTP header, or an optional default
argument or None if the header is not found. Note that
both original and CGI-ified header names are recognized,
e.g. 'Content-Type', 'CONTENT_TYPE' and 'HTTP_CONTENT_TYPE'
should all return the Content-Type header, if available.
If the literal argument is passed, the header is searched
'as is', eg: only if the case matches.
"""
headers = Attribute(
"""Request header data
This is a read-only mapping from variable name to value.
It does *not* support iteration.
""")
URL = Attribute(
"""Request URL data
When converted to a string, this gives the effective published URL.
This object can also be used as a mapping object. The key must
be an integer or a string that can be converted to an
integer. A non-negative integer returns a URL n steps from the
URL of the top-level application objects. A negative integer
gives a URL that is -n steps back from the effective URL.
For example, 'request.URL[-2]' is equivalent to the Zope 2
'request["URL2"]'. The notion is that this would be used in
path expressions, like 'request/URL/-2'.
""")
def getURL(level=0, path_only=False):
"""Return the published URL with level names removed from the end.
If path_only is true, then only a path will be returned.
"""
def getApplicationURL(depth=0, path_only=False):
"""Return the application URL plus depth steps
If path_only is true, then only a path will be returned.
"""
class IHTTPPublisher(IPublishTraverse):
"""HTTP Publisher"""
class IHTTPRequest(IRequest):
method = Attribute("Request method, normalized to upper case")
def setPathSuffix(steps):
"""Add additional traversal steps to be taken after all other traversal
This is used to handle HTTP request methods (except for GET
and POST in the case of browser requests) and XML-RPC methods.
"""
locale = Attribute(
"Return the locale object associated with this request.")
def setupLocale():
"""Setup the locale object based on languages returned by
IUserPreferredLanguages adapter.
"""
class IHTTPView(IView):
"HTTP View"
class IHTTPCredentials(Interface):
# TODO: Eventually this will be a different method
def _authUserPW():
"""Return (login, password) if there are basic credentials;
return None if there aren't."""
def unauthorized(challenge):
"""Issue a 401 Unauthorized error (asking for login/password).
The challenge is the value of the WWW-Authenticate header."""
class IHTTPApplicationResponse(Interface):
"""HTTP Response"""
def redirect(location, status=302, trusted=False):
"""Causes a redirection without raising an error.
By default redirects are untrusted which restricts target URLs to the
same host that the request was sent to.
If the `trusted` flag is set, redirects are allowed for any target
URL.
"""
class IHeaderOutput(Interface):
"""Interface for setting HTTP response headers.
This allows the HTTP server and the application to both set response
headers.
zope.publisher.http.HTTPResponse is optionally passed an
object which implements this interface in order to intermingle
its headers with the HTTP server's response headers,
and for the purpose of better logging.
"""
def setResponseStatus(status, reason):
"""Sets the status code and the accompanying message.
"""
def setResponseHeaders(mapping):
"""Sets headers. The headers must be Correctly-Cased.
"""
def appendResponseHeaders(lst):
"""Sets headers that can potentially repeat.
Takes a list of strings.
"""
def wroteResponseHeader():
"""Returns a flag indicating whether the response
header has already been sent.
"""
def setAuthUserName(name):
"""Sets the name of the authenticated user so the name can be logged.
"""
class IResult(Interface):
"""An iterable that provides the body data of the response.
For simplicity, an adapter to this interface may in fact return any
iterable, without needing to strictly have the iterable provide
IResult.
IMPORTANT: The result object may be held indefinitely by a server
and may be accessed by arbitrary threads. For that reason the result
should not hold on to any application resources (i.e., should not
have a connection to the database) and should be prepared to be
invoked from any thread.
This iterable should generally be appropriate for WSGI iteration.
Each element of the iteration should generally be much larger than a
character or line; concrete advice on chunk size is hard to come by,
but a single chunk of even 100 or 200 K is probably fine.
    If the IResult is a string, the default per-character iteration
    is wildly too small. Because this is such a common
case, if a string is used as an IResult then this is special-cased
to simply convert to a tuple of one value, the string.
Adaptation to this interface provides the opportunity for efficient file
delivery, pipelining hooks, and more.
"""
def __iter__():
"""iterate over the values that should be returned as the result.
See IHTTPResponse.setResult.
"""
class IHTTPResponse(IResponse):
"""An object representation of an HTTP response.
The Response type encapsulates all possible responses to HTTP
requests. Responses are normally created by the object publisher.
    A published object may receive the response object as an argument
named 'RESPONSE'. A published object may also create its own
response object. Normally, published objects use response objects
to:
- Provide specific control over output headers,
- Set cookies, or
- Provide stream-oriented output.
If stream oriented output is used, then the response object
passed into the object must be used.
"""
authUser = Attribute('The authenticated user message.')
def getStatus():
"""Returns the current HTTP status code as an integer.
"""
def setStatus(status, reason=None):
"""Sets the HTTP status code of the response
The status parameter must be either an integer (preferred), a value
that can be converted to an integer using the int() function,
or one of the standard status messages listed in the status_codes
dict of the zope.publisher.http module (including "OK", "NotFound",
and so on). If the parameter is some other value, the status will
be set to 500.
The reason parameter is a short message to be sent with the status
code to the client. If reason is not provided, a standard
reason will be supplied, falling back to "Unknown" for unregistered
status codes.
"""
def getStatusString():
"""Return the status followed by the reason."""
def setHeader(name, value, literal=False):
"""Sets an HTTP return header "name" with value "value"
The previous value is cleared. If the literal flag is true,
the case of the header name is preserved, otherwise
word-capitalization will be performed on the header name on
output.
"""
def addHeader(name, value):
"""Add an HTTP Header
Sets a new HTTP return header with the given value, while retaining
any previously set headers with the same name.
"""
def getHeader(name, default=None):
"""Gets a header value
Returns the value associated with a HTTP return header, or
'default' if no such header has been set in the response
yet.
"""
def getHeaders():
"""Returns a list of header name, value tuples.
"""
def appendToCookie(name, value):
"""Append text to a cookie value
If a value for the cookie has previously been set, the new
value is appended to the old one separated by a colon.
"""
def expireCookie(name, **kw):
"""Causes an HTTP cookie to be removed from the browser
The response will include an HTTP header that will remove the cookie
corresponding to "name" on the client, if one exists. This is
accomplished by sending a new cookie with an expiration date
that has already passed. Note that some clients require a path
to be specified - this path must exactly match the path given
when creating the cookie. The path can be specified as a keyword
argument.
If the value of a keyword argument is None, it will be ignored.
"""
def setCookie(name, value, **kw):
"""Sets an HTTP cookie on the browser
The response will include an HTTP header that sets a cookie on
cookie-enabled browsers with a key "name" and value
"value". This overwrites any previously set value for the
cookie in the Response object.
If the value of a keyword argument is None, it will be ignored.
"""
def getCookie(name, default=None):
"""Gets HTTP cookie data as a dict
Returns the dict of values associated with an HTTP cookie set in the
response, or 'default' if no such cookie has been set in the response
yet.
"""
def setResult(result):
"""Sets response result value based on input.
Input is usually a unicode string, a string, None, or an object
that can be adapted to IResult with the request. The end result
is an iterable such as WSGI prefers, determined by following the
process described below.
Try to adapt the given input, with the request, to IResult
(found above in this file). If this fails, and the original
        value was a string, use the string as the result; or if it was
None, use an empty string as the result; and if it was anything
else, raise a TypeError.
If the result of the above (the adaptation or the default
handling of string and None) is unicode, encode it (to the
preferred encoding found by adapting the request to
zope.i18n.interfaces.IUserPreferredCharsets, usually implemented
by looking at the HTTP Accept-Charset header in the request, and
defaulting to utf-8) and set the proper encoding information on
the Content-Type header, if present. Otherwise (the end result
        was not unicode) the application is responsible for setting the
        Content-Type header encoding value as necessary.
If the result of the above is a string, set the Content-Length
header, and make the string be the single member of an iterable
such as a tuple (to send large chunks over the wire; see
discussion in the IResult interface). Otherwise (the end result
        was not a string) the application is responsible for setting the
        Content-Length header as necessary.
Set the result of all of the above as the response's result. If
the status has not been set, set it to 200 (OK). """
def consumeBody():
"""Returns the response body as a string.
Note that this function can be only requested once, since it is
constructed from the result.
"""
def consumeBodyIter():
"""Returns the response body as an iterable.
Note that this function can be only requested once, since it is
constructed from the result.
"""
class IHTTPVirtualHostChangedEvent(Interface):
"""The host, port and/or the application path have changed.
The request referred to in this event implements at least the
    IHTTPApplicationRequest interface.
"""
request = Attribute("The application request whose virtual host info has "
"been altered")
class IHTTPException(Interface):
"""Marker interface for http exceptions views
"""
pass
class IMethodNotAllowed(IException):
"""An exception that signals the 405 Method Not Allowed HTTP error"""
object = Attribute("""The object on which the error occurred""")
request = Attribute("""The request in which the error occurred""")
@implementer(IMethodNotAllowed)
class MethodNotAllowed(Exception):
"""An exception that signals the 405 Method Not Allowed HTTP error"""
def __init__(self, object, request):
self.object = object
self.request = request
def __str__(self):
return "{!r}, {!r}".format(self.object, self.request) | zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/interfaces/http.py | http.py |
"""Browser Interfaces
"""
from zope.browser.interfaces import IBrowserView # BBB import
from zope.interface import Attribute
from zope.interface import alsoProvides
# BBB moved to zope.publisher.interfaces since not only browser request
# can use the skin pattern
from zope.publisher.interfaces import IDefaultSkin # noqa: F401 import unused
from zope.publisher.interfaces import IPublication
from zope.publisher.interfaces import IPublishTraverse
from zope.publisher.interfaces import ISkinChangedEvent # noqa: F401
from zope.publisher.interfaces import ISkinnable
from zope.publisher.interfaces import ISkinType
from zope.publisher.interfaces.http import IHTTPApplicationRequest
from zope.publisher.interfaces.http import IHTTPRequest
class IBrowserApplicationRequest(IHTTPApplicationRequest):
"""Browser-specific requests
"""
def __getitem__(key):
"""Return Browser request data
Request data are retrieved from one of:
- Environment variables
These variables include input headers, server data, and other
request-related data. The variable names are as
specified
in the `CGI specification <https://tools.ietf.org/html/rfc3875>`_.
- Cookies
These are the cookie data, if present.
- Form data
Form data are searched before cookies, which are searched
before environmental data.
"""
form = Attribute(
"""Form data
This is a read-only mapping from name to form value for the name.
""")
class IBrowserPublication(IPublication):
"""Object publication framework.
"""
def getDefaultTraversal(request, ob):
"""Get the default published object for the request
Allows a default view to be added to traversal.
Returns (ob, steps_reversed).
"""
class IBrowserRequest(IHTTPRequest, ISkinnable):
"""Browser-specific Request functionality.
Note that the browser is special in many ways, since it exposes
the Request object to the end-developer.
"""
class IBrowserPublisher(IPublishTraverse):
"""
A type of `.IPublishTraverse` that also supports default objects.
"""
def browserDefault(request):
"""Provide the default object
The default object is expressed as a (possibly different)
object and/or additional traversal steps.
Returns an object and a sequence of names. If the sequence of
names is not empty, then a traversal step is made for each name.
After the publisher gets to the end of the sequence, it will
call ``browserDefault`` on the last traversed object.
Normal usage is to return self for object and a default view name.
The publisher calls this method at the end of each traversal path. If
a non-empty sequence of names is returned, the publisher will traverse
those names and call browserDefault again at the end.
Note that if additional traversal steps are indicated (via a
nonempty sequence of names), then the publisher will try to adjust
the base href.
"""
class IBrowserPage(IBrowserView, IBrowserPublisher):
"""Browser page"""
def __call__(*args, **kw):
"""Compute a response body"""
class IBrowserSkinType(ISkinType):
"""A skin is a set of layers."""
class IDefaultBrowserLayer(IBrowserRequest):
"""The default layer."""
alsoProvides(IDefaultBrowserLayer, IBrowserSkinType) | zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/interfaces/browser.py | browser.py |
"""Interfaces for the publisher.
"""
# BBB
from zope.browser.interfaces import IView # noqa: F401 import unused
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.common.interfaces import IException
from zope.interface.common.interfaces import ILookupError
from zope.interface.common.mapping import IEnumerableMapping
from zope.interface.interfaces import IInterface
# BBB:
from zope.security.interfaces import IParticipation
from zope.security.interfaces import Unauthorized # noqa: F401 import unused
class IPublishingException(IException):
"""
An exception that occurs during publication.
"""
@implementer(IPublishingException)
class PublishingException(Exception):
"""
Default implementation of `IPublishingException`.
"""
class ITraversalException(IPublishingException):
"""
An exception that occurs during publication traversal.
"""
@implementer(ITraversalException)
class TraversalException(PublishingException):
"""
Default implementation of `ITraversalException`.
"""
class INotFound(ILookupError, ITraversalException):
"""
The object we want to traverse to cannot be found.
"""
def getObject():
'Returns the object that was being traversed.'
def getName():
'Returns the name that was being traversed.'
@implementer(INotFound)
class NotFound(LookupError, TraversalException):
"""
Default implementation of `INotFound`.
"""
def __init__(self, ob, name, request=None):
self.ob = ob
self.name = name
def getObject(self):
return self.ob
def getName(self):
return self.name
def __str__(self):
try:
ob = repr(self.ob)
except: # noqa: E722 do not use bare 'except'
ob = 'unprintable object'
return 'Object: {}, name: {!r}'.format(ob, self.name)
class IDebugError(ITraversalException):
"""
A debug error.
"""
def getObject():
'Returns the object being traversed.'
def getMessage():
'Returns the debug message.'
@implementer(IDebugError)
class DebugError(TraversalException):
"""
Default implementation of `IDebugError`.
"""
message = None # override this not to cause warnings in python 2.6
def __init__(self, ob, message):
self.ob = ob
self.message = message
def getObject(self):
return self.ob
def getMessage(self):
return self.message
def __str__(self):
return self.message
class IBadRequest(IPublishingException):
"""
The request is bad.
"""
def __str__():
'Returns the error message.'
@implementer(IBadRequest)
class BadRequest(PublishingException):
"""
Default implementation of `IBadRequest`.
"""
message = None # override this not to cause warnings in python 2.6
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class IRedirect(IPublishingException):
"""
An exception that redirects the client.
"""
def getLocation():
'Returns the location.'
def getTrusted():
'Returns the trusted value.'
@implementer(IRedirect)
class Redirect(PublishingException):
"""
Default implementation of `IRedirect`.
"""
def __init__(self, location, trusted=False):
self.location = location
self.trusted = trusted
def getLocation(self):
return self.location
def getTrusted(self):
return self.trusted
def __str__(self):
return 'Location: %s' % self.location
class IRetry(IPublishingException):
"""
An exception that indicates a request should be retried.
"""
def getOriginalException():
'Returns the original exception object.'
@implementer(IRetry)
class Retry(PublishingException):
"""
Default implementation of `IRetry`.
"""
def __init__(self, orig_exc=None):
"""orig_exc must be a 3-tuple as returned from sys.exc_info() ...
or None.
"""
self.orig_exc = orig_exc
def getOriginalException(self):
return self.orig_exc
def __str__(self):
if self.orig_exc is None:
return 'None'
return str(self.orig_exc[1])
class IExceptionSideEffects(Interface):
"""An exception caught by the publisher is adapted to this so that
it can have persistent side-effects."""
def __call__(obj, request, exc_info):
"""Effect persistent side-effects.
Arguments are:
obj context-wrapped object that was published
request the request
exc_info the exception info being handled
"""
class IPublishTraverse(Interface):
"""
Traversal for the specific purpose of publishing.
"""
def publishTraverse(request, name):
"""Lookup a name
The 'request' argument is the publisher request object. The
'name' argument is the name that is to be looked up; it must
be an ASCII string or Unicode object.
If a lookup is not possible, raise a NotFound error.
This method should return an object having the specified name and
`self` as parent. The method can use the request to determine the
correct object.
"""
class IPublisher(Interface):
"""
An object that can publish.
"""
def publish(request):
"""Publish a request
The request must be an `IPublisherRequest`.
"""
class IResponse(Interface):
"""Interface used by the publsher"""
def setResult(result):
"""Sets the response result value.
"""
def handleException(exc_info):
"""Handles an exception.
This method is intended only as a convenience for the publication
object. The publication object can choose to handle exceptions by
calling this method. The publication object can also choose not
to call this method at all.
        Implementations set the response body.
"""
def internalError():
"""Called when the exception handler bombs.
Should report back to the client that an internal error occurred.
"""
def reset():
"""Reset the output result.
Reset the response by nullifying already set variables.
"""
def retry():
"""Returns a retry response
Returns a response suitable for repeating the publication attempt.
"""
class IPublication(Interface):
"""Object publication framework.
The responsibility of publication objects is to provide
application hooks for the publishing process. This allows
application-specific tasks, such as connecting to databases,
managing transactions, and setting security contexts to be invoked
during the publishing process.
"""
# The order of the hooks mostly corresponds with the order in which
# they are invoked.
def beforeTraversal(request):
"""Pre-traversal hook.
This is called *once* before any traversal has been done.
"""
def getApplication(request):
"""Returns the object where traversal should commence.
"""
def callTraversalHooks(request, ob):
"""Invokes any traversal hooks associated with the object.
This is called before traversing each object. The ob argument
is the object that is about to be traversed.
"""
def traverseName(request, ob, name):
"""Traverses to the next object.
Name must be an ASCII string or Unicode object."""
def afterTraversal(request, ob):
"""Post-traversal hook.
This is called after all traversal.
"""
def callObject(request, ob):
"""Call the object, returning the result.
For GET/POST this means calling it, but for other methods
(including those of WebDAV and FTP) this might mean invoking
a method of an adapter.
"""
def afterCall(request, ob):
"""Post-callObject hook (if it was successful).
"""
def handleException(object, request, exc_info, retry_allowed=1):
"""Handle an exception
Either:
- sets the body of the response, request.response, or
- raises a Retry exception, or
- throws another exception, which is a Bad Thing.
"""
def endRequest(request, ob):
"""Do any end-of-request cleanup
"""
class IPublicationRequest(IParticipation):
"""Interface provided by requests to `IPublication` objects
"""
response = Attribute("""The request's response object
Return an IPublisherResponse for the request.
""")
def close():
"""Release resources held by the request.
"""
def hold(held):
"""Hold a reference to an object until the request is closed.
The object should be an IHeld. If it is an IHeld, its
release method will be called when it is released.
"""
def getTraversalStack():
"""Return the request traversal stack
This is a sequence of steps to traverse in reverse order. They
will be traversed from last to first.
"""
def setTraversalStack(stack):
"""Change the traversal stack.
See getTraversalStack.
"""
def getPositionalArguments():
"""Return the positional arguments given to the request.
"""
def setPrincipal(principal):
"""Set the principal attribute.
It should be IPrincipal wrapped in its AuthenticationService's context.
"""
class IHeld(Interface):
"""Object to be held and explicitly released by a request
"""
def release():
"""Release the held object
This is called by a request that holds the IHeld when the
request is closed
"""
class IPublisherRequest(IPublicationRequest):
"""Request interface use by the publisher
The responsibility of requests is to encapsulate protocol
    specific details, especially with respect to request inputs.
Request objects also serve as "context" objects, providing
construction of and access to responses and storage of publication
objects.
"""
def supportsRetry():
"""Check whether the request supports retry
Return a boolean value indicating whether the request can be retried.
"""
def retry():
"""Return a retry request
Return a request suitable for repeating the publication attempt.
"""
publication = Attribute("""The request's publication object
The publication object, an IRequestPublication provides
application-specific functionality hooks.
""")
def setPublication(publication):
"""Set the request's publication object
"""
def traverse(obj):
"""Traverse from the given object to the published object
The published object is returned.
The following hook methods on the publication will be called:
- callTraversalHooks is called before each step and after
the last step.
- traverseName to actually do a single traversal
"""
def processInputs():
"""Do any input processing that needs to be done before traversing
This is done after construction to allow the publisher to
handle errors that arise.
"""
class IDebugFlags(Interface):
"""Features that support debugging."""
sourceAnnotations = Attribute("""Enable ZPT source annotations""")
showTAL = Attribute("""Leave TAL markup in rendered page templates""")
class IApplicationRequest(IEnumerableMapping):
"""Features that support application logic
"""
principal = Attribute("""Principal object associated with the request
This is a read-only attribute.
""")
bodyStream = Attribute(
"""The stream that provides the data of the request.
The data returned by the stream will not include any possible header
information, which should have been stripped by the server (or
previous layer) before.
Also, the body stream might already be read and not return any
data. This is commonly done when retrieving the data for the ``body``
attribute.
If you access this stream directly to retrieve data, it will not be
possible by other parts of the framework to access the data of the
request via the ``body`` attribute.""")
debug = Attribute("""Debug flags (see IDebugFlags).""")
def __getitem__(key):
"""Return request data
The only request data are environment variables.
"""
environment = Attribute(
"""Request environment data
This is a read-only mapping from variable name to value.
""")
annotations = Attribute(
"""Stores arbitrary application data under package-unique keys.
By "package-unique keys", we mean keys that are are unique by
virtue of including the dotted name of a package as a prefex. A
package name is used to limit the authority for picking names for
a package to the people using that package.
For example, when implementing annotations for hypothetical
request-persistent adapters in a hypothetical zope.persistentadapter
package, the key would be (or at least begin with) the following::
"zope.persistentadapter"
""")
class IRequest(IPublisherRequest, IPublicationRequest, IApplicationRequest):
"""The basic request contract
"""
class IRequestEvent(Interface):
"""An event which is about or for a request."""
request = Attribute("The request this event is about.")
class IEndRequestEvent(IRequestEvent):
"""An event which gets sent when the publication is ended."""
class IStartRequestEvent(IRequestEvent):
"""An event which gets sent before publication of a request."""
class RequestEvent:
"""Events for requests.
:param request: The request the event is for.
"""
def __init__(self, request):
self.request = request
@implementer(IEndRequestEvent)
class EndRequestEvent(RequestEvent):
"""An event which gets sent when the publication is ended"""
def __init__(self, ob, request):
super().__init__(request)
self.object = ob
@implementer(IStartRequestEvent)
class StartRequestEvent(RequestEvent):
"""An event send when before publication of a request."""
class ISkinType(IInterface):
"""Base interface for skin types."""
class ISkinnable(Interface):
"""A skinnable (request) can provide a skin.
    The implementation in BrowserRequest will apply a default skin/layer called
    ``IDefaultBrowserLayer`` if no default skin is registered.
"""
class IDefaultSkin(Interface):
"""Any component providing this interface must be a skin.
This is a marker interface, so that we can register the default skin as an
adapter from the presentation type to `IDefaultSkin`.
"""
class ISkinChangedEvent(IRequestEvent):
"""Event that gets triggered when the skin of a request is changed."""
class IDefaultViewName(Interface):
"""A string that contains the default view name
A default view name is used to select a view when a user hasn't
specified one.
"""
class IReRaiseException(Interface):
"""An exception that should be reraised, when handled in publisher.
Under some circumstances (for instance if acting in a WSGI
    pipeline with debugger middleware) certain exceptions occurring
while publishing should be handled by the Zope machinery and never
reach the 'outside world'.
    An adapter that provides this interface for a certain exception type
    and returns ``False`` when called indicates that the exception should
    not be reraised during publishing.
This makes it possible, for instance, to authenticate with
basic-auth when a debugger middleware is used and `IUnauthorized`
is raised.
"""
def __call__():
"""Return True if an exception should be re-raised""" | zope.publisher | /zope.publisher-7.0-py3-none-any.whl/zope/publisher/interfaces/__init__.py | __init__.py |
This package provides a few simple scripts to administer the Python Package
Index (PyPI).
Adding and Removing Roles
-------------------------
The first two scripts allow you to grant or revoke the owner role to/from a user
for a list of packages. Here is the syntax::
# addrole --user=USER --pwd=PASSWORD TARGETUSER PACKAGE1, PACKAGE2, ...
# delrole --user=USER --pwd=PASSWORD TARGETUSER PACKAGE1, PACKAGE2, ...
Optionally, you can also apply the role changes to all packages of the calling
user::
# addrole --user=USER --pwd=PASSWORD -a TARGETUSER
# delrole --user=USER --pwd=PASSWORD -a TARGETUSER
| zope.pypisupport | /zope.pypisupport-0.1.1.tar.gz/zope.pypisupport-0.1.1/README.txt | README.txt |
"""PyPI Role Management
"""
import sys
import optparse
import base64
import lxml.etree
import mechanize
import zope.testbrowser.browser
import urllib2
BASE_URL = 'http://pypi.python.org/pypi/'
ALL_PACKAGES_XPATH = ("//html:div[@id='document-navigation']/"
"html:ul/html:li[position()=1]/html:a/text()")
PACKAGE_DISTS_XPATH = "//html:table[@class='list']/html:tr/html:td/html:a/@href"
NS_MAP = {'html': 'http://www.w3.org/1999/xhtml'}
def getPackages(options, browser):
if not options.allPackages:
return options.packages
browser.open(BASE_URL)
tree = lxml.etree.fromstring(browser.contents)
return tree.xpath(ALL_PACKAGES_XPATH, NS_MAP)
def changeRole(browser, user, action):
# Try to get to the admin link and click it.
try:
browser.getLink('admin', index=0).click()
except mechanize._mechanize.LinkNotFoundError:
print ' +-> Error/Warning: admin link not found'
return
except urllib2.HTTPError, err:
        print ' +-> Error/Warning: You are not an owner of this package.'
return
# Fill in the user whose roles are modified.
browser.getControl(name='user_name').value = user
# Execute the action
try:
browser.getControl(action + ' Role').click()
except Exception, err:
msg = err.read().strip().split('\n')[-1]
print ' +-> Error/Warning: ' + msg
def manipulateRole(options):
# Create a browser instance.
browser = zope.testbrowser.browser.Browser()
# Log in as the specified user.
creds = base64.b64encode('%s:%s' %(options.username, options.password))
browser.addHeader('Authorization', 'Basic ' + creds)
# Execute the action for each specified package.
for package in getPackages(options, browser):
url = BASE_URL + package
print '%s %s as Owner to: %s' %(options.action, options.targetUser, url)
try:
browser.open(url)
except Exception, err:
print ' +-> Error/Warning: package does not exist'
continue
# Some packages list all of their versions
if 'Index of Packages' in browser.contents:
tree = lxml.etree.fromstring(browser.contents)
for href in tree.xpath(PACKAGE_DISTS_XPATH, NS_MAP):
browser.open(href)
changeRole(browser, options.targetUser, options.action)
else:
changeRole(browser, options.targetUser, options.action)
###############################################################################
# Command-line UI
parser = optparse.OptionParser("%prog [options] USERNAME [PACKAGE, ...]")
config = optparse.OptionGroup(
parser, "Configuration", "Options that deal with configuring the browser.")
config.add_option(
'--username', '--user', action="store", dest='username',
help="""Username to access the PyPI Web site.""")
config.add_option(
'--password', '--pwd', action="store", dest='password',
help="""Password to access the PyPI Web site.""")
config.add_option(
'--all', '-a', action="store_true", dest='allPackages',
help=("When specified, all packages that the user has access "
"to are modified."))
parser.add_option_group(config)
# Default setup
default_setup_args = []
def merge_options(options, defaults):
odict = options.__dict__
for name, value in defaults.__dict__.items():
if (value is not None) and (odict[name] is None):
odict[name] = value
def get_options(args=None, defaults=None):
default_setup, _ = parser.parse_args(default_setup_args)
assert not _
if defaults:
defaults, _ = parser.parse_args(defaults)
assert not _
merge_options(defaults, default_setup)
else:
defaults = default_setup
if args is None:
args = sys.argv
original_args = args
options, positional = parser.parse_args(args)
merge_options(options, defaults)
options.original_args = original_args
if not positional or len(positional) < 1:
parser.error("No target user and/or packages specified.")
options.targetUser = positional[0]
options.packages = positional[1:]
return options
# Command-line UI
###############################################################################
def addrole(args=None):
if args is None:
args = sys.argv[1:]
options = get_options(args)
options.action = 'Add'
manipulateRole(options)
def delrole(args=None):
if args is None:
args = sys.argv[1:]
options = get_options(args)
options.action = 'Remove'
manipulateRole(options) | zope.pypisupport | /zope.pypisupport-0.1.1.tar.gz/zope.pypisupport-0.1.1/src/zope/pypisupport/role.py | role.py |
zope.pytest
***********
Introduction
============
This package contains a set of helper functions to test Zope/Grok
using `pytest`_. It currently lacks special support for doctesting.
Core functions
==============
`zope.pytest.setup.create_app`
* this function creates a WSGI app object which utilizes a temporary db.
`zope.pytest.setup.configure`
* this function parses ZCML files and initializes the component registry
Simple example::
import my.project
from zope.pytest import create_app, configure
from my.project import Root
def pytest_funcarg__app(request):
return create_app(request, Root())
def pytest_funcarg__config(request):
return configure(request, my.project, 'ftesting.zcml')
def test_hello(app, config):
assert 1 == 1
Documentation
=============
Complete documentation can be found on
http://packages.python.org/zope.pytest
.. _pytest: http://pytest.org/
| zope.pytest | /zope.pytest-0.1.tar.gz/zope.pytest-0.1/README.txt | README.txt |
import os, shutil, sys, tempfile, textwrap, urllib, urllib2, subprocess
from optparse import OptionParser
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
[sys.executable, '-Sc',
'try:\n'
' import ConfigParser\n'
'except ImportError:\n'
' print 1\n'
'else:\n'
' print 0\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded. This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient. However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
# We will restart with python -S.
args = sys.argv[:]
args[0:0] = [sys.executable, '-S']
args = map(quote, args)
os.execv(sys.executable, args)
# Now we are running with -S. We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site
sys.path[:] = clean_path
for k, v in sys.modules.items():
if k in ('setuptools', 'pkg_resources') or (
hasattr(v, '__path__') and
len(v.__path__)==1 and
not os.path.exists(os.path.join(v.__path__[0],'__init__.py'))):
# This is a namespace package. Remove it.
sys.modules.pop(k)
is_jython = sys.platform.startswith('java')
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
if value:
if '://' not in value: # It doesn't smell like a URL.
value = 'file://%s' % (
urllib.pathname2url(
os.path.abspath(os.path.expanduser(value))),)
if opt_str == '--download-base' and not value.endswith('/'):
# Download base needs a trailing slash to make the world happy.
value += '/'
else:
value = None
name = opt_str[2:].replace('-', '_')
setattr(parser.values, name, value)
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
action="store_true", dest="use_distribute", default=False,
help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or file location for the setup file. "
"If you use Setuptools, this will default to " +
setuptools_source + "; if you use Distribute, this "
"will default to " + distribute_source +"."))
parser.add_option("--download-base", action="callback", dest="download_base",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or directory for downloading "
"zc.buildout and either Setuptools or Distribute. "
"Defaults to PyPI."))
parser.add_option("--eggs",
help=("Specify a directory for storing eggs. Defaults to "
"a temporary directory that is deleted when the "
"bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args += ['-c', options.config_file]
if options.eggs:
eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
eggs_dir = tempfile.mkdtemp()
if options.setup_source is None:
if options.use_distribute:
options.setup_source = distribute_source
else:
options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
args.append('buildout:accept-buildout-test-releases=true')
args.append('bootstrap')
try:
import pkg_resources
import setuptools # A flag. Sometimes pkg_resources is installed alone.
if not hasattr(pkg_resources, '_distribute'):
raise ImportError
except ImportError:
ez_code = urllib2.urlopen(
options.setup_source).read().replace('\r\n', '\n')
ez = {}
exec ez_code in ez
setup_args = dict(to_dir=eggs_dir, download_delay=0)
if options.download_base:
setup_args['download_base'] = options.download_base
if options.use_distribute:
setup_args['no_fake'] = True
ez['use_setuptools'](**setup_args)
if 'pkg_resources' in sys.modules:
reload(sys.modules['pkg_resources'])
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
cmd = [quote(sys.executable),
'-c',
quote('from setuptools.command.easy_install import main; main()'),
'-mqNxd',
quote(eggs_dir)]
if not has_broken_dash_S:
cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
find_links = os.environ.get('bootstrap-testing-find-links')
if find_links:
cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
setup_requirement = 'distribute'
else:
setup_requirement = 'setuptools'
ws = pkg_resources.working_set
setup_requirement_path = ws.find(
pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
os.environ,
PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setup_requirement_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
if is_jython:
import subprocess
exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
sys.stdout.flush()
sys.stderr.flush()
print ("An error occurred when trying to install zc.buildout. "
"Look above this message for any errors that "
"were output by easy_install.")
sys.exit(exitcode)
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
shutil.rmtree(eggs_dir) | zope.pytest | /zope.pytest-0.1.tar.gz/zope.pytest-0.1/bootstrap.py | bootstrap.py |
Examples
========
.. testsetup::
import zope.pytest
import os
import sys
zope_pytest_dir = os.path.dirname(zope.pytest.__file__)
fixture_dir = os.path.join(zope_pytest_dir, 'tests', 'sample_fixtures')
def register_fixture(name):
fixture_path = os.path.join(fixture_dir, name)
sys.path.append(fixture_path)
return fixture_path
def unregister_fixture(fixture_path):
sys.path.remove(fixture_path)
# Unload all modules in sample_fixtures...
mod_paths = [(x, getattr(y, '__file__', ''))
for x,y in sys.modules.items()]
for key, path in mod_paths:
if not 'sample_fixtures' in path:
continue
del sys.modules[key]
Preparing a Package
-------------------
Zope projects often use `zc.buildout` along with `distutils` and
`setuptools` to declare their dependencies from other packages and
create locally executable scripts (including testing scripts). This
step is explained in :ref:`project_setup`.
Here we concentrate on the main Python code, i.e. we leave out the
`setup.py` and `zc.buildout` stuff for a while.
A simple Zope-geared package could look like this::
rootdir
!
+--mypkg/
!
+---__init__.py
!
+---app.py
!
+---interfaces.py
!
+---ftesting.zcml
!
+---configure.zcml
!
+---tests/
!
+----__init__.py
!
+----test_app.py
We prepared several such projects in the sources of :mod:`zope.pytest`
(see ``sample_fixtures/`` in :mod:`zope.pytest`'s ``tests/``
directory). There we have different versions of a package called
``mypkg`` (or ``mypkg2`` or similar) which we will use here.
.. doctest::
:hide:
>>> import os, shutil, sys, tempfile
>>> import zope.pytest.tests
>>> fixture = os.path.join(
... os.path.dirname(zope.pytest.tests.__file__), 'mypkg_fixture')
>>> mypkg_dirtree = os.path.join(fixture, 'mypkg')
The important files contained in the `mypkg` package (besides the real
test modules, which change with each sample) look like this:
`app.py`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/simple/mypkg/app.py
`interfaces.py`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/simple/mypkg/interfaces.py
`configure.zcml`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/simple/mypkg/configure.zcml
:language: xml
`ftesting.zcml`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/simple/mypkg/ftesting.zcml
:language: xml
Writing Simple Tests
--------------------
For simple tests we do not need any special setup at all. Instead we
can just put modules starting with ``test_`` into some Python package
and ask pytest to run the tests.
In our package we add the following, pretty plain test file:
`tests/test_app.py`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/simple/mypkg/tests/test_app.py
All tests do the usual plain pytest_ stuff: they are named starting
with ``test_`` so that pytest_ can find them. The second and third
tests check whether the specified interfaces are implemented by the
``SampleApp`` class and instances thereof.
For plain :mod:`zope.interface` related tests we need no special
setup.
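The interface checks mentioned above roughly boil down to the following
sketch (``ISampleApp`` is a hypothetical name standing in for the interface
defined in ``interfaces.py``; the ``test_app.py`` included above is
authoritative)::

    from zope.interface.verify import verifyClass, verifyObject

    from mypkg.app import SampleApp
    from mypkg.interfaces import ISampleApp

    def test_class_implements_interface():
        assert verifyClass(ISampleApp, SampleApp)

    def test_instance_provides_interface():
        assert verifyObject(ISampleApp, SampleApp())
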
.. doctest::
:hide:
>>> mypkg_dir = register_fixture('simple')
Then, we run py.test_ with this package as argument. In real-world
usage we would call ``bin/py.test`` or even ``py.test`` (if `pytest`
is installed globally in your system Python) from the command line:
>>> import pytest
>>> pytest.main(mypkg_dir) # doctest: +REPORT_UDIFF
=============...=== test session starts ====...================
platform ... -- Python 2... -- pytest-...
collecting ...collected 3 items
<BLANKLINE>
.../mypkg/tests/test_app.py ...
<BLANKLINE>
=============...=== 3 passed in ... seconds ===...=============
0
.. doctest::
:hide:
>>> unregister_fixture(mypkg_dir)
Excellent! py.test found our tests and executed them.
Apparently we didn't really need `zope.pytest` in this example, as
there was no Zope specific code to test.
Making Use of ZCML
------------------
To make real use of `zope.pytest` we now want to test some ZCML_
registrations we can make in (you guessed it) ZCML_ files.
Imagine our project had a certain utility defined that looks like
this:
`app.py`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/zcml/mypkg2/app.py
The `FooUtility` can be registered via ZCML_ like this:
`configure.zcml`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/zcml/mypkg2/configure.zcml
:language: xml
To check whether the `FooUtility` was registered and is available we
first have to configure the Zope Component Architecture
(ZCA). `zope.pytest` here helps with the
:func:`zope.pytest.configure` function. It is normally used inside a
`funcarg`_ function you have to write yourself.
We use this approach in a new test module where we want to test the
`FooUtility`. The new test module is called ``test_foo``.
`tests/test_foo.py`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/zcml/mypkg2/tests/test_foo.py
Here the `pytest_funcarg__config` function provides a ``config``
argument for arbitrary test functions you want to write. It can be
deployed by writing test functions that require an argument named
``config`` as shown in the `test_foo_utility` function.
If we had named the ``pytest_funcarg__`` function
``"pytest_funcarg__manfred"``, we had to use an argument called
``manfred`` instead of ``config`` with our test functions.
The configuration used here is based on the local ``ftesting.zcml``
file (which includes ``configure.zcml``). We could easily write
several other funcarg_ functions based on other ZCML files and decide
for each test function which configuration we would like to pick for
the respective test, based on the funcarg_ name.
The main point about the shown ``pytest_funcarg__`` function is that
it calls :func:`zope.pytest.configure` which injects setup and
teardown calls into the test that are called automatically
before/after your test. This way the given ZCML files are already
parsed when the `test_foo_utility()` test starts and any registrations
are cleared up afterwards. This is the reason why the ``foo utility``
looked up in our test can actually be found.
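Such a funcarg function is only a few lines long; a minimal sketch, assuming
the package and file names used in this example (the included ``test_foo.py``
shows the real thing)::

    import mypkg2
    from zope.pytest import configure

    def pytest_funcarg__config(request):
        return configure(request, mypkg2, 'ftesting.zcml')
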
Please note that in the actual tests we make no use of the passed
`config` parameter. We only request it to inject the necessary setup
and teardown functionality.
.. doctest::
:hide:
>>> mypkg_dir = register_fixture('zcml')
When run, all tests pass:
>>> import pytest
>>> pytest.main(mypkg_dir)
=============...=== test session starts ====...================
platform ... -- Python 2... -- pytest-...
collecting ...collected 5 items
<BLANKLINE>
.../mypkg2/tests/test_app.py ...
.../mypkg2/tests/test_foo.py ..
<BLANKLINE>
=============...=== 5 passed in ... seconds ===...=============
0
.. doctest::
:hide:
>>> unregister_fixture(mypkg_dir)
Both foo tests would fail without `pytest_funcarg__config` preparing
the tests.
Functional Testing: Browsing Objects
------------------------------------
The most interesting point about functional testing might be to check
Zope-generated output, i.e. browser pages or similar. This is what is
normally referred to as 'functional testing'.
This task normally needs much more setup, and this is where `zope.pytest`
comes to the rescue and minimizes the effort dramatically.
To show this we add a view for the `SampleApp` class we defined in
``app.py`` above. We add a new module ``browser.py`` in our `mypkg`
package with the following contents:
New module `browser.py`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/browser/mypkg3/browser.py
This is a simple browser page that sets the content type of any HTTP
response and returns a simple string as content.
However, to make content browsable we need more registrations. In
``configure.zcml`` we register the main components as above but this
time including also the new browser page:
`configure.zcml`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/browser/mypkg3/configure.zcml
:language: xml
In ``ftesting.zcml`` we do all the registration stuff that is normally
done in the ``site.zcml``.
`ftesting.zcml`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/browser/mypkg3/ftesting.zcml
:language: xml
Now we are ready to add another test module that checks the new view
defined in the `browser` module:
`tests/test_browser.py`:
.. literalinclude:: ../src/zope/pytest/tests/sample_fixtures/browser/mypkg3/tests/test_browser.py
Here we have three tests. While the first one checks only whether the
component architecture can generate the new view in general, with the
latter ones (`test_browser` and `test_infrae_browser`) we access the
whole machinery via a real WSGI application. This gives us a
sufficient level of abstraction for real functional testing.
Please note that we make no strong assumptions about the existence of
a ZODB working in the background or similar. While a ZODB is in fact
used here, the tests do not reflect this. We can therefore deploy
non-Zope-specific packages like WebOb_.
One of the main parts of this test module therefore is the funcarg_
function `pytest_funcarg__apps` that sets up a complete WSGI
application and returns it together with a `SampleApp` object stored
in the database.
To do the complete setup `pytest_funcarg__apps` calls the
`zope.pytest` function :func:`zope.pytest.create_app` with a
`SampleApp` instance to be stored in the
database. :func:`zope.pytest.create_app` stores this instance under
the name ``test`` in the DB root and returns a ready-to-use WSGI
application along with the `SampleApp` instance created.
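A rough sketch of such a funcarg function, assuming the names used in this
example (the exact return value is whatever :func:`zope.pytest.create_app`
gives back; the included test module is authoritative)::

    from zope.pytest import create_app
    from mypkg3.app import SampleApp

    def pytest_funcarg__apps(request):
        return create_app(request, SampleApp())
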
In the first functional test (`test_browser`) we create and perform an
HTTP request that is sent to the setup WSGI application and check the
output returned by that request. Please note that we use
``http://localhost/test/index.html`` as URL. That's because
:func:`zope.pytest.create_app` stores our application under the name
``test`` in the DB and we registered the view on `SampleApp` objects
under the name ``index.html``.
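A minimal sketch of such a request/response round trip with WebOb_, assuming
the ``apps`` funcarg yields the WSGI application first (the included test
module shows the exact shape)::

    import webob

    def test_browser(apps):
        wsgi_app = apps[0]
        request = webob.Request.blank('http://localhost/test/index.html')
        response = request.get_response(wsgi_app)
        assert response.status.startswith('200')
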
The second functional test (`test_infrae_browser`) does nearly the
same, but this time deploys a fake browser provided by the
:mod:`infrae.testbrowser` package. The latter is well suited for
simulating browser sessions, virtual browser clicks, filling out
HTML forms and much more of what you usually do with a browser. See the
`infrae.testbrowser documentation`_ for details.
Usage of :mod:`infrae.testbrowser`, however, requires Python 2.6 at
least. We therefore expect the respective test to fail if using older
Python versions and mark this condition with a ``@pytest.mark.xfail``
decorator. Another great feature of `py.test` (see `py.test skip and
xfail mechanisms <http://www.pytest.org/skipping.html>`_ for details).
.. doctest::
:hide:
>>> mypkg_dir = register_fixture('browser')
Finally, when run, all tests pass:
>>> import pytest
>>> pytest.main(mypkg_dir)
=============...=== test session starts ====...================
platform ... -- Python 2... -- pytest-...
collecting ...collected 8 items
<BLANKLINE>
.../mypkg3/tests/test_app.py ...
.../mypkg3/tests/test_browser.py ...
.../mypkg3/tests/test_foo.py ..
<BLANKLINE>
=============...=== ... passed... in ... seconds ===...=============
0
.. doctest::
:hide:
>>> unregister_fixture(mypkg_dir)
Writing and running doctests (unsupported)
------------------------------------------
:mod:`zope.pytest` currently has no specific support for
doctesting. That means you can write and run regular doctests but
there is currently no special hook or similar included for setting up
Zope-geared environments/ZCML parsing and the like. We hope to provide
doctest support in some future release.
.. _ZCML: http://docs.zope.org/zopetoolkit/codingstyle/zcml-style.html
.. _pytest: http://pytest.org/
.. _py.test: http://pytest.org/
.. _funcarg: http://pytest.org/funcargs.html
.. _WebOb: http://pythonpaste.org/webob/
.. _`infrae.testbrowser documentation`: http://infrae.com/download/tools/infrae.testbrowser
| zope.pytest | /zope.pytest-0.1.tar.gz/zope.pytest-0.1/doc/samples.rst | samples.rst |
.. zope.pytest documentation master file, created by
sphinx-quickstart on Thu Jan 6 13:39:53 2011.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to zope.pytest's documentation!
=======================================
Contents:
.. toctree::
:maxdepth: 2
intro
samples
grok_samples
api
community
development
changes
license
copyright
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zope.pytest | /zope.pytest-0.1.tar.gz/zope.pytest-0.1/doc/index.rst | index.rst |
.. module:: zope.pytest
`zope.pytest` API
*****************
Helpers for py.test integration in Zope-based environments.
The main test helpers are the first two functions (:func:`create_app`,
:func:`configure`) which are normally accessed deploying the py.test_
`funcarg`_ mechanism.
:func:`create_app`
==================
.. autofunction:: zope.pytest.create_app
:func:`configure`
=================
.. autofunction:: zope.pytest.configure
:func:`setup_config`
====================
.. autofunction:: zope.pytest.setup_config
:func:`teardown_config`
=======================
.. autofunction:: zope.pytest.teardown_config
:func:`setup_db`
================
.. autofunction:: zope.pytest.setup_db
:func:`teardown_db`
===================
.. autofunction:: zope.pytest.teardown_db
:func:`setup_connection`
========================
.. autofunction:: zope.pytest.setup_connection
:func:`teardown_connection`
===========================
.. autofunction:: zope.pytest.teardown_connection
:func:`setup_root`
==================
.. autofunction:: zope.pytest.setup_root
:func:`teardown_root`
=====================
.. autofunction:: zope.pytest.teardown_root
.. _py.test: http://pytest.org/
.. _funcarg: http://pytest.org/funcargs.html
.. _ZODB: http://www.zodb.org/
.. _ZCML: http://www.muthukadan.net/docs/zca.html#zcml
| zope.pytest | /zope.pytest-0.1.tar.gz/zope.pytest-0.1/doc/api.rst | api.rst |
Developing :mod:`zope.pytest`
*****************************
You want to contribute to :mod:`zope.pytest`? Great!
Please talk to us our on our :ref:`mailing list <mailing_list>` about
your plans!
Sources
-------
`zope.pytest` source code is maintained in the Zope Subversion repository:
http://svn.zope.org/zope.pytest
You can check out `zope.pytest` using `Subversion`_ (SVN).
.. _`Subversion`: http://subversion.tigris.org/
Feel free to check out `zope.pytest` from the Zope repository if you want
to hack on it, and send us a request when you want us to merge
your improvements.
Development install of `zope.pytest`
------------------------------------
`zope.pytest` requires Python 2.5 or 2.6.
To install `zope.pytest` for development, first check it out, then run the
buildout::
$ python bootstrap.py -d
$ bin/buildout
This uses Buildout_. Don't worry, that's all you need to know to get
going. The ``-d`` option is to use Distribute_ instead of Setuptools_
and is optional. The buildout process will download and install all
dependencies for `zope.pytest`.
.. _Buildout: http://buildout.org
.. _Distribute: http://packages.python.org/distribute/
.. _Setuptools: http://pypi.python.org/pypi/setuptools
Tests
-----
To run the tests::
$ bin/py.test
This uses `py.test`_. We love tests, so please write some if you want
to contribute. There are many examples of tests in the ``test_*.py``
modules.
.. _`py.test`: http://pytest.org/
Test coverage
-------------
To get a test coverage report::
$ bin/py.test --cov zope.pytest
To get a report with more details::
bin/py.test --cov-report html --cov zope.pytest
The results will be stored in a subdirectory ``htmlcov``. You can point
a web browser to its ``index.html`` to get a detailed coverage report.
Building the documentation
--------------------------
To build the documentation using Sphinx_::
$ cd doc/
$ make html
.. _Sphinx: http://sphinx.pocoo.org/
If you use this command, all the dependencies will have been set up
for Sphinx so that the API documentation can be automatically
extracted from the `zope.pytest` source code. The docs source is in
``doc/``, the built documentation will be available in
``doc/_build/html``.
We also have support for testing the docs. These tests can be run
in the ``doc`` dir as well::
$ make doctest
Releasers should make sure that all tests pass.
Python with `zope.pytest` on the sys.path
-----------------------------------------
It's often useful to have a project and its dependencies available for
import on a Python prompt for experimentation:
$ bin/devpython
You can now import `zope.pytest`::
>>> import zope.pytest
You can also run your own scripts with this custom interpreter if you
like::
$ bin/devpython somescript.py
This can be useful for quick experimentation. When you want to use
`zope.pytest` in your own projects you would normally include it in
your project's ``setup.py`` dependencies instead.
Releases
--------
The buildout also installs `zest.releaser`_ which can be used to make
automatic releases to PyPI (using ``bin/fullrelease``).
.. _`zest.releaser`: http://pypi.python.org/pypi/zest.releaser
| zope.pytest | /zope.pytest-0.1.tar.gz/zope.pytest-0.1/doc/development.rst | development.rst |
Introduction
************
:mod:`zope.pytest` contains a set of helper functions to test
Zope_/Grok_ using pytest_.
Use pytest_ for your Zope_/Grok_ tests
======================================
Many Zope_/Grok_ applications today are tested using regular Python
:mod:`unittest` and/or the respective zope packages, testrunners, etc.
Many Python developers, however, prefer the more pythonic way of
pytest_ to detect, execute and evaluate tests.
:mod:`zope.pytest` now brings the power and beauty of pytest_ into
Zope_/Grok_ projects.
Quickstart
**********
Zope_ applications normally have to set up a ZODB_ database and
configure the Zope_ component architecture to run. At least
integration and functional tests therefore have to perform a lot of
setup/teardown functionality. That's where :mod:`zope.pytest` comes to
rescue.
With :mod:`zope.pytest` you can define `pytest`_-compatible
setup/teardown code simply like this::
import mypkg
from mypkg.app import MyApp
from zope.pytest import configure, create_app
def pytest_funcarg__config(request):
return configure(request, mypkg, 'ftesting.zcml')
def pytest_funcarg__app(request):
return create_app(request, MyApp())
def test_app(config, app):
        assert 1 == 1
This setup requires that you provide a valid configuration in an
``ftesting.zcml`` file in your package.
.. _project_setup:
Activating pytest_ and :mod:`zope.pytest` in your project
*********************************************************
In the ``buildout.cfg`` of your project simply add ``pytest`` as a
requirement to build tests. This can for instance be done like this::
[buildout]
develop = .
parts = test
versions = versions
[versions]
[test]
recipe = zc.recipe.egg
eggs =
mypkg [tests]
pytest
The ``test`` extra requirement mentioned in the ``[test]`` section can
be setup like this in your project's ``setup.py``::
tests_require = [
'pytest',
'zope.app.appsetup',
'zope.pytest',
]
setuptools.setup(
# ...
extras_require = {
'tests': tests_require,
# ...
}
)
That's it. Run `buildout` and a `py.test` script should be created in
your ``bin/`` directory. Now you can go on and write your tests.
.. _pytest: http://pytest.org/
.. _Zope: http://www.zope.org/
.. _Grok: http://grok.zope.org/
.. _ZODB: http://www.zodb.org/
| zope.pytest | /zope.pytest-0.1.tar.gz/zope.pytest-0.1/doc/intro.rst | intro.rst |
"""RAM cache implementation.
"""
__docformat__ = 'restructuredtext'
from contextlib import contextmanager
from pickle import dumps
from threading import Lock
from time import time
from persistent import Persistent
from zope.interface import implementer
from zope.location.interfaces import IContained
from zope.ramcache.interfaces.ram import IRAMCache
# A global caches dictionary shared between threads
caches = {}
# A writelock for caches dictionary
writelock = Lock()
# A counter for cache ids and its lock
cache_id_counter = 0
cache_id_writelock = Lock()
@implementer(IRAMCache, IContained)
class RAMCache(Persistent):
"""The design of this class is heavily based on RAMCacheManager in Zope2.
The idea behind the `RAMCache` is that it should be shared between threads,
so that the same objects are not cached in each thread. This is achieved by
storing the cache data structure itself as a module level variable
(`RAMCache.caches`). This, of course, requires locking on modifications of
that data structure.
`RAMCache` is a persistent object. The actual data storage is a volatile
object, which can be acquired/created by calling ``_getStorage()``. Storage
objects are shared between threads and handle their blocking internally.
"""
__parent__ = __name__ = None
def __init__(self):
# A timestamp and a counter are used here because using just a
# timestamp and an id (address) produced unit test failures on
# Windows (where ticks are 55ms long). If we want to use just
# the counter, we need to make it persistent, because the
# RAMCaches are persistent.
with cache_id_writelock:
global cache_id_counter
cache_id_counter += 1
self._cacheId = "%s_%f_%d" % (id(self), time(), cache_id_counter)
self.requestVars = ()
self.maxEntries = 1000
self.maxAge = 3600
self.cleanupInterval = 300
def getStatistics(self):
s = self._getStorage()
return s.getStatistics()
def update(self, maxEntries=None, maxAge=None, cleanupInterval=None):
if maxEntries is not None:
self.maxEntries = maxEntries
if maxAge is not None:
self.maxAge = maxAge
if cleanupInterval is not None:
self.cleanupInterval = cleanupInterval
self._getStorage().update(maxEntries, maxAge, cleanupInterval)
def invalidate(self, ob, key=None):
s = self._getStorage()
if key:
key = self._buildKey(key)
s.invalidate(ob, key)
else:
s.invalidate(ob)
def invalidateAll(self):
s = self._getStorage()
s.invalidateAll()
def query(self, ob, key=None, default=None):
s = self._getStorage()
key = self._buildKey(key)
try:
return s.getEntry(ob, key)
except KeyError:
return default
def set(self, data, ob, key=None):
s = self._getStorage()
key = self._buildKey(key)
s.setEntry(ob, key, data)
def _getStorage(self):
"""Finds or creates a storage object."""
cacheId = self._cacheId
with writelock:
if cacheId not in caches:
caches[cacheId] = Storage(self.maxEntries, self.maxAge,
self.cleanupInterval)
return caches[cacheId]
@staticmethod
def _buildKey(kw):
"""Build a tuple which can be used as an index for a cached value"""
if kw:
items = sorted(kw.items())
return tuple(items)
return ()
class _StorageData:
__slots__ = ('value', 'ctime', 'access_count')
def __init__(self, value):
self.value = value
self.ctime = time()
self.access_count = 0
def __eq__(self, other):
# For tests
return (self.value == other.value
and self.ctime == other.ctime
and self.access_count == other.access_count)
def __getstate__(self):
# For getStatistics only.
return self.value
class Storage:
"""Storage keeps the count and does the aging and cleanup of cached
entries.
This object is shared between threads. It corresponds to a single
persistent `RAMCache` object. Storage does the locking necessary
for thread safety.
"""
def __init__(self, maxEntries=1000, maxAge=3600, cleanupInterval=300):
self._data = {}
self._misses = {}
self._invalidate_queue = []
self.maxEntries = maxEntries
self.maxAge = maxAge
self.cleanupInterval = cleanupInterval
self.writelock = Lock()
self.lastCleanup = time()
def update(self, maxEntries=None, maxAge=None, cleanupInterval=None):
"""Set the registration options. ``None`` values are ignored."""
if maxEntries is not None:
self.maxEntries = maxEntries
if maxAge is not None:
self.maxAge = maxAge
if cleanupInterval is not None:
self.cleanupInterval = cleanupInterval
def getEntry(self, ob, key):
if self.lastCleanup <= time() - self.cleanupInterval:
self.cleanup()
try:
data = self._data[ob][key]
except KeyError:
if ob not in self._misses:
self._misses[ob] = 0
self._misses[ob] += 1
raise
else:
data.access_count += 1
return data.value
def setEntry(self, ob, key, value):
"""Stores a value for the object. Creates the necessary
dictionaries."""
if self.lastCleanup <= time() - self.cleanupInterval:
self.cleanup()
with self._invalidate_queued_after_writelock():
if ob not in self._data:
self._data[ob] = {}
self._data[ob][key] = _StorageData(value)
def _do_invalidate(self, ob, key=None):
"""This does the actual invalidation, but does not handle the locking.
This method is supposed to be called from `invalidate`
"""
try:
if key is None:
del self._data[ob]
self._misses[ob] = 0
else:
del self._data[ob][key]
if not self._data[ob]:
del self._data[ob]
except KeyError:
pass
@contextmanager
def _invalidate_queued_after_writelock(self):
"""
A context manager that obtains the writelock for the body, and
then, after it is released, invalidates the queue.
"""
try:
with self.writelock:
yield
finally:
self._invalidate_queued()
def _invalidate_queued(self):
while self._invalidate_queue:
obj, key = self._invalidate_queue.pop()
self.invalidate(obj, key)
def invalidate(self, ob, key=None):
"""Drop the cached values.
Drop all the values for an object if no key is provided or
just one entry if the key is provided.
"""
if self.writelock.acquire(0):
try:
self._do_invalidate(ob, key)
finally:
self.writelock.release()
else:
self._invalidate_queue.append((ob, key))
def invalidateAll(self):
"""Drop all the cached values.
"""
with self.writelock:
self._data = {}
self._misses = {}
self._invalidate_queue = []
def removeStaleEntries(self):
"""Remove the entries older than `maxAge`"""
if self.maxAge > 0:
punchline = time() - self.maxAge
with self._invalidate_queued_after_writelock():
data = self._data
# creating copies as we modify:
for path, path_data in tuple(data.items()):
for key, val in tuple(path_data.items()):
if val.ctime < punchline:
del path_data[key]
if not path_data:
del data[path]
def cleanup(self):
"""Cleanup the data"""
self.removeStaleEntries()
self.removeLeastAccessed()
self.lastCleanup = time()
def removeLeastAccessed(self):
with self._invalidate_queued_after_writelock():
data = self._data
keys = [(ob, k) for ob, v in data.items() for k in v]
if len(keys) > self.maxEntries:
def getKey(item):
ob, key = item
return data[ob][key].access_count
keys.sort(key=getKey)
ob, key = keys[self.maxEntries]
maxDropCount = data[ob][key].access_count
keys.reverse()
for ob, key in keys:
if data[ob][key].access_count <= maxDropCount:
del data[ob][key]
if not data[ob]:
del data[ob]
self._clearAccessCounters()
def _clearAccessCounters(self):
for path_data in self._data.values():
for val in path_data.values():
val.access_count = 0
self._misses = {}
def getKeys(self, object):
return self._data[object].keys()
def getStatistics(self):
objects = sorted(self._data.items())
result = []
for path, path_data in objects:
try:
size = len(dumps(path_data))
except Exception:
# Some value couldn't be pickled properly.
# That's OK, they shouldn't have to be. Return
# a distinct value that can be recognized as such,
# but that also works in arithmetic.
size = False
hits = sum(entry.access_count for entry in path_data.values())
result.append({'path': path,
'hits': hits,
'misses': self._misses.get(path, 0),
'size': size,
'entries': len(path_data)})
return tuple(result) | zope.ramcache | /zope.ramcache-3.0-py3-none-any.whl/zope/ramcache/ram.py | ram.py |
from zope.interface import Interface
from zope.interface import Attribute
from zope.schema import TextLine
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zope')
class IDBITypeInfoProvider(Interface):
"""This object can get the Type Info for a particular DBI
implementation."""
def getTypeInfo():
"""Return an IDBITypeInfo object."""
class IDBITypeInfo(Interface):
"""Database adapter specific information"""
paramstyle = Attribute("""
String constant stating the type of parameter marker formatting
expected by the interface. Possible values are [2]:
'qmark' = Question mark style, e.g. '...WHERE name=?'
'numeric' = Numeric, positional style, e.g. '...WHERE name=:1'
'named' = Named style, e.g. '...WHERE name=:name'
'format' = ANSI C printf format codes, e.g. '...WHERE name=%s'
'pyformat' = Python extended format codes, e.g. '...WHERE name=%(name)s'
""")
threadsafety = Attribute("""
Integer constant stating the level of thread safety the interface
supports. Possible values are:
0 = Threads may not share the module.
1 = Threads may share the module, but not connections.
2 = Threads may share the module and connections.
3 = Threads may share the module, connections and cursors.
Sharing in the above context means that two threads may use a resource
without wrapping it using a mutex semaphore to implement resource
locking. Note that you cannot always make external resources thread
safe by managing access using a mutex: the resource may rely on global
variables or other external sources that are beyond your control.
""")
encoding = TextLine(
title=_("Database encoding"),
description=_("Encoding of the database content"),
default=u"utf-8",
required=False
)
def getEncoding():
"""Get the database encoding."""
def setEncoding(encoding):
"""Set the database encoding."""
def getConverter(type):
"""Return a converter function for field type matching key"""
class IResultSet(Interface):
"""Holds results, and allows iteration."""
columns = Attribute("""A list of the column names of the returned result
set.""")
def __getitem__(index):
"""Return a brain row for index."""
class DatabaseException(Exception):
"""Generic Database Error"""
class DatabaseAdapterError(DatabaseException):
pass
arraysize = 1 # default constant, symbolic
class IDBICursor(Interface):
"""DB API ICursor interface"""
description = Attribute("""This read-only attribute is a sequence of
7-item sequences. Each of these sequences contains information
describing one result column: (name, type_code, display_size,
internal_size, precision, scale, null_ok). This attribute will be None
for operations that do not return rows or if the cursor has not had an
operation invoked via the executeZZZ() method yet.
The type_code can be interpreted by comparing it to the Type Objects
specified in the section below. """)
arraysize = Attribute("""This read/write attribute specifies the number of
rows to fetch at a time with fetchmany(). It defaults to 1 meaning to
fetch a single row at a time.
Implementations must observe this value with respect to the
fetchmany() method, but are free to interact with the database a
single row at a time. It may also be used in the implementation of
executemany().
""")
def close():
"""Close the cursor now (rather than whenever __del__ is called). The
cursor will be unusable from this point forward; an Error (or
subclass) exception will be raised if any operation is attempted with
the cursor.
"""
def execute(operation, parameters=None):
"""Prepare and execute a database operation (query or
command). Parameters may be provided as sequence or mapping and will
be bound to variables in the operation. Variables are specified in a
database-specific notation (see the module's paramstyle attribute for
details). [5]
A reference to the operation will be retained by the cursor. If the
same operation object is passed in again, then the cursor can optimize
its behavior. This is most effective for algorithms where the same
operation is used, but different parameters are bound to it (many
times).
For maximum efficiency when reusing an operation, it is best to use
the setinputsizes() method to specify the parameter types and sizes
ahead of time. It is legal for a parameter to not match the predefined
information; the implementation should compensate, possibly with a
loss of efficiency.
The parameters may also be specified as list of tuples to e.g. insert
multiple rows in a single operation, but this kind of usage is
depreciated: executemany() should be used instead.
Return values are not defined.
"""
def executemany(operation, seq_of_parameters):
"""Prepare a database operation (query or command) and then execute it
against all parameter sequences or mappings found in the sequence
seq_of_parameters.
Modules are free to implement this method using multiple calls to the
execute() method or by using array operations to have the database
process the sequence as a whole in one call.
The same comments as for execute() also apply accordingly to this
method.
Return values are not defined.
"""
def fetchone():
"""Fetch the next row of a query result set, returning a single
sequence, or None when no more data is available. [6]
An Error (or subclass) exception is raised if the previous call to
executeZZZ() did not produce any result set or no call was issued yet.
"""
def fetchmany(size=arraysize):
"""Fetch the next set of rows of a query result, returning a sequence
of sequences (e.g. a list of tuples). An empty sequence is returned
when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If
it is not given, the cursor's arraysize determines the number of rows
to be fetched. The method should try to fetch as many rows as
indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be
returned.
An Error (or subclass) exception is raised if the previous call to
executeZZZ() did not produce any result set or no call was issued yet.
Note there are performance considerations involved with the size
parameter. For optimal performance, it is usually best to use the
arraysize attribute. If the size parameter is used, then it is best
for it to retain the same value from one fetchmany() call to the next.
"""
def fetchall():
"""Fetch all (remaining) rows of a query result, returning them as a
sequence of sequences (e.g. a list of tuples). Note that the cursor's
arraysize attribute can affect the performance of this operation.
An Error (or subclass) exception is raised if the previous call to
executeZZZ() did not produce any result set or no call was issued yet.
"""
class IDBIConnection(Interface):
"""A DB-API based Interface """
def cursor():
"""Return a new IDBICursor Object using the connection.
If the database does not provide a direct cursor concept, the module
will have to emulate cursors using other means to the extent needed by
this specification. """
def commit():
"""Commit any pending transaction to the database. Note that if the
database supports an auto-commit feature, this must be initially off.
An interface method may be provided to turn it back on.
Database modules that do not support transactions should implement
this method with void functionality.
"""
def rollback():
"""In case a database does provide transactions this method causes the
database to roll back to the start of any pending transaction. Closing
a connection without committing the changes first will cause an
implicit rollback to be performed. """
def close():
"""Close the connection now (rather than whenever __del__ is
called). The connection will be unusable from this point forward; an
Error (or subclass) exception will be raised if any operation is
attempted with the connection. The same applies to all cursor objects
trying to use the connection. """
class ISQLCommand(Interface):
"""Static SQL commands."""
connectionName = Attribute("""The name of the database connection
to use in getConnection """)
def getConnection():
"""Get the database connection."""
def __call__():
"""Execute an sql query and return a result object if appropriate"""
class IZopeDatabaseAdapter(IDBITypeInfo):
"""Interface for persistent object that returns
volatile IZopeConnections."""
def isConnected():
"""Check whether the Zope Connection is actually connected to the
database."""
def __call__():
"""Return an IZopeConnection object"""
class IZopeDatabaseAdapterManagement(Interface):
def setDSN(dsn):
"""Set the DSN for the Adapter instance"""
def getDSN():
"""Get the DSN of the Adapter instance"""
dsn = TextLine(
title=_("DSN"),
description=_(
"Specify the DSN (Data Source Name) of the database. "
"Examples include:\n"
"\n"
"dbi://dbname\n"
"dbi://dbname;param1=value...\n"
"dbi://user:passwd/dbname\n"
"dbi://user:passwd/dbname;param1=value...\n"
"dbi://user:passwd@host:port/dbname\n"
"dbi://user:passwd@host:port/dbname;param1=value...\n"
"\n"
"All values should be properly URL-encoded."),
default=u"dbi://dbname",
required=True)
def connect():
"""Connect to the specified database."""
def disconnect():
"""Disconnect from the database."""
class IManageableZopeDatabaseAdapter(IZopeDatabaseAdapter,
IZopeDatabaseAdapterManagement):
"""Database adapters with management functions
"""
class IZopeConnection(IDBIConnection, IDBITypeInfoProvider):
# An implementation of this object will be exposed to the
# user. Therefore the Zope connection represents a connection in
# the Zope sense, meaning that the object might not be actually
# connected to a real relational database.
def cursor():
"""Return an IZopeCursor object."""
def registerForTxn():
"""Join the current transaction.
This method should only be inovoked by the Zope/DB transaction
manager.
"""
class IZopeCursor(IDBICursor):
"""An IDBICursor that integrates with Zope's transactions"""
def execute(operation, parameters=None):
"""Executes an operation, registering the underlying connection with
the transaction system.
See IDBICursor for more detailed execute information.
"""
def executemany(operation, seq_of_parameters):
"""Executes an operation, registering the underlying connection with
the transaction system.
See IDBICursor for more detailed executemany information.
""" | zope.rdb | /zope.rdb-3.5.0.tar.gz/zope.rdb-3.5.0/src/zope/rdb/interfaces.py | interfaces.py |
import gadfly
import os
from zope.rdb import ZopeDatabaseAdapter, parseDSN
from zope.rdb import DatabaseException, DatabaseAdapterError
from zope.rdb import ZopeConnection, ZopeCursor
GadflyError = DatabaseAdapterError
class GadflyAdapterCursor(ZopeCursor):
def executemany(self, operation, parameters):
command = operation.split(None, 1)[0].lower()
if command not in ("insert", "update", "delete"):
raise DatabaseAdapterError(
"executemany() is not applicable for %r" % operation)
operation, parameters = self._prepareOperation(operation, parameters)
self.connection.registerForTxn()
if command == "insert":
self.execute(operation, parameters)
else:
for param in parameters:
self.execute(operation, param)
class GadflyAdapterConnection(ZopeConnection):
def cursor(self):
return GadflyAdapterCursor(self.conn.cursor(), self)
class GadflyAdapter(ZopeDatabaseAdapter):
"""A Gadfly adapter for Zope3"""
# The registerable object needs to have a container
__name__ = __parent__ = None
_v_connection = None
paramstyle = 'qmark'
def _connection_factory(self):
"""Create a Gadfly DBI connection based on the DSN.
Only local (filesystem-based) Gadfly connections are supported
at this moment."""
conn_info = parseDSN(self.dsn)
if conn_info['host'] != '' or conn_info['username'] != '' or \
conn_info['port'] != '' or conn_info['password'] != '':
raise DatabaseAdapterError(
"DSN for GadflyDA must be of the form "
"dbi://dbname or dbi://dbname;dir=directory."
)
connection = conn_info['dbname']
dir = os.path.join(getGadflyRoot(),
conn_info['parameters'].get('dir', connection))
if not os.path.isdir(dir):
raise DatabaseAdapterError('Not a directory ' + dir)
if not os.path.exists(os.path.join(dir, connection + ".gfd")):
db = gadfly.gadfly()
db.startup(connection, dir)
else:
db = gadfly.gadfly(connection, dir)
return db
def connect(self):
if not self.isConnected():
try:
self._v_connection = GadflyAdapterConnection(
self._connection_factory(), self)
except gadfly.error, error:
raise DatabaseException(str(error))
_gadflyRoot = 'gadfly'
def setGadflyRoot(path='gadfly'):
global _gadflyRoot
_gadflyRoot = path
def getGadflyRoot():
return _gadflyRoot | zope.rdb | /zope.rdb-3.5.0.tar.gz/zope.rdb-3.5.0/src/zope/rdb/gadflyda.py | gadflyda.py |
import re
import time, random, thread, threading
from urllib import unquote_plus
from persistent import Persistent
import transaction
from transaction.interfaces import IDataManager
from zope.security.checker import NamesChecker
from zope.interface import implements
from zope.container.contained import Contained
from zope.rdb.interfaces import DatabaseException, DatabaseAdapterError
from zope.rdb.interfaces import IResultSet
from zope.rdb.interfaces import IZopeConnection, IZopeCursor
from zope.rdb.interfaces import IManageableZopeDatabaseAdapter
DEFAULT_ENCODING = "utf-8"
def sqlquote(x):
r"""
Escape data suitable for inclusion in generated ANSI SQL92 code for
cases where bound variables are not suitable.
>>> sqlquote("Hi")
"'Hi'"
>>> sqlquote("It's mine")
"'It''s mine'"
>>> sqlquote("\\'")
"'\\\\'''"
>>> sqlquote(u"\\'")
u"'\\\\'''"
>>> sqlquote(32)
32
>>> sqlquote(None)
'NULL'
"""
if isinstance(x, (str, unicode)):
x = "'%s'" % x.replace('\\', '\\\\').replace("'", "''")
elif isinstance(x, (int, long, float)):
pass
elif x is None:
x = 'NULL'
else:
raise TypeError('do not know how to handle type %s' % type(x))
return x
class ResultSet(list):
"""Database Result Set.
Currently we don't do lazy instantation of rows.
"""
implements(IResultSet)
__slots__ = ('columns',)
def __init__(self, columns, rows):
self.columns = tuple(columns)
row_class = RowClassFactory(columns)
super(ResultSet, self).__init__(map(row_class, rows))
__safe_for_unpickling__ = True
def __reduce__(self):
cols = self.columns
return (ResultSet,
(cols, [[getattr(row, col) for col in cols] for row in self])
)
def __cmp__(self, other):
if not isinstance(other, ResultSet):
return super(ResultSet, self).__cmp__(other)
c = cmp(self.columns, other.columns)
if c:
return c
for row, other_row in zip(self, other):
c = cmp(row, other_row)
if c:
return c
return cmp(len(self), len(other))
class ZopeDatabaseAdapter(Persistent, Contained):
implements(IManageableZopeDatabaseAdapter)
# We need to store our connections in a thread local to ensure that
# different threads do not accidently use the same connection. This
# is important when instantiating database adapters using
# rdb:provideConnection as the same ZopeDatabaseAdapter instance will
# be used by all threads.
_connections = threading.local()
def __init__(self, dsn):
self.setDSN(dsn)
self._unique_id = '%s.%s.%s' % (
time.time(), random.random(), thread.get_ident()
)
def _get_v_connection(self):
"""We used to store the ZopeConnection in a volatile attribute.
However this was not always thread safe.
"""
return getattr(ZopeDatabaseAdapter._connections, self._unique_id, None)
def _set_v_connection(self, value):
setattr(ZopeDatabaseAdapter._connections, self._unique_id, value)
_v_connection = property(_get_v_connection, _set_v_connection)
def _connection_factory(self):
"""This method should be overwritten by all subclasses"""
conn_info = parseDSN(self.dsn)
def setDSN(self, dsn):
assert dsn.startswith('dbi://'), "The DSN has to start with 'dbi://'"
self.dsn = dsn
def getDSN(self):
return self.dsn
def connect(self):
if not self.isConnected():
try:
self._v_connection = ZopeConnection(
self._connection_factory(), self)
except DatabaseException:
raise
# Note: I added the general Exception, since the DA can return
# implementation-specific errors. But we really want to catch all
# issues at this point, so that we can convert it to a
# DatabaseException.
except Exception, error:
raise DatabaseException(str(error))
def disconnect(self):
if self.isConnected():
self._v_connection.close()
self._v_connection = None
def isConnected(self):
return self._v_connection is not None
def __call__(self):
self.connect()
return self._v_connection
# Pessimistic defaults
paramstyle = 'pyformat'
threadsafety = 0
encoding = DEFAULT_ENCODING
def setEncoding(self, encoding):
# Check the encoding
"".decode(encoding)
self.encoding = encoding
def getEncoding(self):
return self.encoding
def getConverter(self, type):
'See IDBITypeInfo'
return identity
def identity(x):
return x
_dsnFormat = re.compile(
r"dbi://"
r"(((?P<username>.*?)(:(?P<password>.*?))?)?"
r"(@(?P<host>.*?)(:(?P<port>.*?))?)?/)?"
r"(?P<dbname>.*?)(;(?P<raw_params>.*))?"
r"$"
)
_paramsFormat = re.compile(r"([^=]+)=([^;]*);?")
def parseDSN(dsn):
"""Parses a database connection string.
We could have the following cases:
dbi://dbname
dbi://dbname;param1=value...
dbi://user/dbname
dbi://user:passwd/dbname
dbi://user:passwd/dbname;param1=value...
dbi://user@host/dbname
dbi://user:passwd@host/dbname
dbi://user:passwd@host:port/dbname
dbi://user:passwd@host:port/dbname;param1=value...
Any values that might contain characters special for URIs need to be
quoted as it would be returned by `urllib.quote_plus`.
Return value is a mapping with the following keys:
username username (if given) or an empty string
password password (if given) or an empty string
host host (if given) or an empty string
port port (if given) or an empty string
dbname database name
parameters a mapping of additional parameters to their values
"""
if not isinstance(dsn, (str, unicode)):
raise ValueError('The dsn is not a string. It is a %r' % type(dsn))
match = _dsnFormat.match(dsn)
if match is None:
raise ValueError('Invalid DSN; must start with "dbi://": %r' % dsn)
result = match.groupdict("")
raw_params = result.pop("raw_params")
for key, value in result.items():
result[key] = unquote_plus(value)
params = _paramsFormat.findall(raw_params)
result["parameters"] = dict([(unquote_plus(key), unquote_plus(value))
for key, value in params])
return result
class ZopeCursor(object):
implements(IZopeCursor)
def __init__(self, cursor, connection):
self.cursor = cursor
self.connection = connection
def execute(self, operation, parameters=None):
"""Executes an operation, registering the underlying
connection with the transaction system. """
operation, parameters = self._prepareOperation(operation, parameters)
self.connection.registerForTxn()
if parameters is None:
return self.cursor.execute(operation)
return self.cursor.execute(operation, parameters)
def executemany(self, operation, parameters):
"""Executes an operation, registering the underlying
connection with the transaction system. """
operation, parameters = self._prepareOperation(operation, parameters)
# If executemany() is not defined pass parameters
# to execute() as defined by DB API v.1
method = getattr(self.cursor, "executemany", self.cursor.execute)
self.connection.registerForTxn()
return method(operation, parameters)
def _prepareOperation(self, operation, parameters):
encoding = self.connection.getTypeInfo().getEncoding()
if isinstance(operation, unicode):
operation = operation.encode(encoding)
parameters = self._prepareParameters(parameters, encoding)
return operation, parameters
def _prepareParameters(self, parameters, encoding):
if isinstance(parameters, list):
for i, v in enumerate(parameters):
if isinstance(v, unicode):
parameters[i] = v.encode(encoding)
else:
parameters[i] = self._prepareParameters(v, encoding)
elif isinstance(parameters, tuple):
parameters = list(parameters)
for i, v in enumerate(parameters):
if isinstance(v, unicode):
parameters[i] = v.encode(encoding)
parameters = tuple(parameters)
elif isinstance(parameters, dict):
for k, v in parameters.items():
if isinstance(v, unicode):
parameters[k] = v.encode(encoding)
return parameters
def __getattr__(self, key):
return getattr(self.cursor, key)
def fetchone(self):
results = self.cursor.fetchone()
if results is None:
return None
return self._convertTypes([results])[0]
def fetchmany(self, *args, **kw):
results = self.cursor.fetchmany(*args, **kw)
return self._convertTypes(results)
def fetchall(self):
results = self.cursor.fetchall()
return self._convertTypes(results)
def _convertTypes(self, results):
"Perform type conversion on query results"
getConverter = self.connection.getTypeInfo().getConverter
converters = [getConverter(col_info[1])
for col_info in self.cursor.description]
## A possible optimization -- need benchmarks to check if it is worth it
## if [x for x in converters if x is not ZopeDatabaseAdapter.identity]:
## return results # optimize away
def convertRow(row):
return map(lambda converter, value: converter(value),
converters, row)
return map(convertRow, results)
class ZopeConnection(object):
implements(IZopeConnection)
def __init__(self, conn, typeinfo):
self.conn = conn
self._txn_registered = False
self._type_info = typeinfo
def __getattr__(self, key):
# The IDBIConnection interface is hereby implemented
return getattr(self.conn, key)
def cursor(self):
'See IZopeConnection'
return ZopeCursor(self.conn.cursor(), self)
def registerForTxn(self):
'See IZopeConnection'
if not self._txn_registered:
tm = ZopeDBTransactionManager(self)
transaction.get().join(tm)
self._txn_registered = True
def commit(self):
'See IDBIConnection'
self._txn_registered = False
self.conn.commit()
def rollback(self):
'See IDBIConnection'
self._txn_registered = False
self.conn.rollback()
def getTypeInfo(self):
'See IDBITypeInfoProvider'
return self._type_info
def queryForResults(conn, query):
"""Convenience function to quickly execute a query."""
cursor = conn.cursor()
try:
cursor.execute(query)
except Exception, error:
# Just catch the exception, so that we can convert it to a database
# exception.
raise DatabaseException(str(error))
if cursor.description is not None:
columns = [c[0] for c in cursor.description]
results = cursor.fetchall()
else:
# Handle the case that the query was not a SELECT
columns = []
results = []
return ResultSet(columns, results)
class ZopeDBTransactionManager(object):
implements(IDataManager)
def __init__(self, dbconn):
self._dbconn = dbconn
self.transaction_manager = transaction.manager
def prepare(self, txn):
pass
def tpc_begin(self, txn):
pass
def tpc_vote(self, txn):
pass
def tpc_finish(self, txn):
pass
def tpc_abort(self, txn):
pass
def abort(self, txn):
self._dbconn.rollback()
def commit(self, txn):
self._dbconn.commit()
def sortKey(self):
"""
ZODB uses a global sort order to prevent deadlock when it commits
transactions involving multiple resource managers. The resource
manager must define a sortKey() method that provides a global ordering
for resource managers.
(excerpt from transaction/notes.txt)
"""
return 'rdb' + str(id(self))
class Row(object):
"""Represents a row in a ResultSet"""
def __init__(self, data):
for k, v in zip(self.__slots__, data):
setattr(self, k, v)
def __str__(self):
return "row class %s" % str(self.__slots__)
def __cmp__(self, other):
if not isinstance(other, Row):
return super(Row, self).__cmp__(other)
c = cmp(self.__slots__, other.__slots__)
if c:
return c
for column in self.__slots__:
c = cmp(getattr(self, column), getattr(other, column))
if c:
return c
return 0
class InstanceOnlyDescriptor(object):
__marker = object()
def __init__(self, value=__marker):
if value is not self.__marker:
self.value = value
def __get__(self, inst, cls=None):
if inst is None:
raise AttributeError
return self.value
def __set__(self, inst, value):
self.value = value
def __delete__(self, inst):
del self.value
def RowClassFactory(columns):
"""Creates a Row object"""
klass_namespace = {}
klass_namespace['__Security_checker__'] = InstanceOnlyDescriptor(
NamesChecker(columns))
klass_namespace['__slots__'] = tuple(columns)
return type('GeneratedRowClass', (Row,), klass_namespace) | zope.rdb | /zope.rdb-3.5.0.tar.gz/zope.rdb-3.5.0/src/zope/rdb/__init__.py | __init__.py |
# NOTES:
#  A bad grammar that has no derivations for the root nonterminal
#  is rejected with NotSLRError (see the GoodStartingPlace check at
#  the end of ruleset.CompSLRNFA).
#  This needs to be modified so the RULEGRAM is loaded from a
#  compiled representation if available.
import string
import kjSet
import kjParser
# import some constants
from kjParser import \
     TERMFLAG, NOMATCHFLAG, MOVETOFLAG, REDUCEFLAG, TRANSFLAG, KEYFLAG, \
     NONTERMFLAG, EOFFLAG, ENDOFFILETOKEN
PMODULE = kjParser.THISMODULE
# errors raised here
TokenError = "TokenError" # may happen on autogen with bad grammar
NotSLRError = "NotSLRError" # may happen for nonSLR grammar
FlowError = "FlowError" # internal error in the metagrammar reduction functions below
# set this flag for regression testing at each load
RUNTESTS = 0
# set this flag to abort automatic generation on Errors
ABORTONERROR = 0
# token used to mark null productions
NULLTOKEN = (None,None)
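
# Note on token representation: tokens throughout this module are
# (flag, name) pairs as produced by kjParser -- for example
# (KEYFLAG, "@R"), (TERMFLAG, "ident") or (NONTERMFLAG, "Rule") --
# and NULLTOKEN above stands in for the epsilon (empty) token both in
# rule bodies and in FSM transitions.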
# a derived FSM class, with closure computation methods defined
# (compilable FSMachine)
#
class CFSMachine(kjParser.FSMachine):
def __init__(self, nonterm):
kjParser.FSMachine.__init__(self, nonterm)
# return the epsilon closure of the FSM as a new FSM
#
    # DoNullMaps, if set, will map unexpected tokens to
# the "empty" state (usually creating a really big fsm)
#
def Eclosure(self, Epsilon, DoNullMaps=0):
Closure = CFSMachine( self.root_nonTerminal )
# compute the Epsilon Graph between states
EGraph = kjSet.NewDG([])
for State in range(0,self.maxState+1):
# every state is E-connected to self
kjSet.AddArc( EGraph, State, State )
# add possible transition on epsilon (ONLY ONE SUPPORTED!)
key = (State, Epsilon)
if self.StateTokenMap.has_key(key):
keymap = self.StateTokenMap[key]
if keymap[0][0] != MOVETOFLAG:
raise TypeError, "unexpected map type in StateTokenMap"
for (Flag,ToState) in keymap:
kjSet.AddArc( EGraph, State, ToState )
#endfor
# transitively close EGraph
kjSet.TransClose( EGraph )
# Translate EGraph into a dictionary of lists
EMap = {}
for State in range(0,self.maxState+1):
EMap[State] = kjSet.Neighbors( EGraph, State )
# make each e-closure of each self.state a state of the closure FSM.
# here closure states assumed transient -- reset elsewhere.
# first do the initial state
Closure.States[ Closure.initial_state ] = \
[TRANSFLAG, kjSet.NewSet(EMap[self.initial_state]) ]
# do all other states (save initial and successful final states)
#for State in range(0,self.maxState+1):
# if State != self.initial_state \
# and State != self.successful_final_state:
# Closure.NewSetState(TRANSFLAG, kjSet.NewSet(EMap[State]) )
##endfor
# compute set of all known tokens EXCEPT EPSILON
Tokens = kjSet.NewSet( [] )
for (State, Token) in self.StateTokenMap.keys():
if Token != Epsilon:
kjSet.addMember(Token, Tokens)
        # transform it into a list
Tokens = kjSet.get_elts(Tokens)
# for each state of the the closure FSM (past final) add transitions
# and add new states as needed until all states are processed
# (uses convention that states are allocated sequentially)
ThisClosureState = 1
while ThisClosureState <= Closure.maxState:
MemberStates = kjSet.get_elts(Closure.States[ThisClosureState][1])
# for each possible Token, compute the union UTrans of all
# e-closures for all transitions for all member states,
# on the Token, make UTrans a new state (if needed),
# and transition ThisClosureState to UTrans on Token
for Token in Tokens:
UTrans = kjSet.NewSet( [] )
for MState in MemberStates:
# if MState has a transition on Token, include
# EMap for the destination state
key = (MState, Token)
if self.StateTokenMap.has_key(key):
DStateTup = self.StateTokenMap[key]
if DStateTup[0][0] != MOVETOFLAG:
raise TypeError, "unknown map type"
for (DFlag, DState) in DStateTup:
for EDState in EMap[DState]:
kjSet.addMember(EDState, UTrans)
#endif
#endfor MState
# register UTrans as a new state if needed
UTState = Closure.NewSetState(TRANSFLAG, UTrans)
# record transition from
# ThisClosureState to UTState on Token
if DoNullMaps:
Closure.SetMap( ThisClosureState, Token, UTState)
else:
if not kjSet.Empty(UTrans):
Closure.SetMap( ThisClosureState, Token, UTState)
#endfor Token
ThisClosureState = ThisClosureState +1
#endwhile
return Closure
#enddef Eclosure
# add an set-marked state to self if not present
# uses self.States[s][1] as the set marking the state s
#
# only used by Eclosure above
#
def NewSetState(self, kind, InSet):
# return existing state if one is present that matches the set
LastState= self.maxState
# skip state 0 (successful final state)???
for State in range(1,LastState+1):
MarkSet = self.States[State][1]
if kjSet.Same(InSet,MarkSet):
return State # nonlocal
#endfor
# if not exited then allocate a new state
LastState = LastState + 1
self.States[LastState] = [ kind , InSet ]
self.maxState = LastState
return LastState
#enddef newSetState
#endclass CFSMachine
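
# Illustrative sketch (comments only, not executed): hand-building a tiny
# CFSMachine and taking its epsilon closure.  It relies only on the
# FSMachine API already exercised in this module (NewState, SetMap,
# initial_state, Eclosure), with NULLTOKEN as the epsilon marker.
#
#   nt = kjParser.nonterminal("S")
#   a  = (TERMFLAG, "a")
#   M  = CFSMachine(nt)
#   s1 = M.NewState(TRANSFLAG, ["one"])
#   s2 = M.NewState(TRANSFLAG, ["two"])
#   M.SetMap(M.initial_state, NULLTOKEN, s1)   # epsilon arc
#   M.SetMap(s1, a, s2)                        # arc on terminal "a"
#   D  = M.Eclosure(NULLTOKEN)
#   # D's initial closure state reaches a state directly on "a", because
#   # the epsilon arc has been folded into the closure.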
# Ruleset class, used to compute NFA and then DFA for
# parsing based on a list of rules.
#
class ruleset:
def __init__(self, StartNonterm, Rulelist):
# initialize the ruleset
self.StartNonterm = StartNonterm
self.Rules = Rulelist
# method to compute prefixes and First sets for nonterminals
def CompFirst(self):
# uses the special null production token NULLTOKEN
# snarfed directly from Aho+Ullman (terminals glossed)
First = kjSet.NewDG( [] )
# repeat the while loop until no change is made to First
done = 0
while not done:
done = 1 # assume we're done until a change is made to First
# iterate through all rules looking for a new arc to add
# indicating Terminal > possible first token derivation
#
for R in self.Rules:
GoalNonterm = R.Nonterm
Bodylength = len(R.Body)
# look through the body of the rule up to the token with
# no epsilon production (yet seen)
Bodyindex = 0
Processindex = 1
while Processindex:
# unless otherwise indicated below, don't go to next token
Processindex = 0
# if index is past end of body then record
# an epsilon production for this nonterminal
if Bodyindex >= Bodylength:
if not kjSet.HasArc(First, GoalNonterm, NULLTOKEN ):
kjSet.AddArc( First, GoalNonterm, NULLTOKEN )
done = 0 # change made to First
else:
# otherwise try to add firsts of this token
# to firsts of the Head of the rule.
Token = R.Body[Bodyindex]
(type, name) = Token
if type in (KEYFLAG,TERMFLAG):
# try to add this terminal to First for GoalNonterm
if not kjSet.HasArc(First, GoalNonterm, Token):
kjSet.AddArc( First, GoalNonterm, Token)
done = 0
elif type == NONTERMFLAG:
# try to add each First entry for nonterminal
# to First entry for GoalNonterm
for FToken in kjSet.Neighbors( First, Token ):
if not kjSet.HasArc(First, GoalNonterm, FToken):
kjSet.AddArc( First, GoalNonterm, FToken)
done = 0
# does this nonterminal have a known e production?
if kjSet.HasArc( First, Token, NULLTOKEN ):
# if so, process next token in rule
Processindex = 1
else:
raise TokenError, "unknown token type in rule body"
#endif
Bodyindex = Bodyindex + 1
#endwhile Processindex
#endfor R in self.Rules
#endwhile not done
self.First = First
#enddef CompFirst
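
    # Worked example: for the toy ruleset
    #     S >> A b        A >> a A        A >>    (empty body)
    # CompFirst records First(A) = { a, NULLTOKEN } (A derives epsilon)
    # and First(S) = { a, b } -- b enters First(S) because the leading A
    # may derive the empty string.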
# computing the Follow set for the ruleset
# the good news: I think it's correct.
# the bad news: It's slower than it needs to be for epsilon cases.
def CompFollow(self):
Follow = kjSet.NewDG( [] )
# put end marker on follow of start nonterminal
kjSet.AddArc(Follow, self.StartNonterm, kjParser.ENDOFFILETOKEN)
# now compute other follows using the rules;
# repeat the loop until no change to Follow.
done = 0
while not done:
done = 1 # assume done unless Follow changes
for R in self.Rules:
#print R
# work backwards in the rule body to
# avoid retesting for epsilon nonterminals
Bodylength = len(R.Body)
EpsilonTail = 1 # the tail of rule may expand to null
BodyIndex = Bodylength - 1
Last = 1 # loop starts at the last
from types import TupleType
while BodyIndex >= 0:
Token = R.Body[BodyIndex]
(Ttype,Tname) = Token
if Ttype in (KEYFLAG,TERMFLAG):
# keywords etc cancel epsilon tail, otherwise ignore
EpsilonTail = 0
elif Ttype == NONTERMFLAG:
# if the tail expands to epsilon, map
# follow for the goal nonterminal to this token
# and also follow for the tail nonterms
if EpsilonTail:
# add follow for goal
for FToken in kjSet.Neighbors(Follow,R.Nonterm):
if not kjSet.HasArc(Follow,Token,FToken):
kjSet.AddArc(Follow,Token,FToken)
#if type(FToken[0])==TupleType:
# raise ValueError, "bad FToken"+`FToken`
#print "new", Token, FToken
done = 0 # follow changed, loop again
# add follow for tail members
#for Index2 in range(BodyIndex+1, Bodylength):
# TailToken = R.Body[Index2]
# for FToken in kjSet.Neighbors(Follow,TailToken):
# if not kjSet.HasArc(Follow,Token,FToken):
# kjSet.AddArc(Follow,Token,FToken)
# done = 0
#endif EpsilonTail
# if we are not at the end use First set for next token
if not Last:
NextToken = R.Body[BodyIndex+1]
(NTtype, NTname) = NextToken
if NTtype in (KEYFLAG,TERMFLAG):
if not kjSet.HasArc(Follow,Token,NextToken):
kjSet.AddArc(Follow,Token,NextToken)
#print "next", Token, NextToken
done = 0
elif NTtype == NONTERMFLAG:
for FToken in kjSet.Neighbors(self.First, NextToken):
if FToken != NULLTOKEN:
if not kjSet.HasArc(Follow,Token,FToken):
kjSet.AddArc(Follow,Token,FToken)
#print "neighbor", Token, FToken
done = 0
else:
# next token expands to epsilon:
# add its follow, unless already done above
#if not EpsilonTail:
for FToken in kjSet.Neighbors(Follow,NextToken):
if not kjSet.HasArc(Follow,Token,FToken):
kjSet.AddArc(Follow,Token,FToken)
#print "epsilon", Token, FToken
done = 0
else:
raise TokenError, "unknown token type in rule body"
#endif not Last
# finally, check whether next iteration has epsilon tail
if not kjSet.HasArc(self.First, Token, NULLTOKEN):
EpsilonTail = 0
else:
raise TokenError, "unknown token type in rule body"
BodyIndex = BodyIndex - 1
Last = 0 # no longer at the last token of the rule
#endwhile
#endfor
#endwhile
self.Follow = Follow
#enddef CompFollow
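
    # Worked example (continuing the toy ruleset above): Follow(S) holds
    # the end-of-file token, and Follow(A) = { b } because b appears
    # directly after A in the body of S >> A b (the trailing A of
    # A >> a A just feeds Follow(A) back into itself).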
def DumpFirstFollow(self):
First = self.First
Follow = self.Follow
print "First:"
for key in First.keys():
name = key[1]
print name," :: ",
for (flag2,name2) in First[key].keys():
print name2,", ",
print
print "Follow:"
for key in Follow.keys():
name = key[1]
print name," :: ",
for (flag2,name2) in Follow[key].keys():
print name2,", ",
print
# computing the "first" of the tail of a rule followed by an
# optional terminal
# doesn't include NULLTOKEN
# requires self.First to be computed
#
def FirstOfTail(self, Rule, TailIndex, Token=None):
Result = kjSet.NewSet( [] )
# go through all tokens in rule tail so long as there is a
# null derivation for the remainder
Nullprefix = 1
BodyLength = len(Rule.Body)
ThisIndex = TailIndex
while Nullprefix and ThisIndex < BodyLength:
RToken = Rule.Body[ThisIndex]
(RTtype, RTname) = RToken
if RTtype == NONTERMFLAG:
for FToken in kjSet.Neighbors(self.First, RToken):
if FToken != NULLTOKEN:
kjSet.addMember(FToken, Result)
#endfor
# check whether this symbol might have a null production
if not kjSet.HasArc(self.First, RToken, NULLTOKEN):
Nullprefix = 0
elif RTtype in [KEYFLAG, TERMFLAG]:
kjSet.addMember(RToken, Result)
Nullprefix = 0
else:
raise TokenError, "unknown token type in rule body"
ThisIndex = ThisIndex + 1
#endwhile
# add the optional token if given and Nullprefix still set
if Nullprefix and Token != None:
kjSet.addMember(Token, Result)
return Result
#enddef FirstOfTail
# compute an SLR NFA for the ruleset with states for each SLR "item"
# and transitions, eg:
# X > .AB
# on A maps to X > A.B
# on epsilon maps to A > .ZC
# and A > .WK
# an item is a pair (rulenumber, bodyposition)
# where body position 0 is interpreted to point before the
# beginning of the body.
#
# SLR = "simple LR" in Aho+Ullman terminology
#
def CompSLRNFA(self):
NFA = CFSMachine(self.StartNonterm)
Nrules = len(self.Rules)
itemStateMap = {}
for Ruleindex in range(0,Nrules):
Rule = self.Rules[Ruleindex]
# make an item for each "dot" position in the body
for DotPos in range(0, len(Rule.Body) + 1):
item = (Ruleindex, DotPos)
itemState = NFA.NewState(TRANSFLAG, [item])
itemStateMap[item] = itemState
#endfor DotPos
#endfor Ruleindex
# now that the states are initialized
# compute transitions except for the last item of a rule
# (which has none)
for Ruleindex in range(0,Nrules):
Rule = self.Rules[Ruleindex]
for DotPos in range(0, len(Rule.Body)):
item = (Ruleindex, DotPos)
CurrentToken = Rule.Body[DotPos]
ThisState = itemStateMap[item]
NextState = itemStateMap[ (Ruleindex, DotPos + 1) ]
NFA.SetMap( ThisState, CurrentToken, NextState )
# if the current token is a nonterminal
                # add epsilon transitions to the first item of any
# rule that derives this nonterminal
(CTtype, CTname) = CurrentToken
if CTtype == NONTERMFLAG:
for Rule2index in range(0,Nrules):
Rule2 = self.Rules[Rule2index]
Head = Rule2.Nonterm
if Head == CurrentToken:
NextState = itemStateMap[( Rule2index, 0 )]
NFA.SetMap( ThisState, NULLTOKEN, NextState )
#endfor Rule2index
#endif CTtype == NONTERMFLAG
#endfor DotPos
#endfor Ruleindex
# must handle the initial state properly here!
# Make a dummy state with e-transitions to all first items
# for rules that derive the initial nonterminal
ThisState = NFA.initial_state
GoodStartingPlace = None
for Ruleindex in range(0,Nrules):
Rule = self.Rules[Ruleindex]
Head = Rule.Nonterm
if Head == self.StartNonterm:
GoodStartingPlace= (Ruleindex, 0)
NextState = itemStateMap[ GoodStartingPlace ]
NFA.SetMap( ThisState, NULLTOKEN, NextState )
# fix the NFA.States entry
if GoodStartingPlace == None:
raise NotSLRError, "No derivation for root nonterminal."
NFA.States[ NFA.initial_state ] = \
[ 'transient', GoodStartingPlace ]
self.SLRNFA = NFA
#enddef CompSLRNFA
# dump an item
def ItemDump(self, item):
(ruleindex, position) = item
Rule = self.Rules[ruleindex]
print Rule.Nonterm[1],' >> ',
for bindex in range(0, len(Rule.Body)):
if position == bindex:
print " (*) ",
print Rule.Body[bindex][1],
if position == len(Rule.Body):
print " (*) "
else:
print
# utility function -- returns true if an item is a final item
def SLRItemIsFinal(self, item):
(ruleindex, position) = item
Rule = self.Rules[ruleindex]
if position == len(Rule.Body):
return 1
else:
return 0
# dump the NFA
def DumpSLRNFA(self):
NFA = self.SLRNFA
print "root: ", NFA.root_nonTerminal
for key in NFA.StateTokenMap.keys():
map = NFA.StateTokenMap[key]
(fromstate, token) = key
fromitem = NFA.States[ fromstate ][1]
self.ItemDump(fromitem)
print " on ", token[1], " maps "
for Tostate in map:
Toitem = NFA.States[Tostate][1]
print " ",
self.ItemDump(Toitem)
# compute DFA for ruleset by computing the E-closure of the
# NFA
def CompDFA(self):
self.DFA = self.SLRNFA.Eclosure(NULLTOKEN)
def DumpDFAsets(self):
DFA = self.DFA
print "root: ", DFA.root_nonTerminal
for State in range(1, len(DFA.States) ):
self.DumpItemSet(State)
def DumpItemSet(self,State):
DFA = self.DFA
NFA = self.SLRNFA
print
print "STATE ", State, " *******"
fromNFAindices = kjSet.get_elts(DFA.States[State][1])
for NFAindex in fromNFAindices:
item = NFA.States[NFAindex][1]
print " ", NFAindex, ": ",
self.ItemDump(item)
# this function completes the computation of an SLR DFA
# by adding reduction states for each DFA state S containing
# item H > B.
# which reduces rule H > B
# for each token T in Follow of H.
# if S already has a transition for T then there is a conflict!
#
# assumes DFA and SLRNFA and Follow have been computed.
#
def SLRFixDFA(self):
DFA = self.DFA
NFA = self.SLRNFA
# look through the states (except 0=success) of the DFA
# initially don't add any new states, just record
# actions to be done
# uses convention that 0 is successful final state
# ToDo is a dictionary which maps
# (State, Token) to a item to reduce
ToDo = {}
Error = None
for State in range(1, len(DFA.States) ):
# look for a final item for a rule in this state
fromNFAindices = kjSet.get_elts(DFA.States[State][1])
for NFAindex in fromNFAindices:
item = NFA.States[NFAindex][1]
# if the item is final remember to do the reductions...
if self.SLRItemIsFinal(item):
(ruleindex, position) = item
Rule = self.Rules[ruleindex]
Head = Rule.Nonterm
Following = kjSet.Neighbors( self.Follow, Head )
for Token in Following:
key = (State, Token)
if not ToDo.has_key(key):
ToDo[ key ] = item
else:
# it might be okay if the items are identical?
item2 = ToDo[key]
if item != item2:
print "reduce/reduce conflict on ",key
self.ItemDump(item)
self.ItemDump(item2)
Error = " apparent reduce/reduce conflict"
#endif
#endfor
#endif
#endfor NFAindex
#endfor State
# for each (State,Token) pair which indicates a reduction
# record the reduction UNLESS the map is already set for the pair
for key in ToDo.keys():
(State,Token) = key
item = ToDo[key]
(rulenum, dotpos) = item
ExistingMap = DFA.map( State, Token )
if ExistingMap[0] == NOMATCHFLAG:
DFA.SetReduction( State, Token, rulenum )
else:
print "apparent shift/reduce conflict"
print "reduction: ", key, ": "
self.ItemDump(item)
print "existing map ", ExistingMap
Error = " apparent shift/reduce conflict"
#endfor
if Error and ABORTONERROR:
raise NotSLRError, Error
#enddef SLRfixDFA()
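
    # Hand-worked example of the shift/reduce diagnostic above: for the
    # ambiguous grammar  E >> E + E ,  E >> ident  the DFA state reached
    # after reading  E + E  contains both the final item  E >> E + E .
    # and the item  E >> E . + E ; since "+" is in Follow(E), the
    # requested reduction collides with the existing shift map on "+",
    # and SLRFixDFA reports an apparent shift/reduce conflict.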
# do complete SLR DFA creation starting after initialization
def DoSLRGeneration(self):
self.CompFirst()
self.CompFollow()
self.CompSLRNFA()
self.CompDFA()
self.SLRFixDFA()
#endclass ruleset
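
# Illustrative sketch (comments only, not executed): building a ruleset
# for the toy grammar used in the worked examples above and generating
# its SLR DFA.  Keyword registration goes through a kjParser
# LexDictionary, just as in ruleGrammar() below, which is the complete
# working example.
#
#   LexD = kjParser.LexDictionary()
#   a = LexD.keyword("a")
#   b = LexD.keyword("b")
#   S = kjParser.nonterminal("S")
#   A = kjParser.nonterminal("A")
#   rules = [ kjParser.ParseRule(S, [A, b]),
#             kjParser.ParseRule(A, [a, A]),
#             kjParser.ParseRule(A, []) ]
#   RS = ruleset(S, rules)
#   RS.DoSLRGeneration()     # computes First, Follow, the SLR NFA and DFA
#   RS.DumpFirstFollow()     # prints the sets worked out by hand above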
################ the following are interpretation functions
################ used by RULEGRAM meta grammar
# some constants used here
COMMENTFORM = "##.*\n"
RSKEY = "@R"
COLKEY = "::"
LTKEY = ">>"
IDNAME = "ident"
# an identifier in the meta grammar is any nonwhite string
# except the keywords @R :: >> or comment flag ##
IDFORM = "[^" + string.whitespace + "]+"
# for identifiers simply return the string
def IdentFun(string):
return string
# RootReduction should receive list of form
# [ nontermtoken, keyword COLKEY, RuleList ]
def RootReduction(list, ObjectGram):
if len(list) != 3 or list[1] != COLKEY:
raise FlowError, "unexpected metagrammar root reduction"
return (list[0], list[2])
# NullRuleList should receive list of form
# []
def NullRuleList(list, ObjectGram):
if list != []:
raise FlowError, "unexpected null RuleList form"
return []
# FullRuleList should receive list of form
# [ Rule, RuleList ]
def FullRuleList(list, ObjectGram):
if type(list) != type([]) or len(list)!=2:
raise FlowError, "unexpected full RuleList form"
NewRule = list[0]
OldRules = list[1]
return [NewRule] + OldRules
# InterpRule should receive list of form
# [keyword RSKEY,
# RuleNameStr,
# keyword COLKEY,
# Nontermtoken,
# keyword LTKEY,
# Bodylist]
#
def InterpRule(list, ObjectGram):
# check keywords:
if len(list) != 6 or \
list[0] != RSKEY or \
list[2] != COLKEY or \
list[4] != LTKEY:
raise FlowError, "unexpected meta rule reduction form"
ruleName = list[1]
ruleNonterm = list[3]
ruleBody = list[5]
    # upcase the representation of keywords if needed
if not ObjectGram.LexD.isCaseSensitive():
for i in range(0,len(ruleBody)):
(flag, name) = ruleBody[i]
if flag == KEYFLAG:
ruleBody[i] = (KEYFLAG, string.upper(name))
elif not flag in (TERMFLAG, NONTERMFLAG):
raise FlowError, "unexpected rule body member"
rule = kjParser.ParseRule( ruleNonterm, ruleBody )
rule.Name = ruleName
return rule
# InterpRuleName should receive
# [ string ]
def InterpRuleName(list, ObjectGram):
#print list
# add error checking?
return list[0]
# InterpNonTerm should receive
# [ string ]
def InterpNonTerm(list, ObjectGram):
#print list
if type(list)!=type([]) or len(list)!=1:
raise FlowError, "unexpected rulename form"
Name = list[0]
# determine whether this is a valid nonterminal
if not ObjectGram.NonTermDict.has_key(Name):
#print Name
raise TokenError, "LHS of Rule must be nonterminal: "+Name
return ObjectGram.NonTermDict[Name]
# NullBody should receive []
def NullBody(list, ObjectGram):
#print list
if list != []:
raise FlowError, "unexpected null Body form"
return []
# FullBody should receive
# [ string, Bodylist]
# must determine whether the string represents
# a keyword, a nonterminal, or a terminal of the object
# grammar.
# returns (KEYFLAG, string) (TERMFLAG, string) or
# (NONTERMFLAG, string) respectively
#
def FullBody(list,ObjectGram):
#print list
if type(list)!=type([]) or len(list)!=2:
raise FlowError, "unexpected body form"
Name = list[0]
# Does the Name rep a nonterm, keyword or term
# of the object grammar (in that order).
if ObjectGram.NonTermDict.has_key(Name):
kind = NONTERMFLAG
elif ObjectGram.LexD.keywordmap.has_key(Name):
kind = KEYFLAG
elif ObjectGram.TermDict.has_key(Name):
kind = TERMFLAG
else:
raise TokenError, "Rule body contains unregistered string: "+Name
restOfBody = list[1]
return [(kind, Name)] + restOfBody
# function to generate a grammar for parsing grammar rules
#
def ruleGrammar():
LexD = kjParser.LexDictionary()
# use SQL/Ansi style comments
LexD.comment( COMMENTFORM )
# declare keywords
RStart = LexD.keyword( RSKEY )
TwoColons = LexD.keyword( COLKEY )
LeadsTo = LexD.keyword( LTKEY )
# declare terminals
ident = LexD.terminal(IDNAME, IDFORM, IdentFun )
# declare nonterminals
Root = kjParser.nonterminal("Root")
Rulelist = kjParser.nonterminal("RuleList")
Rule = kjParser.nonterminal("Rule")
RuleName = kjParser.nonterminal("RuleName")
NonTerm = kjParser.nonterminal("NonTerm")
Body = kjParser.nonterminal("Body")
# declare rules
# Root >> NonTerm :: Rulelist
InitRule = kjParser.ParseRule( Root, \
[NonTerm, TwoColons, Rulelist], RootReduction )
# Rulelist >>
RLNull = kjParser.ParseRule( Rulelist, [], NullRuleList)
# Rulelist >> Rule Rulelist
RLFull = kjParser.ParseRule( Rulelist, [Rule,Rulelist], FullRuleList)
    # Rule >> @R RuleName :: NonTerm >> Body
RuleR = kjParser.ParseRule( Rule, \
[RStart, RuleName, TwoColons, NonTerm, LeadsTo, Body],\
InterpRule)
# Rulename >> ident
RuleNameR = kjParser.ParseRule( RuleName, [ident], InterpRuleName)
# NonTerm >> ident
NonTermR = kjParser.ParseRule( NonTerm, [ident], InterpNonTerm)
# Body >>
BodyNull = kjParser.ParseRule( Body, [], NullBody)
# Body >> ident Body
BodyFull = kjParser.ParseRule( Body, [ident,Body], FullBody)
# declare Rules list and Associated Name dictionary
Rules = [RLNull, RLFull, RuleR, RuleNameR, NonTermR,\
BodyNull, BodyFull, InitRule]
RuleDict = \
{ "RLNull":0, "RLFull":1, "RuleR":2, "RuleNameR":3, \
"NonTermR":4, "BodyNull":5, "BodyFull":6 , "InitRule":7 }
# make the RuleSet and compute the associate DFA
RuleSet = ruleset( Root, Rules )
RuleSet.DoSLRGeneration()
# construct the Grammar object
Result = kjParser.Grammar( LexD, RuleSet.DFA, Rules, RuleDict )
return Result
#enddef RuleGrammar()
# this is the rule grammar object for
# parsing
RULEGRAM = ruleGrammar()
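
# For reference, RULEGRAM parses grammar declarations written in this
# textual form ("##" starts a comment, "@R" introduces a named rule, and
# the identifier before the first "::" names the root nonterminal):
#
#   ## toy grammar declaration
#   statement ::
#     @R printRule :: statement >> print expression
#     @R identRule :: expression >> ident
#
# Each name appearing in a rule body is classified against the object
# grammar -- nonterminal, keyword/punctuation, or terminal -- by
# FullBody() above.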
# a derived grammar class (object oriented programming is cool!)
# this is a compilable grammar for automatic parser generation.
#
class CGrammar(kjParser.Grammar):
# initialization is handled by the base class
# insert a white separated list of keywords into the LexD
# THIS SHOULD CHECK FOR KEYWORD/NONTERMINAL/PUNCT NAME
# COLLISIONS (BUT DOESN'T YET).
def Keywords(self, Stringofkeys):
keywordlist = string.split(Stringofkeys)
for keyword in keywordlist:
self.LexD.keyword( keyword )
# insert a string of punctuations into the LexD
def punct(self, Stringofpuncts):
for p in Stringofpuncts:
self.LexD.punctuation(p)
# register a list of regular expression strings
# to represent comments in LexD
def comments(self, listOfCommentStrings):
for str in listOfCommentStrings:
self.LexD.comment(str)
# register a white separated list of nonterminal strings
def Nonterms(self, StringofNonterms):
nonTermlist = string.split(StringofNonterms)
for NonTerm in nonTermlist:
self.NonTermDict[NonTerm] = kjParser.nonterminal(NonTerm)
# initialize or add more rules to the RuleString
def Declarerules(self, StringWithRules):
self.RuleString = self.RuleString + "\n" + StringWithRules
# The compilation function assumes
# NonTermDict
# RuleString
# LexD
# TermDict
# have all been set up properly
# (at least if the default MetaGrammar is used).
# On successful completion it will set up
# DFA
# RuleL
# RuleNameToIndex
def Compile(self, MetaGrammar=RULEGRAM):
# the following should return a list of rules
# with punctuations of self.LexD interpreted as trivial keywords
        # keywords of self.LexD interpreted as keywords
# and nonterminals registered in NonTermDict interpreted as
# nonterms.
        # ParseResult should be of form (rootNT, RuleL)
ParseResult = MetaGrammar.DoParse1( self.RuleString, self )
(RootNonterm, Rulelist) = ParseResult
# make a ruleset and compute its DFA
RuleS = ruleset( RootNonterm, Rulelist )
RuleS.DoSLRGeneration()
# make the rulename to index map to allow future bindings
for i in range(0,len(Rulelist)):
Rule = Rulelist[i]
self.RuleNameToIndex[ Rule.Name ] = i
# fill in the blanks
self.DFA = RuleS.DFA
self.RuleL = Rulelist
# FOR DEBUG AND TESTING
self.Ruleset = RuleS
# DON'T clean up the grammar (misc structures are used)
# in future bindings
#enddef Compile
# Write a reconstructable representation for this grammar
# to a file
#EXCEPT:
# - rule associations to reduction functions
# will be lost (must be reset elsewhere)
# - terminals in the lexical dictionary
# will not be initialized
#
# IND is used for indentation, should be whitespace (add check!)
#
# FName if given will cause the reconstruction to be placed
# inside a function `FName`+"()" returning the grammar object
#
# NOTE: this function violates information hiding principles;
# in particular it "knows" the guts of the FSM and LexD classes
#
def Reconstruct(self, VarName, Tofile, FName=None, indent=""):
Reconstruction = codeReconstruct(VarName, Tofile, self, FName, indent)
GrammarDumpSequence(Reconstruction)
#enddef Reconstruct
# marshalling of a grammar to a file
def MarshalDump(self, Tofile):
Reconstruction = marshalReconstruct(self, Tofile)
GrammarDumpSequence(Reconstruction)
#endclass CGrammar
# general procedure for different types of archiving for grammars
def GrammarDumpSequence(ReconstructObj):
# assume an initialized Reconstruct Object with appropriate grammar etc.
# put the lexical part
ReconstructObj.PutLex()
# put the rules
ReconstructObj.PutRules()
# put transitions
ReconstructObj.PutTransitions()
# finish up
ReconstructObj.Cleanup()
# function to create a "null CGrammar"
def NullCGrammar():
return CGrammar(None,None,None,{})
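
# --- Illustrative sketch (not part of the original source) ----------------
# A minimal end-to-end use of CGrammar, mirroring the RUNTESTS section at the
# bottom of this module.  The terminal regexp, rule names and the reduction
# lambdas are placeholders; Addterm and Bind come from the kjParser.Grammar
# base class.  Reduction functions are assumed to take (valuelist, context).
def _cgrammar_usage_sketch():
    G = NullCGrammar()
    G.Addterm("id", "id", lambda s: s)   # terminal: name, regexp, interp fn
    G.Nonterms("E T")
    G.Keywords("begin end")
    G.punct("+*()")
    G.comments(["--.*\n"])
    # rule string format: root nonterminal "::", then "@R name :: NT >> body"
    G.Declarerules("""
       E ::
       @R Whole :: E >> begin T end
       @R Single :: T >> id
    """)
    G.Compile()                            # sets DFA, RuleL, RuleNameToIndex
    G.Bind("Whole", lambda args, ctx: args)   # bind reductions after Compile
    G.Bind("Single", lambda args, ctx: args)
    return G
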
# utility classes -- Grammar reconstruction objects
# encapsulate the process of grammar archiving.
#
class Reconstruct:
# this "virtual class" is only for common behaviors of subclasses.
def MakeTokenArchives(self):
# make a list of all tokens and
# initialize token > int dictionary
keys = self.Gram.DFA.StateTokenMap.keys()
tokenToInt = {}
tokenSet = kjSet.NewSet([])
for k in keys:
kjSet.addMember(k[1], tokenSet)
tokens = kjSet.get_elts(tokenSet)
for i in range(0,len(tokens)):
tokenToInt[ tokens[i] ] = i
self.keys = keys
self.tokens = tokens # global sub
self.tokInt = tokenToInt # global sub
# grammar reconstruction to a file
class codeReconstruct(Reconstruct):
def __init__(self, VarName, Tofile, Grammar, FName=None, indent =""):
# do global subs for each of these
self.Var = VarName
self.File = Tofile
self.FName = FName
self.Gram = Grammar
# put the reconstruction in a function if FName is given
if FName != None:
Tofile.write("\n\n")
Tofile.write(indent+"def "+FName+"():\n")
IND = indent+" "
else:
IND = indent
self.I = IND # global sub!
Tofile.write("\n\n")
Tofile.write(IND+"# ******************************BEGIN RECONSTRUCTION\n")
Tofile.write(IND+"# Python declaration of Grammar variable "+VarName+".\n")
Tofile.write(IND+"# automatically generated by module "+PMODULE+".\n")
Tofile.write(IND+"# Altering this sequence by hand will probably\n")
Tofile.write(IND+"# leave it unusable.\n")
Tofile.write(IND+"#\n")
Tofile.write(IND+"import "+PMODULE+"\n\n")
Tofile.write(IND+"# variable declaration:\n")
Tofile.write(IND+VarName+"= "+PMODULE+".NullGrammar()\n\n")
# make self.keys list of dfa keys,
# self.tokens list of grammar tokens,
# self.tokInt inverted dictionary for self.tokens
self.MakeTokenArchives()
Tofile.write("\n\n"+IND+"# case sensitivity behavior for keywords.\n")
if self.Gram.LexD.isCaseSensitive():
Tofile.write(IND+VarName+".SetCaseSensitivity(1)\n")
else:
Tofile.write(IND+VarName+".SetCaseSensitivity(0)\n")
#enddef __init__
def PutLex(self):
IND = self.I
Tofile = self.File
VarName = self.Var
LexD = self.Gram.LexD
tokens = self.tokens
Tofile.write("\n\n"+IND+"# declaration of lexical dictionary.\n")
Tofile.write(IND+"# EXCEPT FOR TERMINALS\n")
Tofile.write(IND+VarName+".LexD.punctuationlist = ")
Tofile.write(`LexD.punctuationlist`+"\n")
Tofile.write(IND+"# now comment patterns\n")
for comment in LexD.commentstrings:
Tofile.write(IND+VarName+".LexD.comment("+`comment`+")\n")
Tofile.write(IND+"# now define tokens\n")
for i in range(0,len(tokens)):
tok = tokens[i]
(kind, name) = tok
if kind == TERMFLAG:
# put warning at end!
# nonterminal not installed in lexical dictionary here!
Tofile.write(IND+VarName+".IndexToToken["+`i`+"] = ")
Tofile.write(PMODULE+".termrep("+`name`+")\n")
elif kind == KEYFLAG:
Tofile.write(IND+VarName+".IndexToToken["+`i`+"] = ")
Tofile.write(VarName+".LexD.keyword("+`name`+")\n")
elif kind == NONTERMFLAG:
Tofile.write(IND+VarName+".IndexToToken["+`i`+"] = ")
Tofile.write(PMODULE+".nonterminal("+`name`+")\n")
else:
raise FlowError, "unknown token type"
#enddef PutLex
def PutRules(self):
IND = self.I
VarName = self.Var
Rules = self.Gram.RuleL
Tofile = self.File
Root = self.Gram.DFA.root_nonTerminal
Tofile.write("\n\n"+IND+"# declaration of rule list with names.\n")
Tofile.write(IND+"# EXCEPT FOR INTERP FUNCTIONS\n")
nrules = len(Rules)
Tofile.write(IND+VarName+".RuleL = [None] * "+`nrules`+"\n")
for i in range(0,nrules):
# put warning at end:
# rule reduction function not initialized here!
rule = Rules[i]
name = rule.Name
Tofile.write(IND+"rule = "+`rule`+"\n")
Tofile.write(IND+"name = "+`name`+"\n")
Tofile.write(IND+"rule.Name = name\n")
Tofile.write(IND+VarName+".RuleL["+`i`+"] = rule\n")
Tofile.write(IND+VarName+".RuleNameToIndex[name] = "+`i`+"\n")
Tofile.write("\n\n"+IND+"# DFA root nonterminal.\n")
Tofile.write(IND+VarName+".DFA.root_nonTerminal =")
Tofile.write(`Root`+"\n")
#enddef PutRules
def PutTransitions(self):
IND = self.I
Tofile = self.File
VarName = self.Var
maxState = self.Gram.DFA.maxState
tokenToInt = self.tokInt
StateTokenMap = self.Gram.DFA.StateTokenMap
keys = self.keys
Tofile.write("\n\n"+IND+"# DFA state declarations.\n")
for state in range(1, maxState+1):
Tofile.write(IND+VarName+".DFA.States["+`state`+"] = ")
Tofile.write('['+`TRANSFLAG`+']\n')
Tofile.write(IND+VarName+".DFA.maxState = "+`maxState`+"\n")
Tofile.write("\n\n"+IND+"# DFA transition declarations.\n")
for key in keys:
(fromState, TokenRep) = key
TokenIndex = tokenToInt[TokenRep]
TokenArg = VarName+".IndexToToken["+`TokenIndex`+"]"
TMap = StateTokenMap[key]
TMaptype = TMap[0][0]
if TMaptype == REDUCEFLAG:
# reduction
rulenum = TMap[0][1]
Args = "("+`fromState`+","+TokenArg+","+`rulenum`+")"
Tofile.write(IND+VarName+".DFA.SetReduction"+Args+"\n")
elif TMaptype == MOVETOFLAG:
# MoveTo
Args = "("+`fromState`+","+TokenArg+","+`TMap[0][1]`+")"
Tofile.write(IND+VarName+".DFA.SetMap"+Args+"\n")
else:
raise FlowError, "unexpected else (2)"
#enddef
def Cleanup(self):
Tofile = self.File
RuleL = self.Gram.RuleL
tokens = self.tokens
VarName = self.Var
IND = self.I
FName = self.FName
Tofile.write("\n\n"+IND+"# Clean up the grammar.\n")
Tofile.write(IND+VarName+".CleanUp()\n")
# if the Fname was given return the grammar as function result
if FName != None:
Tofile.write("\n\n"+IND+"# return the grammar.\n")
Tofile.write(IND+"return "+VarName+"\n")
Tofile.write("\n\n"+IND+"# WARNINGS ****************************** \n")
Tofile.write(IND+"# You must bind the following rule names \n")
Tofile.write(IND+"# to reduction interpretation functions \n")
for R in RuleL:
Tofile.write(IND+"# "+VarName+".Bind("+`R.Name`+", ??function??)\n")
Tofile.write(IND+"#(last rule)\n")
Tofile.write("\n\n"+IND+"# WARNINGS ****************************** \n")
Tofile.write(IND+"# You must bind the following terminals \n")
Tofile.write(IND+"# to regular expressions and interpretation functions \n")
warningPrinted = 0
for tok in tokens:
(kind, name) = tok
if kind == TERMFLAG and tok != ENDOFFILETOKEN:
Tofile.write(IND+"# "+VarName+\
".Addterm("+`name`+", ??regularExp??, ??function??)\n")
warningPrinted = 1
if not warningPrinted:
Tofile.write(IND+"# ***NONE** \n")
Tofile.write(IND+"#(last terminal)\n")
Tofile.write(IND+"# ******************************END RECONSTRUCTION\n")
#enddef
#endclass
# reconstruction using marshalling to a file
# encodes internal structures for grammar using marshal-able
# objects. Final marshalling to the file is done at CleanUp()
# storing one big tuple.
#
class marshalReconstruct(Reconstruct):
def __init__(self, Grammar, Tofile):
self.Gram = Grammar
self.File = Tofile
# should archive self.tokens structure
self.MakeTokenArchives()
# archive this
self.CaseSensitivity = Grammar.LexD.isCaseSensitive()
def PutLex(self):
LexD = self.Gram.LexD
# archive these
self.punct = LexD.punctuationlist
self.comments = LexD.commentstrings
def PutRules(self):
# archive this
self.Root = self.Gram.DFA.root_nonTerminal
# make a list of tuples that can be used with
# rule = apply(ParseRule, tuple[1])
# rule.Name = tuple[0]
Rules = self.Gram.RuleL
nrules = len(Rules)
RuleTuples = [None] * nrules
for i in range(nrules):
rule = Rules[i]
RuleTuples[i] = (rule.Name, rule.components())
#archive this
self.RuleTups = RuleTuples
def PutTransitions(self):
keys = self.keys
tokenToInt = self.tokInt
StateTokenMap = self.Gram.DFA.StateTokenMap
# archive this
self.MaxStates = self.Gram.DFA.maxState
# create two lists,
# one for reductions with contents (fromState, tokennumber, rulenum)
# one for movetos with contents (fromstate, tokennumber, tostate)
# (note: token number not token itself to allow sharing)
# to allow arbitrary growing, first use dicts:
reductDict = {}
nreducts = 0
moveToDict = {}
nmoveTos = 0
for key in self.keys:
(fromState, TokenRep) = key
TokenIndex = tokenToInt[TokenRep]
TMap = StateTokenMap[key]
TMaptype = TMap[0][0]
if TMaptype == REDUCEFLAG:
rulenum = TMap[0][1]
reductDict[nreducts] = (fromState, TokenIndex, rulenum)
nreducts = nreducts + 1
elif TMaptype == MOVETOFLAG:
ToState = TMap[0][1]
moveToDict[nmoveTos] = (fromState, TokenIndex, ToState)
nmoveTos = nmoveTos + 1
else:
raise FlowError, "unexpected else"
#endfor
# translate dicts to lists
reducts = [None] * nreducts
for i in range(nreducts):
reducts[i] = reductDict[i]
moveTos = [None] * nmoveTos
for i in range(nmoveTos):
moveTos[i] = moveToDict[i]
# archive these
self.reducts = reducts
self.moveTos = moveTos
# this is the function that does the marshalling
def Cleanup(self):
import marshal
# make the big list to marshal
BigList = [None] * 9
BigList[0] = self.tokens
BigList[1] = self.punct
BigList[2] = self.comments
BigList[3] = self.RuleTups
BigList[4] = self.MaxStates
BigList[5] = self.reducts
BigList[6] = self.moveTos
BigList[7] = self.Root
BigList[8] = self.CaseSensitivity
# dump the big list to the file
marshal.dump( BigList, self.File )
#end class
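
# --- Illustrative sketch (not part of the original source) ----------------
# Archiving a compiled grammar two ways: as regenerated Python source via
# Reconstruct (codeReconstruct) and as one big marshalled list via
# MarshalDump (marshalReconstruct).  File names are placeholders, and per the
# warnings emitted by Cleanup the regenerated grammar still needs its
# reduction functions and terminals re-bound.
def _archive_grammar_sketch(compiled_grammar):
    # 1) emit Python code that rebuilds the grammar inside GRAMGEN()
    src = open("gramgen.py", "w")
    compiled_grammar.Reconstruct("GRAM", src, FName="GRAMGEN")
    src.close()
    # 2) marshal the internal structures to a binary file
    bin = open("gram.mar", "wb")
    compiled_grammar.MarshalDump(bin)
    bin.close()
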
#######################testing stuff
if RUNTESTS:
def echo(x): return x
# simple grammar stolen from a text
LD0 = kjParser.LexDictionary()
id = LD0.terminal("id","id",echo)
plus = LD0.punctuation("+")
star = LD0.punctuation("*")
oppar = LD0.punctuation("(")
clpar = LD0.punctuation(")")
equals = LD0.punctuation("=")
E = kjParser.nonterminal("E")
T = kjParser.nonterminal("T")
Tp = kjParser.nonterminal("Tp")
Ep = kjParser.nonterminal("Ep")
F = kjParser.nonterminal("F")
rule1 = kjParser.ParseRule( E, [ T, Ep ] )
rule2 = kjParser.ParseRule( Ep, [ plus, T, Ep ] )
rule3 = kjParser.ParseRule( Ep, [ ] )
rule4 = kjParser.ParseRule( T, [ F, Tp ] )
rule5 = kjParser.ParseRule( Tp, [ star, F, Tp ] )
rule6 = kjParser.ParseRule( Tp, [ ] )
rule7 = kjParser.ParseRule( F, [ oppar, E, clpar ] )
rule8 = kjParser.ParseRule( F, [ id ] )
rl0 = [ rule1, rule2, rule3, rule4, rule5, rule6, rule7,rule8]
rs0 = ruleset(E, rl0)
rs0.CompFirst()
Firstpairs = kjSet.GetPairs(rs0.First)
rs0.CompFollow()
Followpairs = kjSet.GetPairs(rs0.Follow)
rs0.CompSLRNFA()
NFA0 = rs0.SLRNFA
rs0.CompDFA()
rs0.SLRFixDFA()
DFA0 = rs0.DFA
class dummy: pass
ttt0 = dummy()
def TESTDFA( STRING , ttt, DFA, Rulelist, DOREDUCTIONS = 1):
ttt.STRING = STRING
#ttt.List = kjParser.LexList(LD0, ttt.STRING)
ttt.Stream = kjParser.LexStringWalker( ttt.STRING, LD0 )
ttt.Stack = {-1:0}# Walkers.SimpleStack()
ttt.ParseObj = kjParser.ParserObj( Rulelist, \
ttt.Stream, DFA, ttt.Stack,DOREDUCTIONS)
ttt.RESULT = ttt.ParseObj.GO()
#ttt.Stack.Dump(10)
return ttt.RESULT
def TESTDFA0( STRING , DOREDUCTIONS = 1):
return TESTDFA( STRING, ttt0, DFA0, rl0, DOREDUCTIONS )
TESTDFA0( " id + id * id ")
# an even simpler grammar
S = kjParser.nonterminal("S")
M = kjParser.nonterminal("M")
A = kjParser.nonterminal("A")
rr1 = kjParser.ParseRule( S, [M] )
#rr2 = kjParser.ParseRule( A, [A, plus, M])
#rr3 = kjParser.ParseRule( A, [M], echo)
#rr4 = kjParser.ParseRule( M, [M, star, M])
rr5 = kjParser.ParseRule( M, [oppar, M, clpar])
rr6 = kjParser.ParseRule( M, [id])
rl1 = [rr1,rr5,rr6]
rs1 = ruleset(S, rl1)
rs1.CompFirst()
rs1.CompFollow()
rs1.CompSLRNFA()
rs1.CompDFA()
rs1.SLRFixDFA()
DFA1 = rs1.DFA
ttt1=dummy()
def TESTDFA1( STRING , DOREDUCTIONS = 1):
return TESTDFA( STRING, ttt1, DFA1, rl1, DOREDUCTIONS )
X = kjParser.nonterminal("X")
Y = kjParser.nonterminal("Y")
RX = kjParser.ParseRule( X, [ oppar, Y, clpar ] )
RY = kjParser.ParseRule( Y, [] )
rl2 = [RX,RY]
rs2 = ruleset(X, rl2)
rs2.CompFirst()
rs2.CompFollow()
rs2.CompSLRNFA()
rs2.CompDFA()
rs2.SLRFixDFA()
DFA2 = rs2.DFA
ttt2 = dummy()
def TESTDFA2( STRING, DOREDUCTIONS = 1):
return TESTDFA( STRING, ttt2, DFA2, rl2, DOREDUCTIONS )
# the following grammar should fail to be slr
# (Aho,Ullman p. 213)
S = kjParser.nonterminal("S")
L = kjParser.nonterminal("L")
R = kjParser.nonterminal("R")
RS1 = kjParser.ParseRule( S, [L, equals, R] )
RS2 = kjParser.ParseRule( S, [R], echo )
RL1 = kjParser.ParseRule( L, [star, R])
RL2 = kjParser.ParseRule( L, [id])
RR1 = kjParser.ParseRule( R, [L] )
rs3 = ruleset(S, [RS1,RS2,RL1,RL2,RR1])
rs3.CompFirst()
rs3.CompFollow()
rs3.CompSLRNFA()
rs3.CompDFA()
#rs3.SLRFixDFA() # should fail and does.
# testing RULEGRAM
ObjG = NullCGrammar()
ObjG.Addterm("id","id",echo)
ObjG.Nonterms("T E Ep F Tp")
ObjG.Keywords("begin end")
ObjG.punct("+*()")
ObjG.comments(["--.*\n"])
# PROBLEM WITH COMMENTS???
Rulestr = """
## what a silly grammar!
T ::
@R One :: T >> begin E end
@R Three :: E >>
@R Two :: E >> E + T
@R Four :: E >> ( T )
"""
RL = RULEGRAM.DoParse1( Rulestr, ObjG )
####################### end of kjParseBuild.py
# (source file: zope.rdb-3.5.0/src/zope/rdb/gadfly/kjParseBuild.py;
#  the remainder of this section is gfdb0.py from the same package)
verbosity = 0
import os
try:
from hashlib import md5
except ImportError:
# Python 2.4 and earlier
from md5 import md5
# use whatever kjbuckets sqlsem is using
#from sqlsem import kjbuckets, maketuple
# error on checking of data integrity
StorageError = "StorageError"
# use md5 checksum (stub if md5 unavailable?)
def checksum(string):
return md5(string).digest()
def recursive_dump(data, prefix="["):
"""for debugging"""
from types import StringType
if type(data) is StringType:
#print prefix, data
return
p2 = prefix+"["
try:
for x in data:
recursive_dump(x, p2)
except:
print prefix, data
def checksum_dump(data, file):
"""checksum and dump marshallable data to file"""
#print "checksum_dump", file
#recursive_dump(data)
from marshal import dumps, dump
#print "data\n",data
storage = dumps(data)
checkpair = (checksum(storage), storage)
dump(checkpair, file)
def checksum_undump(file):
"""undump marshallable data from file, checksum"""
from marshal import load, loads
checkpair = load(file)
(check, storage) = checkpair
if checksum(storage)!=check:
raise StorageError, "data load checksum fails"
data = loads(storage)
return data
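
# --- Illustrative sketch (not part of the original source) ----------------
# Round trip through the checksum helpers above; the path is a scratch
# placeholder.  checksum_undump raises StorageError if the stored md5 digest
# does not match the marshalled payload.
def _checksum_roundtrip_sketch(path="/tmp/gadfly_check.tmp"):
    data = ["marshallable", ("data", 123)]
    f = open(path, "wb")
    checksum_dump(data, f)
    f.close()
    f = open(path, "rb")
    restored = checksum_undump(f)
    f.close()
    assert restored == data
    return restored
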
def backup_file(filename, backupname):
"""backup file, if unopenable ignore"""
try:
f = open(filename, "rb")
except:
return
data = f.read()
f.close()
f = open(backupname, "wb")
f.write(data)
f.close()
def del_file(filename):
"""delete file, ignore errors"""
from os import unlink
try:
unlink(filename)
except:
pass
class Database0:
"""quick and dirty in core database representation."""
# db.log is not None == use db.log to log modifications
# set for verbose prints
verbose = verbosity
# set for read only copy
readonly = 0
# set for temp/scratch db copy semantics
is_scratch = 0
# set to add introspective tables
introspect = 1
def __init__(self, shadowing=None, log=None):
"""dictionary of relations."""
verbose = self.verbose
self.shadowing = shadowing
self.log = log
self.touched = 0
if log:
self.is_scratch = log.is_scratch
if shadowing and not log:
raise ValueError, "shadowing db requires log"
if verbose:
print "Database0 init"
if log:
log.verbose = 1
if shadowing:
# shadow structures of shadowed db
self.rels = shadow_dict(shadowing.rels, Relation0.unshadow)
self.datadefs = shadow_dict(shadowing.datadefs)
self.indices = shadow_dict(shadowing.indices)
else:
self.rels = {}
self.datadefs = {}
self.indices = {}
if self.introspect:
self.set_introspection()
def set_introspection(self):
import gfintrospect
self["dual"] = gfintrospect.DualView()
self["__table_names__"] = gfintrospect.RelationsView()
self["__datadefs__"] = gfintrospect.DataDefsView()
self["__indices__"] = gfintrospect.IndicesView()
self["__columns__"] = gfintrospect.ColumnsView()
self["__indexcols__"] = gfintrospect.IndexAttsView()
def reshadow(self, db, dblog):
"""(re)make self into shadow of db with dblog"""
self.shadowing = db
self.log = dblog
self.rels = shadow_dict(db.rels, Relation0.unshadow)
self.datadefs = shadow_dict(db.datadefs)
self.indices = shadow_dict(db.indices)
def clear(self):
"""I'm not sure if database has circular structure, so this added"""
self.shadowing = None
self.log = None
self.rels = {}
self.datadefs = {}
self.indices = {}
def commit(self):
"""commit shadowed changes"""
verbose = self.verbose
if self.shadowing and self.touched:
# log commit handled elsewhere
#log = self.log
#if log and not log.is_scratch:
#if verbose: print "committing log"
#self.log.commit(verbose)
if verbose: print "committing rels"
self.rels.commit(verbose)
if verbose: print "committing datadefs"
self.datadefs.commit(verbose)
if verbose: print "committing indices"
self.indices.commit(verbose)
st = self.shadowing.touched
if not st:
if verbose: "print setting touched", self.touched
self.shadowing.touched = self.touched
elif verbose:
print "shadowed database is touched"
elif verbose:
print "db0: commit on nonshadow instance"
def __setitem__(self, name, relation):
"""bind a name (uppercased) to tuples as a relation."""
from string import upper
if self.indices.has_key(name):
raise NameError, "cannot set index"
self.rels[ upper(name) ] = relation
if self.verbose: print "db0 sets rel", name
def add_index(self, name, index):
if self.rels.has_key(name):
raise NameError, `name`+": is relation"
self.indices[name] = index
if self.verbose: print "db0 sets index", name
def drop_index(self, name):
if self.verbose: print "db0 drops index", name
del self.indices[name]
def __getitem__(self, name):
if self.verbose: print "db0 gets rel", name
from string import upper
return self.rels[upper(name)]
def get_for_update(self, name):
"""note: does not imply updates, just possibility of them"""
verbose = self.verbose
if verbose: print "db0 gets rel for update", name
shadowing = self.shadowing
gotit = 0
from string import upper
name = upper(name)
rels = self.rels
if shadowing:
if rels.is_shadowed(name):
test = rels[name]
# do we really have a shadow or a db copy?
if test.is_shadow:
gotit = 1
if not gotit:
if shadowing.has_relation(name):
test = shadowing.get_for_update(name)
else:
# uncommitted whole relation
test = rels[name]
gotit = 1
else:
test = rels[name]
gotit = 1
if self.readonly:
raise ValueError, "cannot update, db is read only"
elif test.is_view:
raise ValueError, "VIEW %s cannot be updated" % name
elif shadowing and not gotit:
if verbose: print "db0: making shadow for", name
if test.is_shadow: return test
shadow = Relation0(())
shadow = shadow.shadow(test, self.log, name, self)
rels[name] = shadow
return shadow
else:
return test
def __delitem__(self, name):
if self.verbose: print "db0 drops rel", name
from string import upper
del self.rels[upper(name)]
def relations(self):
return self.rels.keys()
def has_relation(self, name):
return self.rels.has_key(name)
def getdatadefs(self):
result = self.datadefs.values()
# sort so that (for example) create table statements come first
result.sort()
return result
def add_datadef(self, name, defn, logit=1):
"""only log the datadef if logit is set, else ignore redefinitions"""
dd = self.datadefs
if logit and dd.has_key(name):
raise KeyError, `name`+": already defined"
if logit:
self.touched = 1
dd[name] = defn
def has_datadef(self, name):
return self.datadefs.has_key(name)
def drop_datadef(self, name):
if self.verbose: print "db0 drops datadef",name
dd = self.datadefs
#print dd.keys()
if not dd.has_key(name):
raise KeyError, `name`+": no such element"
del dd[name]
def __repr__(self):
l = []
from string import join
l.append("INDICES: "+`self.indices.keys()`)
for (name, ddef) in self.datadefs.items():
l.append("data definition %s::\n%s" % (name, ddef))
for (name, rel) in self.rels.items():
l.append(name + ":")
l.append(rel.irepr())
return join(l, "\n\n")
def bindings(self, fromlist):
"""return (attdict, reldict, amb, ambatts) from fromlist = [(name,alias)...]
where reldict: alias > tuplelist
attdict: attribute_name > unique_relation
amb: dict of dottedname > (rel, att)
ambatts: dict of ambiguous_name > witness_alias
"""
from string import upper
rels = self.rels
ambiguous_atts = {}
ambiguous = {}
relseen = {}
attbindings = {}
relbindings = {}
for (name,alias) in fromlist:
name = upper(name)
alias = upper(alias)
if relseen.has_key(alias):
raise NameError, `alias` + ": bound twice in from list"
relseen[alias]=alias
try:
therel = rels[name]
except KeyError:
raise NameError, `name` + " no such relation in DB"
relbindings[alias] = therel
for attname in therel.attributes():
if not ambiguous_atts.has_key(attname):
if attbindings.has_key(attname):
oldrel = attbindings[attname]
oldbind = (oldrel, attname)
ambiguous[ "%s.%s" % oldbind] = oldbind
del attbindings[attname]
ambiguous_atts[attname]=alias
newbind = (alias, attname)
ambiguous[ "%s.%s" % newbind ] = newbind
else:
attbindings[attname] = alias
else:
newbind = (alias, attname)
ambiguous[ "%s.%s" % newbind ] = newbind
return (attbindings, relbindings, ambiguous, ambiguous_atts)
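
# --- Illustrative sketch (not part of the original source) ----------------
# How Database0.bindings() resolves a FROM list; assumes the database passed
# in already contains relations named R and S (created elsewhere).
def _bindings_sketch(db):
    fromlist = [("R", "R"), ("S", "S1")]     # (relation name, alias) pairs
    (attmap, relmap, amb, ambatts) = db.bindings(fromlist)
    # attmap:  unambiguous attribute name -> alias that provides it
    # relmap:  alias -> relation object
    # amb:     "alias.att" -> (alias, att) for attributes found in >1 relation
    # ambatts: ambiguous attribute name -> one alias that provides it
    return attmap, relmap, amb, ambatts
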
class File_Storage0:
"""quick and dirty file storage mechanism.
relation names in directory/dbname.gfd
contains a white separated list of relation names
relations in directory/relname.grl
contains sequence of marshalled tuples reps
prefixed by marshalled list of atts
"""
verbose = verbosity
def __init__(self, dbname, directory):
"""directory must exist."""
if self.verbose: print "fs0 init:", dbname, directory
self.dbname = dbname
self.directory = directory
self.relation_implementation = Relation0
self.recovery_mode = 0
def load(self, parser=None, forscratch=0):
# if logfile is present, need to recover
# error condition: fail to load relation, ddf, but no log file!
logfile = self.logfilename()
blogfile = self.backup_logfilename()
verbose = self.verbose
if verbose: print "fs0 load, checking", logfile
try:
testlog = open(logfile, "rb")
if verbose: print "fs0: opened", testlog
testlog.close()
testlog = open(blogfile, "rb")
testlog.close()
testlog = None
except:
recovery_mode = self.recovery_mode = 0
if verbose: print "recovery not needed"
else:
recovery_mode = self.recovery_mode = 1
if verbose: print "FS0 RECOVERY MODE LOAD!"
resultdb = Database0()
resultdb.is_scratch = forscratch
commands = self.get_initstatements()
#commands = parser.DoParse1(initstatements)
for command in commands:
if verbose: print "fs0 evals", command
command.relbind(resultdb)
command.eval()
for name in resultdb.relations():
if verbose: print "fs0 loads rel", name
rel = resultdb[name]
if rel.is_view:
# don't need to load views
continue
rel.set_empty()
try:
data = self.get_relation(name)
except StorageError, detail:
raise StorageError, "load failure %s: %s" % (name, detail)
attsin = tuple(data.attributes())
attsout = tuple(rel.attributes())
if attsin!=attsout:
raise StorageError, "rel %s: atts %s don't match %s" % (
name, attsin, attsout)
rel.add_tuples( data.rows() )
# in sync!
rel.touched = 0
# db in sync
resultdb.touched = 0
# do recovery, if needed
if recovery_mode:
if verbose: print "fs0 recovering from logfile", logfile
# restart the log file only if db is not scratch
restart = not forscratch
Log = DB_Logger(logfile, blogfile)
if verbose: Log.verbose=1
Log.recover(resultdb, restart)
# do a checkpoint
self.recovery_mode = 0
if restart and not forscratch:
Log.shutdown()
Log = None
del_file(logfile)
if verbose: print "FS0: dumping database"
self.dump(resultdb)
Log = resultdb.log = DB_Logger(logfile, blogfile)
Log.startup()
elif not forscratch:
Log = DB_Logger(logfile, blogfile)
Log.startup()
resultdb.log = Log
return resultdb
def relfilename(self, name):
#return "%s/%s.grl" % (self.directory, name)
return os.path.join(self.directory, name+".grl")
def backup_relfilename(self, name):
#return "%s/%s.brl" % (self.directory, name)
return os.path.join(self.directory, name+".brl")
def relfile(self, name, mode="rb"):
if self.recovery_mode:
return self.getfile_fallback(
self.backup_relfilename(name), self.relfilename(name), mode)
else:
name = self.relfilename(name)
return open(name, mode)
def getfile_fallback(self, first, second, mode):
try:
return open(first, mode)
except:
return open(second, mode)
def get_relation(self, name):
f = self.relfile(name, "rb")
rel = self.relation_implementation(())
try:
rel.load(f)
except StorageError:
if self.recovery_mode:
f = open(self.relfilename(name), "rb")
rel.load(f)
else:
raise StorageError, \
"fs: could not unpack backup rel file or rel file in recovery mode: "+name
return rel
def dbfilename(self):
#return "%s/%s.gfd" % (self.directory, self.dbname)
return os.path.join(self.directory, self.dbname+".gfd")
def backup_dbfilename(self):
#return "%s/%s.bfd" % (self.directory, self.dbname)
return os.path.join(self.directory, self.dbname+".bfd")
def logfilename(self):
#return "%s/%s.gfl" % (self.directory, self.dbname)
return os.path.join(self.directory, self.dbname+".gfl")
def backup_logfilename(self):
#return "%s/%s.glb" % (self.directory, self.dbname)
return os.path.join(self.directory, self.dbname+".glb")
def get_initstat_file(self, mode):
if self.recovery_mode:
return self.getfile_fallback(
self.backup_dbfilename(), self.dbfilename(), mode)
else:
return open(self.dbfilename(), mode)
def get_initstatements(self):
f = self.get_initstat_file("rb")
if self.verbose:
print "init statement from file", f
try:
data = checksum_undump(f)
except StorageError:
if self.recovery_mode:
f = open(self.dbfilename, "rb")
data = checksum_undump(f)
else:
raise StorageError, \
"could not unpack ddf backup or ddf file in recovery mode: "+self.dbname
f.close()
from sqlsem import deserialize
stats = map(deserialize, data)
return stats
def dump(self, db):
"""perform a checkpoint (no active transactions!)"""
# db should be non-shadowing db
# first thing: back up the log
backup_file(self.logfilename(), self.backup_logfilename())
verbose = self.verbose
if verbose: print "fs0: checkpointing db"
if db.is_scratch or db.readonly:
# don't need to do anything.
if verbose: print "fs0: scratch or readonly, returning"
return
log = db.log
if log:
log.commit()
if verbose:
print "DEBUG LOG TRACE"
log.dump()
log.shutdown()
if db.touched:
if verbose: print "fs0: db touched, backing up ddf file"
backup_file(self.dbfilename(),
self.backup_dbfilename())
relations = db.relations()
for r in relations:
rel = db[r]
#print r
if rel.touched:
if verbose: print "fs0: backing up touched rel", r
backup_file(self.relfilename(r),
self.backup_relfilename(r))
for r in relations:
if verbose: print "fs0: dumping relations now"
self.dumprelation(r, db[r])
if verbose: print "fs0: dumping datadefs now"
self.dumpdatadefs(db)
# del of logfile signals successful commit.
if verbose: print "fs0: successful dump, deleting log file"
logfilename = self.logfilename()
blogfilename = self.backup_logfilename()
del_file(logfilename)
del_file(blogfilename)
if db.touched:
if verbose: print "fs0: deleting backup ddf file"
del_file(self.backup_dbfilename())
db.touched = 0
for r in relations:
rel = db[r]
if rel.touched:
if verbose: print "fs0: deleting rel backup", r
del_file(self.backup_relfilename(r))
rel.touched = 0
if verbose: print "fs0: restarting db log"
log = db.log = DB_Logger(logfilename, blogfilename)
log.startup()
if verbose: print "fs0: dump complete"
self.recovery_mode = 0
def dumprelation(self, name, rel, force=0):
"""set force to ignore the "touch" flag."""
# ignore self.backup_mode
if (force or rel.touched) and not rel.is_view:
fn = self.relfilename(name)
if self.verbose:
print "dumping touched rel", name, "to", fn
f = open(fn, "wb")
rel.dump(f)
def dumpdatadefs(self, db, force=0):
"""set force to ignore the touch flag"""
# ignore self.backup_mode
if not (force or db.touched): return
#from marshal import dump, dumps
fn = self.dbfilename()
f = open(fn, "wb")
datadefs = db.getdatadefs()
from sqlsem import serialize
datadefsd = map(serialize, datadefs)
#for (defn, ser) in map(None, datadefs, datadefsd):
#print defn
#print ser
#dumps(ser) ### debug test
checksum_dump(datadefsd, f)
f.close()
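
# --- Illustrative sketch (not part of the original source) ----------------
# The load/checkpoint cycle for File_Storage0; "mydb" and "dbdir" are
# placeholders, and the .gfd/.grl files are assumed to exist already (the
# database was created and dumped previously).
def _storage_cycle_sketch():
    fs = File_Storage0("mydb", "dbdir")
    db = fs.load()          # replays mydb.gfl first if a log is present
    # ... evaluate statements / commit transactions against db ...
    fs.dump(db)             # checkpoint: rewrite touched .grl/.gfd, clear log
    return db
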
class Relation0:
"""quick and dirty in core relation representation.
self.tuples contains tuples or 0 if erased.
tuples must not move (to preserve indices)
unless indices regenerate.
"""
is_view = 0 # Relation0 is not a view
def __init__(self, attribute_names, tuples=None, filter=None):
from sqlsem import kjbuckets
self.indices = kjbuckets.kjGraph()
self.index_list = []
self.attribute_names = attribute_names
if tuples is None:
tuples = []
self.filter = filter
self.set_empty()
self.add_tuples(tuples)
# indices map attname > indices containing att
# relation to shadow and log (if non-null)
self.log = None
self.name = None # anonymous by default
self.is_shadow = 0
self.touched = 0
def shadow(self, otherrelation, log, name, inshadowdb):
"""return structural replica of otherrelation (as self)
for non-updatable relation (eg, view) may return otherrelation"""
if otherrelation.is_view:
# for now, assume VIEWS CANNOT BE UPDATED
return otherrelation
self.is_shadow = 1
self.shadow_of_shadow = otherrelation.is_shadow
self.log = log
self.name = name
# don't make any updates permanent if set.
self.tuples = otherrelation.tuples[:]
self.attribute_names = otherrelation.attribute_names
self.filter = otherrelation.filter
for index in otherrelation.index_list:
copy = index.copy()
name = copy.name
self.add_index(copy, recordtuples=0)
# record in shadowdb, but don't log it
inshadowdb.add_index(name, copy)
#inshadowdb.add_datadef(name, copy, logit=0)
self.touched = otherrelation.touched
return self
def unshadow(self):
"""make self into a replacement for shadowed, return self."""
if self.is_shadow:
self.log = None
self.is_shadow = self.shadow_of_shadow
return self
def dump(self, file):
attributes = tuple(self.attributes())
rows = self.rows()
newrows = rows[:]
count = 0
tt = type
from types import IntType
for i in xrange(len(rows)):
this = rows[i]
if this is not None and tt(this) is not IntType:
newrows[count] = rows[i].dump(attributes)
count = count + 1
newrows = newrows[:count]
newrows.append(attributes)
checksum_dump(newrows, file)
def load(self, file):
"""checksum must succeed."""
rows = checksum_undump(file)
attributes = rows[-1]
self.attribute_names = attributes
rows = rows[:-1]
from sqlsem import kjbuckets
undump = kjbuckets.kjUndump
for i in xrange(len(rows)):
rows[i] = undump(attributes, rows[i])
self.set_empty()
self.add_tuples(rows)
# in sync with disk copy!
self.touched = 0
def add_index(self, index, recordtuples=1):
"""unset recordtuples if the index is initialized already."""
# does not "touch" the relation
index_list = self.index_list
indices = self.indices
atts = index.attributes()
for a in atts:
indices[a] = index
if recordtuples:
(tuples, seqnums) = self.rows(1)
index.clear()
if tuples:
index.add_tuples(tuples, seqnums)
index_list.append(index)
def drop_index(self, index):
# does not "touch" the relation
name = index.name
if verbosity:
print "rel.drop_index", index
print "...", self.indices, self.index_list
indices = self.indices
for a in index.attributes():
# contorted since one index may be a clone of the other.
aindices = indices.neighbors(a)
for ind in aindices:
if ind.name == name:
indices.delete_arc(a, ind)
theind = ind
# the (non-clone) index ought to have been found above...
self.index_list.remove(theind)
def choose_index(self, attributes):
"""choose an index including subset of attributes or None"""
from sqlsem import kjbuckets
kjSet = kjbuckets.kjSet
atts = kjSet(attributes)
#print "choosing index", atts
indices = (atts * self.indices).values()
choice = None
for index in indices:
indexatts = index.attributes()
#print "index atts", indexatts
iatts = kjSet(indexatts)
if iatts.subset(atts):
if choice is None:
#print "chosen", index.name
choice = index
lchoice = len(choice.attributes())
else:
if index.unique or lchoice<len(indexatts):
choice = index
lchoice = len(choice.attributes())
return choice
def __repr__(self):
rows = self.rows()
atts = self.attributes()
list_rep = [list(atts)]
for r in rows:
rlist = []
for a in atts:
try:
elt = r[a]
except KeyError:
elt = "NULL"
else:
elt = str(elt)
rlist.append(elt)
list_rep.append(rlist)
# compute maxen for formatting
maxen = [0] * len(atts)
for i in xrange(len(atts)):
for l in list_rep:
maxen[i] = max(maxen[i], len(l[i]))
for i in xrange(len(atts)):
mm = maxen[i]
for l in list_rep:
old = l[i]
l[i] = old + (" " * (mm-len(old)))
from string import join
for i in xrange(len(list_rep)):
list_rep[i] = join(list_rep[i], " | ")
first = list_rep[0]
list_rep.insert(1, "=" * len(first))
return join(list_rep, "\n")
def irepr(self):
List = [self] + list(self.index_list)
List = map(str, List)
from string import join
return join(List, "\n")
def set_empty(self):
self.tuples = []
for index in self.index_list:
index.clear()
def drop_indices(self, db):
for index in self.index_list:
name = index.name
db.drop_datadef(name)
db.drop_index(name)
self.index_list = []
from sqlsem import kjbuckets
self.indices = kjbuckets.kjGraph()
def regenerate_indices(self):
(tuples, seqnums) = self.rows(1)
#self.tuples = tuples
for index in self.index_list:
index.clear()
index.add_tuples(tuples, seqnums)
def add_tuples(self, tuples):
if not tuples: return
tuples = filter(self.filter, tuples)
oldtuples = self.tuples
first = len(oldtuples)
oldtuples[first:] = list(tuples)
last = len(oldtuples)
for index in self.index_list:
index.add_tuples(tuples, xrange(first,last))
self.touched = 1
def attributes(self):
return self.attribute_names
def rows(self, andseqnums=0):
tups = self.tuples
# short cut
if 0 not in tups:
if andseqnums:
return (tups, xrange(len(tups)))
else:
return tups
tt = type
from types import IntType
result = list(self.tuples)
if andseqnums: seqnums = result[:]
count = 0
for i in xrange(len(result)):
t = result[i]
if tt(t) is not IntType:
result[count] = t
if andseqnums: seqnums[count] = i
count = count+1
result = result[:count]
if andseqnums:
return (result, seqnums[:count])
else:
return result
def erase_tuples(self, seqnums):
#print "et seqnums", seqnums
if not seqnums: return
tups = self.tuples
# order important! indices first!
for index in self.index_list:
index.erase_tuples(seqnums, tups)
for i in seqnums:
#print "deleting", i
tups[i] = 0
#print self
self.touched = 1
def reset_tuples(self, tups, seqnums):
# KISS for indices, maybe optimize someday...
if not tups: return
mytups = self.tuples
for index in self.index_list:
index.erase_tuples(seqnums, mytups)
for i in xrange(len(seqnums)):
seqnum = seqnums[i]
mytups[seqnum] = tups[i]
for index in self.index_list:
index.add_tuples(tups, seqnums)
self.touched = 1
# should views be here?
class View(Relation0):
"""view object, acts like relation, with addl operations."""
touched = 0
is_view = 1
is_shadow = 0
### must fix namelist!
def __init__(self, name, namelist, selection, indb):
"""set namelist to None for implicit namelist"""
self.name = name
self.namelist = namelist
self.selection = selection
# attempt a relbind, no outer bindings!
self.relbind(indb, {})
self.cached_rows = None
self.translate = None
def __repr__(self):
return "view %s as %s" % (self.name, self.selection)
irepr = __repr__
def uncache(self):
self.cached_rows = None
def UNDEFINED_OP_FOR_VIEW(*args, **kw):
raise ValueError, "operation explicitly undefined for view object"
shadow = dump = load = add_index = drop_index = set_empty = \
add_tuples = erase_tuples = reset_tuples = UNDEFINED_OP_FOR_VIEW
def ignore_op_for_view(*args, **kw):
"""ignore this op when applied to view"""
pass
drop_indices = regenerate_indices = ignore_op_for_view
def choose_index(s, a):
"""no indices on views (might change this?)"""
return None
def relbind(self, db, atts):
"""bind self to db, ignore atts"""
name = self.name
selection = self.selection
selection = self.selection = selection.relbind(db)
namelist = self.namelist
if namelist is not None:
from sqlsem import kjbuckets
target_atts = selection.attributes()
if len(namelist)!=len(target_atts):
raise "select list and namelist don't match in %s"%name
pairs = map(None, namelist, target_atts)
self.translate = kjbuckets.kjGraph(pairs)
return self
def attributes(self):
namelist = self.namelist
if self.namelist is None:
return self.selection.attributes()
return namelist
def rows(self, andseqs=0):
cached_rows = self.cached_rows
if cached_rows is None:
cached_rows = self.cached_rows = self.selection.eval().rows()
if self.namelist is not None:
# translate the attribute names
translate = self.translate
for i in range(len(cached_rows)):
cached_rows[i] = cached_rows[i].remap(translate)
if andseqs:
return (cached_rows[:], range(len(cached_rows)))
else:
return cached_rows[:]
class Index:
"""Index for tuples in relation. Tightly bound to relation rep."""
### should add "unique index" and check enforce uniqueness...
def __init__(self, name, attributes, unique=0):
self.unique = unique
self.name = name
self.atts = tuple(attributes)
# values > tuples
self.index = {}
self.dseqnums = {}
def __repr__(self):
un = ""
if self.unique: un="UNIQUE "
return "%sindex %s on %s" % (un, self.name, self.atts)
def copy(self):
"""make a fast structural copy of self"""
result = Index(self.name, self.atts, unique=self.unique)
rindex = result.index
rdseqnums = result.dseqnums
myindex = self.index
mydseqnums = self.dseqnums
for k in myindex.keys():
rindex[k] = myindex[k][:]
for k in mydseqnums.keys():
rdseqnums[k] = mydseqnums[k][:]
return result
def attributes(self):
return self.atts
def matches(self, tuple, translate=None):
"""return (tuples, seqnums) for tuples matching tuple
(with possible translations"""
if translate:
tuple = translate * tuple
atts = self.atts
dump = tuple.dump(atts)
index = self.index
if index.has_key(dump):
return (index[dump], self.dseqnums[dump])
else:
return ((), ())
def clear(self):
self.index = {}
self.dseqnums = {}
def add_tuples(self, tuples, seqnums):
unique = self.unique
atts = self.atts
index = self.index
dseqnums = self.dseqnums
test = index.has_key
for i in xrange(len(tuples)):
tup = tuples[i]
seqnum = seqnums[i]
dump = tup.dump(atts)
#print self.name, dump
if test(dump):
bucket = index[dump]
#print "self", self
#print "unique", unique
#print "bucket", bucket
if unique and bucket:
raise StorageError, "uniqueness violation: %s %s" %(
dump, self)
bucket.append(tup)
dseqnums[dump].append(seqnum)
else:
index[dump] = [tup]
dseqnums[dump] = [seqnum]
def erase_tuples(self, seqnums, all_tuples):
# all_tuples must be internal rel tuple list
atts = self.atts
index = self.index
dseqnums = self.dseqnums
for seqnum in seqnums:
tup = all_tuples[seqnum]
dump = tup.dump(atts)
index[dump].remove(tup)
dseqnums[dump].remove(seqnum)
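
# --- Illustrative sketch (not part of the original source) ----------------
# Building an in-core relation and probing it through an Index; assumes the
# kjbuckets extension (or its pure Python fallback) selected by sqlsem is
# importable, as elsewhere in this module.  Names and values are placeholders.
def _relation_index_sketch():
    from sqlsem import kjbuckets
    kjDict = kjbuckets.kjDict
    rel = Relation0(("NAME", "AGE"))
    rel.add_tuples([kjDict([("NAME", "sam"), ("AGE", 7)]),
                    kjDict([("NAME", "pat"), ("AGE", 9)])])
    rel.add_index(Index("name_index", ("NAME",)))   # indexes existing rows
    index = rel.choose_index(["NAME"])              # picks the index above
    probe = kjDict([("NAME", "sam")])
    (tuples, seqnums) = index.matches(probe)
    return tuples, seqnums
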
class shadow_dict:
"""shadow dictionary. defer & remember updates."""
verbose = verbosity
def __init__(self, shadowing, value_transform=None):
self.shadowed = shadowing
shadow = self.shadow = {}
self.touched = {}
for key in shadowing.keys():
shadow[key] = shadowing[key]
self.value_transform = value_transform
# defeats inheritance! careful!
self.values = shadow.values
self.items = shadow.items
self.keys = shadow.keys
self.has_key = shadow.has_key
def is_shadowed(self, name):
return self.touched.has_key(name)
def __len__(self):
return len(self.shadow)
def commit(self, verbose=0):
"""apply updates to shadowed."""
import sys
verbose = verbose or self.verbose
if self.touched:
shadowed = self.shadowed
shadow = self.shadow
value_transform = self.value_transform
keys = shadowed.keys()
if verbose:
print "shadowdict oldkeys", keys
for k in keys:
del shadowed[k]
keys = shadow.keys()
if verbose:
print "shadowdict newkeys", keys
for k in shadow.keys():
value = shadow[k]
if value_transform is not None:
try:
value = value_transform(value)
except:
raise "transform fails", (sys.exc_type, sys.exc_value, k, value)
shadowed[k] = value
self.touched = {}
def __getitem__(self, key):
return self.shadow[key]
def __setitem__(self, key, item):
from types import StringType
if type(key) is not StringType:
raise "nonstring", key
if item is None:
raise "none set", (key, item)
self.touched[key] = 1
self.shadow[key] = item
def __delitem__(self, key):
self.touched[key] = 1
del self.shadow[key]
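
# --- Illustrative sketch (not part of the original source) ----------------
# shadow_dict defers updates until commit(); keys must be strings and values
# must not be None.
def _shadow_dict_sketch():
    base = {"A": 1}
    sh = shadow_dict(base)
    sh["B"] = 2                  # recorded in the shadow only
    del sh["A"]
    assert base == {"A": 1}      # shadowed dict not touched yet
    sh.commit()
    assert base == {"B": 2}      # deferred updates applied on commit
    return base
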
# stored mutations on relations
class Add_Tuples:
"""stored rel.add_tuples(tuples)"""
def __init__(self, name):
self.to_rel = name
self.indb = None
def initargs(self):
return (self.to_rel,)
def set_data(self, tuples, rel):
"""store self.data as tuple with tuple[-1] as to_rel, rest data"""
attributes = tuple(rel.attributes())
ltuples = len(tuples)
data = list(tuples)
for i in xrange(ltuples):
tdata = tuples[i].dump(attributes)
data[i] = tdata
self.data = tuple(data)
def __repr__(self):
from string import join
datarep = map(repr, self.data)
datarep = join(datarep, "\n ")
return "add tuples to %s\n %s\n\n" % (self.to_rel, datarep)
def marshaldata(self):
return self.data
def demarshal(self, data):
self.data = data
def relbind(self, db):
self.indb = db
def eval(self, dyn=None):
"""apply operation to db"""
db = self.indb
data = self.data
name = self.to_rel
rel = db[name]
attributes = tuple(rel.attributes())
tuples = list(data)
from sqlsem import kjbuckets
undump = kjbuckets.kjUndump
for i in xrange(len(tuples)):
tuples[i] = undump(attributes, tuples[i])
rel.add_tuples(tuples)
class Erase_Tuples(Add_Tuples):
"""stored rel.erase_tuples(seqnums)"""
def set_data(self, seqnums, rel):
seqnums = list(seqnums)
self.data = tuple(seqnums)
def __repr__(self):
return "Erase seqnums in %s\n %s\n\n" % (self.to_rel, self.data)
def eval(self, dyn=None):
db = self.indb
seqnums = self.data
name = self.to_rel
rel = db[name]
rel.erase_tuples(seqnums)
class Reset_Tuples(Add_Tuples):
"""stored rel.reset_tuples(tups, seqnums)"""
def set_data(self, tups, seqnums, rel):
attributes = tuple(rel.attributes())
dtups = list(tups)
for i in xrange(len(dtups)):
dtups[i] = dtups[i].dump(attributes)
self.data = (tuple(dtups), tuple(seqnums))
def __repr__(self):
(dtups, seqnums) = self.data
pairs = map(None, seqnums, dtups)
from string import join
datarep = map(repr, pairs)
datarep = join(datarep, " \n")
return "Reset tuples in %s\n %s\n\n" % (self.to_rel, datarep)
def eval(self, dyn=None):
db = self.indb
(dtups, seqnums) = self.data
tups = list(dtups)
rel = db[self.to_rel]
attributes = tuple(rel.attributes())
from sqlsem import kjbuckets
undump = kjbuckets.kjUndump
for i in xrange(len(dtups)):
tups[i] = undump(attributes, dtups[i])
rel.reset_tuples(tups, seqnums)
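
# --- Illustrative sketch (not part of the original source) ----------------
# Recording an insert as a storable mutation object; "FRUITS" is a
# hypothetical relation name and new_tuples are assumed to be kjDicts over
# its attributes.  The same object can later be serialized to the log and
# re-evaluated during recovery.
def _add_tuples_sketch(db, new_tuples):
    rel = db.get_for_update("FRUITS")
    op = Add_Tuples("FRUITS")
    op.set_data(new_tuples, rel)     # store dumped tuples + target name
    op.relbind(db)
    op.eval()                        # applies the insert to the bound db
    return op
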
# Log entry tags
START = "START"
COMMIT = "COMMIT"
ABORT = "ABORT"
UNREADABLE = "UNREADABLE"
class Transaction_Logger:
"""quick and dirty Log implementation per transaction."""
verbose = verbosity
def __init__(self, db_log, transactionid, is_scratch=0):
self.db_log = db_log
self.transactionid = transactionid
# ignore all operations if set
self.is_scratch = is_scratch
self.dirty = 0
self.deferred = []
def reset(self):
self.deferred = []
def __repr__(self):
return "Transaction_Logger(%s, %s, %s)" % (
self.db_log, self.transactionid, self.is_scratch)
def log(self, operation):
verbose = self.verbose
tid = self.transactionid
if not self.is_scratch:
self.deferred.append(operation)
if verbose:
print "tid logs", tid, operation
def flush(self):
verbose = self.verbose
if not self.is_scratch:
tid = self.transactionid
deferred = self.deferred
self.deferred = []
db_log = self.db_log
if db_log:
for operation in deferred:
db_log.log(operation, tid)
self.dirty = 1
elif verbose:
print "scratch log ignored", tid, operation
def commit(self, verbose=0):
verbose = self.verbose or verbose
tid = self.transactionid
if verbose: print "committing trans log", tid
if self.is_scratch:
if verbose:
print "scratch commit ignored", tid
return
if not self.dirty:
if verbose:
print "nondirty commit", tid
return
self.flush()
db_log = self.db_log
db_log.commit(verbose, tid)
if verbose:
print "transaction is considered recoverable", tid
class DB_Logger:
"""quick and dirty global db logger."""
verbose = verbosity
is_scratch = 0
def __init__(self, filename, backupname):
self.filename = filename
# backup name is never kept open: existence indicates log in use.
self.backupname = backupname
self.file = None
self.dirty = 0
if self.verbose:
print id(self), "created DB_Logger on", self.filename
def __repr__(self):
return "DB_Logger(%s)" % self.filename
def startup(self):
if self.verbose:
print id(self), "preparing", self.filename
# open happens automagically
#self.file = open(self.filename, "wb")
self.clear()
self.dirty = 0
def shutdown(self):
if self.verbose:
print id(self), "shutting down log", self.filename
file = self.file
if file:
file.close()
self.file = None
def clear(self):
if self.verbose:
print id(self), "clearing"
self.shutdown()
del_file(self.filename)
def restart(self):
if self.verbose:
print id(self), "restarting log file", self.filename
if self.file is not None:
self.file.close()
self.file = open(self.filename, "ab")
dummy = open(self.backupname, "ab")
dummy.close()
self.dirty = 0
def clear_log_file(self):
if self.verbose:
print id(self), "clearing logfile", self.filename
if self.file is not None:
self.file.close()
self.file = None
del_file(self.filename)
del_file(self.backupname)
self.dirty = 0
def log(self, operation, transactionid=None):
"""transactionid of None means no transaction: immediate."""
file = self.file
if file is None:
self.restart()
file = self.file
verbose = self.verbose
from sqlsem import serialize
serial = serialize(operation)
data = (transactionid, serial)
if verbose:
print id(self), "logging:", transactionid
print operation
checksum_dump(data, file)
self.dirty = 1
def commit(self, verbose=0, transactionid=None):
"""add commit, if appropriate, flush."""
verbose = self.verbose or verbose
if not self.dirty and transactionid is None:
if verbose: print "commit not needed", transactionid
return
elif verbose:
print "attempting commit", transactionid
if transactionid is not None:
self.log( COMMIT, transactionid )
if verbose: print "committed", transactionid
if verbose: print "flushing", self.filename
self.file.flush()
self.dirty = 0
def recover(self, db, restart=1):
import sys
verbose = self.verbose
filename = self.filename
if verbose:
print "attempting recovery from", self.filename
file = self.file
if file is not None:
if verbose: print "closing file"
self.file.close()
self.file = None
if verbose:
print "opens should generate an error if no recovery needed"
try:
file = open(filename, "rb")
file2 = open(self.backupname, "rb")
except:
if verbose:
print "no recovery needed:", filename
print sys.exc_type, sys.exc_value
sys.exc_traceback = None
return
file2.close()
if verbose: print "log found, recovering from", filename
records = self.read_records(file)
if verbose: print "scan for commit records"
commits = {}
for (i, (tid, op)) in records:
if op==COMMIT:
if verbose: print "transaction", tid, "commit at", i
commits[tid] = i
elif verbose:
print i, tid, "operation\n", op
if verbose: print commits, "commits total"
if verbose: print "applying commited operations, in order"
committed = commits.has_key
from types import StringType
for (i, (tid, op)) in records:
if tid is None or (committed(tid) and commits[tid]>i):
if type(op) is StringType:
if verbose:
print "skipping marker", tid, op
if verbose:
print "executing for", tid, i
print op
#### Note: silently eat errors unless verbose
### (eg in case of table recreation...)
### There should be a better way to do this!!!
import sys
try:
op.relbind(db)
op.eval()
except:
if verbose:
print "error", sys.exc_type, sys.exc_value
print "binding or evaluating logged operation:"
print op
elif verbose:
print "uncommitted operation", tid, i
print op
if verbose:
print "recovery successful: clearing log file"
self.clear()
if restart:
if verbose:
print "recreating empty log file"
self.startup()
def read_records(self, file):
"""return log record as (index, (tid, op)) list"""
verbose = self.verbose
if verbose: print "reading log records to error"
import sys
records = {}
from sqlsem import deserialize
count = 0
while 1:
try:
data = checksum_undump(file)
except:
if verbose:
print "record read terminated with error", len(records)
print sys.exc_type, sys.exc_value
break
(transactionid, serial) = data
operation = deserialize(serial)
records[count] = (transactionid, operation)
if verbose:
print count, ": read for", transactionid
print operation
count = count+1
if verbose: print len(records), "records total"
records = records.items()
records.sort()
return records
def dump(self):
verbose = self.verbose
self.shutdown()
print "dumping log"
self.verbose = 1
try:
file = open(self.filename, "rb")
except:
print "DUMP FAILED, cannot open", self.filename
else:
self.read_records(file)
self.verbose = verbose
self.restart()
####################### end of gfdb0.py
# (source file: zope.rdb-3.5.0/src/zope/rdb/gadfly/gfdb0.py)