code
stringlengths
501
5.19M
package
stringlengths
2
81
path
stringlengths
9
304
filename
stringlengths
4
145
# Release helper utilities for Twisted: shell-command wrappers, in-place file
# rewriting, and Project/version bookkeeping used by admin release scripts.
# NOTE(review): this is Python 2 code (print statement, raw_input).

import os
from os.path import join as opj
import re

from twisted.python import dist


# errors

class DirectoryExists(OSError):
    """Some directory exists when it shouldn't."""
    pass


class DirectoryDoesntExist(OSError):
    """Some directory doesn't exist when it should."""
    pass


class CommandFailed(OSError):
    """A shell command run via sh() exited with a non-zero status."""
    pass


# utilities

def sh(command, null=True, prompt=False):
    """
    I'll try to execute `command', and if `prompt' is true, I'll ask before
    running it.  If the command returns something other than 0, I'll raise
    CommandFailed(command).

    @param null: if true, redirect the command's stdout to /dev/null.
    """
    print "--$", command
    if prompt:
        # Any answer starting with 'n' aborts; anything else runs the command.
        if raw_input("run ?? ").startswith('n'):
            return
    if null:
        command = "%s > /dev/null" % command
    if os.system(command) != 0:
        raise CommandFailed(command)


def replaceInFile(filename, oldToNew):
    """
    Replace, in `filename', every occurrence of each key of `oldToNew' with
    the corresponding value, keeping a .bak copy of the original while the
    new contents are written out.  (Despite older docs, no sed/mv involved:
    this is plain Python string replacement.)
    """
    os.rename(filename, filename+'.bak')
    f = open(filename+'.bak')
    d = f.read()
    f.close()
    for k,v in oldToNew.items():
        d = d.replace(k, v)
    f = open(filename + '.new', 'w')
    f.write(d)
    f.close()
    os.rename(filename+'.new', filename)
    os.unlink(filename+'.bak')


def runChdirSafe(f, *args, **kw):
    # Run f(*args, **kw), restoring the current working directory afterwards
    # even if f chdir'd somewhere else or raised.
    origdir = os.path.abspath('.')
    try:
        return f(*args, **kw)
    finally:
        os.chdir(origdir)


class Project:
    """
    A representation of a Twisted project with version information.
    """
    # Attribute defaults; __init__ overwrites any of them from keyword args.
    newVersion = None
    versionfile = None
    name = None
    pkgname = None
    currentVersionStr = None
    dir = None

    def __init__(self, **kw):
        self.__dict__.update(kw)

    def fullyQualifiedName(self):
        """
        Return a string naming the project: e.g. "Twisted", "Twisted Conch",
        etc.
        """
        if self.name == "twisted":
            return "Twisted"
        return "Twisted %s" % (self.name.capitalize(),)


class Done(Exception):
    """
    Raised when the user is done answering questions.
    """
    pass


# Matches strict three-component versions such as "2.0.0".
verstringMatcher = re.compile(r"^([0-9]+)\.([0-9]+)\.([0-9]+)$")


def inputNewVersion(project):
    """
    Ask the user to input a new version number for the given project, and
    return a three-tuple of (major, minor, micro).

    Returns None on empty input; raises Done if the user enters 'done'.
    """
    match = None
    while match is None:
        new_vers = raw_input("New version for %s? " % (project.name))
        if not new_vers:
            return None
        if new_vers == 'done':
            raise Done
        match = verstringMatcher.match(new_vers)
        if match is None:
            print 'Invalid format. Use e.g. 2.0.0.'

    major, minor, micro = map(int, match.groups())
    return major, minor, micro


def getVersionSafely(proj):
    """
    Call dist.getVersion, and if an error is raised, return None.
    """
    try:
        currentVersionStr = dist.getVersion(proj)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    except:
        currentVersionStr = None
    return currentVersionStr


def gatherCurrentInfo():
    """
    @returns: A list of L{Project} instances with current information when
        available.
    """
    # Core Twisted first, then one Project per subproject listed by dist.
    projects = [Project(name='twisted', pkgname='twisted',
                        versionfile='twisted/_version.py',
                        currentVersionStr=getVersionSafely('core'),
                        dir='twisted')]
    for pname in dist.twisted_subprojects:
        dir = opj('twisted', pname)
        pkgname = 'twisted.'+pname
        projects.append(
            Project(name=pname, pkgname=pkgname,
                    dir=dir,
                    versionfile=opj(dir, '_version.py'),
                    currentVersionStr=getVersionSafely(pname),
                    )
            )
    return projects


def replaceProjectVersion(filename, newversion):
    """
    Write version specification code into the given filename, which sets the
    version to the given version number.

    @param filename: A filename which is most likely a "_version.py" under
    some Twisted project.

    @param newversion: A sequence of three numbers.
    """
    f = open(filename, 'w')
    f.write('''\
# This is an auto-generated file. Use admin/change-versions to update.
from twisted.python import versions
version = versions.Version(__name__[:__name__.rfind('.')], %s, %s, %s)
''' % tuple(newversion))
    f.close()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/release.py
release.py
# Compatibility shims: pure-Python inet_pton/inet_ntop for platforms whose
# socket module lacks IPv6 support, a lazy replacement for OpenSSL.tsafe,
# and an operator.attrgetter backfill for old Pythons.
# NOTE(review): Python 2 code (`apply`, `exec` statement, string exceptions
# of the socket.error two-tuple form).

import sys, string, socket, struct

def inet_pton(af, addr):
    # Convert a presentation-format address to packed bytes.  IPv4 delegates
    # to inet_aton; IPv6 is parsed by hand below.
    if af == socket.AF_INET:
        return socket.inet_aton(addr)
    elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        if [x for x in addr if x not in string.hexdigits + ':.']:
            # NOTE(review): `x` here is the comprehension variable leaking out
            # of the list-comp above, so only the *last* offending character
            # is reported, not all of them.
            raise ValueError("Illegal characters: %r" % (''.join(x),))

        parts = addr.split(':')
        # Each '::' elision shows up as an empty string in parts.
        elided = parts.count('')
        # A trailing dotted-quad ("::ffff:1.2.3.4") occupies two 16-bit groups.
        ipv4Component = '.' in parts[-1]

        if len(parts) > (8 - ipv4Component) or elided > 3:
            raise ValueError("Syntactically invalid address")

        if elided == 3:
            # The address is exactly "::" -- all zeros.
            return '\x00' * 16

        if elided:
            # Expand the '::' into the right number of zero groups.
            zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
            if addr.startswith('::'):
                parts[:2] = zeros
            elif addr.endswith('::'):
                parts[-2:] = zeros
            else:
                idx = parts.index('')
                parts[idx:idx+1] = zeros

            if len(parts) != 8 - ipv4Component:
                raise ValueError("Syntactically invalid address")
        else:
            if len(parts) != (8 - ipv4Component):
                raise ValueError("Syntactically invalid address")

        if ipv4Component:
            if parts[-1].count('.') != 3:
                raise ValueError("Syntactically invalid address")
            # Re-express the IPv4 tail as two hex groups.
            rawipv4 = socket.inet_aton(parts[-1])
            unpackedipv4 = struct.unpack('!HH', rawipv4)
            parts[-1:] = [hex(x)[2:] for x in unpackedipv4]

        parts = [int(x, 16) for x in parts]
        return struct.pack('!8H', *parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')

def inet_ntop(af, addr):
    # Convert packed bytes back to presentation format.  For IPv6 this also
    # finds the longest run of zero groups and collapses it into '::'.
    if af == socket.AF_INET:
        return socket.inet_ntoa(addr)
    elif af == socket.AF_INET6:
        if len(addr) != 16:
            raise ValueError("address length incorrect")
        parts = struct.unpack('!8H', addr)
        curBase = bestBase = None
        # Track the longest run of zero groups (curBase/curLen = current run,
        # bestBase/bestLen = best seen so far).
        for i in range(8):
            if not parts[i]:
                if curBase is None:
                    curBase = i
                    curLen = 0
                curLen += 1
            else:
                if curBase is not None:
                    if bestBase is None or curLen > bestLen:
                        bestBase = curBase
                        bestLen = curLen
                    curBase = None
        if curBase is not None and (bestBase is None or curLen > bestLen):
            bestBase = curBase
            bestLen = curLen
        parts = [hex(x)[2:] for x in parts]
        if bestBase is not None:
            # Collapse the best zero run; the extra inserts below restore the
            # leading/trailing colon when the run touches either end.
            parts[bestBase:bestBase + bestLen] = ['']
        if parts[0] == '':
            parts.insert(0, '')
        if parts[-1] == '':
            parts.insert(len(parts) - 1, '')
        return ':'.join(parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')

# Install the shims only if the platform's socket module can't already do
# IPv6 conversions itself.
try:
    socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
    socket.inet_pton = inet_pton
    socket.inet_ntop = inet_ntop
    socket.AF_INET6 = 'AF_INET6'

adict = dict

# OpenSSL/__init__.py imports OpenSSL.tsafe.  OpenSSL/tsafe.py imports
# threading.  threading imports thread.  All to make this stupid threadsafe
# version of its Connection class.  We don't even care about threadsafe
# Connections.  In the interest of not screwing over some crazy person
# calling into OpenSSL from another thread and trying to use Twisted's SSL
# support, we don't totally destroy OpenSSL.tsafe, but we will replace it
# with our own version which imports threading as late as possible.

class tsafe(object):
    class Connection:
        """
        OpenSSL.tsafe.Connection, defined in such a way as to not blow.
        """
        __module__ = 'OpenSSL.tsafe'

        def __init__(self, *args):
            from OpenSSL import SSL as _ssl
            self._ssl_conn = apply(_ssl.Connection, args)
            from threading import _RLock
            self._lock = _RLock()

        # Generate one lock-wrapping proxy method per OpenSSL Connection
        # method name; each acquires self._lock around the delegated call.
        for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
                  'renegotiate', 'bind', 'listen', 'connect', 'accept',
                  'setblocking', 'fileno', 'shutdown', 'close',
                  'get_cipher_list', 'getpeername', 'getsockname',
                  'getsockopt', 'setsockopt', 'makefile', 'get_app_data',
                  'set_app_data', 'state_string', 'sock_shutdown',
                  'get_peer_certificate', 'want_read', 'want_write',
                  'set_connect_state', 'set_accept_state', 'connect_ex',
                  'sendall'):
            exec """def %s(self, *args):
    self._lock.acquire()
    try:
        return apply(self._ssl_conn.%s, args)
    finally:
        self._lock.release()\n""" % (f, f)

# Pre-empt the real OpenSSL.tsafe module with our lightweight stand-in.
sys.modules['OpenSSL.tsafe'] = tsafe

import operator

try:
    operator.attrgetter
except AttributeError:
    # Backfill operator.attrgetter for Pythons predating it (pre-2.4).
    class attrgetter(object):
        def __init__(self, name):
            self.name = name
        def __call__(self, obj):
            return getattr(obj, self.name)
    operator.attrgetter = attrgetter
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/compat.py
compat.py
# import tokenize, cgi, keyword import reflect class TokenPrinter: currentCol, currentLine = 0, 1 lastIdentifier = parameters = 0 def __init__(self, writer): self.writer = writer def printtoken(self, type, token, (srow, scol), (erow, ecol), line): #print "printtoken(%r,%r,%r,(%r,%r),(%r,%r),%r), row=%r,col=%r" % ( # self, type, token, srow,scol, erow,ecol, line, # self.currentLine, self.currentCol) if self.currentLine < srow: self.writer('\n'*(srow-self.currentLine)) self.currentLine, self.currentCol = srow, 0 self.writer(' '*(scol-self.currentCol)) if self.lastIdentifier: type = "identifier" self.parameters = 1 elif type == tokenize.NAME: if keyword.iskeyword(token): type = 'keyword' else: if self.parameters: type = 'parameter' else: type = 'variable' else: type = tokenize.tok_name.get(type).lower() self.writer(token, type) self.currentCol = ecol self.currentLine += token.count('\n') if self.currentLine != erow: self.currentCol = 0 self.lastIdentifier = token in ('def', 'class') if token == ':': self.parameters = 0 class HTMLWriter: noSpan = [] def __init__(self, writer): self.writer = writer noSpan = [] reflect.accumulateClassList(self.__class__, "noSpan", noSpan) self.noSpan = noSpan def write(self, token, type=None): token = cgi.escape(token) if (type is None) or (type in self.noSpan): self.writer(token) else: self.writer('<span class="py-src-%s">%s</span>' % (type, token)) class SmallerHTMLWriter(HTMLWriter): """HTMLWriter that doesn't generate spans for some junk. Results in much smaller HTML output. """ noSpan = ["endmarker", "indent", "dedent", "op", "newline", "nl"] def filter(inp, out, writer=HTMLWriter): out.write('<pre>\n') printer = TokenPrinter(writer(out.write).write).printtoken try: tokenize.tokenize(inp.readline, printer) except tokenize.TokenError: pass out.write('</pre>\n') def main(): import sys filter(open(sys.argv[1]), sys.stdout) if __name__ == '__main__': main()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/htmlizer.py
htmlizer.py
# Deprecated plugins.tml-based plugin discovery for Twisted: scan sys.path
# for packages containing plugins.tml, execfile them, and collect the PlugIn
# objects they register.
# NOTE(review): Python 2 code (has_key, execfile, `except OSError, (err, s)`).

from __future__ import nested_scopes

# System Imports
import sys
import os
import errno
import types
import warnings

# Twisted imports
from twisted.python import util

# Sibling Imports
from reflect import namedModule

# realpath resolves symlinks where available; fall back to abspath.
try:
    from os.path import realpath as cacheTransform
except ImportError:
    from os.path import abspath as cacheTransform


class PlugIn:
    """I am a Python module registered in a plugins.tml file.
    """
    def __init__(self, name, module, **kw):
        self.name = name
        self.module = module
        # Arbitrary extra attributes (e.g. 'type') come from the register()
        # call in the plugins.tml file.
        for key, value in kw.items():
            setattr(self, key, value)

    def isLoaded(self):
        """Check to see if the module for this plugin has been imported yet.

        @rtype: C{int}
        @return: A true value if the module for this plugin has been loaded,
        false otherwise.
        """
        return sys.modules.has_key(self.module)

    def load(self):
        """Load the module for this plugin.

        @rtype: C{ModuleType}
        @return: The module object that is loaded.
        """
        return namedModule(self.module)

    def __repr__(self):
        if self.isLoaded():
            loaded = ' loaded'
        else:
            loaded = ''
        return "<Plugin %s %s%s>" % (repr(self.name), self.module, loaded)


class DropIn:
    """I am a Python package containing plugins.tml.
    """
    def __init__(self, name):
        self.name = name
        self.plugins = []

    def register(self, name, module, **kw):
        """Register a new plug-in.
        """
        warnings.warn("The twisted.python.plugin system is deprecated. "
                      "See twisted.plugin for the revised edition.",
                      DeprecationWarning, 2)
        self.plugins.append(PlugIn(name, module, **kw))

    def __repr__(self):
        return "<Package %s %s>" % (self.name, self.plugins)


def _prepCallbacks(debug, progress):
    # Normalize the debug/progress arguments into callables.  A non-callable
    # true value (the deprecated int form) raises when invoked, and the
    # except branch substitutes a stdout-writing default.
    if debug:
        try:
            debug('Looking for plugin.tml files')
        except:
            debug = lambda x: sys.stdout.write(x + '\n')
            debug('Looking for plugin.tml files')
    else:
        debug = lambda x: None
    if progress:
        try:
            progress(0.0)
        except:
            pb = util.makeStatusBar(76)
            progress = lambda x, pb=pb: sys.stdout.write(pb(x) + '\r')
            progress(0.0)
    else:
        progress = lambda x: None
    return debug, progress


def getPluginFileList(debugInspection=None, showProgress=None):
    """Find plugin.tml files in subdirectories of paths in C{sys.path}

    @type debugInspection: C{None} or a callable taking one argument
    @param debugInspection: If not None, this is invoked with strings
    containing debug information about the loading process.  If it is any
    other true value, this debug information is written to stdout (This
    behavior is deprecated).

    @type showProgress: C{None} or a callable taking one argument.
    @param showProgress: If not None, this is invoked with floating point
    values between 0 and 1 describing the progress of the loading process.
    If it is any other true value, this progress information is written to
    stdout.  (This behavior is deprecated).

    @rtype: C{list} of C{str}
    @return: A list of the plugin.tml files found.
    """
    if isinstance(debugInspection, types.IntType):
        warnings.warn(
            "int parameter for debugInspection is deprecated, pass None or "
            "a function that takes a single argument instead.",
            DeprecationWarning, 2
        )
    if isinstance(showProgress, types.IntType):
        warnings.warn(
            "int parameter for showProgress is deprecated, pass None or "
            "a function that takes a single argument instead.",
            DeprecationWarning, 2
        )
    debugInspection, showProgress = _prepCallbacks(debugInspection, showProgress)

    exists = os.path.exists
    join = os.sep.join
    result = []
    # NOTE(review): `loaded` is read below but never written to, so the
    # "Already saw" dedup branch can never trigger; duplicates are instead
    # caught per-subdirectory via seenNames.
    loaded = {}
    seenNames = {}

    # XXX Some people claim to have found non-strings in sys.path (an empty
    # list, in particular).  Instead of tracking down the cause for their
    # presence, they decided it was better to discard them unconditionally
    # without further investigation.  At some point, someone should track
    # down where non-strings are coming from and do something about them.
    paths = [cacheTransform(p) for p in sys.path
             if isinstance(p, str) and os.path.isdir(p)]

    # special case for commonly used directories we *know* shouldn't be checked
    # and really slow down mktap and such-like in real installations
    for p in ("/usr/bin", "/usr/local/bin"):
        try:
            paths.remove(p)
        except ValueError:
            pass

    progress = 0.0
    increments = 1.0 / len(paths)

    for (index, d) in zip(range(len(paths)), paths):
        showProgress(progress)
        if loaded.has_key(d):
            debugInspection('Already saw ' + d)
            continue
        else:
            debugInspection('Recursing through ' + d)
        try:
            subDirs = os.listdir(d)
        except OSError, (err, s):
            # Permission denied, carry on
            if err == errno.EACCES:
                debugInspection('Permission denied on ' + d)
            else:
                raise
        else:
            # filter out files we obviously don't need to check - ones with '.' in them
            subDirs = [s for s in subDirs if "." not in s]
            if not subDirs:
                continue
            incr = increments * (1.0 / len(subDirs))
            for plugindir in subDirs:
                if seenNames.has_key(plugindir):
                    debugInspection('Seen %s already' % plugindir)
                    continue
                tmlname = join((d, plugindir, "plugins.tml"))
                if isAModule(join((d,plugindir))):
                    seenNames[plugindir] = 1
                    if exists(tmlname):
                        result.append(tmlname)
                        debugInspection('Found ' + tmlname)
                    else:
                        debugInspection('Failed ' + tmlname)
                else:
                    debugInspection('Not a module ' + tmlname)
                progress = progress + incr
                showProgress(progress)

    showProgress(1.0)
    return result


def loadPlugins(plugInType, fileList, debugInspection=None, showProgress=None):
    """Traverse the given list of files and attempt to load plugins from them.

    @type plugInType: C{str}
    @param plugInType: The type of plugin to search for.  This is tested
    against the C{type} argument to the C{register} function in the
    plugin.tml files.

    @type fileList: C{list} of C{str}
    @param fileList: A list of the files to attempt to load plugin
    information from.  One name is put in their scope, the C{register}
    function.

    @type debugInspection: C{None} or a callable taking one argument
    @param debugInspection: If not None, this is invoked with strings
    containing debug information about the loading process.  If it is any
    other true value, this debug information is written to stdout (This
    behavior is deprecated).

    @type showProgress: C{None} or a callable taking one argument.
    @param showProgress: If not None, this is invoked with floating point
    values between 0 and 1 describing the progress of the loading process.
    If it is any other true value, this progress information is written to
    stdout.  (This behavior is deprecated).

    @rtype: C{list}
    @return: A list of the C{PlugIn} objects found.
    """
    if isinstance(debugInspection, types.IntType):
        warnings.warn(
            "int parameter for debugInspection is deprecated, pass None or "
            "a function that takes a single argument instead.",
            DeprecationWarning, 4
        )
    if isinstance(showProgress, types.IntType):
        warnings.warn(
            "int parameter for showProgress is deprecated, pass None or "
            "a function that takes a single argument instead.",
            DeprecationWarning, 4
        )
    result = []
    debugInspection, showProgress = _prepCallbacks(debugInspection, showProgress)

    if not fileList:
        raise ValueError("No plugins passed to loadPlugins")

    increments = 1.0 / len(fileList)
    progress = 0.0

    for (index, tmlFile) in zip(range(len(fileList)), fileList):
        showProgress(progress)
        debugInspection("Loading from " + tmlFile)
        # The DropIn is named after the directory containing plugins.tml.
        pname = os.path.split(os.path.abspath(tmlFile))[-2]
        dropin = DropIn(pname)
        ns = {'register': dropin.register, '__file__': tmlFile}
        try:
            execfile(tmlFile, ns)
        except (IOError, OSError), e:
            # guess we don't have permissions for that
            debugInspection("Error loading: %s" % e)
            continue

        ldp = len(dropin.plugins) or 1.0
        incr = increments * (1.0 / ldp)
        for plugin in dropin.plugins:
            if plugInType == plugin.type:
                result.append(plugin)
                debugInspection("Found %r" % (plugin,))
            else:
                debugInspection("Disqualified %r" % (plugin,))
            progress = progress + incr
            showProgress(progress)
        debugInspection("Finished loading from %s!" % tmlFile)

    showProgress(1.0)
    debugInspection("Returning %r" % (result,))
    return result


def getPlugIns(plugInType, debugInspection=None, showProgress=None):
    """Helper function to get all the plugins of a particular type.

    @type plugInType: C{str}
    @param plugInType: The type of plugin to search for.  This is tested
    against the C{type} argument to the C{register} function in the
    plugin.tml files.

    @type debugInspection: C{None} or a callable taking one argument
    @param debugInspection: If not None, this is invoked with strings
    containing debug information about the loading process.  If it is any
    other true value, this debug information is written to stdout (This
    behavior is deprecated).

    @type showProgress: C{None} or a callable taking one argument.
    @param showProgress: If not None, this is invoked with floating point
    values between 0 and 1 describing the progress of the loading process.
    If it is any other true value, this progress information is written to
    stdout.  (This behavior is deprecated).

    @rtype: C{list}
    @return: A list of C{PlugIn} objects that were found.
    """
    warnings.warn("The twisted.python.plugin system is deprecated. "
                  "See twisted.plugin for the revised edition.",
                  DeprecationWarning, 2)
    return _getPlugIns(plugInType, debugInspection, showProgress)


def _getPlugIns(plugInType, debugInspection=None, showProgress=None):
    # Internal worker for getPlugIns: find the tml files, then load them,
    # splitting the progress range between the two phases.
    if isinstance(debugInspection, types.IntType):
        warnings.warn(
            "int parameter for debugInspection is deprecated, pass None or "
            "a function that takes a single argument instead.",
            DeprecationWarning, 3
        )
    if isinstance(showProgress, types.IntType):
        warnings.warn(
            "int parameter for showProgress is deprecated, pass None or "
            "a function that takes a single argument instead.",
            DeprecationWarning, 3
        )
    debugInspection, showProgress = _prepCallbacks(debugInspection, showProgress)

    # First half of the progress bar covers file discovery, second half
    # covers actually loading the plugins.
    firstHalf = secondHalf = lambda x: None
    if showProgress:
        firstHalf = lambda x: showProgress(x / 2.0)
        secondHalf = lambda x: showProgress(x / 2.0 + 0.5)

    tmlFiles = getPluginFileList(debugInspection, firstHalf)
    if not tmlFiles:
        return []
    return loadPlugins(plugInType, tmlFiles, debugInspection, secondHalf)


def isAModule(d):
    """This function checks the directory for __init__ files.
    """
    suffixes = ['py', 'pyc', 'pyo', 'so', 'pyd', 'dll']
    exists = os.path.exists
    join = os.sep.join
    for s in suffixes:  # bad algorithm, but probably works
        if exists(join((d,'__init__.%s' % s))):
            return 1
    return 0

__all__ = ['PlugIn', 'DropIn', 'getPluginFileList', 'loadPlugins',
           'getPlugIns']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/plugin.py
plugin.py
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.

"""Component architecture for Twisted, based on Zope3 components.

Using the Zope3 API directly is strongly recommended. Everything
you need is in the top-level of the zope.interface package, e.g.::

   from zope.interface import Interface, implements

   class IFoo(Interface):
       pass

   class Foo:
       implements(IFoo)

   print IFoo.implementedBy(Foo) # True
   print IFoo.providedBy(Foo()) # True

The one exception is L{twisted.python.components.registerAdapter}, which
is still the way to register adapters (at least, if you want Twisted's
global adapter registry).
"""

# twisted imports
from twisted.python import reflect
from twisted.persisted import styles

# system imports
import warnings

# zope3 imports
from zope.interface import interface, declarations
from zope.interface.adapter import AdapterRegistry


class ComponentsDeprecationWarning(DeprecationWarning):
    """Nothing emits this warning anymore."""
    pass

# Twisted's global adapter registry
globalRegistry = AdapterRegistry()

# Attribute that registerAdapter looks at. Is this supposed to be public?
ALLOW_DUPLICATES = 0

# Define a function to find the registered adapter factory, using either a
# version of Zope Interface which has the `registered' method or an older
# version which does not.
if getattr(AdapterRegistry, 'registered', None) is None:
    def _registered(registry, required, provided):
        """
        Return the adapter factory for the given parameters in the given
        registry, or None if there is not one.
        """
        # Older zope.interface: dig into the registry's internal structure.
        return registry.get(required).selfImplied.get(provided, {}).get('')
else:
    def _registered(registry, required, provided):
        """
        Return the adapter factory for the given parameters in the given
        registry, or None if there is not one.
        """
        return registry.registered([required], provided)


def registerAdapter(adapterFactory, origInterface, *interfaceClasses):
    """Register an adapter class.

    An adapter class is expected to implement the given interface, by
    adapting instances implementing 'origInterface'. An adapter class's
    __init__ method should accept one parameter, an instance implementing
    'origInterface'.

    @raise ValueError: if an adapter is already registered for one of the
        given interfaces and ALLOW_DUPLICATES is false.
    """
    self = globalRegistry
    assert interfaceClasses, "You need to pass an Interface"
    global ALLOW_DUPLICATES

    # deal with class->interface adapters:
    if not isinstance(origInterface, interface.InterfaceClass):
        origInterface = declarations.implementedBy(origInterface)

    # Check all targets for duplicates first, then register, so a rejected
    # call leaves the registry untouched.
    for interfaceClass in interfaceClasses:
        factory = _registered(self, origInterface, interfaceClass)
        if factory is not None and not ALLOW_DUPLICATES:
            raise ValueError("an adapter (%s) was already registered." % (factory, ))
    for interfaceClass in interfaceClasses:
        self.register([origInterface], interfaceClass, '', adapterFactory)


def getAdapterFactory(fromInterface, toInterface, default):
    """Return registered adapter for a given class and interface.

    Note that is tied to the *Twisted* global registry, and will
    thus not find adapters registered elsewhere.
    """
    self = globalRegistry
    if not isinstance(fromInterface, interface.InterfaceClass):
        fromInterface = declarations.implementedBy(fromInterface)
    factory = self.lookup1(fromInterface, toInterface)
    if factory is None:
        factory = default
    return factory


# add global adapter lookup hook for our newly created registry
def _hook(iface, ob, lookup=globalRegistry.lookup1):
    # Called by zope.interface when adapting ob to iface; returns the adapted
    # object or None if no adapter factory is registered.
    factory = lookup(declarations.providedBy(ob), iface)
    if factory is None:
        return None
    else:
        return factory(ob)
interface.adapter_hooks.append(_hook)

## backwardsCompatImplements and fixClassImplements should probably stick around for another
## release cycle. No harm doing so in any case.

def backwardsCompatImplements(klass):
    """DEPRECATED. Does nothing.

    Previously handled backwards compat from a zope.interface using class
    to a class wanting old twisted components interface behaviors.
    """
    warnings.warn("components.backwardsCompatImplements doesn't do anything in Twisted 2.3, stop calling it.", ComponentsDeprecationWarning, stacklevel=2)

def fixClassImplements(klass):
    """DEPRECATED. Does nothing.

    Previously converted class from __implements__ to zope implementation.
    """
    warnings.warn("components.fixClassImplements doesn't do anything in Twisted 2.3, stop calling it.", ComponentsDeprecationWarning, stacklevel=2)


def getRegistry():
    """Returns the Twisted global
    C{zope.interface.adapter.AdapterRegistry} instance.
    """
    return globalRegistry

# FIXME: deprecate attribute somehow?
CannotAdapt = TypeError

class Adapter:
    """I am the default implementation of an Adapter for some interface.

    This docstring contains a limerick, by popular demand::

        Subclassing made Zope and TR
        much harder to work with by far.
            So before you inherit,
            be sure to declare it
        Adapter, not PyObject*

    @cvar temporaryAdapter: If this is True, the adapter will not be
          persisted on the Componentized.
    @cvar multiComponent: If this adapter is persistent, should it be
          automatically registered for all appropriate interfaces.
    """

    # These attributes are used with Componentized.
    temporaryAdapter = 0
    multiComponent = 1

    def __init__(self, original):
        """Set my 'original' attribute to be the object I am adapting.
        """
        self.original = original

    def __conform__(self, interface):
        """
        I forward __conform__ to self.original if it has it, otherwise I
        simply return None.
        """
        if hasattr(self.original, "__conform__"):
            return self.original.__conform__(interface)
        return None

    def isuper(self, iface, adapter):
        """
        Forward isuper to self.original
        """
        return self.original.isuper(iface, adapter)


class Componentized(styles.Versioned):
    """I am a mixin to allow you to be adapted in various ways persistently.

    I define a list of persistent adapters.  This is to allow adapter classes
    to store system-specific state, and initialized on demand.  The
    getComponent method implements this.  You must also register adapters
    for this class for the interfaces that you wish to pass to getComponent.

    Many other classes and utilities listed here are present in Zope3; this
    one is specific to Twisted.
    """

    persistenceVersion = 1

    def __init__(self):
        # Maps qualified interface name -> cached adapter instance.
        self._adapterCache = {}

    def locateAdapterClass(self, klass, interfaceClass, default):
        return getAdapterFactory(klass, interfaceClass, default)

    def setAdapter(self, interfaceClass, adapterClass):
        self.setComponent(interfaceClass, adapterClass(self))

    def addAdapter(self, adapterClass, ignoreClass=0):
        """Utility method that calls addComponent.  I take an adapter class
        and instantiate it with myself as the first argument.

        @return: The adapter instantiated.
        """
        adapt = adapterClass(self)
        self.addComponent(adapt, ignoreClass)
        return adapt

    def setComponent(self, interfaceClass, component):
        """Cache `component' as this object's adapter for `interfaceClass'.
        """
        self._adapterCache[reflect.qual(interfaceClass)] = component

    def addComponent(self, component, ignoreClass=0):
        """
        Add a component to me, for all appropriate interfaces.

        In order to determine which interfaces are appropriate, the component's
        provided interfaces will be scanned.

        If the argument 'ignoreClass' is True, then all interfaces are
        considered appropriate.

        Otherwise, an 'appropriate' interface is one for which its class has
        been registered as an adapter for my class according to the rules of
        getComponent.

        @return: the list of appropriate interfaces
        """
        for iface in declarations.providedBy(component):
            if (ignoreClass or
                (self.locateAdapterClass(self.__class__, iface, None)
                 == component.__class__)):
                self._adapterCache[reflect.qual(iface)] = component

    def unsetComponent(self, interfaceClass):
        """Remove my component specified by the given interface class."""
        del self._adapterCache[reflect.qual(interfaceClass)]

    def removeComponent(self, component):
        """
        Remove the given component from me entirely, for all interfaces for
        which it has been registered.

        @return: a list of the interfaces that were removed.
        """
        l = []
        for k, v in self._adapterCache.items():
            if v is component:
                del self._adapterCache[k]
                l.append(reflect.namedObject(k))
        return l

    def getComponent(self, interface, default=None):
        """Create or retrieve an adapter for the given interface.

        If such an adapter has already been created, retrieve it from the
        cache that this instance keeps of all its adapters.  Adapters created
        through this mechanism may safely store system-specific state.

        If you want to register an adapter that will be created through
        getComponent, but you don't require (or don't want) your adapter to
        be cached and kept alive for the lifetime of this Componentized
        object, set the attribute 'temporaryAdapter' to True on your adapter
        class.

        If you want to automatically register an adapter for all appropriate
        interfaces (with addComponent), set the attribute 'multiComponent'
        to True on your adapter class.
        """
        k = reflect.qual(interface)
        if self._adapterCache.has_key(k):
            return self._adapterCache[k]
        else:
            # Not cached: ask zope.interface to adapt us (which will consult
            # the registered _hook above).
            adapter = interface.__adapt__(self)
            if adapter is not None and not (
                hasattr(adapter, "temporaryAdapter") and
                adapter.temporaryAdapter):
                self._adapterCache[k] = adapter
                if (hasattr(adapter, "multiComponent") and
                    adapter.multiComponent):
                    self.addComponent(adapter)
            if adapter is None:
                return default
            return adapter

    def __conform__(self, interface):
        return self.getComponent(interface)


class ReprableComponentized(Componentized):
    # Componentized whose repr() pretty-prints its adapter cache.
    def __init__(self):
        Componentized.__init__(self)

    def __repr__(self):
        from cStringIO import StringIO
        from pprint import pprint
        sio = StringIO()
        pprint(self._adapterCache, sio)
        return sio.getvalue()


__all__ = [
    # Sticking around:
    "ComponentsDeprecationWarning", "registerAdapter", "getAdapterFactory",
    "Adapter", "Componentized", "ReprableComponentized", "getRegistry",

    # Deprecated:
    "backwardsCompatImplements", "fixClassImplements",
]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/components.py
components.py
import string import random def stringToLong(s): """ Convert digest to long """ result = 0L for byte in s: result = (256 * result) + ord(byte) return result def stringToDWords(s): """ Convert digest to a list of four 32-bits words """ result = [] for a in xrange(len(s) / 4): tmp = 0L for byte in s[-4:]: tmp = (256 * tmp) + ord(byte) result.append(tmp) s = s[:-4] return result def longToString(l): """ Convert long to digest """ result = "" while l > 0L: result = chr(l % 256) + result l = l / 256L return result import md5, sha hashid = {md5: 'md5', sha: 'sha1'} INITIALSEQUENCE = 1000 MINIMUMSEQUENCE = 50 class Unauthorized(Exception): """the Unauthorized exception This exception is raised when an action is not allowed, or a user is not authenticated properly. """ class OTPAuthenticator: """A One Time Password System Based on RFC 2289, which is based on a the S/KEY Authentication-scheme. It uses the MD5- and SHA-algorithms for hashing The variable OTP is at all times a 64bit string""" def __init__(self, hash = md5): "Set the hash to either md5 or sha1" self.hash = hash pass def generateSeed(self): "Return a 10 char random seed, with 6 lowercase chars and 4 digits" seed = '' for x in range(6): seed = seed + chr(random.randrange(97,122)) for x in range(4): seed = seed + chr(random.randrange(48,57)) return seed def foldDigest(self, otp): if self.hash == md5: return self.foldDigest128(otp) if self.hash == sha: return self.foldDigest160(otp) def foldDigest128(self, otp128): "Fold a 128 bit digest to 64 bit" regs = stringToDWords(otp128) p0 = regs[0] ^ regs[2] p1 = regs[1] ^ regs[3] S = '' for a in xrange(4): S = chr(p0 & 0xFF) + S p0 = p0 >> 8 for a in xrange(4): S = chr(p1 & 0xFF) + S p1 = p1 >> 8 return S def foldDigest160(self, otp160): "Fold a 160 bit digest to 64 bit" regs = stringToDWords(otp160) p0 = regs[0] ^ regs[2] p1 = regs[1] ^ regs[3] p0 = regs[0] ^ regs[4] S = '' for a in xrange(4): S = chr(p0 & 0xFF) + S p0 = p0 >> 8 for a in xrange(4): S = chr(p1 & 0xFF) + 
S p1 = p1 >> 8 return S def hashUpdate(self, digest): "Run through the hash and fold to 64 bit" h = self.hash.new(digest) return self.foldDigest(h.digest()) def generateOTP(self, seed, passwd, sequence): """Return a 64 bit OTP based on inputs Run through makeReadable to get a 6 word pass-phrase""" seed = string.lower(seed) otp = self.hashUpdate(seed + passwd) for a in xrange(sequence): otp = self.hashUpdate(otp) return otp def calculateParity(self, otp): "Calculate the parity from a 64bit OTP" parity = 0 for i in xrange(0, 64, 2): parity = parity + otp & 0x3 otp = otp >> 2 return parity def makeReadable(self, otp): "Returns a 6 word pass-phrase from a 64bit OTP" digest = stringToLong(otp) list = [] parity = self.calculateParity(digest) for i in xrange(4,-1, -1): list.append(dict[(digest >> (i * 11 + 9)) & 0x7FF]) list.append(dict[(digest << 2) & 0x7FC | (parity & 0x03)]) return string.join(list) def challenge(self, seed, sequence): """Return a challenge in the format otp-<hash> <sequence> <seed>""" return "otp-%s %i %s" % (hashid[self.hash], sequence, seed) def parsePhrase(self, phrase): """Decode the phrase, and return a 64bit OTP I will raise Unauthorized if the parity is wrong TODO: Add support for hex (MUST) and the '2nd scheme'(SHOULD)""" words = string.split(phrase) for i in xrange(len(words)): words[i] = string.upper(words[i]) b = 0L for i in xrange(0,5): b = b | ((long(dict.index(words[i])) << ((4-i)*11L+9L))) tmp = dict.index(words[5]) b = b | (tmp & 0x7FC ) >> 2 if (tmp & 3) <> self.calculateParity(b): raise Unauthorized("Parity error") digest = longToString(b) return digest class OTP(OTPAuthenticator): """An automatic version of the OTP-Authenticator Updates the sequence and the keeps last approved password on success On the next authentication, the stored password is hashed and checked up against the one given by the user. If they match, the sequencecounter is decreased and the circle is closed. 
This object should be glued to each user Note: It does NOT reset the sequence when the combinations left approach zero, This has to be done manuelly by instancing a new object """ seed = None sequence = 0 lastotp = None def __init__(self, passwd, sequence = INITIALSEQUENCE, hash=md5): """Initialize the OTP-Sequence, and discard the password""" OTPAuthenticator.__init__(self, hash) seed = self.generateSeed() # Generate the 'last' password self.lastotp = OTPAuthenticator.generateOTP(self, seed, passwd, sequence+1) self.seed = seed self.sequence = sequence def challenge(self): """Return a challenge string""" result = OTPAuthenticator.challenge(self, self.seed, self.sequence) return result def authenticate(self, phrase): """Test the phrase against the last challenge issued""" try: digest = self.parsePhrase(phrase) hasheddigest = self.hashUpdate(digest) if (self.lastotp == hasheddigest): self.lastotp = digest if self.sequence > MINIMUMSEQUENCE: self.sequence = self.sequence - 1 return "ok" else: raise Unauthorized("Failed") except Unauthorized, msg: raise Unauthorized(msg) # # The 2048 word standard dictionary from RFC 1760 # dict = ["A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD", "AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA", "AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK", "ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE", "AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM", "BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET", "BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO", "BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT", "BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT", "CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY", "CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN", "DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG", "DIN", "DIP", "DO", "DOE", "DOG", "DON", 
"DOT", "DOW", "DRY", "DUB", "DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO", "ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE", "EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW", "FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR", "FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP", "GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO", "GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD", "HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM", "HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT", "HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE", "HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL", "INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT", "ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET", "JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT", "KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB", "LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE", "LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT", "LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG", "LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW", "MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT", "MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG", "MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED", "NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD", "NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF", "OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL", "OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT", "OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD", "PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG", "PEN", "PEP", "PER", "PET", "PEW", "PHI", 
"PI", "PIE", "PIN", "PIT", "PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB", "PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT", "RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM", "RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB", "RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM", "SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET", "SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY", "SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY", "SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN", "TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE", "TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP", "TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP", "US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS", "WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT", "WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE", "YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT", "ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS", "ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE", "AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA", "ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN", "AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW", "ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA", "ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM", "AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW", "AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL", "BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM", "BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK", "BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH", "BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT", "BEAU", "BECK", "BEEF", "BEEN", 
"BEER", "BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN", "BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE", "BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE", "BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT", "BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK", "BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT", "BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK", "BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS", "BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN", "BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD", "BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG", "BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST", "BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF", "CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL", "CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL", "CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF", "CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG", "CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY", "CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA", "COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN", "COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK", "COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST", "COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB", "CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY", "CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE", "DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN", "DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS", "DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED", "DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK", "DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT", "DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES", "DOLE", 
"DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA", "DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG", "DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK", "DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK", "DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST", "EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT", "EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT", "EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED", "FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL", "FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT", "FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST", "FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE", "FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE", "FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW", "FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM", "FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL", "FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL", "FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY", "FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY", "FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA", "GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH", "GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE", "GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT", "GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN", "GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD", "GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG", "GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB", "GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN", "GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH", "GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR", "HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK", "HANS", "HARD", "HARK", 
"HARM", "HART", "HASH", "HAST", "HATE", "HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR", "HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL", "HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN", "HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT", "HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE", "HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK", "HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL", "HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK", "HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE", "HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH", "INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE", "ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE", "JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL", "JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN", "JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY", "JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST", "JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL", "KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL", "KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW", "KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD", "KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN", "LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD", "LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS", "LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER", "LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST", "LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU", "LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB", "LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST", "LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE", "LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD", "LORE", "LOSE", "LOSS", "LOST", "LOUD", 
"LOVE", "LOWE", "LUCK", "LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE", "LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE", "MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI", "MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK", "MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE", "MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK", "MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH", "MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT", "MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS", "MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD", "MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON", "MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH", "MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK", "MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL", "NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR", "NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS", "NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA", "NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON", "NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB", "OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY", "OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE", "ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS", "OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY", "OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT", "RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE", "RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR", "RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA", "REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT", "RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD", "ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME", "ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", 
"ROSS", "ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY", "RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE", "RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE", "SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE", "SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR", "SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK", "SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS", "SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN", "SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE", "SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE", "SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW", "SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY", "SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT", "SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB", "SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA", "SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE", "SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR", "STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH", "SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF", "SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM", "TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK", "TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM", "TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS", "TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN", "THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER", "TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY", "TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG", "TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR", "TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG", "TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE", "TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK", "TWIG", 
"TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER", "USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST", "VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY", "VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE", "WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK", "WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM", "WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY", "WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR", "WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM", "WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE", "WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE", "WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD", "WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE", "YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR", "YELL", "YOGA", "YOKE"]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/otp.py
otp.py
__metaclass__ = type

# let's try to keep path imports to a minimum...
from os.path import dirname, split as splitpath

import sys
import zipimport
import inspect
from errno import ENOTDIR

from zope.interface import Interface, implements

from twisted.python.components import registerAdapter
from twisted.python.filepath import FilePath
from twisted.python.zippath import ZipArchive
from twisted.python.reflect import namedAny
from twisted.python.win32 import WindowsError
from twisted.python.win32 import ERROR_DIRECTORY

# Sentinel distinguishing "no default supplied" from an explicit None.
_nothing = object()

PYTHON_EXTENSIONS = ['.py']
# Under python -O the docstring is stripped, so __doc__ is None; in that
# mode compiled modules are .pyo rather than .pyc.
OPTIMIZED_MODE = __doc__ is None
if OPTIMIZED_MODE:
    PYTHON_EXTENSIONS.append('.pyo')
else:
    PYTHON_EXTENSIONS.append('.pyc')


def _isPythonIdentifier(string):
    """
    cheezy fake test for proper identifier-ness.

    @param string: a str which might or might not be a valid python
    identifier.

    @return: True or False
    """
    # Only rejects spaces, dots and dashes; not a full identifier check.
    return (' ' not in string and
            '.' not in string and
            '-' not in string)


def _isPackagePath(fpath):
    # Determine if a FilePath-like object is a Python package.  TODO: deal with
    # __init__module.(so|dll|pyd)?
    extless = fpath.splitext()[0]
    basend = splitpath(extless)[1]
    return basend == "__init__"


class _ModuleIteratorHelper:
    """
    This mixin provides common behavior between python module and path entries,
    since the mechanism for searching sys.path and __path__ attributes is
    remarkably similar.
    """

    def iterModules(self):
        """
        Loop over the modules present below this entry or package on PYTHONPATH.

        For modules which are not packages, this will yield nothing.

        For packages and path entries, this will only yield modules one level
        down; i.e. if there is a package a.b.c, iterModules on a will only
        return a.b.  If you want to descend deeply, use walkModules.

        @return: a generator which yields PythonModule instances that describe
        modules which can be, or have been, imported.
        """
        # Tracks module names already produced, so the same module found via
        # several path entries is yielded only once.
        yielded = {}
        if not self.filePath.exists():
            return
        for placeToLook in self._packagePaths():
            try:
                children = placeToLook.children()
            except WindowsError, e:
                # Python 2.5 raises WindowsError with winerror set; earlier
                # versions only have errno.
                errno = getattr(e, 'winerror', e.errno)
                if errno == ERROR_DIRECTORY:
                    # It is a non-directory, skip it.
                    continue
                raise
            except OSError, e:
                if e.errno in (ENOTDIR,):
                    # It is a non-directory, skip it.
                    continue
                raise
            for potentialTopLevel in children:
                name, ext = potentialTopLevel.splitext()
                if ext in PYTHON_EXTENSIONS:
                    # TODO: this should be a little choosier about which path
                    # entry it selects first, and it should do all the .so
                    # checking and crud
                    potentialBasename = potentialTopLevel.basename()[:-len(ext)]
                    if not _isPythonIdentifier(potentialBasename):
                        continue
                    modname = self._subModuleName(potentialBasename)
                    if modname.split(".")[-1] == '__init__':
                        # This marks the directory as a package so it can't be
                        # a module.
                        continue
                    if modname not in yielded:
                        yielded[modname] = True
                        pm = PythonModule(modname, potentialTopLevel,
                                          self._getEntry())
                        assert pm != self
                        yield pm
                elif potentialTopLevel.isdir():
                    # A directory is a package only if it carries an
                    # __init__ with one of the recognized extensions.
                    modname = self._subModuleName(potentialTopLevel.basename())
                    for ext in PYTHON_EXTENSIONS:
                        initpy = potentialTopLevel.child("__init__" + ext)
                        if initpy.exists():
                            yielded[modname] = True
                            pm = PythonModule(modname, initpy, self._getEntry())
                            assert pm != self
                            yield pm
                            break

    def walkModules(self, importPackages=False):
        """
        Similar to L{iterModules}, this yields self, and then every module in my
        package or entry, and every submodule in each package or entry.

        In other words, this is deep, and L{iterModules} is shallow.
        """
        yield self
        for package in self.iterModules():
            for module in package.walkModules(importPackages=importPackages):
                yield module

    def _subModuleName(self, mn):
        """
        This is a hook to provide packages with the ability to specify their
        names as a prefix to submodules here.
        """
        return mn

    def _packagePaths(self):
        """
        Implement in subclasses to specify where to look for modules.

        @return: iterable of FilePath-like objects.
        """
        raise NotImplementedError()

    def _getEntry(self):
        """
        Implement in subclasses to specify what path entry submodules will come
        from.

        @return: a PathEntry instance.
        """
        raise NotImplementedError()

    def __getitem__(self, modname):
        """
        Retrieve a module from below this path or package.

        @param modname: a str naming a module to be loaded.  For entries, this
        is a top-level, undotted package name, and for packages it is the name
        of the module without the package prefix.  For example, if you have a
        PythonModule representing the 'twisted' package, you could use:

            twistedPackageObj['python']['modules']

        to retrieve this module.

        @raise: KeyError if the module is not found.

        @return: a PythonModule.
        """
        # Linear scan; each lookup re-runs iterModules.
        for module in self.iterModules():
            if module.name == self._subModuleName(modname):
                return module
        raise KeyError(modname)

    def __iter__(self):
        """
        Implemented to raise NotImplementedError for clarity, so that attempting
        to loop over this object won't call __getitem__.

        Note: in the future there might be some sensible default for iteration,
        like 'walkEverything', so this is deliberately untested and undefined
        behavior.
        """
        raise NotImplementedError()


class PythonAttribute:
    """
    I represent a function, class, or other object that is present.

    @ivar name: the fully-qualified python name of this attribute.

    @ivar onObject: a reference to a PythonModule or other PythonAttribute that
    is this attribute's logical parent.

    @ivar name: the fully qualified python name of the attribute represented by
    this class.
    """
    def __init__(self, name, onObject, loaded, pythonValue):
        """
        Create a PythonAttribute.  This is a private constructor.  Do not
        construct me directly, use PythonModule.iterAttributes.

        @param name: the FQPN
        @param onObject: see ivar
        @param loaded: always True, for now
        @param pythonValue: the value of the attribute we're pointing to.
        """
        self.name = name
        self.onObject = onObject
        self._loaded = loaded
        self.pythonValue = pythonValue

    def __repr__(self):
        return 'PythonAttribute<%r>' % (self.name,)

    def isLoaded(self):
        """
        Return a boolean describing whether the attribute this describes has
        actually been loaded into memory by importing its module.

        Note: this currently always returns true; there is no Python parser
        support in this module yet.
        """
        return self._loaded

    def load(self, default=_nothing):
        """
        Load the value associated with this attribute.

        @return: an arbitrary Python object, or 'default' if there is an error
        loading it.
        """
        # NOTE(review): 'default' is currently never consulted; the stored
        # value is returned unconditionally.
        return self.pythonValue

    def iterAttributes(self):
        # Yield a child PythonAttribute for every member of the loaded value.
        for name, val in inspect.getmembers(self.load()):
            yield PythonAttribute(self.name + '.' + name, self, True, val)


class PythonModule(_ModuleIteratorHelper):
    """
    Representation of a module which could be imported from sys.path.

    @ivar name: the fully qualified python name of this module.

    @ivar filePath: a FilePath-like object which points to the location of this
    module.

    @ivar pathEntry: a L{PathEntry} instance which this module was located
    from.
    """
    def __init__(self, name, filePath, pathEntry):
        """
        Create a PythonModule.  Do not construct this directly, instead inspect
        a PythonPath or other PythonModule instances.

        @param name: see ivar
        @param filePath: see ivar
        @param pathEntry: see ivar
        """
        assert not name.endswith(".__init__")
        self.name = name
        self.filePath = filePath
        self.pathEntry = pathEntry

    def _getEntry(self):
        return self.pathEntry

    def __repr__(self):
        """
        Return a string representation including the module name.
        """
        return 'PythonModule<%r>' % (self.name,)

    def isLoaded(self):
        """
        Determine if the module is loaded into sys.modules.

        @return: a boolean: true if loaded, false if not.
        """
        return self.name in self.pathEntry.pythonPath.moduleDict

    def iterAttributes(self):
        """
        List all the attributes defined in this module.
Note: Future work is planned here to make it possible to list python attributes on a module without loading the module by inspecting ASTs or bytecode, but currently any iteration of PythonModule objects insists they must be loaded, and will use inspect.getmodule. @raise NotImplementedError: if this module is not loaded. @return: a generator yielding PythonAttribute instances describing the attributes of this module. """ if not self.isLoaded(): raise NotImplementedError( "You can't load attributes from non-loaded modules yet.") for name, val in inspect.getmembers(self.load()): yield PythonAttribute(self.name+'.'+name, self, True, val) def isPackage(self): """ Returns true if this module is also a package, and might yield something from iterModules. """ return _isPackagePath(self.filePath) def load(self, default=_nothing): """ Load this module. @param default: if specified, the value to return in case of an error. @return: a genuine python module. @raise: any type of exception. Importing modules is a risky business; the erorrs of any code run at module scope may be raised from here, as well as ImportError if something bizarre happened to the system path between the discovery of this PythonModule object and the attempt to import it. If you specify a default, the error will be swallowed entirely, and not logged. @rtype: types.ModuleType. """ try: return self.pathEntry.pythonPath.moduleLoader(self.name) except: # this needs more thought... if default is not _nothing: return default raise def __eq__(self, other): """ PythonModules with the same name are equal. """ if not isinstance(other, PythonModule): return False return other.name == self.name def __ne__(self, other): """ PythonModules with different names are not equal. 
""" if not isinstance(other, PythonModule): return True return other.name != self.name def walkModules(self, importPackages=False): if importPackages and self.isPackage(): self.load() return super(PythonModule, self).walkModules(importPackages=importPackages) def _subModuleName(self, mn): """ submodules of this module are prefixed with our name. """ return self.name + '.' + mn def _packagePaths(self): """ Yield a sequence of FilePath-like objects which represent path segments. """ if not self.isPackage(): return if self.isLoaded(): for fn in self.load().__path__: sfpp = self.filePath.parent() if fn == sfpp.path: # this should _really_ exist. assert sfpp.exists() yield sfpp else: smp = self.pathEntry.pythonPath._smartPath(fn) if smp.exists(): yield smp else: yield self.filePath.parent() class PathEntry(_ModuleIteratorHelper): """ I am a proxy for a single entry on sys.path. @ivar filePath: a FilePath-like object pointing at the filesystem location or archive file where this path entry is stored. @ivar pythonPath: a PythonPath instance. """ def __init__(self, filePath, pythonPath): """ Create a PathEntry. This is a private constructor. """ self.filePath = filePath self.pythonPath = pythonPath def _getEntry(self): return self def __repr__(self): return 'PathEntry<%r>' % (self.filePath,) def _packagePaths(self): yield self.filePath class IPathImportMapper(Interface): """ This is an internal interface, used to map importers to factories for FilePath-like objects. """ def mapPath(self, pathLikeString): """ Return a FilePath-like object. @param pathLikeString: a path-like string, like one that might be passed to an import hook. @return: a L{FilePath}, or something like it (currently only a L{ZipPath}, but more might be added later). """ class _DefaultMapImpl: """ Wrapper for the default importer, i.e. None. 
""" implements(IPathImportMapper) def mapPath(self, fsPathString): return FilePath(fsPathString) _theDefaultMapper = _DefaultMapImpl() class _ZipMapImpl: """ IPathImportMapper implementation for zipimport.ZipImporter. """ implements(IPathImportMapper) def __init__(self, importer): self.importer = importer def mapPath(self, fsPathString): """ Map the given FS path to a ZipPath, by looking at the ZipImporter's "archive" attribute and using it as our ZipArchive root, then walking down into the archive from there. @return: a L{zippath.ZipPath} or L{zippath.ZipArchive} instance. """ za = ZipArchive(self.importer.archive) myPath = FilePath(self.importer.archive) itsPath = FilePath(fsPathString) if myPath == itsPath: return za # This is NOT a general-purpose rule for sys.path or __file__: # zipimport specifically uses regular OS path syntax in its pathnames. segs = itsPath.segmentsFrom(myPath) zp = za for seg in segs: zp = zp.child(seg) return zp registerAdapter(_ZipMapImpl, zipimport.zipimporter, IPathImportMapper) def _defaultSysPathFactory(): """ Provide the default behavior of PythonPath's sys.path factory, which is to return the current value of sys.path. @return: L{sys.path} """ return sys.path class PythonPath: """ I represent the very top of the Python object-space, the module list in sys.path and the modules list in sys.modules. @ivar sysPath: a sequence of strings like sys.path. This attribute is read-only. @ivar moduleDict: a dictionary mapping string module names to module objects, like sys.modules. @ivar sysPathHooks: a list of PEP-302 path hooks, like sys.path_hooks. @ivar moduleLoader: a function that takes a fully-qualified python name and returns a module, like twisted.python.reflect.namedAny. """ def __init__(self, sysPath=None, moduleDict=sys.modules, sysPathHooks=sys.path_hooks, importerCache=sys.path_importer_cache, moduleLoader=namedAny, sysPathFactory=None): """ Create a PythonPath. 
You almost certainly want to use modules.theSystemPath, or its aliased methods, rather than creating a new instance yourself, though. All parameters are optional, and if unspecified, will use 'system' equivalents that makes this PythonPath like the global L{theSystemPath} instance. @param sysPath: a sys.path-like list to use for this PythonPath, to specify where to load modules from. @param moduleDict: a sys.modules-like dictionary to use for keeping track of what modules this PythonPath has loaded. @param sysPathHooks: sys.path_hooks-like list of PEP-302 path hooks to be used for this PythonPath, to determie which importers should be used. @param importerCache: a sys.path_importer_cache-like list of PEP-302 importers. This will be used in conjunction with the given sysPathHooks. @param moduleLoader: a module loader function which takes a string and returns a module. That is to say, it is like L{namedAny} - *not* like L{__import__}. @param sysPathFactory: a 0-argument callable which returns the current value of a sys.path-like list of strings. Specify either this, or sysPath, not both. This alternative interface is provided because the way the Python import mechanism works, you can re-bind the 'sys.path' name and that is what is used for current imports, so it must be a factory rather than a value to deal with modification by rebinding rather than modification by mutation. Note: it is not recommended to rebind sys.path. Although this mechanism can deal with that, it is a subtle point which some tools that it is easy for tools which interact with sys.path to miss. """ if sysPath is not None: sysPathFactory = lambda : sysPath elif sysPathFactory is None: sysPathFactory = _defaultSysPathFactory self._sysPathFactory = sysPathFactory self._sysPath = sysPath self.moduleDict = moduleDict self.sysPathHooks = sysPathHooks self.importerCache = importerCache self._moduleLoader = moduleLoader def _getSysPath(self): """ Retrieve the current value of sys.path. 
""" return self._sysPathFactory() sysPath = property(_getSysPath) def moduleLoader(self, modname): """ Replicate python2.4+ sys.modules preservation behavior. @param modname: a str module name. @return: an imported module. @raise: any type of exception that may arise from importing. """ freezeModules = self.moduleDict.copy() try: return self._moduleLoader(modname) except: self.moduleDict.clear() self.moduleDict.update(freezeModules) raise def _findEntryPathString(self, modobj): """ Determine where a given Python module object came from by looking at path entries. """ topPackageObj = modobj while '.' in topPackageObj.__name__: topPackageObj = self.moduleDict['.'.join( topPackageObj.__name__.split('.')[:-1])] if _isPackagePath(FilePath(topPackageObj.__file__)): # if package 'foo' is on sys.path at /a/b/foo, package 'foo's # __file__ will be /a/b/foo/__init__.py, and we are looking for # /a/b here, the path-entry; so go up two steps. rval = dirname(dirname(topPackageObj.__file__)) else: # the module is completely top-level, not within any packages. The # path entry it's on is just its dirname. rval = dirname(topPackageObj.__file__) # There are probably some awful tricks that an importer could pull # which would break this, so let's just make sure... it's a loaded # module after all, which means that its path MUST be in # path_importer_cache according to PEP 302 -glyph from pprint import pformat assert rval in self.importerCache, '%r for %r not in import cache %s' % ( rval, modobj, pformat(self.importerCache)) return rval def _smartPath(self, pathName): """ Given a path entry from sys.path which may refer to an importer, return the appropriate FilePath-like instance. @param pathName: a str describing the path. @return: a FilePath-like object. 
""" importr = self.importerCache.get(pathName, _nothing) if importr is _nothing: for hook in self.sysPathHooks: try: importr = hook(pathName) except ImportError, ie: pass if importr is _nothing: # still importr = None return IPathImportMapper(importr, _theDefaultMapper).mapPath(pathName) def iterEntries(self): """ Iterate the entries on my sysPath. @return: a generator yielding PathEntry objects """ for pathName in self.sysPath: fp = self._smartPath(pathName) yield PathEntry(fp, self) def __getitem__(self, modname): """ Get a python module by a given fully-qualified name. @return: a PythonModule object. @raise: KeyError, if the module name is a module name. """ # See if the module is already somewhere in Python-land. if modname in self.moduleDict: # we need 2 paths; one of the path entry and one for the module. moduleObject = self.moduleDict[modname] pe = PathEntry( self._smartPath( self._findEntryPathString(moduleObject)), self) mp = self._smartPath(moduleObject.__file__) return PythonModule(modname, mp, pe) # Recurse if we're trying to get a submodule. if '.' in modname: pkg = self for name in modname.split('.'): pkg = pkg[name] return pkg # Finally do the slowest possible thing and iterate for module in self.iterModules(): if module.name == modname: return module raise KeyError(modname) def __repr__(self): """ Display my sysPath and moduleDict in a string representation. """ return "PythonPath(%r,%r)" % (self.sysPath, self.moduleDict) def iterModules(self): """ Yield all top-level modules on my sysPath. """ for entry in self.iterEntries(): for module in entry.iterModules(): yield module def walkModules(self, importPackages=False): """ Similar to L{iterModules}, this yields every module on the path, then every submodule in each package or entry. 
""" for package in self.iterModules(): for module in package.walkModules(importPackages=False): yield module theSystemPath = PythonPath() def walkModules(importPackages=False): """ Deeply iterate all modules on the global python path. @param importPackages: Import packages as they are seen. """ return theSystemPath.walkModules(importPackages=importPackages) def iterModules(): """ Iterate all modules and top-level packages on the global Python path, but do not descend into packages. @param importPackages: Import packages as they are seen. """ return theSystemPath.iterModules() def getModule(moduleName): """ Retrieve a module from the system path. """ return theSystemPath[moduleName]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/modules.py
modules.py
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
A rotating, browsable log file.
"""

# System Imports
import os, glob, string, time

# sibling imports
import threadable

class BaseLogFile:
    """The base class for a log file that can be rotated.

    Subclasses must implement shouldRotate() and rotate(); write() and
    rotate() are made thread-safe via threadable.synchronize (see the
    `synchronized` class attribute consumed by that helper).
    """

    # Method names that threadable.synchronize will wrap with a lock.
    synchronized = ["write", "rotate"]

    def __init__(self, name, directory, defaultMode=None):
        """
        Open (creating if needed) the log file `name` inside `directory`.

        @param name: base filename of the log.
        @param directory: an existing directory to hold the log file.
        @param defaultMode: permission bits to apply to the file; when None
            and the file already exists, its current mode is preserved.
        """
        self.directory = directory
        assert os.path.isdir(self.directory)
        self.name = name
        self.path = os.path.join(directory, name)
        if defaultMode is None and os.path.exists(self.path) and hasattr(os, "chmod"):
            # os.stat()[0] is st_mode; remember the existing permissions so
            # reopening after rotation keeps them.
            self.defaultMode = os.stat(self.path)[0]
        else:
            self.defaultMode = defaultMode
        self._openFile()

    def shouldRotate(self):
        """Override with a method that returns true if the log
        should be rotated"""
        raise NotImplementedError

    def _openFile(self):
        """Open the log file."""
        self.closed = 0
        if os.path.exists(self.path):
            # Append by seeking to the end; buffering=1 is line-buffered.
            self._file = open(self.path, "r+", 1)
            self._file.seek(0, 2)
        else:
            self._file = open(self.path, "w+", 1)
        # set umask to be same as original log file
        if self.defaultMode is not None:
            try:
                os.chmod(self.path, self.defaultMode)
            except OSError:
                # Probably /dev/null or something?
                pass

    def __getstate__(self):
        # Open file objects can't be pickled; drop the handle and reopen in
        # __setstate__.
        state = self.__dict__.copy()
        del state["_file"]
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self._openFile()

    def write(self, data):
        """Write some data to the file, rotating first if due."""
        if self.shouldRotate():
            self.flush()
            self.rotate()
        self._file.write(data)

    def flush(self):
        """Flush the file."""
        self._file.flush()

    def close(self):
        """Close the file.

        The file cannot be used once it has been closed.
        """
        self.closed = 1
        self._file.close()
        self._file = None

    def getCurrentLog(self):
        """Return a LogReader for the current log file."""
        return LogReader(self.path)

class LogFile(BaseLogFile):
    """A log file that can be rotated.

    A rotateLength of None disables automatic log rotation.
    """
    def __init__(self, name, directory, rotateLength=1000000, defaultMode=None):
        BaseLogFile.__init__(self, name, directory, defaultMode)
        self.rotateLength = rotateLength

    def _openFile(self):
        BaseLogFile._openFile(self)
        # Track the current size incrementally so shouldRotate() avoids a
        # stat on every write.
        self.size = self._file.tell()

    def shouldRotate(self):
        """Rotate when the log file size is larger than rotateLength"""
        return self.rotateLength and self.size >= self.rotateLength

    def getLog(self, identifier):
        """Given an integer, return a LogReader for an old log file.

        @raise ValueError: if no rotated log with that suffix exists.
        """
        filename = "%s.%d" % (self.path, identifier)
        if not os.path.exists(filename):
            raise ValueError, "no such logfile exists"
        return LogReader(filename)

    def write(self, data):
        """Write some data to the file"""
        BaseLogFile.write(self, data)
        self.size += len(data)

    def rotate(self):
        """Rotate the file and create a new one.

        If it's not possible to open new logfile, this will fail silently,
        and continue logging to old logfile.
        """
        if not (os.access(self.directory, os.W_OK) and os.access(self.path, os.W_OK)):
            return
        # Shift name.N -> name.(N+1), highest first so nothing is clobbered,
        # then move the live file to name.1 and reopen a fresh one.
        logs = self.listLogs()
        logs.reverse()
        for i in logs:
            os.rename("%s.%d" % (self.path, i), "%s.%d" % (self.path, i + 1))
        self._file.close()
        os.rename(self.path, "%s.1" % self.path)
        self._openFile()

    def listLogs(self):
        """Return sorted list of integers - the old logs' identifiers."""
        result = []
        for name in glob.glob("%s.*" % self.path):
            try:
                counter = int(string.split(name, '.')[-1])
                if counter:
                    result.append(counter)
            except ValueError:
                # Suffix wasn't an integer (e.g. some unrelated file); skip.
                pass
        result.sort()
        return result

    def __getstate__(self):
        # `size` is derived from the file; it is recomputed by _openFile on
        # unpickling.
        state = BaseLogFile.__getstate__(self)
        del state["size"]
        return state

threadable.synchronize(LogFile)

class DailyLogFile(BaseLogFile):
    """A log file that is rotated daily (at or after midnight localtime)
    """
    def _openFile(self):
        BaseLogFile._openFile(self)
        # os.stat()[8] is st_mtime: date-stamp the log with its last write.
        self.lastDate = self.toDate(os.stat(self.path)[8])

    def shouldRotate(self):
        """Rotate when the date has changed since last write"""
        return self.toDate() > self.lastDate

    def toDate(self, *args):
        """Convert a unixtime to (year, month, day) localtime tuple,
        or return the current (year, month, day) localtime tuple.

        This function primarily exists so you may overload it with
        gmtime, or some cruft to make unit testing possible.
        """
        # primarily so this can be unit tested easily
        return time.localtime(*args)[:3]

    def suffix(self, tupledate):
        """Return the suffix given a (year, month, day) tuple or unixtime"""
        try:
            return '_'.join(map(str, tupledate))
        except:
            # try taking a float unixtime
            return '_'.join(map(str, self.toDate(tupledate)))

    def getLog(self, identifier):
        """Given a unix time, return a LogReader for an old log file.

        @raise ValueError: if no rotated log for that date exists.
        """
        if self.toDate(identifier) == self.lastDate:
            return self.getCurrentLog()
        filename = "%s.%s" % (self.path, self.suffix(identifier))
        if not os.path.exists(filename):
            raise ValueError, "no such logfile exists"
        return LogReader(filename)

    def write(self, data):
        """Write some data to the log file"""
        BaseLogFile.write(self, data)
        # Guard against a corner case where time.time()
        # could potentially run backwards to yesterday.
        # Primarily due to network time.
        self.lastDate = max(self.lastDate, self.toDate())

    def rotate(self):
        """Rotate the file and create a new one.

        If it's not possible to open new logfile, this will fail silently,
        and continue logging to old logfile.
        """
        if not (os.access(self.directory, os.W_OK) and os.access(self.path, os.W_OK)):
            return
        newpath = "%s.%s" % (self.path, self.suffix(self.lastDate))
        if os.path.exists(newpath):
            # A log for that date already exists; don't overwrite it.
            return
        self._file.close()
        os.rename(self.path, newpath)
        self._openFile()

    def __getstate__(self):
        # `lastDate` is derived from the file's mtime; _openFile restores it.
        state = BaseLogFile.__getstate__(self)
        del state["lastDate"]
        return state

threadable.synchronize(DailyLogFile)

class LogReader:
    """Read from a log file."""

    def __init__(self, name):
        self._file = open(name, "r")

    def readLines(self, lines=10):
        """Read a list of lines from the log file.

        This doesn't return all of the file's lines - call it multiple times.
        """
        result = []
        for i in range(lines):
            line = self._file.readline()
            if not line:
                break
            result.append(line)
        return result

    def close(self):
        self._file.close()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/logfile.py
logfile.py
# Win32-specific helpers: registry lookups for well-known folders and
# cmd.exe-style command-line quoting.  Importable on non-Windows platforms
# (win32api/win32con degrade to None).

import re

try:
    import win32api
    import win32con
except ImportError:
    win32api = win32con = None

from twisted.python.runtime import platform

# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/debug/base/system_error_codes.asp
ERROR_FILE_NOT_FOUND = 2
ERROR_PATH_NOT_FOUND = 3
ERROR_INVALID_NAME = 123
ERROR_DIRECTORY = 267

try:
    WindowsError = WindowsError
except NameError:
    class WindowsError:
        """
        Stand-in for sometimes-builtin exception on platforms for which it
        is missing.
        """

# XXX fix this to use python's builtin _winreg?

def getProgramsMenuPath():
    """Get the path to the Programs menu.

    Probably will break on non-US Windows.

    @returns: the filesystem location of the common Start Menu->Programs.
    """
    if not platform.isWinNT():
        # Win9x family has no Shell Folders registry data we consult below.
        return "C:\\Windows\\Start Menu\\Programs"
    keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders'
    hShellFolders = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE,
                                          keyname, 0, win32con.KEY_READ)
    return win32api.RegQueryValueEx(hShellFolders, 'Common Programs')[0]

def getProgramFilesPath():
    """Get the path to the Program Files folder."""
    keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
    currentV = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE,
                                     keyname, 0, win32con.KEY_READ)
    return win32api.RegQueryValueEx(currentV, 'ProgramFilesDir')[0]

# Backslashes that precede a double quote must be doubled, and the quote
# itself escaped; trailing backslashes must be doubled when we wrap the
# argument in quotes.
_cmdLineQuoteRe = re.compile(r'(\\*)"')
_cmdLineQuoteRe2 = re.compile(r'(\\+)\Z')
def cmdLineQuote(s):
    """
    Internal method for quoting a single command-line argument.

    @param s: an unquoted string that you want to quote so that something that
    does cmd.exe-style unquoting will interpret it as a single argument, even
    if it contains spaces.

    @return: a quoted string.
    """
    # Only wrap in quotes when the argument actually needs it.
    quote = ((" " in s) or ("\t" in s) or ('"' in s)) and '"' or ''
    return quote + _cmdLineQuoteRe2.sub(r"\1\1", _cmdLineQuoteRe.sub(r'\1\1\\"', s)) + quote

def quoteArguments(arguments):
    """
    Quote an iterable of command-line arguments for passing to CreateProcess
    or a similar API.  This allows the list passed to C{reactor.spawnProcess}
    to match the child process's C{sys.argv} properly.

    @param arguments: an iterable of C{str}, each unquoted.

    @return: a single string, with the given sequence quoted as necessary.
    """
    return ' '.join([cmdLineQuote(a) for a in arguments])
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/win32.py
win32.py
# Incremental zip extraction: file-like access to individual zip entries and
# generators that unzip an archive a file (or a chunk) at a time.

from __future__ import generators

import zipfile
import os.path
import binascii
import zlib
import struct

class ChunkingZipFile(zipfile.ZipFile):
    """A ZipFile object which, with readfile(), also gives you access
    to a filelike object for each entry.
    """

    def readfile(self, name):
        """Return file-like object for name.

        @param name: the archive member name.
        @return: a ZipFileEntry (stored members) or DeflatedZipFileEntry
            (deflated members), positioned at the start of the member's data.
        @raise zipfile.BadZipfile: on a corrupt local header, a name mismatch
            with the central directory, or an unsupported compression method.
        @raise RuntimeError: if the archive is closed or not opened for
            reading.
        """
        if self.mode not in ("r", "a"):
            raise RuntimeError('read() requires mode "r" or "a"')
        if not self.fp:
            raise RuntimeError(
                "Attempt to read ZIP archive that was already closed")
        zinfo = self.getinfo(name)

        self.fp.seek(zinfo.header_offset, 0)

        # Skip the file header:
        fheader = self.fp.read(30)
        if fheader[0:4] != zipfile.stringFileHeader:
            raise zipfile.BadZipfile("Bad magic number for file header")

        fheader = struct.unpack(zipfile.structFileHeader, fheader)
        fname = self.fp.read(fheader[zipfile._FH_FILENAME_LENGTH])

        if fheader[zipfile._FH_EXTRA_FIELD_LENGTH]:
            # Discard the local extra field; self.fp is then positioned at
            # the member's compressed data.
            self.fp.read(fheader[zipfile._FH_EXTRA_FIELD_LENGTH])

        if fname != zinfo.orig_filename:
            raise zipfile.BadZipfile(
                'File name in directory "%s" and header "%s" differ.' % (
                    zinfo.orig_filename, fname))

        if zinfo.compress_type == zipfile.ZIP_STORED:
            return ZipFileEntry(self.fp, zinfo.compress_size)
        elif zinfo.compress_type == zipfile.ZIP_DEFLATED:
            if not zlib:
                raise RuntimeError(
                    "De-compression requires the (missing) zlib module")
            return DeflatedZipFileEntry(self.fp, zinfo.compress_size)
        else:
            raise zipfile.BadZipfile(
                "Unsupported compression method %d for file %s" %
                (zinfo.compress_type, name))

    def read(self, name):
        """Return file bytes (as a string) for name.

        @raise zipfile.BadZipfile: if the member's CRC-32 doesn't match.
        """
        f = self.readfile(name)
        zinfo = self.getinfo(name)
        bytes = f.read()
        crc = binascii.crc32(bytes)
        if crc != zinfo.CRC:
            raise zipfile.BadZipfile("Bad CRC-32 for file %s" % name)
        return bytes


class ZipFileEntry:
    """File-like object used to read an uncompressed entry in a ZipFile.

    Reads at most `length` bytes from the shared archive file object; the
    caller must not seek the underlying fp while this entry is in use.
    """

    def __init__(self, fp, length):
        self.fp = fp
        self.readBytes = 0
        self.length = length
        self.finished = 0

    def tell(self):
        # Position within the (uncompressed) member, not within the archive.
        return self.readBytes

    def read(self, n=None):
        if n is None:
            n = self.length - self.readBytes
        if n == 0 or self.finished:
            return ''
        # Never read past the end of this member's data.
        data = self.fp.read(min(n, self.length - self.readBytes))
        self.readBytes += len(data)
        if self.readBytes == self.length or len(data) < n:
            self.finished = 1
        return data

    def close(self):
        self.finished = 1
        del self.fp


class DeflatedZipFileEntry:
    """File-like object used to read a deflated entry in a ZipFile.

    Decompresses on the fly with a raw-deflate (-15 window bits)
    decompressobj; the caller must not seek the underlying fp while this
    entry is in use.
    """

    def __init__(self, fp, length):
        self.fp = fp
        self.returnedBytes = 0
        self.readBytes = 0
        self.decomp = zlib.decompressobj(-15)
        self.buffer = ""
        self.length = length
        self.finished = 0

    def tell(self):
        # Position within the decompressed stream.
        return self.returnedBytes

    def read(self, n=None):
        if self.finished:
            return ""
        if n is None:
            # Read everything that's left in one go.
            result = [self.buffer,]
            result.append(self.decomp.decompress(self.fp.read(
                        self.length - self.readBytes)))
            # Raw deflate streams have no end marker; feeding one dummy byte
            # then flushing drains whatever the decompressor is holding.
            result.append(self.decomp.decompress("Z"))
            result.append(self.decomp.flush())
            self.buffer = ""
            self.finished = 1
            result = "".join(result)
            self.returnedBytes += len(result)
            return result
        else:
            while len(self.buffer) < n:
                data = self.fp.read(min(n, 1024,
                                        self.length - self.readBytes))
                self.readBytes += len(data)
                if not data:
                    # Compressed data exhausted: flush (see dummy-byte note
                    # above) and return the remainder.
                    result = (self.buffer
                              + self.decomp.decompress("Z")
                              + self.decomp.flush())
                    self.finished = 1
                    self.buffer = ""
                    self.returnedBytes += len(result)
                    return result
                else:
                    self.buffer += self.decomp.decompress(data)
            result = self.buffer[:n]
            self.buffer = self.buffer[n:]
            self.returnedBytes += len(result)
            return result

    def close(self):
        self.finished = 1
        del self.fp


def unzip(filename, directory=".", overwrite=0):
    """Unzip the file

    @param filename: the name of the zip file
    @param directory: the directory into which the files will be
    extracted
    @param overwrite: if on, overwrite files when they exist.  You can
    still get an error if you try to create a directory over a file
    with the same name or vice-versa.
    """
    # Drive the generator to completion; the yielded counts are ignored.
    for i in unzipIter(filename, directory, overwrite):
        pass

# Bit 4 (0x10) of external_attr marks a directory entry (MS-DOS attribute).
DIR_BIT = 16

def unzipIter(filename, directory='.', overwrite=0):
    """Return a generator for the zipfile.  This implementation will yield
    after every file.

    The value it yields is the number of files left to unzip.
    """
    zf = zipfile.ZipFile(filename, 'r')
    names = zf.namelist()
    if not os.path.exists(directory):
        os.makedirs(directory)
    remaining = countZipFileEntries(filename)
    for entry in names:
        remaining = remaining - 1
        isdir = zf.getinfo(entry).external_attr & DIR_BIT
        f = os.path.join(directory, entry)
        if isdir:
            # overwrite flag only applies to files
            if not os.path.exists(f):
                os.makedirs(f)
        else:
            # create the directory the file will be in first,
            # since we can't guarantee it exists
            fdir = os.path.split(f)[0]
            if not os.path.exists(fdir):
                # Fixed: previously created a directory at the *file* path
                # (os.makedirs(f)), which made the subsequent open fail.
                os.makedirs(fdir)
            if overwrite or not os.path.exists(f):
                outfile = open(f, 'wb')
                outfile.write(zf.read(entry))
                outfile.close()
        yield remaining


def countZipFileChunks(filename, chunksize):
    """Predict the number of chunks that will be extracted from the entire
    zipfile, given chunksize blocks.
    """
    totalchunks = 0
    zf = ChunkingZipFile(filename)
    for info in zf.infolist():
        totalchunks = totalchunks + countFileChunks(info, chunksize)
    return totalchunks


def countFileChunks(zipinfo, chunksize):
    """Count the number of chunksize blocks needed for one archive member.

    @param zipinfo: a zipfile.ZipInfo for the member.
    @param chunksize: block size in (uncompressed) bytes.
    @return: the chunk count; each file counts as at least one chunk.
    """
    size = zipinfo.file_size
    # Floor division keeps this an int under true division too.
    count = size // chunksize
    if size % chunksize > 0:
        count = count + 1
    # each file counts as at least one chunk
    return count or 1


def countZipFileEntries(filename):
    """Return the number of members in the named zip archive."""
    zf = zipfile.ZipFile(filename)
    return len(zf.namelist())


def unzipIterChunky(filename, directory='.', overwrite=0,
                    chunksize=4096):
    """Return a generator for the zipfile.  This implementation will yield
    after every chunksize uncompressed bytes, or at the end of a file,
    whichever comes first.

    The value it yields is the number of chunks left to unzip.
    """
    czf = ChunkingZipFile(filename, 'r')
    if not os.path.exists(directory):
        os.makedirs(directory)
    remaining = countZipFileChunks(filename, chunksize)
    names = czf.namelist()
    infos = czf.infolist()
    for entry, info in zip(names, infos):
        isdir = info.external_attr & DIR_BIT
        f = os.path.join(directory, entry)
        if isdir:
            # overwrite flag only applies to files
            if not os.path.exists(f):
                os.makedirs(f)
            remaining = remaining - 1
            assert remaining >= 0
            yield remaining
        else:
            # create the directory the file will be in first,
            # since we can't guarantee it exists
            fdir = os.path.split(f)[0]
            if not os.path.exists(fdir):
                # Fixed: previously created a directory at the *file* path
                # (os.makedirs(f)), which made the subsequent open fail.
                os.makedirs(fdir)
            if overwrite or not os.path.exists(f):
                outfile = open(f, 'wb')
                fp = czf.readfile(entry)
                if info.file_size == 0:
                    # Empty member still counts as one chunk.
                    remaining = remaining - 1
                    assert remaining >= 0
                    yield remaining
                fread = fp.read
                ftell = fp.tell
                owrite = outfile.write
                size = info.file_size
                while ftell() < size:
                    hunk = fread(chunksize)
                    owrite(hunk)
                    remaining = remaining - 1
                    assert remaining >= 0
                    yield remaining
                outfile.close()
            else:
                # Skipped existing file: account for all its chunks at once.
                remaining = remaining - countFileChunks(info, chunksize)
                assert remaining >= 0
                yield remaining
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/zipstream.py
zipstream.py
import sys, os
from distutils import sysconfig
from distutils.command import build_scripts, install_data, build_ext, build_py
from distutils.errors import CompileError
from distutils import core

# All Twisted subprojects that can be built/packaged on their own.
twisted_subprojects = ["conch", "flow", "lore", "mail", "names", "news",
                       "pair", "runner", "web", "web2", "words", "vfs"]


def setup(**kw):
    """
    An alternative to distutils' setup() which is specially designed
    for Twisted subprojects.

    Pass twisted_subproject=projname if you want package and data
    files to automatically be found for you.

    Pass detectExtensions=detectorFunction if your project has
    extension modules. detectorFunction will be called with an
    instance of build_ext_twisted and should return a list of distutils
    Extensions.
    """
    if 'twisted_subproject' in kw:
        # The 'twisted/<proj>' paths below only resolve from the top of
        # the source tree, so refuse to run from anywhere else.
        if 'twisted' not in os.listdir('.'):
            raise RuntimeError("Sorry, you need to run setup.py from the "
                               "toplevel source directory.")
        projname = kw['twisted_subproject']
        projdir = os.path.join('twisted', projname)

        kw['packages'] = getPackages(projdir, parent='twisted')
        kw['version'] = getVersion(projname)

        # If the subproject ships a plugin module, register it as a
        # py_module (dotted name with the '.py' suffix stripped).
        plugin = "twisted/plugins/twisted_" + projname + ".py"
        if os.path.exists(plugin):
            kw.setdefault('py_modules', []).append(
                plugin.replace("/", ".")[:-3])

        kw['data_files'] = getDataFiles(projdir, parent='twisted')

        del kw['twisted_subproject']
    else:
        # No subproject: an explicit 'plugins' list is translated to
        # twisted.plugins.* py_modules entries.
        if 'plugins' in kw:
            py_modules = []
            for plg in kw['plugins']:
                py_modules.append("twisted.plugins." + plg)
            kw.setdefault('py_modules', []).extend(py_modules)
            del kw['plugins']

    if 'cmdclass' not in kw:
        kw['cmdclass'] = {
            'install_data': install_data_twisted,
            'build_scripts': build_scripts_twisted}
        if sys.version_info[:3] < (2, 3, 0):
            # Python 2.2's build_py can't handle 'packages' and
            # 'py_modules' at the same time; see build_py_twisted.
            kw['cmdclass']['build_py'] = build_py_twisted

    if 'detectExtensions' in kw:
        if 'ext_modules' not in kw:
            # distutils only runs build_ext if ext_modules is non-empty,
            # so seed it with a placeholder.
            kw['ext_modules'] = [True] # distutils is so lame

        dE = kw['detectExtensions']
        del kw['detectExtensions']
        # Subclass per-call so the detector function rides along as a
        # class attribute of the build_ext command.
        class my_build_ext(build_ext_twisted):
            detectExtensions = dE
        kw.setdefault('cmdclass', {})['build_ext'] = my_build_ext
    return core.setup(**kw)


def getVersion(proj, base="twisted"):
    """
    Extract the version number for a given project.

    @param proj: the name of the project. Examples are "core",
    "conch", "words", "mail".

    @rtype: str
    @returns: The version number of the project, as a string like
    "2.0.0".
    """
    if proj == 'core':
        vfile = os.path.join(base, '_version.py')
    else:
        vfile = os.path.join(base, proj, '_version.py')
    # Execute the _version.py module in a scratch namespace; the fake
    # __name__ keeps any "if __name__ == ..." guards from firing.
    ns = {'__name__': 'Nothing to see here'}
    execfile(vfile, ns)
    return ns['version'].base()


# Names that are excluded from globbing results:
EXCLUDE_NAMES = ["{arch}", "CVS", ".cvsignore", "_darcs",
                 "RCS", "SCCS", ".svn"]
EXCLUDE_PATTERNS = ["*.py[cdo]", "*.s[ol]", ".#*", "*~", "*.py"]

import fnmatch

def _filterNames(names):
    """Given a list of file names, return those names that should be copied.
    """
    names = [n for n in names
             if n not in EXCLUDE_NAMES]
    # This is needed when building a distro from a working
    # copy (likely a checkout) rather than a pristine export:
    for pattern in EXCLUDE_PATTERNS:
        names = [n for n in names
                 if (not fnmatch.fnmatch(n, pattern))
                 and (not n.endswith('.py'))]
    return names


def relativeTo(base, relativee):
    """
    Gets 'relativee' relative to 'basepath'.

    i.e.,

    >>> relativeTo('/home/', '/home/radix/')
    'radix'
    >>> relativeTo('.', '/home/radix/Projects/Twisted') # curdir is /home/radix
    'Projects/Twisted'

    The 'relativee' must be a child of 'basepath'.
    """
    basepath = os.path.abspath(base)
    relativee = os.path.abspath(relativee)
    if relativee.startswith(basepath):
        relative = relativee[len(basepath):]
        if relative.startswith(os.sep):
            relative = relative[1:]
        # Re-join through 'base' so the result keeps the caller's
        # (possibly relative) spelling of the base path.
        return os.path.join(base, relative)
    raise ValueError("%s is not a subpath of %s" % (relativee, basepath))


def getDataFiles(dname, ignore=None, parent=None):
    """
    Get all the data files that should be included in this distutils Project.

    'dname' should be the path to the package that you're distributing.

    'ignore' is a list of sub-packages to ignore.  This facilitates
    disparate package hierarchies.  That's a fancy way of saying that
    the 'twisted' package doesn't want to include the 'twisted.conch'
    package, so it will pass ['conch'] as the value.

    'parent' is necessary if you're distributing a subpackage like
    twisted.conch.  'dname' should point to 'twisted/conch' and 'parent'
    should point to 'twisted'.  This ensures that your data_files are
    generated correctly, only using relative paths for the first element
    of the tuple ('twisted/conch/*').
    The default 'parent' is the current working directory.
    """
    parent = parent or "."
    ignore = ignore or []
    result = []
    for directory, subdirectories, filenames in os.walk(dname):
        resultfiles = []
        # Prune excluded and ignored directories in place so os.walk
        # does not descend into them.
        for exname in EXCLUDE_NAMES:
            if exname in subdirectories:
                subdirectories.remove(exname)
        for ig in ignore:
            if ig in subdirectories:
                subdirectories.remove(ig)
        for filename in _filterNames(filenames):
            resultfiles.append(filename)
        if resultfiles:
            result.append((relativeTo(parent, directory),
                           [relativeTo(parent,
                                       os.path.join(directory, filename))
                            for filename in resultfiles]))
    return result


def getPackages(dname, pkgname=None, results=None, ignore=None, parent=None):
    """
    Get all packages which are under dname. This is necessary for
    Python 2.2's distutils. Pretty similar arguments to getDataFiles,
    including 'parent'.
    """
    parent = parent or ""
    prefix = []
    if parent:
        prefix = [parent]
    bname = os.path.basename(dname)
    ignore = ignore or []
    if bname in ignore:
        return []
    if results is None:
        results = []
    if pkgname is None:
        pkgname = []
    subfiles = os.listdir(dname)
    abssubfiles = [os.path.join(dname, x) for x in subfiles]
    # Only directories containing __init__.py are packages; recursion
    # stops at the first non-package directory.
    if '__init__.py' in subfiles:
        results.append(prefix + pkgname + [bname])
        for subdir in filter(os.path.isdir, abssubfiles):
            getPackages(subdir, pkgname=pkgname + [bname],
                        results=results, ignore=ignore, parent=parent)
    res = ['.'.join(result) for result in results]
    return res


def getScripts(projname, basedir=''):
    """
    Returns a list of scripts for a Twisted subproject; this works in
    any of an SVN checkout, a project-specific tarball.
    """
    scriptdir = os.path.join(basedir, 'bin', projname)
    if not os.path.isdir(scriptdir):
        # Probably a project-specific tarball, in which case only this
        # project's bins are included in 'bin'
        scriptdir = os.path.join(basedir, 'bin')
        if not os.path.isdir(scriptdir):
            return []
    thingies = os.listdir(scriptdir)
    if '.svn' in thingies:
        thingies.remove('.svn')
    return filter(os.path.isfile,
                  [os.path.join(scriptdir, x) for x in thingies])


## Helpers and distutil tweaks

class build_py_twisted(build_py.build_py):
    """
    Changes behavior in Python 2.2 to support simultaneous specification of
    `packages' and `py_modules'.
    """
    def run(self):
        if self.py_modules:
            self.build_modules()
        if self.packages:
            self.build_packages()
        self.byte_compile(self.get_outputs(include_bytecode=0))


class build_scripts_twisted(build_scripts.build_scripts):
    """Renames scripts so they end with '.py' on Windows."""
    def run(self):
        build_scripts.build_scripts.run(self)
        if not os.name == "nt":
            return
        for f in os.listdir(self.build_dir):
            fpath = os.path.join(self.build_dir, f)
            if not fpath.endswith(".py"):
                try:
                    # Remove any stale '<script>.py' left by a previous build
                    # before renaming over it.
                    os.unlink(fpath + ".py")
                except EnvironmentError, e:
                    if e.args[1] == 'No such file or directory':
                        pass
                os.rename(fpath, fpath + ".py")


class install_data_twisted(install_data.install_data):
    """I make sure data files are installed in the package directory."""
    def finalize_options(self):
        # Default install_dir to the library install directory so data
        # files land next to the package, not in sys.prefix.
        self.set_undefined_options('install',
            ('install_lib', 'install_dir')
        )
        install_data.install_data.finalize_options(self)


class build_ext_twisted(build_ext.build_ext):
    """
    Allow subclasses to easily detect and customize Extensions to
    build at install-time.
    """
    def build_extensions(self):
        """
        Override the build_ext build_extensions method to call our
        module detection function before it tries to build the extensions.
        """
        # always define WIN32 under Windows
        if os.name == 'nt':
            self.define_macros = [("WIN32", 1)]
        else:
            self.define_macros = []
        # detectExtensions is supplied by the subclass created in setup().
        self.extensions = self.detectExtensions() or []
        build_ext.build_ext.build_extensions(self)

    def _remove_conftest(self):
        # Best-effort cleanup of the scratch files used by _compile_helper.
        for filename in ("conftest.c", "conftest.o", "conftest.obj"):
            try:
                os.unlink(filename)
            except EnvironmentError:
                pass

    def _compile_helper(self, content):
        # Write a throwaway C file and see whether the configured
        # compiler accepts it; returns True on success.
        conftest = open("conftest.c", "w")
        try:
            conftest.write(content)
            conftest.close()

            try:
                self.compiler.compile(["conftest.c"], output_dir='')
            except CompileError:
                return False
            return True
        finally:
            self._remove_conftest()

    def _check_header(self, header_name):
        """
        Check if the given header can be included by trying to compile a file
        that contains only an #include line.
        """
        self.compiler.announce("checking for %s ..." % header_name, 0)
        return self._compile_helper("#include <%s>\n" % header_name)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/dist.py
dist.py
# System Imports
import Queue
import threading
import threadable
import copy
import sys

# Twisted Imports
from twisted.python import log, runtime, context


class WorkerStop:
    pass
# Sentinel instance put on the queue to tell a worker thread to exit.
WorkerStop = WorkerStop()

# initialize threading
threadable.init(1)


class ThreadPool:
    """
    This class (hopefully) generalizes the functionality of a pool of
    threads to which work can be dispatched.

    dispatch(), dispatchWithCallback() and stop() should only be called from
    a single thread, unless you make a subclass where stop() and
    _startSomeWorkers() are synchronized.
    """
    # NOTE(review): __inited appears unused in this chunk.
    __inited = 0
    min = 5
    max = 20
    joined = 0     # set once stop() has been called; rejects new work
    started = 0    # set by start(); workers are only spawned after this
    workers = 0    # count of live (or requested) worker threads
    name = None

    def __init__(self, minthreads=5, maxthreads=20, name=None):
        """Create a new threadpool.

        @param minthreads: minimum number of threads in the pool

        @param maxthreads: maximum number of threads in the pool
        """
        assert minthreads >= 0, 'minimum is negative'
        assert minthreads <= maxthreads, 'minimum is greater than maximum'
        # Unbounded queue of pending (context, func, args, kw) jobs.
        self.q = Queue.Queue(0)
        self.min = minthreads
        self.max = maxthreads
        self.name = name
        # Jython's lists are not thread-safe, so wrap them there.
        if runtime.platform.getType() != "java":
            self.waiters = []
            self.threads = []
            self.working = []
        else:
            self.waiters = ThreadSafeList()
            self.threads = ThreadSafeList()
            self.working = ThreadSafeList()

    def start(self):
        """Start the threadpool.
        """
        self.joined = 0
        self.started = 1
        # Start some threads.
        self.adjustPoolsize()

    def startAWorker(self):
        # Spawn one worker thread, handing it a first job if one is
        # already queued (get(0) is a non-blocking get).
        self.workers = self.workers + 1
        name = "PoolThread-%s-%s" % (self.name or id(self), self.workers)
        try:
            firstJob = self.q.get(0)
        except Queue.Empty:
            firstJob = None
        newThread = threading.Thread(target=self._worker, name=name,
                                     args=(firstJob,))
        self.threads.append(newThread)
        newThread.start()

    def stopAWorker(self):
        # One WorkerStop sentinel terminates exactly one worker.
        self.q.put(WorkerStop)
        self.workers = self.workers-1

    def __setstate__(self, state):
        # Unpickling rebuilds the pool from scratch (queue, lists and
        # threads are not picklable); see __getstate__.
        self.__dict__ = state
        ThreadPool.__init__(self, self.min, self.max)

    def __getstate__(self):
        # Only the pool-size configuration survives pickling.
        state = {}
        state['min'] = self.min
        state['max'] = self.max
        return state

    def _startSomeWorkers(self):
        while (
            self.workers < self.max and # Don't create too many
            len(self.waiters) < self.q.qsize() # but create enough
            ):
            self.startAWorker()

    def dispatch(self, owner, func, *args, **kw):
        """Dispatch a function to be a run in a thread.
        """
        # 'owner' is accepted for API compatibility but ignored.
        self.callInThread(func,*args,**kw)

    def callInThread(self, func, *args, **kw):
        # Silently drop work after stop(); callers are expected to have
        # shut down first.
        if self.joined:
            return
        # Capture the caller's context so the job runs under it.
        ctx = context.theContextTracker.currentContext().contexts[-1]
        o = (ctx, func, args, kw)
        self.q.put(o)
        if self.started:
            self._startSomeWorkers()

    def _runWithCallback(self, callback, errback, func, args, kwargs):
        # Runs in the worker thread: route the result (or the exception
        # value) to the appropriate continuation.
        try:
            result = apply(func, args, kwargs)
        except:
            errback(sys.exc_info()[1])
        else:
            callback(result)

    def dispatchWithCallback(self, owner, callback, errback, func, *args, **kw):
        """Dispatch a function, returning the result to a callback function.

        The callback function will be called in the thread - make sure it is
        thread-safe.
        """
        self.callInThread(self._runWithCallback, callback, errback, func,
                          args, kw)

    def _worker(self, o):
        # Main loop of one worker thread; 'o' is the first job (or None).
        ct = threading.currentThread()
        while 1:
            if o is WorkerStop:
                break
            elif o is not None:
                self.working.append(ct)
                ctx, function, args, kwargs = o
                try:
                    context.call(ctx, function, *args, **kwargs)
                except:
                    # Log the failure under the job's context; never let an
                    # exception kill the worker.
                    context.call(ctx, log.deferr)
                self.working.remove(ct)
                # Drop references so the job's objects can be collected
                # while we block on the queue below.
                del o, ctx, function, args, kwargs
            self.waiters.append(ct)
            o = self.q.get()
            self.waiters.remove(ct)
        self.threads.remove(ct)

    def stop(self):
        """Shutdown the threads in the threadpool."""
        self.joined = 1
        threads = copy.copy(self.threads)
        # One sentinel per worker; range() is evaluated once, so the
        # decrement inside the loop is safe.
        for thread in range(self.workers):
            self.q.put(WorkerStop)
            self.workers = self.workers-1

        # and let's just make sure
        # FIXME: threads that have died before calling stop() are not joined.
        for thread in threads:
            thread.join()

    def adjustPoolsize(self, minthreads=None, maxthreads=None):
        # Re-clamp the pool between new (or current) min/max bounds.
        if minthreads is None:
            minthreads = self.min
        if maxthreads is None:
            maxthreads = self.max

        assert minthreads >= 0, 'minimum is negative'
        assert minthreads <= maxthreads, 'minimum is greater than maximum'

        self.min = minthreads
        self.max = maxthreads
        if not self.started:
            return

        # Kill of some threads if we have too many.
        while self.workers > self.max:
            self.stopAWorker()
        # Start some threads if we have too few.
        while self.workers < self.min:
            self.startAWorker()
        # Start some threads if there is a need.
        self._startSomeWorkers()

    def dumpStats(self):
        # Debug helper: log the pool's internal state.
        log.msg('queue: %s'   % self.q.queue)
        log.msg('waiters: %s' % self.waiters)
        log.msg('workers: %s' % self.working)
        log.msg('total: %s'   % self.threads)


class ThreadSafeList:
    """In Jython 2.1 lists aren't thread-safe, so this wraps it."""

    def __init__(self):
        self.lock = threading.Lock()
        self.l = []

    def append(self, i):
        self.lock.acquire()
        try:
            self.l.append(i)
        finally:
            self.lock.release()

    def remove(self, i):
        self.lock.acquire()
        try:
            self.l.remove(i)
        finally:
            self.lock.release()

    def __len__(self):
        return len(self.l)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/threadpool.py
threadpool.py
import calendar


class FormException(Exception):
    """An error occurred calling the form method.
    """
    def __init__(self, *args, **kwargs):
        Exception.__init__(self, *args)
        # Keyword arguments are per-field error descriptions.
        self.descriptions = kwargs


class InputError(FormException):
    """
    An error occurred with some input.
    """


class Argument:
    """Base class for form arguments."""

    # default value for argument, if no other default is given
    defaultDefault = None

    def __init__(self, name, default=None, shortDesc=None,
                 longDesc=None, hints=None, allowNone=1):
        self.name = name
        self.allowNone = allowNone
        if default is None:
            default = self.defaultDefault
        self.default = default
        self.shortDesc = shortDesc
        self.longDesc = longDesc
        if not hints:
            hints = {}
        self.hints = hints

    def addHints(self, **kwargs):
        """Merge additional presentation hints into this argument."""
        self.hints.update(kwargs)

    def getHint(self, name, default=None):
        return self.hints.get(name, default)

    def getShortDescription(self):
        return self.shortDesc or self.name.capitalize()

    def getLongDescription(self):
        return self.longDesc or '' #self.shortDesc or "The %s." % self.name

    def coerce(self, val):
        """Convert the value to the correct format."""
        # was: raise NotImplementedError, "..." (py2-only raise syntax)
        raise NotImplementedError("implement in subclass")


class String(Argument):
    """A single string.
    """
    defaultDefault = ''
    min = 0
    max = None

    def __init__(self, name, default=None, shortDesc=None,
                 longDesc=None, hints=None, allowNone=1, min=0, max=None):
        Argument.__init__(self, name, default=default, shortDesc=shortDesc,
                          longDesc=longDesc, hints=hints, allowNone=allowNone)
        self.min = min
        self.max = max

    def coerce(self, val):
        """Coerce to str, enforcing the min/max length bounds."""
        s = str(val)
        if len(s) < self.min:
            raise InputError("Value must be at least %s characters long" % self.min)
        if self.max is not None and len(s) > self.max:
            raise InputError("Value must be at most %s characters long" % self.max)
        return str(val)


class Text(String):
    """A long string.
    """


class Password(String):
    """A string which should be obscured when input.
    """


class VerifiedPassword(String):
    """A string that should be obscured when input and needs verification."""

    def coerce(self, vals):
        """Coerce a (password, confirmation) pair; both entries must match."""
        if len(vals) != 2 or vals[0] != vals[1]:
            raise InputError("Please enter the same password twice.")
        s = str(vals[0])
        if len(s) < self.min:
            raise InputError("Value must be at least %s characters long" % self.min)
        if self.max is not None and len(s) > self.max:
            raise InputError("Value must be at most %s characters long" % self.max)
        return s


class Hidden(String):
    """A string which is not displayed.

    The passed default is used as the value.
    """


class Integer(Argument):
    """A single integer.
    """
    defaultDefault = None

    def __init__(self, name, allowNone=1, default=None, shortDesc=None,
                 longDesc=None, hints=None):
        #although Argument now has allowNone, that was recently added, and
        #putting it at the end kept things which relied on argument order
        #from breaking.  However, allowNone originally was in here, so
        #I have to keep the same order, to prevent breaking code that
        #depends on argument order only
        Argument.__init__(self, name, default, shortDesc, longDesc, hints,
                          allowNone)

    def coerce(self, val):
        """Coerce to int; blank input maps to None when allowNone is set."""
        if not val.strip() and self.allowNone:
            return None
        try:
            return int(val)
        except ValueError:
            raise InputError(
                "%s is not valid, please enter a whole number, e.g. 10" % val)


class IntegerRange(Integer):
    """An integer restricted to the closed range [min, max]."""

    def __init__(self, name, min, max, allowNone=1, default=None,
                 shortDesc=None, longDesc=None, hints=None):
        self.min = min
        self.max = max
        Integer.__init__(self, name, allowNone=allowNone, default=default,
                         shortDesc=shortDesc, longDesc=longDesc, hints=hints)

    def coerce(self, val):
        result = Integer.coerce(self, val)
        if self.allowNone and result is None:
            return result
        if result < self.min:
            raise InputError("Value %s is too small, it should be at least %s" %
                             (result, self.min))
        if result > self.max:
            raise InputError("Value %s is too large, it should be at most %s" %
                             (result, self.max))
        return result


class Float(Argument):
    """A single floating-point number."""

    defaultDefault = None

    def __init__(self, name, allowNone=1, default=None, shortDesc=None,
                 longDesc=None, hints=None):
        #although Argument now has allowNone, that was recently added, and
        #putting it at the end kept things which relied on argument order
        #from breaking.  However, allowNone originally was in here, so
        #I have to keep the same order, to prevent breaking code that
        #depends on argument order only
        Argument.__init__(self, name, default, shortDesc, longDesc, hints,
                          allowNone)

    def coerce(self, val):
        """Coerce to float; blank input maps to None when allowNone is set."""
        if not val.strip() and self.allowNone:
            return None
        try:
            return float(val)
        except ValueError:
            raise InputError("Invalid float: %s" % val)


class Choice(Argument):
    """
    The result of a choice between enumerated types.  The choices should
    be a list of tuples of tag, value, and description.  The tag will be
    the value returned if the user hits "Submit", and the description
    is the label for the enumerated type.  default is a list of all the
    values (second element in choices).  If no defaults are specified,
    initially the first item will be selected.  Only one item can (should)
    be selected at once.
    """
    def __init__(self, name, choices=[], default=[], shortDesc=None,
                 longDesc=None, hints=None, allowNone=1):
        self.choices = choices
        if choices and not default:
            # BUGFIX: previously default.append(...) mutated the shared
            # mutable default argument, so the first Choice constructed
            # leaked its default into every subsequent instance.
            default = [choices[0][1]]
        Argument.__init__(self, name, default, shortDesc, longDesc, hints,
                          allowNone=allowNone)

    def coerce(self, inIdent):
        """Map a submitted tag back to its value."""
        for ident, val, desc in self.choices:
            if ident == inIdent:
                return val
        else:
            raise InputError("Invalid Choice: %s" % inIdent)


class Flags(Argument):
    """
    The result of a checkbox group or multi-menu.  The flags should be a
    list of tuples of tag, value, and description. The tag will be
    the value returned if the user hits "Submit", and the description
    is the label for the enumerated type.  default is a list of all the
    values (second elements in flags).  If no defaults are specified,
    initially nothing will be selected.  Several items may be selected at
    once.
    """
    def __init__(self, name, flags=(), default=(), shortDesc=None,
                 longDesc=None, hints=None, allowNone=1):
        self.flags = flags
        Argument.__init__(self, name, default, shortDesc, longDesc, hints,
                          allowNone=allowNone)

    def coerce(self, inFlagKeys):
        """Map each submitted tag to its value; unknown tags are errors."""
        if not inFlagKeys:
            return []
        outFlags = []
        for inFlagKey in inFlagKeys:
            for flagKey, flagVal, flagDesc in self.flags:
                if inFlagKey == flagKey:
                    outFlags.append(flagVal)
                    break
            else:
                raise InputError("Invalid Flag: %s" % inFlagKey)
        return outFlags


class CheckGroup(Flags):
    pass


class RadioGroup(Choice):
    pass


class Boolean(Argument):
    """A yes/no argument; most strings coerce to true."""

    def coerce(self, inVal):
        if not inVal:
            return 0
        lInVal = str(inVal).lower()
        if lInVal in ('no', 'n', 'f', 'false', '0'):
            return 0
        return 1


class File(Argument):
    """An uploaded file; the value is passed through unchanged."""

    def __init__(self, name, allowNone=1, shortDesc=None, longDesc=None,
                 hints=None):
        self.allowNone = allowNone
        Argument.__init__(self, name, None, shortDesc, longDesc, hints)

    def coerce(self, file):
        if not file and self.allowNone:
            return None
        elif file:
            return file
        else:
            raise InputError("Invalid File")


def positiveInt(x):
    """Coerce to int, rejecting values <= 0 (raises ValueError)."""
    x = int(x)
    if x <= 0:
        raise ValueError
    return x


class Date(Argument):
    """A date -- (year, month, day) tuple."""

    defaultDefault = None

    def __init__(self, name, allowNone=1, default=None, shortDesc=None,
                 longDesc=None, hints=None):
        # BUGFIX: the per-instance defaultDefault must be set *before*
        # Argument.__init__ reads it; previously it was assigned afterwards,
        # so the (1970, 1, 1) default never took effect.
        if not allowNone:
            self.defaultDefault = (1970, 1, 1)
        Argument.__init__(self, name, default, shortDesc, longDesc, hints)
        self.allowNone = allowNone

    def coerce(self, args):
        """Return tuple of ints (year, month, day)."""
        if tuple(args) == ("", "", "") and self.allowNone:
            return None

        try:
            year, month, day = map(positiveInt, args)
        except ValueError:
            raise InputError("Invalid date")
        if (month, day) == (2, 29):
            # calendar.mdays gives February 28 days, so leap days need
            # an explicit check.
            if not calendar.isleap(year):
                raise InputError("%d was not a leap year" % year)
            else:
                return year, month, day
        try:
            mdays = calendar.mdays[month]
        except IndexError:
            raise InputError("Invalid date")
        if day > mdays:
            raise InputError("Invalid date")
        return year, month, day


class Submit(Choice):
    """Submit button or a reasonable facsimile thereof."""

    def __init__(self, name, choices=[("Submit", "submit", "Submit form")],
                 reset=0, shortDesc=None, longDesc=None, allowNone=0,
                 hints=None):
        Choice.__init__(self, name, choices=choices, shortDesc=shortDesc,
                        longDesc=longDesc, hints=hints)
        self.allowNone = allowNone
        self.reset = reset

    def coerce(self, value):
        if self.allowNone and not value:
            return None
        else:
            return Choice.coerce(self, value)


class PresentationHint:
    """
    A hint to a particular system.
    """


class MethodSignature:
    """An ordered collection of Arguments describing a callable's inputs."""

    def __init__(self, *sigList):
        """
        """
        self.methodSignature = sigList

    def getArgument(self, name):
        """Return the Argument with the given name, or None."""
        for a in self.methodSignature:
            if a.name == name:
                return a

    def method(self, callable, takesRequest=False):
        """Bind this signature to a callable, producing a FormMethod."""
        return FormMethod(self, callable, takesRequest)


class FormMethod:
    """A callable object with a signature."""

    def __init__(self, signature, callable, takesRequest=False):
        self.signature = signature
        self.callable = callable
        self.takesRequest = takesRequest

    def getArgs(self):
        return tuple(self.signature.methodSignature)

    def call(self,*args,**kw):
        return self.callable(*args,**kw)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/formmethod.py
formmethod.py
import string, types


def stringyString(object, indentation=''):
    """Expansive string formatting for sequence types.

    list.__str__ and dict.__str__ use repr() to display their
    elements.  This function also turns these sequence types
    into strings, but uses str() on their elements instead.

    Sequence elements are also displayed on seperate lines,
    and nested sequences have nested indentation.
    """
    braces = ''
    sl = []

    # Exact type checks (not isinstance) are deliberate and preserved;
    # `dict`/`tuple`/`list` are the same objects as the old
    # types.DictType/TupleType/ListType, but also work on Python 3.
    if type(object) is dict:
        braces = '{}'
        for key, value in object.items():
            value = stringyString(value, indentation + '   ')
            if isMultiline(value):
                if endsInNewline(value):
                    value = value[:-len('\n')]
                sl.append("%s %s:\n%s" % (indentation, key, value))
            else:
                # Oops.  Will have to move that indentation.
                sl.append("%s %s: %s" % (indentation, key,
                                         value[len(indentation) + 3:]))
    elif type(object) in (tuple, list):
        if type(object) is tuple:
            braces = '()'
        else:
            braces = '[]'

        for element in object:
            element = stringyString(element, indentation + ' ')
            sl.append(element.rstrip() + ',')
    else:
        sl[:] = [indentation + line
                 for line in str(object).split('\n')]

    if not sl:
        sl.append(indentation)

    if braces:
        # Splice the opening brace over the first indent column and hang
        # the closing brace off the last line.
        sl[0] = indentation + braces[0] + sl[0][len(indentation) + 1:]
        sl[-1] = sl[-1] + braces[-1]

    s = "\n".join(sl)

    if isMultiline(s) and not endsInNewline(s):
        s = s + '\n'

    return s


def isMultiline(s):
    """Returns True if this string has a newline in it."""
    return (s.find('\n') != -1)


def endsInNewline(s):
    """Returns True if this string ends in a newline."""
    return (s[-len('\n'):] == '\n')


def docstringLStrip(docstring):
    """Gets rid of unsightly lefthand docstring whitespace residue.

    You'd think someone would have done this already, but apparently
    not in 1.5.2.

    BUT since we're all using Python 2.1 now, use L{inspect.getdoc}
    instead. I{This function should go away soon.}
    """
    if not docstring:
        return docstring

    docstring = docstring.replace('\t', ' ' * 8)
    lines = docstring.split('\n')

    # Find the indent of the first non-blank line after the summary line;
    # that amount is stripped from every subsequent line.
    leading = 0
    for l in range(1, len(lines)):
        line = lines[l]
        if line.strip():
            while 1:
                if line[leading] == ' ':
                    leading = leading + 1
                else:
                    break
        if leading:
            break

    outlines = lines[0:1]
    for l in range(1, len(lines)):
        outlines.append(lines[l][leading:])
    return '\n'.join(outlines)


def greedyWrap(inString, width=80):
    """Given a string and a column width, return a list of lines.

    Caveat: I'm use a stupid greedy word-wrapping algorythm.  I won't
    put two spaces at the end of a sentence.  I don't do full justification.
    And no, I've never even *heard* of hypenation.
    """

    outLines = []

    #eww, evil hacks to allow paragraphs delimited by two \ns :(
    if inString.find('\n\n') >= 0:
        paragraphs = inString.split('\n\n')
        for para in paragraphs:
            # BUGFIX: the recursive call previously omitted `width`, so any
            # non-default width was ignored for multi-paragraph input.
            outLines.extend(greedyWrap(para, width) + [''])
        return outLines

    inWords = inString.split()

    column = 0
    ptr_line = 0
    while inWords:
        column = column + len(inWords[ptr_line])
        ptr_line = ptr_line + 1

        if (column > width):
            if ptr_line == 1:
                # This single word is too long, it will be the whole line.
                pass
            else:
                # We've gone too far, stop the line one word back.
                ptr_line = ptr_line - 1
            (l, inWords) = (inWords[0:ptr_line], inWords[ptr_line:])
            outLines.append(' '.join(l))

            ptr_line = 0
            column = 0
        elif not (len(inWords) > ptr_line):
            # Clean up the last bit.
            outLines.append(' '.join(inWords))
            del inWords[:]
        else:
            # Space
            column = column + 1
    # next word

    return outLines


wordWrap = greedyWrap


def removeLeadingBlanks(lines):
    """Drop blank lines from the front of a list of lines."""
    ret = []
    for line in lines:
        if ret or line.strip():
            ret.append(line)
    return ret


def removeLeadingTrailingBlanks(s):
    """Strip blank lines from both ends of s; result ends with one newline."""
    lines = removeLeadingBlanks(s.split('\n'))
    lines.reverse()
    lines = removeLeadingBlanks(lines)
    lines.reverse()
    return '\n'.join(lines)+'\n'


def splitQuoted(s):
    """Like string.split, but don't break substrings inside quotes.

    >>> splitQuoted('the \"hairy monkey\" likes pie')
    ['the', 'hairy monkey', 'likes', 'pie']

    Another one of those \"someone must have a better solution for this\"
    things.  This implementation is a VERY DUMB hack done too quickly.
    """
    out = []
    quot = None
    phrase = None
    for word in s.split():
        if phrase is None:
            if word and (word[0] in ("\"", "'")):
                quot = word[0]
                word = word[1:]
                phrase = []

        if phrase is None:
            out.append(word)
        else:
            if word and (word[-1] == quot):
                word = word[:-1]
                phrase.append(word)
                out.append(" ".join(phrase))
                phrase = None
            else:
                phrase.append(word)

    return out


def strFile(p, f, caseSensitive=True):
    """Find whether string p occurs in a read()able object f
    @rtype: C{bool}
    """
    buf = ""
    # 2**2**2**2 == 65536: read in 64K windows, keeping len(p) bytes of
    # overlap so matches spanning a window boundary are still found.
    buf_len = max(len(p), 2**2**2**2)
    if not caseSensitive:
        p = p.lower()
    while 1:
        r = f.read(buf_len-len(p))
        if not caseSensitive:
            r = r.lower()
        bytes_read = len(r)
        if bytes_read == 0:
            return False
        l = len(buf)+bytes_read-buf_len
        if l <= 0:
            buf = buf + r
        else:
            buf = buf[l:] + r
        if buf.find(p) != -1:
            return True
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/text.py
text.py
# System Imports
import string
import os
import sys
import getopt
from os import path

# Sibling Imports
import reflect
import text
import util


class UsageError(Exception):
    pass

# Backwards-compatible alias for UsageError.
error = UsageError


class Options(dict):
    """
    An option list parser class

    C{optFlags} and C{optParameters} are lists of available parameters
    which your program can handle. The difference between the two
    is the 'flags' have an on(1) or off(0) state (off by default)
    whereas 'parameters' have an assigned value, with an optional
    default. (Compare '--verbose' and '--verbosity=2')

    optFlags is assigned a list of lists. Each list represents
    a flag parameter, as so::

    |    optFlags = [['verbose', 'v', 'Makes it tell you what it doing.'],
    |                ['quiet', 'q', 'Be vewy vewy quiet.']]

    As you can see, the first item is the long option name
    (prefixed with '--' on the command line), followed by the
    short option name (prefixed with '-'), and the description.
    The description is used for the built-in handling of the
    --help switch, which prints a usage summary.

    C{optParameters} is much the same, except the list also contains
    a default value::

    | optParameters = [['outfile', 'O', 'outfile.log', 'Description...']]

    subCommands is a list of 4-tuples of (command name, command shortcut,
    parser class, documentation).  If the first non-option argument found
    is one of the given command names, an instance of the given parser
    class is instantiated and given the remainder of the arguments to
    parse and self.opts[command] is set to the command name.  For example::

    | subCommands = [
    |      ['inquisition', 'inquest', InquisitionOptions, 'Perform an inquisition'],
    |      ['holyquest', 'quest', HolyQuestOptions, 'Embark upon a holy quest']
    |  ]

    In this case, C{"<program> holyquest --horseback --for-grail"} will cause
    C{HolyQuestOptions} to be instantiated and asked to parse
    C{['--horseback', '--for-grail']}.  Currently, only the first sub-command
    is parsed, and all options following it are passed to its parser.  If a
    subcommand is found, the subCommand attribute is set to its name and the
    subOptions attribute is set to the Option instance that parses the
    remaining options.  If a subcommand is not given to parseOptions,
    the subCommand attribute will be None.  You can also mark one of
    the subCommands to be the default.

    | defaultSubCommand = 'holyquest'

    In this case, the subCommand attribute will never be None, and
    the subOptions attribute will always be set.

    If you want to handle your own options, define a method named
    C{opt_paramname} that takes C{(self, option)} as arguments. C{option}
    will be whatever immediately follows the parameter on the
    command line. Options fully supports the mapping interface, so you
    can do things like C{'self["option"] = val'} in these methods.

    Advanced functionality is covered in the howto documentation,
    available at U{http://twistedmatrix.com/projects/core/documentation/howto/options.html},
    or doc/howto/options.html in your Twisted directory.
    """

    subCommand = None
    defaultSubCommand = None
    parent = None

    def __init__(self):
        super(Options, self).__init__()
        # self.opts is an alias for self (Options subclasses dict), kept
        # for backwards compatibility with code that used .opts.
        self.opts = self
        self.defaults = {}

        # These are strings/lists we will pass to getopt
        self.longOpt = []
        self.shortOpt = ''
        self.docs = {}
        self.synonyms = {}
        self.__dispatch = {}

        # Each collector inspects the class for one kind of option
        # declaration (flags, parameters, opt_* handler methods).
        collectors = [
            self._gather_flags,
            self._gather_parameters,
            self._gather_handlers,
            ]

        for c in collectors:
            (longOpt, shortOpt, docs, settings, synonyms, dispatch) = c()
            self.longOpt.extend(longOpt)
            self.shortOpt = self.shortOpt + shortOpt
            self.docs.update(docs)
            self.opts.update(settings)
            self.defaults.update(settings)
            self.synonyms.update(synonyms)
            self.__dispatch.update(dispatch)

    def __hash__(self):
        """
        Define a custom hash function so that Options instances can be used
        as dictionary keys.  This is an internal feature used to implement
        the parser.  Do not rely on it in application code.
        """
        return int(id(self) % sys.maxint)

    def opt_help(self):
        """Display this help and exit."""
        print self.__str__()
        sys.exit(0)

    def opt_version(self):
        """Display Twisted version and exit."""
        from twisted import copyright
        print "Twisted version:", copyright.version
        sys.exit(0)

    #opt_h = opt_help # this conflicted with existing 'host' options.

    def parseOptions(self, options=None):
        """The guts of the command-line parser.
        """

        if options is None:
            options = sys.argv[1:]
        try:
            opts, args = getopt.getopt(options,
                                       self.shortOpt, self.longOpt)
        except getopt.error, e:
            raise UsageError(str(e))

        for opt, arg in opts:
            # Strip the leading '--' (long) or '-' (short) from the option.
            if opt[1] == '-':
                opt = opt[2:]
            else:
                opt = opt[1:]

            optMangled = opt
            if not self.synonyms.has_key(optMangled):
                # Option methods use '_' where the command line uses '-'.
                optMangled = string.replace(opt, "-", "_")
                if not self.synonyms.has_key(optMangled):
                    raise UsageError, "No such option '%s'" % (opt,)

            optMangled = self.synonyms[optMangled]
            self.__dispatch[optMangled](optMangled, arg)

        if (getattr(self, 'subCommands', None)
            and (args or self.defaultSubCommand is not None)):
            if not args:
                args = [self.defaultSubCommand]
            sub, rest = args[0], args[1:]
            for (cmd, short, parser, doc) in self.subCommands:
                if sub == cmd or sub == short:
                    self.subCommand = cmd
                    self.subOptions = parser()
                    self.subOptions.parent = self
                    # All remaining arguments are handed to the
                    # sub-command's own parser.
                    self.subOptions.parseOptions(rest)
                    break
            else:
                raise UsageError("Unknown command: %s" % sub)
        else:
            try:
                self.parseArgs(*args)
            except TypeError:
                raise UsageError("Wrong number of arguments.")

        self.postOptions()

    def postOptions(self):
        """I am called after the options are parsed.

        Override this method in your subclass to do something after
        the options have been parsed and assigned, like validate that
        all options are sane.
        """
        pass

    def parseArgs(self):
        """I am called with any leftover arguments which were not options.

        Override me to do something with the remaining arguments on
        the command line, those which were not flags or options. e.g.
        interpret them as a list of files to operate on.

        Note that if there more arguments on the command line
        than this method accepts, parseArgs will blow up with
        a getopt.error.  This means if you don't override me,
        parseArgs will blow up if I am passed any arguments at
        all!
        """
        pass

    def _generic_flag(self, flagName, value=None):
        # Dispatch target for declared flags; flags never take a value.
        if value not in ('', None):
            raise UsageError, ("Flag '%s' takes no argument."
                               " Not even \"%s\"." % (flagName, value))

        self.opts[flagName] = 1

    def _generic_parameter(self, parameterName, value):
        # Dispatch target for declared parameters; a value is mandatory.
        if value is None:
            raise UsageError, ("Parameter '%s' requires an argument."
                               % (parameterName,))

        self.opts[parameterName] = value

    def _gather_flags(self):
        """Gather up boolean (flag) options.
        """

        longOpt, shortOpt = [], ''
        docs, settings, synonyms, dispatch = {}, {}, {}, {}

        flags = []
        reflect.accumulateClassList(self.__class__, 'optFlags', flags)

        for flag in flags:
            # padTo tolerates [long], [long, short] or [long, short, doc].
            long, short, doc = util.padTo(3, flag)
            if not long:
                raise ValueError, "A flag cannot be without a name."

            docs[long] = doc
            settings[long] = 0
            if short:
                shortOpt = shortOpt + short
                synonyms[short] = long
            longOpt.append(long)
            synonyms[long] = long
            dispatch[long] = self._generic_flag

        return longOpt, shortOpt, docs, settings, synonyms, dispatch

    def _gather_parameters(self):
        """Gather options which take a value.
        """

        longOpt, shortOpt = [], ''
        docs, settings, synonyms, dispatch = {}, {}, {}, {}

        parameters = []

        reflect.accumulateClassList(self.__class__, 'optStrings',
                                    parameters)
        if parameters:
            import warnings
            warnings.warn("Options.optStrings is deprecated, "
                          "please use optParameters instead.", stacklevel=2)

        reflect.accumulateClassList(self.__class__, 'optParameters',
                                    parameters)

        # NOTE(review): this rebinding discards the empty dict created
        # above; harmless, but apparently redundant.
        synonyms = {}

        for parameter in parameters:
            long, short, default, doc = util.padTo(4, parameter)
            if not long:
                raise ValueError, "A parameter cannot be without a name."

            docs[long] = doc
            settings[long] = default
            if short:
                # getopt syntax: ':' marks a short option taking a value.
                shortOpt = shortOpt + short + ':'
                synonyms[short] = long
            # getopt syntax: '=' marks a long option taking a value.
            longOpt.append(long + '=')
            synonyms[long] = long
            dispatch[long] = self._generic_parameter

        return longOpt, shortOpt, docs, settings, synonyms, dispatch

    def _gather_handlers(self):
        """Gather up options with their own handler methods.
        """

        longOpt, shortOpt = [], ''
        docs, settings, synonyms, dispatch = {}, {}, {}, {}

        dct = {}
        reflect.addMethodNamesToDict(self.__class__, dct, "opt_")

        for name in dct.keys():
            method = getattr(self, 'opt_'+name)

            # flagFunction decides, from the method's arity, whether this
            # handler takes an argument. (Defined later in this module.)
            takesArg = not flagFunction(method, name)

            prettyName = string.replace(name, '_', '-')
            doc = getattr(method, '__doc__', None)
            if doc:
                ## Only use the first line.
                #docs[name] = string.split(doc, '\n')[0]
                docs[prettyName] = doc
            else:
                docs[prettyName] = self.docs.get(prettyName)

            synonyms[prettyName] = prettyName

            # A little slight-of-hand here makes dispatching much easier
            # in parseOptions, as it makes all option-methods have the
            # same signature.
            if takesArg:
                fn = lambda name, value, m=method: m(value)
            else:
                # XXX: This won't raise a TypeError if it's called
                # with a value when it shouldn't be.
fn = lambda name, value=None, m=method: m() dispatch[prettyName] = fn if len(name) == 1: shortOpt = shortOpt + name if takesArg: shortOpt = shortOpt + ':' else: if takesArg: prettyName = prettyName + '=' longOpt.append(prettyName) reverse_dct = {} # Map synonyms for name in dct.keys(): method = getattr(self, 'opt_'+name) if not reverse_dct.has_key(method): reverse_dct[method] = [] reverse_dct[method].append(name) cmpLength = lambda a, b: cmp(len(a), len(b)) for method, names in reverse_dct.items(): if len(names) < 2: continue names_ = names[:] names_.sort(cmpLength) longest = names_.pop() for name in names_: synonyms[name] = longest return longOpt, shortOpt, docs, settings, synonyms, dispatch def __str__(self): return self.getSynopsis() + '\n' + self.getUsage(width=None) def getSynopsis(self): """ Returns a string containing a description of these options and how to pass them to the executed file. """ default = "%s%s" % (path.basename(sys.argv[0]), (self.longOpt and " [options]") or '') if self.parent is None: default = "Usage: %s%s" % (path.basename(sys.argv[0]), (self.longOpt and " [options]") or '') else: default = '%s' % ((self.longOpt and "[options]") or '') synopsis = getattr(self, "synopsis", default) synopsis = synopsis.rstrip() if self.parent is not None: synopsis = ' '.join((self.parent.getSynopsis(), self.parent.subCommand, synopsis)) return synopsis def getUsage(self, width=None): #If subOptions exists by now, then there was probably an error while #parsing its options. 
if hasattr(self, 'subOptions'): return self.subOptions.getUsage(width=width) if not width: width = int(os.environ.get('COLUMNS', '80')) if hasattr(self, 'subCommands'): cmdDicts = [] for (cmd, short, parser, desc) in self.subCommands: cmdDicts.append( {'long': cmd, 'short': short, 'doc': desc, 'optType': 'command', 'default': None }) chunks = docMakeChunks(cmdDicts, width) commands = 'Commands:\n' + ''.join(chunks) else: commands = '' longToShort = {} for key, value in self.synonyms.items(): longname = value if (key != longname) and (len(key) == 1): longToShort[longname] = key else: if not longToShort.has_key(longname): longToShort[longname] = None else: pass optDicts = [] for opt in self.longOpt: if opt[-1] == '=': optType = 'parameter' opt = opt[:-1] else: optType = 'flag' optDicts.append( {'long': opt, 'short': longToShort[opt], 'doc': self.docs[opt], 'optType': optType, 'default': self.defaults.get(opt, None) }) if not (getattr(self, "longdesc", None) is None): longdesc = self.longdesc else: import __main__ if getattr(__main__, '__doc__', None): longdesc = __main__.__doc__ else: longdesc = '' if longdesc: longdesc = ('\n' + string.join(text.wordWrap(longdesc, width), '\n').strip() + '\n') if optDicts: chunks = docMakeChunks(optDicts, width) s = "Options:\n%s" % (string.join(chunks, '')) else: s = "Options: None\n" return s + longdesc + commands #def __repr__(self): # XXX: It'd be cool if we could return a succinct representation # of which flags and options are set here. def docMakeChunks(optList, width=80): """Makes doc chunks for option declarations. Takes a list of dictionaries, each of which may have one or more of the keys 'long', 'short', 'doc', 'default', 'optType'. Returns a list of strings. The strings may be multiple lines, all of them end with a newline. """ # XXX: sanity check to make sure we have a sane combination of keys. 
maxOptLen = 0 for opt in optList: optLen = len(opt.get('long', '')) if optLen: if opt.get('optType', None) == "parameter": # these take up an extra character optLen = optLen + 1 maxOptLen = max(optLen, maxOptLen) colWidth1 = maxOptLen + len(" -s, -- ") colWidth2 = width - colWidth1 # XXX - impose some sane minimum limit. # Then if we don't have enough room for the option and the doc # to share one line, they can take turns on alternating lines. colFiller1 = " " * colWidth1 optChunks = [] seen = {} for opt in optList: if seen.has_key(opt.get('short', None)) \ or seen.has_key(opt.get('long', None)): continue for x in opt.get('short', None), opt.get('long', None): if x is not None: seen[x]=1 optLines = [] comma = " " if opt.get('short', None): short = "-%c" % (opt['short'],) else: short = '' if opt.get('long', None): long = opt['long'] if opt.get("optType", None) == "parameter": long = long + '=' long = "%-*s" % (maxOptLen, long) if short: comma = "," else: long = " " * (maxOptLen + len('--')) if opt.get('optType', None) == 'command': column1 = ' %s ' % long else: column1 = " %2s%c --%s " % (short, comma, long) if opt.get('doc', ''): doc = opt['doc'].strip() else: doc = '' if (opt.get("optType", None) == "parameter") \ and not (opt.get('default', None) is None): doc = "%s [default: %s]" % (doc, opt['default']) if doc: column2_l = text.wordWrap(doc, colWidth2) else: column2_l = [''] optLines.append("%s%s\n" % (column1, column2_l.pop(0))) for line in column2_l: optLines.append("%s%s\n" % (colFiller1, line)) optChunks.append(string.join(optLines, '')) return optChunks def flagFunction(method, name = None): reqArgs = method.im_func.func_code.co_argcount if reqArgs > 2: raise UsageError('Invalid Option function for %s' % (name or method.func_name)) if reqArgs == 2: # argName = method.im_func.func_code.co_varnames[1] return 0 return 1
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/usage.py
usage.py
# Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. """ Standardized versions of various cool and/or strange things that you can do with Python's reflection capabilities. """ from __future__ import nested_scopes # System Imports import sys import os import types import string import pickle import new import traceback import weakref import re import warnings RegexType = type(re.compile("")) try: import cStringIO as StringIO except ImportError: import StringIO class Settable: """ A mixin class for syntactic sugar. Lets you assign attributes by calling with keyword arguments; for example, C{x(a=b,c=d,y=z)} is the same as C{x.a=b;x.c=d;x.y=z}. The most useful place for this is where you don't want to name a variable, but you do want to set some attributes; for example, C{X()(y=z,a=b)}. """ def __init__(self, **kw): self(**kw) def __call__(self,**kw): for key,val in kw.items(): setattr(self,key,val) return self class AccessorType(type): """Metaclass that generates properties automatically. This is for Python 2.2 and up. Using this metaclass for your class will give you explicit accessor methods; a method called set_foo, will automatically create a property 'foo' that uses set_foo as a setter method. Same for get_foo and del_foo. Note that this will only work on methods that are present on class creation. If you add methods after the class is defined they will not automatically become properties. Likewise, class attributes will only be used if they are present upon class creation, and no getter function was set - if a getter is present, the class attribute will be ignored. 
This is a 2.2-only alternative to the Accessor mixin - just set in your class definition:: __metaclass__ = AccessorType """ def __init__(self, name, bases, d): type.__init__(self, name, bases, d) accessors = {} prefixs = ["get_", "set_", "del_"] for k in d.keys(): v = getattr(self, k) for i in range(3): if k.startswith(prefixs[i]): accessors.setdefault(k[4:], [None, None, None])[i] = v for name, (getter, setter, deler) in accessors.items(): # create default behaviours for the property - if we leave # the getter as None we won't be able to getattr, etc.. if getter is None: if hasattr(self, name): value = getattr(self, name) def getter(this, value=value, name=name): if this.__dict__.has_key(name): return this.__dict__[name] else: return value else: def getter(this, name=name): if this.__dict__.has_key(name): return this.__dict__[name] else: raise AttributeError, "no such attribute %r" % name if setter is None: def setter(this, value, name=name): this.__dict__[name] = value if deler is None: def deler(this, name=name): del this.__dict__[name] setattr(self, name, property(getter, setter, deler, "")) class PropertyAccessor(object): """A mixin class for Python 2.2 that uses AccessorType. This provides compatability with the pre-2.2 Accessor mixin, up to a point. Extending this class will give you explicit accessor methods; a method called set_foo, for example, is the same as an if statement in __setattr__ looking for 'foo'. Same for get_foo and del_foo. There are also reallyDel and reallySet methods, so you can override specifics in subclasses without clobbering __setattr__ and __getattr__, or using non-2.1 compatible code. There is are incompatibilities with Accessor - accessor methods added after class creation will *not* be detected. OTOH, this method is probably way faster. 
In addition, class attributes will only be used if no getter was defined, and instance attributes will not override getter methods whereas in original Accessor the class attribute or instance attribute would override the getter method. """ # addendum to above: # The behaviour of Accessor is wrong IMHO, and I've found bugs # caused by it. # -- itamar __metaclass__ = AccessorType def reallySet(self, k, v): self.__dict__[k] = v def reallyDel(self, k): del self.__dict__[k] class Accessor: """ Extending this class will give you explicit accessor methods; a method called C{set_foo}, for example, is the same as an if statement in L{__setattr__} looking for C{'foo'}. Same for C{get_foo} and C{del_foo}. There are also L{reallyDel} and L{reallySet} methods, so you can override specifics in subclasses without clobbering L{__setattr__} and L{__getattr__}. This implementation is for Python 2.1. """ def __setattr__(self, k,v): kstring='set_%s'%k if hasattr(self.__class__,kstring): return getattr(self,kstring)(v) else: self.reallySet(k,v) def __getattr__(self, k): kstring='get_%s'%k if hasattr(self.__class__,kstring): return getattr(self,kstring)() raise AttributeError("%s instance has no accessor for: %s" % (qual(self.__class__),k)) def __delattr__(self, k): kstring='del_%s'%k if hasattr(self.__class__,kstring): getattr(self,kstring)() return self.reallyDel(k) def reallySet(self, k,v): """ *actually* set self.k to v without incurring side-effects. This is a hook to be overridden by subclasses. """ if k == "__dict__": self.__dict__.clear() self.__dict__.update(v) else: self.__dict__[k]=v def reallyDel(self, k): """ *actually* del self.k without incurring side-effects. This is a hook to be overridden by subclasses. """ del self.__dict__[k] # just in case OriginalAccessor = Accessor class Summer(Accessor): """ Extend from this class to get the capability to maintain 'related sums'. 
Have a tuple in your class like the following:: sums=(('amount','credit','credit_total'), ('amount','debit','debit_total')) and the 'credit_total' member of the 'credit' member of self will always be incremented when the 'amount' member of self is incremented, similiarly for the debit versions. """ def reallySet(self, k,v): "This method does the work." for sum in self.sums: attr=sum[0] obj=sum[1] objattr=sum[2] if k == attr: try: oldval=getattr(self, attr) except: oldval=0 diff=v-oldval if hasattr(self, obj): ob=getattr(self,obj) if ob is not None: try:oldobjval=getattr(ob, objattr) except:oldobjval=0.0 setattr(ob,objattr,oldobjval+diff) elif k == obj: if hasattr(self, attr): x=getattr(self,attr) setattr(self,attr,0) y=getattr(self,k) Accessor.reallySet(self,k,v) setattr(self,attr,x) Accessor.reallySet(self,y,v) Accessor.reallySet(self,k,v) class QueueMethod: """ I represent a method that doesn't exist yet.""" def __init__(self, name, calls): self.name = name self.calls = calls def __call__(self, *args): self.calls.append((self.name, args)) def funcinfo(function): """ this is more documentation for myself than useful code. """ warnings.warn( "[v2.5] Use inspect.getargspec instead of twisted.python.reflect.funcinfo", DeprecationWarning, stacklevel=2) code=function.func_code name=function.func_name argc=code.co_argcount argv=code.co_varnames[:argc] defaults=function.func_defaults out = [] out.append('The function %s accepts %s arguments' % (name ,argc)) if defaults: required=argc-len(defaults) out.append('It requires %s arguments' % required) out.append('The arguments required are: %s' % argv[:required]) out.append('additional arguments are:') for i in range(argc-required): j=i+required out.append('%s which has a default of' % (argv[j], defaults[i])) return out ISNT=0 WAS=1 IS=2 def fullFuncName(func): qualName = (str(pickle.whichmodule(func, func.__name__)) + '.' + func.__name__) if namedObject(qualName) is not func: raise Exception("Couldn't find %s as %s." 
% (func, qualName)) return qualName def qual(clazz): """Return full import path of a class.""" return clazz.__module__ + '.' + clazz.__name__ def getcurrent(clazz): assert type(clazz) == types.ClassType, 'must be a class...' module = namedModule(clazz.__module__) currclass = getattr(module, clazz.__name__, None) if currclass is None: return clazz return currclass def getClass(obj): """Return the class or type of object 'obj'. Returns sensible result for oldstyle and newstyle instances and types.""" if hasattr(obj, '__class__'): return obj.__class__ else: return type(obj) # class graph nonsense # I should really have a better name for this... def isinst(inst,clazz): if type(inst) != types.InstanceType or type(clazz)!=types.ClassType: return isinstance(inst,clazz) cl = inst.__class__ cl2 = getcurrent(cl) clazz = getcurrent(clazz) if issubclass(cl2,clazz): if cl == cl2: return WAS else: inst.__class__ = cl2 return IS else: return ISNT def namedModule(name): """Return a module given its name.""" topLevel = __import__(name) packages = name.split(".")[1:] m = topLevel for p in packages: m = getattr(m, p) return m def namedObject(name): """Get a fully named module-global object. """ classSplit = string.split(name, '.') module = namedModule(string.join(classSplit[:-1], '.')) return getattr(module, classSplit[-1]) namedClass = namedObject # backwards compat def namedAny(name): """Get a fully named package, module, module-global object, or attribute. """ names = name.split('.') topLevelPackage = None moduleNames = names[:] while not topLevelPackage: try: trialname = '.'.join(moduleNames) topLevelPackage = __import__(trialname) except ImportError: # if the ImportError happened in the module being imported, # this is a failure that should be handed to our caller. # count stack frames to tell the difference. exc_info = sys.exc_info() if len(traceback.extract_tb(exc_info[2])) > 1: try: # Clean up garbage left in sys.modules. 
del sys.modules[trialname] except KeyError: # Python 2.4 has fixed this. Yay! pass raise exc_info[0], exc_info[1], exc_info[2] moduleNames.pop() obj = topLevelPackage for n in names[1:]: obj = getattr(obj, n) return obj def _reclass(clazz): clazz = getattr(namedModule(clazz.__module__),clazz.__name__) clazz.__bases__ = tuple(map(_reclass, clazz.__bases__)) return clazz def macro(name, filename, source, **identifiers): """macro(name, source, **identifiers) This allows you to create macro-like behaviors in python. See twisted.python.hook for an example of its usage. """ if not identifiers.has_key('name'): identifiers['name'] = name source = source % identifiers codeplace = "<%s (macro)>" % filename code = compile(source, codeplace, 'exec') # shield your eyes! sm = sys.modules tprm = "twisted.python.reflect.macros" if not sm.has_key(tprm): macros = new.module(tprm) sm[tprm] = macros macros.count = 0 macros = sm[tprm] macros.count += 1 macroname = 'macro_' + str(macros.count) tprmm = tprm + '.' + macroname mymod = new.module(tprmm) sys.modules[tprmm] = mymod setattr(macros, macroname, mymod) dict = mymod.__dict__ # Before we go on, I guess I should explain why I just did that. Basically # it's a gross hack to get epydoc to work right, but the general idea is # that it will be a useful aid in debugging in _any_ app which expects # sys.modules to have the same globals as some function. For example, it # would be useful if you were foolishly trying to pickle a wrapped function # directly from a class that had been hooked. exec code in dict, dict return dict[name] def _determineClass(x): try: return x.__class__ except: return type(x) def _determineClassName(x): c = _determineClass(x) try: return c.__name__ except: try: return str(c) except: return '<BROKEN CLASS AT %s>' % id(c) def safe_repr(o): """safe_repr(anything) -> string Returns a string representation of an object, or a string containing a traceback, if that object's __repr__ raised an exception. 
""" try: return repr(o) except: io = StringIO.StringIO() traceback.print_stack(file=io) whati = _determineClassName(o) swron = io.getvalue() gwith = id(o) you ='<%s instance at %s with repr error %s>' % ( whati,swron,gwith) return you def safe_str(o): """safe_str(anything) -> string Returns a string representation of an object, or a string containing a traceback, if that object's __str__ raised an exception. """ try: return str(o) except: strExc = '\n'.join(traceback.format_exception(*sys.exc_info())) clsName = _determineClassName(o) obId = id(o) return '<%s instance at %s with str error %s>' % ( clsName, obId, strExc) ##the following were factored out of usage def allYourBase(classObj, baseClass=None): """allYourBase(classObj, baseClass=None) -> list of all base classes that are subclasses of baseClass, unless it is None, in which case all bases will be added. """ l = [] accumulateBases(classObj, l, baseClass) return l def accumulateBases(classObj, l, baseClass=None): for base in classObj.__bases__: if baseClass is None or issubclass(base, baseClass): l.append(base) accumulateBases(base, l, baseClass) def prefixedMethodNames(classObj, prefix): """A list of method names with a given prefix in a given class. """ dct = {} addMethodNamesToDict(classObj, dct, prefix) return dct.keys() def addMethodNamesToDict(classObj, dict, prefix, baseClass=None): """ addMethodNamesToDict(classObj, dict, prefix, baseClass=None) -> dict this goes through 'classObj' (and its bases) and puts method names starting with 'prefix' in 'dict' with a value of 1. if baseClass isn't None, methods will only be added if classObj is-a baseClass If the class in question has the methods 'prefix_methodname' and 'prefix_methodname2', the resulting dict should look something like: {"methodname": 1, "methodname2": 1}. 
""" for base in classObj.__bases__: addMethodNamesToDict(base, dict, prefix, baseClass) if baseClass is None or baseClass in classObj.__bases__: for name, method in classObj.__dict__.items(): optName = name[len(prefix):] if ((type(method) is types.FunctionType) and (name[:len(prefix)] == prefix) and (len(optName))): dict[optName] = 1 def prefixedMethods(obj, prefix=''): """A list of methods with a given prefix on a given instance. """ dct = {} accumulateMethods(obj, dct, prefix) return dct.values() def accumulateMethods(obj, dict, prefix='', curClass=None): """accumulateMethods(instance, dict, prefix) I recurse through the bases of instance.__class__, and add methods beginning with 'prefix' to 'dict', in the form of {'methodname':*instance*method_object}. """ if not curClass: curClass = obj.__class__ for base in curClass.__bases__: accumulateMethods(obj, dict, prefix, base) for name, method in curClass.__dict__.items(): optName = name[len(prefix):] if ((type(method) is types.FunctionType) and (name[:len(prefix)] == prefix) and (len(optName))): dict[optName] = getattr(obj, name) def accumulateClassDict(classObj, attr, adict, baseClass=None): """Accumulate all attributes of a given name in a class heirarchy into a single dictionary. Assuming all class attributes of this name are dictionaries. If any of the dictionaries being accumulated have the same key, the one highest in the class heirarchy wins. (XXX: If \"higest\" means \"closest to the starting class\".) 
    Ex::

      | class Soy:
      |   properties = {\"taste\": \"bland\"}
      |
      | class Plant:
      |   properties = {\"colour\": \"green\"}
      |
      | class Seaweed(Plant):
      |   pass
      |
      | class Lunch(Soy, Seaweed):
      |   properties = {\"vegan\": 1 }
      |
      | dct = {}
      |
      | accumulateClassDict(Lunch, \"properties\", dct)
      |
      | print dct

    {\"taste\": \"bland\", \"colour\": \"green\", \"vegan\": 1}
    """
    # Depth-first over the bases, then this class: entries from classObj's own
    # dict are applied last, so the class closest to the starting class wins.
    # NOTE(review): the recursive call drops baseClass, so the baseClass filter
    # only applies at the top level -- confirm this is intentional.
    for base in classObj.__bases__:
        accumulateClassDict(base, attr, adict)
    if baseClass is None or baseClass in classObj.__bases__:
        adict.update(classObj.__dict__.get(attr, {}))


def accumulateClassList(classObj, attr, listObj, baseClass=None):
    """Accumulate all attributes of a given name in a class heirarchy into a single list.

    Assuming all class attributes of this name are lists.
    """
    # Same traversal order as accumulateClassDict: bases first, then this
    # class, so base-class entries precede subclass entries in listObj.
    # NOTE(review): baseClass is likewise not forwarded to the recursion.
    for base in classObj.__bases__:
        accumulateClassList(base, attr, listObj)
    if baseClass is None or baseClass in classObj.__bases__:
        listObj.extend(classObj.__dict__.get(attr, []))


def isSame(a, b):
    # Identity comparison, for use as an objgrep equality predicate.
    return (a is b)


def isLike(a, b):
    # Equality comparison; the default objgrep predicate.
    return (a == b)


def modgrep(goal):
    # Search the entire module table for objects equal to `goal`.
    return objgrep(sys.modules, goal, isLike, 'sys.modules')


def isOfType(start, goal):
    # True when `start` is exactly of type/class `goal` (no subclass match).
    return ((type(start) is goal)
            or (isinstance(start, types.InstanceType)
                and start.__class__ is goal))


def findInstances(start, t):
    # Find (exact) instances of class/type `t` reachable from `start`.
    return objgrep(start, t, isOfType)


def objgrep(start, goal, eq=isLike, path='', paths=None, seen=None, showUnknowns=0, maxDepth=None):
    '''An insanely CPU-intensive process for finding stuff.
    '''
    # Recursively walk the object graph rooted at `start`, appending a
    # human-readable access path to `paths` for every object that the `eq`
    # predicate matches against `goal`.  `seen` maps id() -> object to break
    # reference cycles; `maxDepth` (when not None) bounds the recursion.
    if paths is None:
        paths = []
    if seen is None:
        seen = {}
    if eq(start, goal):
        paths.append(path)
    if seen.has_key(id(start)):
        if seen[id(start)] is start:
            # Already visited this exact object; returns None here, not
            # `paths` -- only the outermost call reliably returns the list.
            return
    if maxDepth is not None:
        if maxDepth == 0:
            return
        maxDepth -= 1
    seen[id(start)] = start
    if isinstance(start, types.DictionaryType):
        r = []  # NOTE(review): `r` is never used -- leftover from an edit?
        for k, v in start.items():
            # Keys are shown as {repr(value)} and values as [repr(key)].
            objgrep(k, goal, eq, path+'{'+repr(v)+'}', paths, seen, showUnknowns, maxDepth)
            objgrep(v, goal, eq, path+'['+repr(k)+']', paths, seen, showUnknowns, maxDepth)
    elif isinstance(start, types.ListType) or isinstance(start, types.TupleType):
        for idx in xrange(len(start)):
            objgrep(start[idx], goal, eq, path+'['+str(idx)+']', paths, seen, showUnknowns, maxDepth)
    elif isinstance(start, types.MethodType):
        # Bound/unbound methods: descend into the instance, the function,
        # and the defining class.
        objgrep(start.im_self, goal, eq, path+'.im_self', paths, seen, showUnknowns, maxDepth)
        objgrep(start.im_func, goal, eq, path+'.im_func', paths, seen, showUnknowns, maxDepth)
        objgrep(start.im_class, goal, eq, path+'.im_class', paths, seen, showUnknowns, maxDepth)
    elif hasattr(start, '__dict__'):
        for k, v in start.__dict__.items():
            objgrep(v, goal, eq, path+'.'+k, paths, seen, showUnknowns, maxDepth)
        if isinstance(start, types.InstanceType):
            objgrep(start.__class__, goal, eq, path+'.__class__', paths, seen, showUnknowns, maxDepth)
    elif isinstance(start, weakref.ReferenceType):
        # Dereference the weakref and follow the referent (path shown as ()).
        objgrep(start(), goal, eq, path+'()', paths, seen, showUnknowns, maxDepth)
    elif (isinstance(start, types.StringTypes+
                    (types.IntType, types.FunctionType,
                     types.BuiltinMethodType, RegexType, types.FloatType,
                     types.NoneType, types.FileType)) or
          type(start).__name__ in ('wrapper_descriptor', 'method_descriptor',
                                   'member_descriptor', 'getset_descriptor')):
        # Leaf types: nothing further to traverse.
        pass
    elif showUnknowns:
        print 'unknown type', type(start), start
    return paths


def _startswith(s, sub):
    # aug python2.1
    # Manual str.startswith for pre-2.1 compatibility.
    return s[:len(sub)] == sub


def filenameToModuleName(fn):
    """Convert a name in the filesystem to the name of the Python module it is.

    This is agressive about getting a module name back from a file; it will
    always return a string.  Agressive means 'sometimes wrong'; it won't look
    at the Python path or try to do any error checking: don't use this method
    unless you already know that the filename you're talking about is a Python
    module.
    """
    fullName = os.path.abspath(fn)
    # Start from the bare file name without its extension...
    modName = os.path.splitext(os.path.basename(fn))[0]
    while 1:
        # ...then prepend each parent directory that looks like a package
        # (contains an __init__.py), stopping at the first one that doesn't.
        fullName = os.path.dirname(fullName)
        if os.path.exists(os.path.join(fullName, "__init__.py")):
            modName = "%s.%s" % (os.path.basename(fullName), modName)
        else:
            break
    return modName

#boo python
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/reflect.py
reflect.py
# System Imports
import sys
import types
import time
import linecache

# Sibling Imports
import log
import reflect

# Timestamp of the most recent rebuild() call; Sensitive instances compare
# their own lastRebuild against this to decide whether they are stale.
lastRebuild = time.time()


class Sensitive:
    """A utility mixin that's sensitive to rebuilds.

    This is a mixin for classes (usually those which represent collections of
    callbacks) to make sure that their code is up-to-date before running.
    """

    # Snapshot of the module-level timestamp at class-creation time.
    lastRebuild = lastRebuild

    def needRebuildUpdate(self):
        # True when a rebuild() has happened since this object last updated.
        yn = (self.lastRebuild < lastRebuild)
        return yn

    def rebuildUpToDate(self):
        # Record that this object has been brought current.
        self.lastRebuild = time.time()

    def latestVersionOf(self, anObject):
        """Get the latest version of an object.

        This can handle just about anything callable; instances, functions,
        methods, and classes.
        """
        t = type(anObject)
        if t == types.FunctionType:
            return latestFunction(anObject)
        elif t == types.MethodType:
            # Re-fetch the method from its (possibly rebuilt) class/instance.
            if anObject.im_self is None:
                return getattr(anObject.im_class, anObject.__name__)
            else:
                return getattr(anObject.im_self, anObject.__name__)
        elif t == types.InstanceType:
            # Kick it, if it's out of date.
            # (The getattr triggers the patched __getattr__ on stale classes,
            # which updates the instance's __class__ in place.)
            getattr(anObject, 'nothing', None)
            return anObject
        elif t == types.ClassType:
            return latestClass(anObject)
        else:
            log.msg('warning returning anObject!')
            return anObject

# Maps id(module.__dict__) -> module for every module that has been through
# rebuild(); lets latestFunction recover a function's home module from its
# func_globals dict.
_modDictIDMap = {}

def latestFunction(oldFunc):
    """Get the latest version of a function.
    """
    # This may be CPython specific, since I believe jython instantiates a new
    # module upon reload.
    dictID = id(oldFunc.func_globals)
    module = _modDictIDMap.get(dictID)
    if module is None:
        # Module never rebuilt (or not tracked): nothing newer to return.
        return oldFunc
    return getattr(module, oldFunc.__name__)

def latestClass(oldClass):
    """Get the latest version of a class.
    """
    module = reflect.namedModule(oldClass.__module__)
    newClass = getattr(module, oldClass.__name__)
    newBases = []
    for base in newClass.__bases__:
        # Recursively refresh the whole base-class chain.
        newBases.append(latestClass(base))

    try:
        # This makes old-style stuff work
        newClass.__bases__ = tuple(newBases)
        return newClass
    except TypeError:
        # New-style classes won't allow __bases__ reassignment here, so
        # rebuild the class object through its metaclass instead.
        ctor = getattr(newClass, '__metaclass__', type)
        return ctor(newClass.__name__, tuple(newBases),
                    dict(newClass.__dict__))

def updateInstance(self):
    """Updates an instance to be current
    """
    # Swap the instance's class for the freshly-reloaded version.
    self.__class__ = latestClass(self.__class__)

def __getattr__(self, name):
    """A getattr method to cause a class to be refreshed.
    """
    # Installed onto gutted (stale) classes by rebuild(); any attribute
    # access re-points the instance at the new class, then retries.
    if name == '__del__':
        raise AttributeError("Without this, Python segfaults.")
    updateInstance(self)
    log.msg("(rebuilding stale %s instance (%s))" % (reflect.qual(self.__class__), name))
    result = getattr(self, name)
    return result

def rebuild(module, doLog=1):
    """Reload a module and do as much as possible to replace its references.
    """
    global lastRebuild
    lastRebuild = time.time()
    if hasattr(module, 'ALLOW_TWISTED_REBUILD'):
        # Is this module allowed to be rebuilt?
        if not module.ALLOW_TWISTED_REBUILD:
            raise RuntimeError, "I am not allowed to be rebuilt."
    if doLog:
        log.msg( 'Rebuilding %s...' % str(module.__name__))

    ## Safely handle adapter re-registration
    from twisted.python import components
    components.ALLOW_DUPLICATES = 1

    # Register this module so latestFunction can find it by globals-dict id.
    d = module.__dict__
    _modDictIDMap[id(d)] = module
    newclasses = {}
    classes = {}
    functions = {}
    values = {}
    if doLog:
        log.msg(' (scanning %s): ' % str(module.__name__))
    # Catalog the module's own old-style classes ('c'), functions ('f'),
    # and new-style classes ('o') before the reload clobbers them.
    for k, v in d.items():
        if type(v) == types.ClassType:
            # Failure condition -- instances of classes with buggy
            # __hash__/__cmp__ methods referenced at the module level...
            if v.__module__ == module.__name__:
                classes[v] = 1
                if doLog:
                    log.logfile.write("c")
                    log.logfile.flush()
        elif type(v) == types.FunctionType:
            if v.func_globals is module.__dict__:
                functions[v] = 1
                if doLog:
                    log.logfile.write("f")
                    log.logfile.flush()
        elif isinstance(v, type):
            if v.__module__ == module.__name__:
                newclasses[v] = 1
                if doLog:
                    log.logfile.write("o")
                    log.logfile.flush()

    values.update(classes)
    values.update(functions)
    # Fast membership test: "did this object come from the old module?"
    fromOldModule = values.has_key
    newclasses = newclasses.keys()
    classes = classes.keys()
    functions = functions.keys()

    if doLog:
        log.msg('')
        log.msg(' (reload %s)' % str(module.__name__))

    # Boom.
    reload(module)
    # Make sure that my traceback printing will at least be recent...
    linecache.clearcache()

    if doLog:
        log.msg(' (cleaning %s): ' % str(module.__name__))

    # Gut each replaced old-style class: clear its dict, drop its bases, and
    # install the module-level __getattr__ so stale instances self-upgrade
    # lazily on their next attribute access.
    for clazz in classes:
        if getattr(module, clazz.__name__) is clazz:
            log.msg("WARNING: class %s not replaced by reload!" % reflect.qual(clazz))
        else:
            if doLog:
                log.logfile.write("x")
                log.logfile.flush()
            clazz.__bases__ = ()
            clazz.__dict__.clear()
            clazz.__getattr__ = __getattr__
            clazz.__module__ = module.__name__
    if newclasses:
        import gc
    # New-style classes can't be gutted the same way; instead, find every
    # live instance via the GC and re-point its __class__ directly.
    for nclass in newclasses:
        ga = getattr(module, nclass.__name__)
        if ga is nclass:
            log.msg("WARNING: new-class %s not replaced by reload!" % reflect.qual(nclass))
        else:
            for r in gc.get_referrers(nclass):
                if getattr(r, '__class__', None) is nclass:
                    r.__class__ = ga
    if doLog:
        log.msg('')
        log.msg(' (fixing %s): ' % str(module.__name__))
    # Sweep every other loaded module and swap any references it holds to
    # the old module's classes/functions for their reloaded versions.
    modcount = 0
    for mk, mod in sys.modules.items():
        modcount = modcount + 1
        if mod == module or mod is None:
            continue

        if not hasattr(mod, '__file__'):
            # It's a builtin module; nothing to replace here.
            continue
        changed = 0

        for k, v in mod.__dict__.items():
            try:
                hash(v)
            except TypeError:
                # Unhashable value can't be in the fromOldModule dict anyway.
                continue
            if fromOldModule(v):
                if type(v) == types.ClassType:
                    if doLog:
                        log.logfile.write("c")
                        log.logfile.flush()
                    nv = latestClass(v)
                else:
                    if doLog:
                        log.logfile.write("f")
                        log.logfile.flush()
                    nv = latestFunction(v)
                changed = 1
                setattr(mod, k, nv)
            else:
                # Replace bases of non-module classes just to be sure.
                # (latestClass is called for its side effect of refreshing
                # the class's __bases__; the return value is discarded.)
                if type(v) == types.ClassType:
                    for base in v.__bases__:
                        if fromOldModule(base):
                            latestClass(v)
        if doLog and not changed and ((modcount % 10) ==0) :
            log.logfile.write(".")
            log.logfile.flush()

    components.ALLOW_DUPLICATES = 0
    if doLog:
        log.msg('')
        log.msg(' Rebuilt %s.' % str(module.__name__))
    return module
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/rebuild.py
rebuild.py
from __future__ import division # System Imports import sys import time import warnings import datetime # Sibling Imports from twisted.python import util, context, reflect class ILogContext: """Actually, this interface is just a synoym for the dictionary interface, but it serves as a key for the default information in a log. I do not inherit from Interface because the world is a cruel place. """ context.setDefault(ILogContext, {"isError": 0, "system": "-"}) def callWithContext(ctx, func, *args, **kw): newCtx = context.get(ILogContext).copy() newCtx.update(ctx) return context.call({ILogContext: newCtx}, func, *args, **kw) def callWithLogger(logger, func, *args, **kw): """ Utility method which wraps a function in a try:/except:, logs a failure if one occurrs, and uses the system's logPrefix. """ try: lp = logger.logPrefix() except KeyboardInterrupt: raise except: lp = '(buggy logPrefix method)' err(system=lp) try: return callWithContext({"system": lp}, func, *args, **kw) except KeyboardInterrupt: raise except: err(system=lp) def showwarning(message, category, filename, lineno, file=None): if file is None: msg(warning=message, category=reflect.qual(category), filename=filename, lineno=lineno, format="%(filename)s:%(lineno)s: %(category)s: %(warning)s") else: _oldshowwarning(message, category, filename, lineno, file) _keepErrors = 0 _keptErrors = [] _ignoreErrors = [] def startKeepingErrors(): """ DEPRECATED in Twisted 2.5. Support function for testing frameworks. Start keeping errors in a buffer which can be retrieved (and emptied) with flushErrors. """ warnings.warn("log.startKeepingErrors is deprecated since Twisted 2.5", category=DeprecationWarning, stacklevel=2) global _keepErrors _keepErrors = 1 def flushErrors(*errorTypes): """ DEPRECATED in Twisted 2.5. See L{TestCase.flushLoggedErrors}. Support function for testing frameworks. Return a list of errors that occurred since the last call to flushErrors(). 
(This will return None unless startKeepingErrors has been called.) """ warnings.warn("log.flushErrors is deprecated since Twisted 2.5. " "If you need to flush errors from within a unittest, " "use TestCase.flushLoggedErrors instead.", category=DeprecationWarning, stacklevel=2) return _flushErrors(*errorTypes) def _flushErrors(*errorTypes): """ PRIVATE. DEPRECATED. DON'T USE. """ global _keptErrors k = _keptErrors _keptErrors = [] if errorTypes: for erk in k: shouldReLog = 1 for errT in errorTypes: if erk.check(errT): shouldReLog = 0 if shouldReLog: err(erk) return k def ignoreErrors(*types): """DEPRECATED""" warnings.warn("log.ignoreErrors is deprecated since Twisted 2.5", category=DeprecationWarning, stacklevel=2) _ignore(*types) def _ignore(*types): """ PRIVATE. DEPRECATED. DON'T USE. """ for type in types: _ignoreErrors.append(type) def clearIgnores(): """DEPRECATED""" warnings.warn("log.clearIgnores is deprecated since Twisted 2.5", category=DeprecationWarning, stacklevel=2) _clearIgnores() def _clearIgnores(): """ PRIVATE. DEPRECATED. DON'T USE. """ global _ignoreErrors _ignoreErrors = [] def err(_stuff=None, _why=None, **kw): """ Write a failure to the log. """ if _stuff is None: _stuff = failure.Failure() if isinstance(_stuff, failure.Failure): if _keepErrors: if _ignoreErrors: keep = 0 for err in _ignoreErrors: r = _stuff.check(err) if r: keep = 0 break else: keep = 1 if keep: _keptErrors.append(_stuff) else: _keptErrors.append(_stuff) msg(failure=_stuff, why=_why, isError=1, **kw) elif isinstance(_stuff, Exception): msg(failure=failure.Failure(_stuff), why=_why, isError=1, **kw) else: msg(repr(_stuff), why=_why, isError=1, **kw) deferr = err class Logger: """ This represents a class which may 'own' a log. Used by subclassing. """ def logPrefix(self): """ Override this method to insert custom logging behavior. Its return value will be inserted in front of every line. It may be called more times than the number of output lines. 
""" return '-' class LogPublisher: """Class for singleton log message publishing.""" synchronized = ['msg'] def __init__(self): self.observers = [] def addObserver(self, other): """Add a new observer. Observers are callable objects that will be called with each new log message (a dict). """ assert callable(other) self.observers.append(other) def removeObserver(self, other): """Remove an observer.""" self.observers.remove(other) def msg(self, *message, **kw): """Log a new message. For example:: | log.msg('Hello, world.') In particular, you MUST avoid the forms:: | log.msg(u'Hello, world.') | log.msg('Hello ', 'world.') These forms work (sometimes) by accident and will be disabled entirely in the future. """ actualEventDict = (context.get(ILogContext) or {}).copy() actualEventDict.update(kw) actualEventDict['message'] = message actualEventDict['time'] = time.time() for i in xrange(len(self.observers) - 1, -1, -1): try: self.observers[i](actualEventDict) except KeyboardInterrupt: # Don't swallow keyboard interrupt! raise except UnicodeEncodeError: raise except: o = self.observers.pop(i) err(failure.Failure(), "Log observer %s failed, removing from observer list." % (o,)) try: theLogPublisher except NameError: theLogPublisher = LogPublisher() addObserver = theLogPublisher.addObserver removeObserver = theLogPublisher.removeObserver msg = theLogPublisher.msg class FileLogObserver: """ Log observer that writes to a file-like object. @type timeFormat: C{str} or C{NoneType} @ivar timeFormat: If not C{None}, the format string passed to strftime(). """ timeFormat = None def __init__(self, f): self.write = f.write self.flush = f.flush def _safeFormat(self, fmtString, crap): #There's a way we could make this if not safer at least more #informative: perhaps some sort of str/repr wrapper objects #could be wrapped around the things inside of 'crap'. 
That way #if the event dict contains an object with a bad __repr__, we #can only cry about that individual object instead of the #entire event dict. try: text = fmtString % crap except KeyboardInterrupt: raise except: try: text = ('Invalid format string or unformattable object in log message: %r, %s' % (fmtString, crap)) except: try: text = 'UNFORMATTABLE OBJECT WRITTEN TO LOG with fmt %r, MESSAGE LOST' % (fmtString,) except: text = 'PATHOLOGICAL ERROR IN BOTH FORMAT STRING AND MESSAGE DETAILS, MESSAGE LOST' return text def getTimezoneOffset(self): """ Return the current local timezone offset from UTC. @rtype: C{int} @return: The number of seconds offset from UTC. West is positive, east is negative. """ if time.daylight: return time.altzone return time.timezone def formatTime(self, when): """ Return the given UTC value formatted as a human-readable string representing that time in the local timezone. @type when: C{int} @param when: POSIX timestamp to convert to a human-readable string. @rtype: C{str} """ if self.timeFormat is not None: return time.strftime(self.timeFormat, time.localtime(when)) tzOffset = -self.getTimezoneOffset() when = datetime.datetime.utcfromtimestamp(when + tzOffset) tzHour = int(tzOffset / 60 / 60) tzMin = int(tzOffset / 60 % 60) return '%d/%02d/%02d %02d:%02d %+03d%02d' % ( when.year, when.month, when.day, when.hour, when.minute, tzHour, tzMin) def emit(self, eventDict): edm = eventDict['message'] if not edm: if eventDict['isError'] and eventDict.has_key('failure'): text = ((eventDict.get('why') or 'Unhandled Error') + '\n' + eventDict['failure'].getTraceback()) elif eventDict.has_key('format'): text = self._safeFormat(eventDict['format'], eventDict) else: # we don't know how to log this return else: text = ' '.join(map(reflect.safe_str, edm)) timeStr = self.formatTime(eventDict['time']) fmtDict = {'system': eventDict['system'], 'text': text.replace("\n", "\n\t")} msgStr = self._safeFormat("[%(system)s] %(text)s\n", fmtDict) 
util.untilConcludes(self.write, timeStr + " " + msgStr) util.untilConcludes(self.flush) # Hoorj! def start(self): """Start observing log events.""" addObserver(self.emit) def stop(self): """Stop observing log events.""" removeObserver(self.emit) class StdioOnnaStick: """Class that pretends to be stout/err.""" closed = 0 softspace = 0 mode = 'wb' name = '<stdio (log)>' def __init__(self, isError=0): self.isError = isError self.buf = '' def close(self): pass def fileno(self): return -1 def flush(self): pass def read(self): raise IOError("can't read from the log!") readline = read readlines = read seek = read tell = read def write(self, data): d = (self.buf + data).split('\n') self.buf = d[-1] messages = d[0:-1] for message in messages: msg(message, printed=1, isError=self.isError) def writelines(self, lines): for line in lines: msg(line, printed=1, isError=self.isError) try: _oldshowwarning except NameError: _oldshowwarning = None def startLogging(file, *a, **kw): """Initialize logging to a specified file. """ flo = FileLogObserver(file) startLoggingWithObserver(flo.emit, *a, **kw) def startLoggingWithObserver(observer, setStdout=1): """Initialize logging to a specified observer. If setStdout is true (defaults to yes), also redirect sys.stdout and sys.stderr to the specified file. """ global defaultObserver, _oldshowwarning if not _oldshowwarning: _oldshowwarning = warnings.showwarning warnings.showwarning = showwarning if defaultObserver: defaultObserver.stop() defaultObserver = None addObserver(observer) msg("Log opened.") if setStdout: sys.stdout = logfile sys.stderr = logerr class NullFile: softspace = 0 def read(self): pass def write(self, bytes): pass def flush(self): pass def close(self): pass def discardLogs(): """Throw away all logs. """ global logfile logfile = NullFile() # Prevent logfile from being erased on reload. This only works in cpython. 
try: logfile except NameError: logfile = StdioOnnaStick(0) logerr = StdioOnnaStick(1) class DefaultObserver: """Default observer. Will ignore all non-error messages and send error messages to sys.stderr. Will be removed when startLogging() is called for the first time. """ def _emit(self, eventDict): if eventDict["isError"]: if eventDict.has_key('failure'): text = eventDict['failure'].getTraceback() else: text = " ".join([str(m) for m in eventDict["message"]]) + "\n" sys.stderr.write(text) sys.stderr.flush() def start(self): addObserver(self._emit) def stop(self): removeObserver(self._emit) # Some more sibling imports, at the bottom and unqualified to avoid # unresolvable circularity import threadable, failure threadable.synchronize(LogPublisher) try: defaultObserver except NameError: defaultObserver = DefaultObserver() defaultObserver.start()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/log.py
log.py
# System imports import types from twisted.python import reflect class NotSupportedError(NotImplementedError): """ An exception meaning that the tree-manipulation operation you're attempting to perform is not supported. """ class Request: """I am an abstract representation of a request for an entity. I also function as the response. The request is responded to by calling self.write(data) until there is no data left and then calling self.finish(). """ # This attribute should be set to the string name of the protocol being # responded to (e.g. HTTP or FTP) wireProtocol = None def write(self, data): """Add some data to the response to this request. """ raise NotImplementedError("%s.write" % reflect.qual(self.__class__)) def finish(self): """The response to this request is finished; flush all data to the network stream. """ raise NotImplementedError("%s.finish" % reflect.qual(self.__class__)) class Entity: """I am a terminal object in a hierarchy, with no children. I represent a null interface; certain non-instance objects (strings and integers, notably) are Entities. Methods on this class are suggested to be implemented, but are not required, and will be emulated on a per-protocol basis for types which do not handle them. """ def render(self, request): """ I produce a stream of bytes for the request, by calling request.write() and request.finish(). """ raise NotImplementedError("%s.render" % reflect.qual(self.__class__)) class Collection: """I represent a static collection of entities. I contain methods designed to represent collections that can be dynamically created. """ def __init__(self, entities=None): """Initialize me. """ if entities is not None: self.entities = entities else: self.entities = {} def getStaticEntity(self, name): """Get an entity that was added to me using putEntity. This method will return 'None' if it fails. """ return self.entities.get(name) def getDynamicEntity(self, name, request): """Subclass this to generate an entity on demand. 
This method should return 'None' if it fails. """ def getEntity(self, name, request): """Retrieve an entity from me. I will first attempt to retrieve an entity statically; static entities will obscure dynamic ones. If that fails, I will retrieve the entity dynamically. If I cannot retrieve an entity, I will return 'None'. """ ent = self.getStaticEntity(name) if ent is not None: return ent ent = self.getDynamicEntity(name, request) if ent is not None: return ent return None def putEntity(self, name, entity): """Store a static reference on 'name' for 'entity'. Raises a KeyError if the operation fails. """ self.entities[name] = entity def delEntity(self, name): """Remove a static reference for 'name'. Raises a KeyError if the operation fails. """ del self.entities[name] def storeEntity(self, name, request): """Store an entity for 'name', based on the content of 'request'. """ raise NotSupportedError("%s.storeEntity" % reflect.qual(self.__class__)) def removeEntity(self, name, request): """Remove an entity for 'name', based on the content of 'request'. """ raise NotSupportedError("%s.removeEntity" % reflect.qual(self.__class__)) def listStaticEntities(self): """Retrieve a list of all name, entity pairs that I store references to. See getStaticEntity. """ return self.entities.items() def listDynamicEntities(self, request): """A list of all name, entity that I can generate on demand. See getDynamicEntity. """ return [] def listEntities(self, request): """Retrieve a list of all name, entity pairs I contain. See getEntity. """ return self.listStaticEntities() + self.listDynamicEntities(request) def listStaticNames(self): """Retrieve a list of the names of entities that I store references to. See getStaticEntity. """ return self.entities.keys() def listDynamicNames(self): """Retrieve a list of the names of entities that I store references to. See getDynamicEntity. """ return [] def listNames(self, request): """Retrieve a list of all names for entities that I contain. 
See getEntity. """ return self.listStaticNames() class ConstraintViolation(Exception): """An exception raised when a constraint is violated. """ class Constrained(Collection): """A collection that has constraints on its names and/or entities.""" def nameConstraint(self, name): """A method that determines whether an entity may be added to me with a given name. If the constraint is satisfied, return 1; if the constraint is not satisfied, either return 0 or raise a descriptive ConstraintViolation. """ return 1 def entityConstraint(self, entity): """A method that determines whether an entity may be added to me. If the constraint is satisfied, return 1; if the constraint is not satisfied, either return 0 or raise a descriptive ConstraintViolation. """ return 1 def reallyPutEntity(self, name, entity): Collection.putEntity(self, name, entity) def putEntity(self, name, entity): """Store an entity if it meets both constraints. Otherwise raise a ConstraintViolation. """ if self.nameConstraint(name): if self.entityConstraint(entity): self.reallyPutEntity(name, entity) else: raise ConstraintViolation("Entity constraint violated.") else: raise ConstraintViolation("Name constraint violated.") class Locked(Constrained): """A collection that can be locked from adding entities.""" locked = 0 def lock(self): self.locked = 1 def entityConstraint(self, entity): return not self.locked class Homogenous(Constrained): """A homogenous collection of entities. I will only contain entities that are an instance of the class or type specified by my 'entityType' attribute. """ entityType = types.InstanceType def entityConstraint(self, entity): if isinstance(entity, self.entityType): return 1 else: raise ConstraintViolation("%s of incorrect type (%s)" % (entity, self.entityType)) def getNameType(self): return "Name" def getEntityType(self): return self.entityType.__name__
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/roots.py
roots.py
__metaclass__ = type import os from twisted.python.zipstream import ChunkingZipFile from twisted.python.filepath import FilePath, _PathHelper # using FilePath here exclusively rather than os to make sure that we don't do # anything OS-path-specific here. ZIP_PATH_SEP = '/' # In zipfiles, "/" is universally used as the # path separator, regardless of platform. class ZipPath(_PathHelper): """ I represent a file or directory contained within a zip file. """ def __init__(self, archive, pathInArchive): """ Don't construct me directly. Use ZipArchive.child(). @param archive: a ZipArchive instance. @param pathInArchive: a ZIP_PATH_SEP-separated string. """ self.archive = archive self.pathInArchive = pathInArchive # self.path pretends to be os-specific because that's the way the # 'zipimport' module does it. self.path = os.path.join(archive.zipfile.filename, *(self.pathInArchive.split(ZIP_PATH_SEP))) def __cmp__(self, other): if not isinstance(other, ZipPath): return NotImplemented return cmp((self.archive, self.pathInArchive), (other.archive, other.pathInArchive)) def __repr__(self): return 'ZipPath(%r)' % (self.path,) def parent(self): splitup = self.pathInArchive.split(ZIP_PATH_SEP) if len(splitup) == 1: return self.archive return ZipPath(self.archive, ZIP_PATH_SEP.join(splitup[:-1])) def child(self, path): return ZipPath(self.archive, ZIP_PATH_SEP.join([self.pathInArchive, path])) def sibling(self, path): return self.parent().child(path) # preauthChild = child def exists(self): return self.isdir() or self.isfile() def isdir(self): return self.pathInArchive in self.archive.childmap def isfile(self): return self.pathInArchive in self.archive.zipfile.NameToInfo def islink(self): return False def listdir(self): return self.archive.childmap[self.pathInArchive].keys() def splitext(self): # ugh, terrible API, these paths are unusable; but the extension is # what we're after 99% of the time n = self.path.rfind('.') return self.path[:n], self.path[n:] def basename(self): return 
self.pathInArchive.split(ZIP_PATH_SEP)[-1] def dirname(self): # XXX NOTE: This API isn't a very good idea on filepath, but it's even # less meaningful here. return self.parent().path def open(self): return self.archive.zipfile.readfile(self.pathInArchive) def restat(self): pass class ZipArchive(ZipPath): """ I am a FilePath-like object which can wrap a zip archive as if it were a directory. """ archive = property(lambda self: self) def __init__(self, archivePathname): """Create a ZipArchive, treating the archive at archivePathname as a zip file. @param archivePathname: a str, naming a path in the filesystem. """ self.zipfile = ChunkingZipFile(archivePathname) self.path = archivePathname self.pathInArchive = '' # zipfile is already wasting O(N) memory on cached ZipInfo instances, # so there's no sense in trying to do this lazily or intelligently self.childmap = {} # map parent: list of children for name in self.zipfile.namelist(): name = name.split(ZIP_PATH_SEP) for x in range(len(name)): child = name[-x] parent = ZIP_PATH_SEP.join(name[:-x]) if parent not in self.childmap: self.childmap[parent] = {} self.childmap[parent][child] = 1 parent = '' def child(self, path): """ Create a ZipPath pointing at a path within the archive. @param path: a str with no path separators in it, either '/' or the system path separator, if it's different. """ return ZipPath(self, path) def exists(self): """ Returns true if the underlying archive exists. """ return FilePath(self.zipfile.filename).exists()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/zippath.py
zippath.py
from twisted.python.runtime import platform import os import errno import random import sha from os.path import isabs, exists, normpath, abspath, splitext from os.path import basename, dirname from os.path import join as joinpath from os import sep as slash from os import listdir, utime, stat from stat import ST_MODE, ST_MTIME, ST_ATIME, ST_CTIME, ST_SIZE from stat import S_ISREG, S_ISDIR try: from os.path import islink except ImportError: def islink(path): return False try: from os import urandom as randomBytes except ImportError: def randomBytes(n): randomData = [random.randrange(256) for n in xrange(n)] return ''.join(map(chr, randomData)) try: from base64 import urlsafe_b64encode as armor except ImportError: def armor(s): return s.encode('hex') class InsecurePath(Exception): pass def _secureEnoughString(): """ Create a pseudorandom, 16-character string for use in secure filenames. """ return armor(sha.new(randomBytes(64)).digest())[:16] class _PathHelper: """ Abstract helper class also used by ZipPath; implements certain utility methods. """ def getContent(self): return self.open().read() def children(self): return map(self.child, self.listdir()) def walk(self): """ Yield myself, then each of my children, and each of those children's children in turn. @return: a generator yielding FilePath-like objects. """ yield self if self.isdir(): for c in self.children(): for subc in c.walk(): yield subc def sibling(self, path): return self.parent().child(path) def segmentsFrom(self, ancestor): """ Return a list of segments between a child and its ancestor. For example, in the case of a path X representing /a/b/c/d and a path Y representing /a/b, C{Y.segmentsFrom(X)} will return C{['c', 'd']}. @param ancestor: an instance of the same class as self, ostensibly an ancestor of self. @raise: ValueError if the 'ancestor' parameter is not actually an ancestor, i.e. a path for /x/y/z is passed as an ancestor for /a/b/c/d. 
@return: a list of strs """ # this might be an unnecessarily inefficient implementation but it will # work on win32 and for zipfiles; later I will deterimine if the # obvious fast implemenation does the right thing too f = self p = f.parent() segments = [] while f != ancestor and p != f: segments[0:0] = [f.basename()] f = p p = p.parent() if f == ancestor and segments: return segments raise ValueError("%r not parent of %r" % (ancestor, self)) class FilePath(_PathHelper): """I am a path on the filesystem that only permits 'downwards' access. Instantiate me with a pathname (for example, FilePath('/home/myuser/public_html')) and I will attempt to only provide access to files which reside inside that path. I may be a path to a file, a directory, or a file which does not exist. The correct way to use me is to instantiate me, and then do ALL filesystem access through me. In other words, do not import the 'os' module; if you need to open a file, call my 'open' method. If you need to list a directory, call my 'path' method. Even if you pass me a relative path, I will convert that to an absolute path internally. @type alwaysCreate: C{bool} @ivar alwaysCreate: When opening this file, only succeed if the file does not already exist. """ # __slots__ = 'path abs'.split() statinfo = None path = None def __init__(self, path, alwaysCreate=False): self.path = abspath(path) self.alwaysCreate = alwaysCreate def __getstate__(self): d = self.__dict__.copy() if d.has_key('statinfo'): del d['statinfo'] return d def child(self, path): if platform.isWindows() and path.count(":"): # Catch paths like C:blah that don't have a slash raise InsecurePath("%r contains a colon." 
% (path,)) norm = normpath(path) if slash in norm: raise InsecurePath("%r contains one or more directory separators" % (path,)) newpath = abspath(joinpath(self.path, norm)) if not newpath.startswith(self.path): raise InsecurePath("%r is not a child of %s" % (newpath, self.path)) return self.clonePath(newpath) def preauthChild(self, path): """ Use me if `path' might have slashes in it, but you know they're safe. (NOT slashes at the beginning. It still needs to be a _child_). """ newpath = abspath(joinpath(self.path, normpath(path))) if not newpath.startswith(self.path): raise InsecurePath("%s is not a child of %s" % (newpath, self.path)) return self.clonePath(newpath) def childSearchPreauth(self, *paths): """Return my first existing child with a name in 'paths'. paths is expected to be a list of *pre-secured* path fragments; in most cases this will be specified by a system administrator and not an arbitrary user. If no appropriately-named children exist, this will return None. """ p = self.path for child in paths: jp = joinpath(p, child) if exists(jp): return self.clonePath(jp) def siblingExtensionSearch(self, *exts): """Attempt to return a path with my name, given multiple possible extensions. Each extension in exts will be tested and the first path which exists will be returned. If no path exists, None will be returned. If '' is in exts, then if the file referred to by this path exists, 'self' will be returned. The extension '*' has a magic meaning, which means "any path that begins with self.path+'.' is acceptable". """ p = self.path for ext in exts: if not ext and self.exists(): return self if ext == '*': basedot = basename(p)+'.' 
for fn in listdir(dirname(p)): if fn.startswith(basedot): return self.clonePath(joinpath(dirname(p), fn)) p2 = p + ext if exists(p2): return self.clonePath(p2) def siblingExtension(self, ext): return self.clonePath(self.path+ext) def open(self, mode='r'): if self.alwaysCreate: assert 'a' not in mode, "Appending not supported when alwaysCreate == True" return self.create() return open(self.path, mode+'b') # stat methods below def restat(self, reraise=True): try: self.statinfo = stat(self.path) except OSError: self.statinfo = 0 if reraise: raise def getsize(self): st = self.statinfo if not st: self.restat() st = self.statinfo return st[ST_SIZE] def getmtime(self): st = self.statinfo if not st: self.restat() st = self.statinfo return st[ST_MTIME] def getctime(self): st = self.statinfo if not st: self.restat() st = self.statinfo return st[ST_CTIME] def getatime(self): st = self.statinfo if not st: self.restat() st = self.statinfo return st[ST_ATIME] def exists(self): if self.statinfo: return True elif self.statinfo is None: self.restat(False) return self.exists() else: return False def isdir(self): st = self.statinfo if not st: self.restat(False) st = self.statinfo if not st: return False return S_ISDIR(st[ST_MODE]) def isfile(self): st = self.statinfo if not st: self.restat(False) st = self.statinfo if not st: return False return S_ISREG(st[ST_MODE]) def islink(self): # We can't use cached stat results here, because that is the stat of # the destination - (see #1773) which in *every case* but this one is # the right thing to use. We could call lstat here and use that, but # it seems unlikely we'd actually save any work that way. 
-glyph return islink(self.path) def isabs(self): return isabs(self.path) def listdir(self): return listdir(self.path) def splitext(self): return splitext(self.path) def __repr__(self): return 'FilePath(%r)' % (self.path,) def touch(self): try: self.open('a').close() except IOError: pass utime(self.path, None) def remove(self): if self.isdir(): for child in self.children(): child.remove() os.rmdir(self.path) else: os.remove(self.path) self.restat(False) def makedirs(self): return os.makedirs(self.path) def globChildren(self, pattern): """ Assuming I am representing a directory, return a list of FilePaths representing my children that match the given pattern. """ import glob path = self.path[-1] == '/' and self.path + pattern or slash.join([self.path, pattern]) return map(self.clonePath, glob.glob(path)) def basename(self): return basename(self.path) def dirname(self): return dirname(self.path) def parent(self): return self.clonePath(self.dirname()) def setContent(self, content, ext='.new'): sib = self.siblingExtension(ext) sib.open('w').write(content) if platform.isWindows() and exists(self.path): os.unlink(self.path) os.rename(sib.path, self.path) # new in 2.2.0 def __cmp__(self, other): if not isinstance(other, FilePath): return NotImplemented return cmp(self.path, other.path) def createDirectory(self): os.mkdir(self.path) def requireCreate(self, val=1): self.alwaysCreate = val def create(self): """Exclusively create a file, only if this file previously did not exist. """ fdint = os.open(self.path, (os.O_EXCL | os.O_CREAT | os.O_RDWR)) # XXX TODO: 'name' attribute of returned files is not mutable or # settable via fdopen, so this file is slighly less functional than the # one returned from 'open' by default. send a patch to Python... return os.fdopen(fdint, 'w+b') def temporarySibling(self): """ Create a path naming a temporary sibling of this path in a secure fashion. 
""" sib = self.sibling(_secureEnoughString() + self.basename()) sib.requireCreate() return sib _chunkSize = 2 ** 2 ** 2 ** 2 def copyTo(self, destination): # XXX TODO: *thorough* audit and documentation of the exact desired # semantics of this code. Right now the behavior of existent # destination symlinks is convenient, and quite possibly correct, but # its security properties need to be explained. if self.isdir(): if not destination.exists(): destination.createDirectory() for child in self.children(): destChild = destination.child(child.basename()) child.copyTo(destChild) elif self.isfile(): writefile = destination.open('w') readfile = self.open() while 1: # XXX TODO: optionally use os.open, os.read and O_DIRECT and # use os.fstatvfs to determine chunk sizes and make # *****sure**** copy is page-atomic; the following is good # enough for 99.9% of everybody and won't take a week to audit # though. chunk = readfile.read(self._chunkSize) writefile.write(chunk) if len(chunk) < self._chunkSize: break writefile.close() readfile.close() else: # If you see the following message because you want to copy # symlinks, fifos, block devices, character devices, or unix # sockets, please feel free to add support to do sensible things in # reaction to those types! raise NotImplementedError( "Only copying of files and directories supported") def moveTo(self, destination): try: os.rename(self.path, destination.path) self.restat(False) except OSError, ose: if ose.errno == errno.EXDEV: # man 2 rename, ubuntu linux 5.10 "breezy": # oldpath and newpath are not on the same mounted filesystem. # (Linux permits a filesystem to be mounted at multiple # points, but rename(2) does not work across different mount # points, even if the same filesystem is mounted on both.) # that means it's time to copy trees of directories! secsib = destination.temporarySibling() self.copyTo(secsib) # slow secsib.moveTo(destination) # visible # done creating new stuff. let's clean me up. 
mysecsib = self.temporarySibling() self.moveTo(mysecsib) # visible mysecsib.remove() # slow else: raise FilePath.clonePath = FilePath
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/filepath.py
filepath.py
# Generates zsh completion function files for Twisted's command-line scripts
# by introspecting each script's twisted.python.usage.Options subclass.
import itertools, sys, commands, os.path

from twisted.python import reflect, util, usage
from twisted.scripts.mktap import IServiceMaker

class MyOptions(usage.Options):
    """
    Options for this file
    """
    longdesc = ""
    synopsis = "Usage: python zshcomp.py [--install | -i] | <output directory>"
    optFlags = [["install", "i",
                 'Output files to the "installation" directory ' \
                 '(twisted/python/zsh in the currently active ' \
                 'Twisted package)']]
    optParameters = [["directory", "d", None,
                      "Output files to this directory"]]

    def postOptions(self):
        # Exactly one of --install / --directory must be supplied, and a
        # given --directory must already exist.
        if self['install'] and self['directory']:
            raise usage.UsageError, "Can't have --install and " \
                                    "--directory at the same time"
        if not self['install'] and not self['directory']:
            raise usage.UsageError, "Not enough arguments"
        if self['directory'] and not os.path.isdir(self['directory']):
            raise usage.UsageError, "%s is not a directory" % self['directory']

class Builder:
    """
    Writes a zsh completion function for a single, plain (no sub-command)
    Twisted command.
    """
    def __init__(self, cmd_name, options, file):
        """
        @type cmd_name: C{str}
        @param cmd_name: The name of the command
        @type options: C{twisted.usage.Options}
        @param options: The C{twisted.usage.Options} instance defined for
            this command
        @type file: C{file}
        @param file: The C{file} to write the completion function to
        """
        self.cmd_name = cmd_name
        self.options = options
        self.file = file

    def write(self):
        """
        Write the completion function to the file given to __init__
        @return: C{None}
        """
        # by default, we just write out a single call to _arguments
        self.file.write('#compdef %s\n' % (self.cmd_name,))
        gen = ArgumentsGenerator(self.cmd_name, self.options, self.file)
        gen.write()

class SubcommandBuilder(Builder):
    """
    Use this builder for commands that have sub-commands. twisted.python.usage
    has the notion of sub-commands that are defined using an entirely seperate
    Options class.
    """
    # Plugin interface whose providers supply the sub-commands; set by
    # subclasses.
    interface = None
    # Human-readable label used in the zsh _describe call; set by subclasses.
    subcmdLabel = None

    def write(self):
        """
        Write the completion function to the file given to __init__
        @return: C{None}
        """
        self.file.write('#compdef %s\n' % (self.cmd_name,))
        self.file.write('local _zsh_subcmds_array\n_zsh_subcmds_array=(\n')
        from twisted import plugin as newplugin
        plugins = newplugin.getPlugins(self.interface)

        for p in plugins:
            self.file.write('"%s:%s"\n' % (p.tapname, p.description))
        self.file.write(")\n\n")

        self.options.__class__.zsh_extras = ['*::subcmd:->subcmd']
        gen = ArgumentsGenerator(self.cmd_name, self.options, self.file)
        gen.write()

        # NOTE(review): the newlines inside this zsh heredob were collapsed
        # in this copy of the source; the layout below is reconstructed.
        self.file.write("""if (( CURRENT == 1 )); then
  _describe "%s" _zsh_subcmds_array && ret=0
fi
(( ret )) || return 0

service="$words[1]"

case $service in\n""" % (self.subcmdLabel,))

        plugins = newplugin.getPlugins(self.interface)
        for p in plugins:
            self.file.write(p.tapname + ")\n")
            gen = ArgumentsGenerator(p.tapname, p.options(), self.file)
            gen.write()
            self.file.write(";;\n")
        self.file.write("*) _message \"don't know how to" \
                        " complete $service\";;\nesac")

class MktapBuilder(SubcommandBuilder):
    """
    Builder for the mktap command
    """
    interface = IServiceMaker
    subcmdLabel = 'tap to build'

class TwistdBuilder(SubcommandBuilder):
    """
    Builder for the twistd command
    """
    interface = IServiceMaker
    subcmdLabel = 'service to run'

class ArgumentsGenerator:
    """
    Generate a call to the zsh _arguments completion function
    based on data in a usage.Options subclass
    """
    def __init__(self, cmd_name, options, file):
        """
        @type cmd_name: C{str}
        @param cmd_name: The name of the command
        @type options: C{twisted.usage.Options}
        @param options: The C{twisted.usage.Options} instance defined
            for this command
        @type file: C{file}
        @param file: The C{file} to write the completion function to
        """
        self.cmd_name = cmd_name
        self.options = options
        self.file = file

        # zsh_* metadata optionally declared on the Options class.
        self.altArgDescr = {}
        self.actionDescr = {}
        self.multiUse = []
        self.mutuallyExclusive = []
        self.actions = {}
        self.extras = []

        aCL = reflect.accumulateClassList
        aCD = reflect.accumulateClassDict

        aCD(options.__class__, 'zsh_altArgDescr', self.altArgDescr)
        aCD(options.__class__, 'zsh_actionDescr', self.actionDescr)
        aCL(options.__class__, 'zsh_multiUse', self.multiUse)
        aCL(options.__class__, 'zsh_mutuallyExclusive', self.mutuallyExclusive)
        aCD(options.__class__, 'zsh_actions', self.actions)
        aCL(options.__class__, 'zsh_extras', self.extras)

        optFlags = []
        optParams = []

        aCL(options.__class__, 'optFlags', optFlags)
        aCL(options.__class__, 'optParameters', optParams)

        # Normalize entries to their full [long, short, descr(, default)]
        # shape so later indexing is safe.
        for i, optList in enumerate(optFlags):
            if len(optList) != 3:
                optFlags[i] = util.padTo(3, optList)

        for i, optList in enumerate(optParams):
            if len(optList) != 4:
                optParams[i] = util.padTo(4, optList)

        self.optFlags = optFlags
        self.optParams = optParams

        # Index both lists by long option name for O(1) lookup.
        optParams_d = {}
        for optList in optParams:
            optParams_d[optList[0]] = optList[1:]
        self.optParams_d = optParams_d

        optFlags_d = {}
        for optList in optFlags:
            optFlags_d[optList[0]] = optList[1:]
        self.optFlags_d = optFlags_d

        optAll_d = {}
        optAll_d.update(optParams_d)
        optAll_d.update(optFlags_d)
        self.optAll_d = optAll_d

        self.addAdditionalOptions()

        # makes sure none of the zsh_ data structures reference option
        # names that don't exist. (great for catching typos)
        self.verifyZshNames()

        self.excludes = self.makeExcludesDict()

    def write(self):
        """
        Write the zsh completion code to the file given to __init__
        @return: C{None}
        """
        self.writeHeader()
        self.writeExtras()
        self.writeOptions()
        self.writeFooter()

    def writeHeader(self):
        """
        This is the start of the code that calls _arguments
        @return: C{None}
        """
        self.file.write('_arguments -s -A "-*" \\\n')

    def writeOptions(self):
        """
        Write out zsh code for each option in this command
        @return: C{None}
        """
        optNames = self.optAll_d.keys()
        optNames.sort()
        for long in optNames:
            self.writeOpt(long)

    def writeExtras(self):
        """
        Write out the "extras" list. These are just passed verbatim to the
        _arguments call
        @return: C{None}
        """
        for s in self.extras:
            self.file.write(escape(s))
            self.file.write(' \\\n')

    def writeFooter(self):
        """
        Write the last bit of code that finishes the call to _arguments
        @return: C{None}
        """
        self.file.write('&& return 0\n')

    def verifyZshNames(self):
        """
        Ensure that none of the names given in zsh_* variables are typoed
        @return: C{None}
        @raise ValueError: Raised if unknown option names have been given in
                           zsh_* variables
        """
        def err(name):
            raise ValueError, "Unknown option name \"%s\" found while\n" \
                              "examining zsh_ attributes for the %s command" % (
                                  name, self.cmd_name)

        for name in itertools.chain(self.altArgDescr, self.actionDescr,
                                    self.actions, self.multiUse):
            if name not in self.optAll_d:
                err(name)

        for seq in self.mutuallyExclusive:
            for name in seq:
                if name not in self.optAll_d:
                    err(name)

    def excludeStr(self, long, buildShort=False):
        """
        Generate an "exclusion string" for the given option
        @type long: C{str}
        @param long: The long name of the option
                     (i.e. "verbose" instead of "v")
        @type buildShort: C{bool}
        @param buildShort: May be True to indicate we're building an excludes
                           string for the short option that correspondes to
                           the given long opt
        @return: The generated C{str}
        """
        if long in self.excludes:
            exclusions = self.excludes[long][:]
        else:
            exclusions = []

        # if long isn't a multiUse option (can't appear on the cmd line more
        # than once), then we have to exclude the short option if we're
        # building for the long option, and vice versa.
        if long not in self.multiUse:
            if buildShort is False:
                short = self.getShortOption(long)
                if short is not None:
                    exclusions.append(short)
            else:
                exclusions.append(long)

        if not exclusions:
            return ''

        strings = []
        for optName in exclusions:
            if len(optName) == 1:
                # short option
                strings.append("-" + optName)
            else:
                strings.append("--" + optName)
        return "(%s)" % " ".join(strings)

    def makeExcludesDict(self):
        """
        @return: A C{dict} that maps each option name appearing in
        self.mutuallyExclusive to a list of those option names that is it
        mutually exclusive with (can't appear on the cmd line with)
        """

        #create a mapping of long option name -> single character name
        longToShort = {}
        for optList in itertools.chain(self.optParams, self.optFlags):
            try:
                if optList[1] != None:
                    longToShort[optList[0]] = optList[1]
            except IndexError:
                pass

        excludes = {}
        for lst in self.mutuallyExclusive:
            for i, long in enumerate(lst):
                # every other name in the group excludes this one...
                tmp = []
                tmp.extend(lst[:i])
                tmp.extend(lst[i+1:])
                # ...including the short spellings of those names.
                for name in tmp[:]:
                    if name in longToShort:
                        tmp.append(longToShort[name])

                if long in excludes:
                    excludes[long].extend(tmp)
                else:
                    excludes[long] = tmp
        return excludes

    def writeOpt(self, long):
        """
        Write out the zsh code for the given argument. This is just part of the
        one big call to _arguments
        @type long: C{str}
        @param long: The long name of the option
                     (i.e. "verbose" instead of "v")
        @return: C{None}
        """
        if long in self.optFlags_d:
            # It's a flag option. Not one that takes a parameter.
            long_field = "--%s" % long
        else:
            long_field = "--%s=" % long

        short = self.getShortOption(long)
        if short != None:
            short_field = "-" + short
        else:
            short_field = ''

        descr = self.getDescription(long)
        # Square brackets are syntax to _arguments; escape literal ones.
        descr_field = descr.replace("[", "\[")
        descr_field = descr_field.replace("]", "\]")
        descr_field = '[%s]' % descr_field

        if long in self.actionDescr:
            actionDescr_field = self.actionDescr[long]
        else:
            actionDescr_field = descr

        action_field = self.getAction(long)
        if long in self.multiUse:
            multi_field = '*'
        else:
            multi_field = ''

        longExclusions_field = self.excludeStr(long)

        if short:
            #we have to write an extra line for the short option if we have one
            shortExclusions_field = self.excludeStr(long, buildShort=True)
            self.file.write(escape('%s%s%s%s%s' %
                (shortExclusions_field, multi_field, short_field,
                 descr_field, action_field)))
            self.file.write(' \\\n')

        self.file.write(escape('%s%s%s%s%s' %
            (longExclusions_field, multi_field, long_field,
             descr_field, action_field)))
        self.file.write(' \\\n')

    def getAction(self, long):
        """
        Return a zsh "action" string for the given argument
        @return: C{str}
        """
        if long in self.actions:
            if callable(self.actions[long]):
                action = self.actions[long]()
            else:
                action = self.actions[long]
            return ":%s:%s" % (self.getActionDescr(long), action)
        if long in self.optParams_d:
            # parameters default to filename completion
            return ':%s:_files' % self.getActionDescr(long)
        return ''

    def getActionDescr(self, long):
        """
        Return the description to be used when this argument is completed
        @return: C{str}
        """
        if long in self.actionDescr:
            return self.actionDescr[long]
        else:
            return long

    def getDescription(self, long):
        """
        Return the description to be used for this argument
        @return: C{str}
        """
        #check if we have an alternate descr for this arg, and if so use it
        if long in self.altArgDescr:
            return self.altArgDescr[long]

        #otherwise we have to get it from the optFlags or optParams
        try:
            descr = self.optFlags_d[long][1]
        except KeyError:
            try:
                descr = self.optParams_d[long][2]
            except KeyError:
                descr = None

        if descr is not None:
            return descr

        # lets try to get it from the opt_foo method doc string if there is one
        longMangled = long.replace('-', '_') # this is what t.p.usage does
        obj = getattr(self.options, 'opt_%s' % longMangled, None)
        if obj:
            descr = descrFromDoc(obj)
            if descr is not None:
                return descr

        return long # we really ought to have a good description to use

    def getShortOption(self, long):
        """
        Return the short option letter or None
        @return: C{str} or C{None}
        """
        optList = self.optAll_d[long]
        try:
            return optList[0] or None
        except IndexError:
            pass

    def addAdditionalOptions(self):
        """
        Add additional options to the optFlags and optParams lists.
        These will be defined by 'opt_foo' methods of the Options subclass
        @return: C{None}
        """
        methodsDict = {}
        reflect.accumulateMethods(self.options, methodsDict, 'opt_')
        # single-letter opt_X methods are short aliases for some long option
        methodToShort = {}
        for name in methodsDict.copy():
            if len(name) == 1:
                methodToShort[methodsDict[name]] = name
                del methodsDict[name]

        for methodName, methodObj in methodsDict.items():
            long = methodName.replace('_', '-') # t.p.usage does this
            # if this option is already defined by the optFlags or
            # optParameters then we don't want to override that data
            if long in self.optAll_d:
                continue

            descr = self.getDescription(long)

            short = None
            if methodObj in methodToShort:
                short = methodToShort[methodObj]

            # argcount 2 == (self, arg): takes a parameter;
            # argcount 1 == (self,): plain flag.
            reqArgs = methodObj.im_func.func_code.co_argcount
            if reqArgs == 2:
                self.optParams.append([long, short, None, descr])
                self.optParams_d[long] = [short, None, descr]
                self.optAll_d[long] = [short, None, descr]
            elif reqArgs == 1:
                self.optFlags.append([long, short, descr])
                self.optFlags_d[long] = [short, descr]
                self.optAll_d[long] = [short, None, descr]
            else:
                raise TypeError, '%r has wrong number ' \
                                 'of arguments' % (methodObj,)

def descrFromDoc(obj):
    """
    Generate an appropriate description from docstring of the given object
    """
    if obj.__doc__ is None:
        return None

    lines = obj.__doc__.split("\n")
    descr = None
    try:
        if lines[0] != "" and not lines[0].isspace():
            descr = lines[0].lstrip()
        # skip first line if it's blank
        elif lines[1] != "" and not lines[1].isspace():
            descr = lines[1].lstrip()
    except IndexError:
        pass
    return descr

def firstLine(s):
    """
    Return the first line of the given string
    """
    try:
        i = s.index('\n')
        return s[:i]
    except ValueError:
        return s

def escape(str):
    """
    Shell escape the given string
    """
    return commands.mkarg(str)[1:]

def siteFunctionsPath():
    """
    Return the path to the system-wide site-functions directory or
    C{None} if it cannot be determined
    """
    # NOTE(review): bare except silently swallows all failures here;
    # deliberate best-effort probing of the zsh configuration.
    try:
        cmd = "zsh -f -c 'echo ${(M)fpath:#/*/site-functions}'"
        output = commands.getoutput(cmd)
        if os.path.isdir(output):
            return output
    except:
        pass

# (command name, module holding the Options class, Options class name).
# A None module means "emit an empty stub file" -- see NOTE below.
generateFor = [('conch', 'twisted.conch.scripts.conch', 'ClientOptions'),
               ('mktap', 'twisted.scripts.mktap', 'FirstPassOptions'),
               ('trial', 'twisted.scripts.trial', 'Options'),
               ('cftp', 'twisted.conch.scripts.cftp', 'ClientOptions'),
               ('tapconvert', 'twisted.scripts.tapconvert', 'ConvertOptions'),
               ('twistd', 'twisted.scripts.twistd', 'ServerOptions'),
               ('ckeygen', 'twisted.conch.scripts.ckeygen', 'GeneralOptions'),
               ('lore', 'twisted.lore.scripts.lore', 'Options'),
               ('pyhtmlizer', 'twisted.scripts.htmlizer', 'Options'),
               ('tap2deb', 'twisted.scripts.tap2deb', 'MyOptions'),
               ('tkconch', 'twisted.conch.scripts.tkconch', 'GeneralOptions'),
               ('manhole', 'twisted.scripts.manhole', 'MyOptions'),
               ('tap2rpm', 'twisted.scripts.tap2rpm', 'MyOptions'),
               ('websetroot', None, None),
               ('tkmktap', None, None),
               ]
# NOTE: the commands using None above are no longer included in Twisted.
# However due to limitations in zsh's completion system the version of
# _twisted_zsh_stub shipped with zsh contains a static list of Twisted's
# commands.
# It will display errors if completion functions for these missing
# commands are not found :( So we just include dummy (empty) completion
# function files

# Commands whose completion needs a sub-command-aware builder.
specialBuilders = {'mktap' : MktapBuilder,
                   'twistd' : TwistdBuilder}

def makeCompFunctionFiles(out_path, generateFor=generateFor,
                          specialBuilders=specialBuilders):
    """
    Generate completion function files in the given directory for all
    twisted commands

    @type out_path: C{str}
    @param out_path: The path to the directory to generate completion function
                     fils in

    @param generateFor: Sequence in the form of the 'generateFor' top-level
                        variable as defined in this module. Indicates what
                        commands to build completion files for.

    @param specialBuilders: Sequence in the form of the 'specialBuilders'
                            top-level variable as defined in this module.
                            Indicates what commands require a special
                            Builder class.

    @return: C{list} of 2-tuples of the form (cmd_name, error) indicating
             commands that we skipped building completions for. cmd_name
             is the name of the skipped command, and error is the Exception
             that was raised when trying to import the script module.
             Commands are usually skipped due to a missing dependency,
             e.g. Tkinter.
    """
    skips = []
    for cmd_name, module_name, class_name in generateFor:
        if module_name is None:
            # create empty file
            f = _openCmdFile(out_path, cmd_name)
            f.close()
            continue
        try:
            m = __import__('%s' % (module_name,), None, None, (class_name))
            f = _openCmdFile(out_path, cmd_name)
            o = getattr(m, class_name)() # instantiate Options class
            if cmd_name in specialBuilders:
                b = specialBuilders[cmd_name](cmd_name, o, f)
                b.write()
            else:
                b = Builder(cmd_name, o, f)
                b.write()
        except Exception, e:
            # skip (and report) commands whose modules can't be imported,
            # typically because an optional dependency is missing.
            skips.append( (cmd_name, e) )
            continue
    return skips

def _openCmdFile(out_path, cmd_name):
    # zsh convention: completion function files are named "_<command>".
    return file(os.path.join(out_path, '_'+cmd_name), 'w')

def run():
    """
    Command-line entry point: parse options and generate completion files
    either into the active Twisted package or into --directory.
    """
    options = MyOptions()
    try:
        options.parseOptions(sys.argv[1:])
    except usage.UsageError, e:
        print e
        print options.getUsage()
        sys.exit(2)

    if options['install']:
        import twisted
        dir = os.path.join(os.path.dirname(twisted.__file__),
                           "python", "zsh")
        skips = makeCompFunctionFiles(dir)
    else:
        skips = makeCompFunctionFiles(options['directory'])

    for cmd_name, error in skips:
        sys.stderr.write("zshcomp: Skipped building for %s. Script module " \
                         "could not be imported:\n" % (cmd_name,))
        sys.stderr.write(str(error)+'\n')

    if skips:
        # non-zero exit so callers notice partial generation
        sys.exit(3)

if __name__ == '__main__':
    run()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/zshcomp.py
zshcomp.py
#
import urlparse
import urllib

class URLPath:
    """
    A mutable-ish representation of a URL, split into its five components,
    with helpers for deriving related URLs (child, sibling, parent, click).
    """
    def __init__(self, scheme='', netloc='localhost', path='',
                 query='', fragment=''):
        # empty scheme/path default to 'http' and '/'
        self.scheme = scheme or 'http'
        self.netloc = netloc
        self.path = path or '/'
        self.query = query
        self.fragment = fragment

    # Class-level caches for the split path; populated lazily by pathList().
    # _qpathlist holds the quoted segments, _uqpathlist the unquoted ones.
    _qpathlist = None
    _uqpathlist = None

    def pathList(self, unquote=0, copy=1):
        """
        Return the path split on '/'; unquoted if C{unquote} is true.
        With C{copy} (the default) a fresh list is returned so callers
        may mutate it without corrupting the cache.
        """
        if self._qpathlist is None:
            self._qpathlist = self.path.split('/')
            self._uqpathlist = map(urllib.unquote, self._qpathlist)
        if unquote:
            result = self._uqpathlist
        else:
            result = self._qpathlist
        if copy:
            return result[:]
        else:
            return result

    def fromString(klass, st):
        """Build a URLPath from a URL string."""
        t = urlparse.urlsplit(st)
        u = klass(*t)
        return u
    fromString = classmethod(fromString)

    def fromRequest(klass, request):
        """Build a URLPath from a twisted.web request's pre-path URL."""
        return klass.fromString(request.prePathURL())
    fromRequest = classmethod(fromRequest)

    def _pathMod(self, newpathsegs, keepQuery):
        # Helper: rebuild a URLPath with the given path segments,
        # optionally carrying the current query string along.
        if keepQuery:
            query = self.query
        else:
            query = ''
        return URLPath(self.scheme,
                       self.netloc,
                       '/'.join(newpathsegs),
                       query)

    def sibling(self, path, keepQuery=0):
        # replace the last segment
        l = self.pathList()
        l[-1] = path
        return self._pathMod(l, keepQuery)

    def child(self, path, keepQuery=0):
        # a trailing '/' yields an empty last segment, which we replace
        l = self.pathList()
        if l[-1] == '':
            l[-1] = path
        else:
            l.append(path)
        return self._pathMod(l, keepQuery)

    def parent(self, keepQuery=0):
        l = self.pathList()
        if l[-1] == '':
            # we're a directory: drop the last real segment
            del l[-2]
        else:
            # We are a file, such as http://example.com/foo/bar
            # our parent directory is http://example.com/
            l.pop()
            l[-1] = ''
        return self._pathMod(l, keepQuery)

    def here(self, keepQuery=0):
        # the enclosing "directory" of this URL
        l = self.pathList()
        if l[-1] != '':
            l[-1] = ''
        return self._pathMod(l, keepQuery)

    def click(self, st):
        """Return a path which is the URL where a browser would presumably
        take you if you clicked on a link with an HREF as given.
        """
        scheme, netloc, path, query, fragment = urlparse.urlsplit(st)
        if not scheme:
            scheme = self.scheme
        if not netloc:
            netloc = self.netloc
            # NOTE(review): nesting reconstructed from a collapsed copy of
            # this source -- relative resolution only applies when the link
            # has no netloc of its own; confirm against upstream urlpath.py.
            if not path:
                path = self.path
                if not query:
                    query = self.query
            elif path[0] != '/':
                # relative path: resolve against our directory
                l = self.pathList()
                l[-1] = path
                path = '/'.join(l)

        return URLPath(scheme,
                       netloc,
                       path,
                       query,
                       fragment)

    def __str__(self):
        x = urlparse.urlunsplit((
            self.scheme, self.netloc, self.path,
            self.query, self.fragment))
        return x

    def __repr__(self):
        return ('URLPath(scheme=%r, netloc=%r, path=%r, query=%r, fragment=%r)'
                % (self.scheme, self.netloc, self.path, self.query,
                   self.fragment))
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/python/urlpath.py
urlpath.py
# """Service architecture for Twisted Services are arranged in a hierarchy. At the leafs of the hierarchy, the services which actually interact with the outside world are started. Services can be named or anonymous -- usually, they will be named if there is need to access them through the hierarchy (from a parent or a sibling). API Stability: unstable Maintainer: U{Moshe Zadka<mailto:[email protected]>} """ from zope.interface import implements, Interface, Attribute from twisted.python import components from twisted.internet import defer from twisted.persisted import sob class IServiceMaker(Interface): """ An object which can be used to construct services in a flexible way. This interface should most often be implemented along with twisted.plugin.IPlugin, and will most often be used by the 'twistd' command. """ tapname = Attribute( "A short string naming this Twisted plugin, for example 'web' or " "'pencil'. This name will be used as the subcommand of 'twistd'.") description = Attribute( "A brief summary of the features provided by this " "Twisted application plugin.") options = Attribute( "A C{twisted.python.usage.Options} subclass defining the" "configuration options for this application.") def makeService(options): """ Create and return an object providing L{twisted.application.service.IService}. @param options: A mapping (typically a C{dict} or C{twisted.python.usage.Options} instance) of configuration options to desired configuration values. """ class IService(Interface): """ A service. Run start-up and shut-down code at the appropriate times. @type name: C{string} @ivar name: The name of the service (or None) @type running: C{boolean} @ivar running: Whether the service is running. """ def setName(name): """Set the name of the service. @type name: C{str} @raise RuntimeError: Raised if the service already has a parent. """ def setServiceParent(parent): """Set the parent of the service. 
@type name: L{IServiceCollection} @raise RuntimeError: Raised if the service already has a parent or if the service has a name and the parent already has a child by that name. """ def disownServiceParent(): """Remove the parent of the service. @rtype: L{Deferred} @return: a deferred which is triggered when the service has finished shutting down. If shutting down is immediate, a value can be returned (usually, None). """ def startService(): """Start the service.""" def stopService(): """Stop the service. @rtype: L{Deferred} @return: a deferred which is triggered when the service has finished shutting down. If shutting down is immediate, a value can be returned (usually, None). """ def privilegedStartService(): """Do preparation work for starting the service. Here things which should be done before changing directory, root or shedding privileges are done.""" class Service: """ Base class for services Most services should inherit from this class. It handles the book-keeping reponsibilities of starting and stopping, as well as not serializing this book-keeping information. """ implements(IService) running = 0 name = None parent = None def __getstate__(self): dict = self.__dict__.copy() if dict.has_key("running"): del dict['running'] return dict def setName(self, name): if self.parent is not None: raise RuntimeError("cannot change name when parent exists") self.name = name def setServiceParent(self, parent): if self.parent is not None: self.disownServiceParent() parent = IServiceCollection(parent, parent) self.parent = parent self.parent.addService(self) def disownServiceParent(self): d = self.parent.removeService(self) self.parent = None return d def privilegedStartService(self): pass def startService(self): self.running = 1 def stopService(self): self.running = 0 class IServiceCollection(Interface): """Collection of services. Contain several services, and manage their start-up/shut-down. 
Services can be accessed by name if they have a name, and it is always possible to iterate over them. """ def getServiceNamed(name): """Get the child service with a given name. @type name: C{str} @rtype: L{IService} @raise KeyError: Raised if the service has no child with the given name. """ def __iter__(): """Get an iterator over all child services""" def addService(service): """Add a child service. @type service: L{IService} @raise RuntimeError: Raised if the service has a child with the given name. """ def removeService(service): """Remove a child service. @type service: L{IService} @raise ValueError: Raised if the given service is not a child. @rtype: L{Deferred} @return: a deferred which is triggered when the service has finished shutting down. If shutting down is immediate, a value can be returned (usually, None). """ class MultiService(Service): """Straightforward Service Container Hold a collection of services, and manage them in a simplistic way. No service will wait for another, but this object itself will not finish shutting down until all of its child services will finish. 
""" implements(IServiceCollection) def __init__(self): self.services = [] self.namedServices = {} self.parent = None def privilegedStartService(self): Service.privilegedStartService(self) for service in self: service.privilegedStartService() def startService(self): Service.startService(self) for service in self: service.startService() def stopService(self): Service.stopService(self) l = [] services = list(self) services.reverse() for service in services: l.append(defer.maybeDeferred(service.stopService)) return defer.DeferredList(l) def getServiceNamed(self, name): return self.namedServices[name] def __iter__(self): return iter(self.services) def addService(self, service): if service.name is not None: if self.namedServices.has_key(service.name): raise RuntimeError("cannot have two services with same name" " '%s'" % service.name) self.namedServices[service.name] = service self.services.append(service) if self.running: # It may be too late for that, but we will do our best service.privilegedStartService() service.startService() def removeService(self, service): if service.name: del self.namedServices[service.name] self.services.remove(service) if self.running: # Returning this so as not to lose information from the # MultiService.stopService deferred. return service.stopService() else: return None class IProcess(Interface): """Process running parameters Represents parameters for how processes should be run. @ivar processName: the name the process should have in ps (or None) @type processName: C{str} @ivar uid: the user-id the process should run under. @type uid: C{int} @ivar gid: the group-id the process should run under. @type gid: C{int} """ class Process: """Process running parameters Sets up uid/gid in the constructor, and has a default of C{None} as C{processName}. """ implements(IProcess) processName = None def __init__(self, uid=None, gid=None): """Set uid and gid. @param uid: The user ID as whom to execute the process. 
If this is None, no attempt will be made to change the UID. @param gid: The group ID as whom to execute the process. If this is None, no attempt will be made to change the GID. """ self.uid = uid self.gid = gid def Application(name, uid=None, gid=None): """Return a compound class. Return an object supporting the L{IService}, L{IServiceCollection}, L{IProcess} and L{sob.IPersistable} interfaces, with the given parameters. Always access the return value by explicit casting to one of the interfaces. """ ret = components.Componentized() for comp in (MultiService(), sob.Persistent(ret, name), Process(uid, gid)): ret.addComponent(comp, ignoreClass=1) IService(ret).setName(name) return ret def loadApplication(filename, kind, passphrase=None): """Load Application from a given file. The serialization format it was saved in should be given as C{kind}, and is one of 'pickle', 'source', 'xml' or 'python'. If C{passphrase} is given, the application was encrypted with the given passphrase. @type filename: C{str} @type kind: C{str} @type passphrase: C{str} """ if kind == 'python': application = sob.loadValueFromFile(filename, 'application', passphrase) else: application = sob.load(filename, kind, passphrase) return application __all__ = ['IServiceMaker', 'IService', 'Service', 'IServiceCollection', 'MultiService', 'IProcess', 'Process', 'Application', 'loadApplication']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/application/service.py
service.py
import sys, os, pdb, getpass, traceback, signal from twisted.python import runtime, log, usage, reflect, failure, util, logfile from twisted.persisted import sob from twisted.application import service, reactors from twisted.internet import defer from twisted import copyright # Expose the new implementation of installReactor at the old location. from twisted.application.reactors import installReactor def runWithProfiler(reactor, config): """Run reactor under standard profiler.""" try: import profile except ImportError, e: s = "Failed to import module profile: %s" % e s += """ This is most likely caused by your operating system not including profile.py due to it being non-free. Either do not use the option --profile, or install profile.py; your operating system vendor may provide it in a separate package. """ traceback.print_exc(file=log.logfile) log.msg(s) log.deferr() sys.exit('\n' + s + '\n') p = profile.Profile() p.runcall(reactor.run) if config['savestats']: p.dump_stats(config['profile']) else: # XXX - omfg python sucks tmp, sys.stdout = sys.stdout, open(config['profile'], 'a') p.print_stats() sys.stdout, tmp = tmp, sys.stdout tmp.close() def runWithHotshot(reactor, config): """Run reactor under hotshot profiler.""" try: import hotshot.stats except ImportError, e: s = "Failed to import module hotshot: %s" % e s += """ This is most likely caused by your operating system not including profile.py due to it being non-free. Either do not use the option --profile, or install profile.py; your operating system vendor may provide it in a separate package. 
""" traceback.print_exc(file=log.logfile) log.msg(s) log.deferr() sys.exit('\n' + s + '\n') # this writes stats straight out p = hotshot.Profile(config["profile"]) p.runcall(reactor.run) if config["savestats"]: # stats are automatically written to file, nothing to do return else: s = hotshot.stats.load(config["profile"]) s.strip_dirs() s.sort_stats(-1) tmp, sys.stdout = sys.stdout, open(config['profile'], 'w') s.print_stats() sys.stdout, tmp = tmp, sys.stdout tmp.close() def fixPdb(): def do_stop(self, arg): self.clear_all_breaks() self.set_continue() from twisted.internet import reactor reactor.callLater(0, reactor.stop) return 1 def help_stop(self): print """stop - Continue execution, then cleanly shutdown the twisted reactor.""" def set_quit(self): os._exit(0) pdb.Pdb.set_quit = set_quit pdb.Pdb.do_stop = do_stop pdb.Pdb.help_stop = help_stop def runReactorWithLogging(config, oldstdout, oldstderr): from twisted.internet import reactor try: if config['profile']: if not config['nothotshot']: runWithHotshot(reactor, config) else: runWithProfiler(reactor, config) elif config['debug']: sys.stdout = oldstdout sys.stderr = oldstderr if runtime.platformType == 'posix': signal.signal(signal.SIGUSR2, lambda *args: pdb.set_trace()) signal.signal(signal.SIGINT, lambda *args: pdb.set_trace()) fixPdb() pdb.runcall(reactor.run) else: reactor.run() except: if config['nodaemon']: file = oldstdout else: file = open("TWISTD-CRASH.log",'a') traceback.print_exc(file=file) file.flush() def getPassphrase(needed): if needed: return getpass.getpass('Passphrase: ') else: return None def getSavePassphrase(needed): if needed: passphrase = util.getPassword("Encryption passphrase: ") else: return None class ApplicationRunner(object): """ An object which helps running an application based on a config object. Subclass me and implement preApplication and postApplication methods. postApplication generally will want to run the reactor after starting the application. 
@ivar config: The config object, which provides a dict-like interface. @ivar application: Available in postApplication, but not preApplication. This is the application object. """ def __init__(self, config): self.config = config def run(self): """Run the application.""" self.preApplication() self.application = self.createOrGetApplication() self.postApplication() def preApplication(self): """ Override in subclass. This should set up any state necessary before loading and running the Application. """ raise NotImplementedError def postApplication(self): """ Override in subclass. This will be called after the application has been loaded (so the C{application} attribute will be set). Generally this should start the application and run the reactor. """ raise NotImplementedError def createOrGetApplication(self): """ Create or load an Application based on the parameters found in the given L{ServerOptions} instance. If a subcommand was used, the L{service.IServiceMaker} that it represents will be used to construct a service to be added to a newly-created Application. Otherwise, an application will be loaded based on parameters in the config. """ if self.config.subCommand: # If a subcommand was given, it's our responsibility to create # the application, instead of load it from a file. # loadedPlugins is set up by the ServerOptions.subCommands # property, which is iterated somewhere in the bowels of # usage.Options. plg = self.config.loadedPlugins[self.config.subCommand] ser = plg.makeService(self.config.subOptions) application = service.Application(plg.tapname) ser.setServiceParent(application) else: passphrase = getPassphrase(self.config['encrypted']) application = getApplication(self.config, passphrase) return application def getApplication(config, passphrase): s = [(config[t], t) for t in ['python', 'xml', 'source', 'file'] if config[t]][0] filename, style = s[0], {'file':'pickle'}.get(s[1],s[1]) try: log.msg("Loading %s..." 
% filename) application = service.loadApplication(filename, style, passphrase) log.msg("Loaded.") except Exception, e: s = "Failed to load application: %s" % e if isinstance(e, KeyError) and e.args[0] == "application": s += """ Could not find 'application' in the file. To use 'twistd -y', your .tac file must create a suitable object (e.g., by calling service.Application()) and store it in a variable named 'application'. twistd loads your .tac file and scans the global variables for one of this name. Please read the 'Using Application' HOWTO for details. """ traceback.print_exc(file=log.logfile) log.msg(s) log.deferr() sys.exit('\n' + s + '\n') return application def reportProfile(report_profile, name): if not report_profile: return if name: from twisted.python.dxprofile import report log.msg("Sending DXP stats...") report(report_profile, name) log.msg("DXP stats sent.") else: log.err("--report-profile specified but application has no " "name (--appname unspecified)") def _reactorZshAction(): return "(%s)" % " ".join([r.shortName for r in reactors.getReactorTypes()]) class ReactorSelectionMixin: """ Provides options for selecting a reactor to install. """ zsh_actions = {"reactor" : _reactorZshAction} def opt_help_reactors(self): """ Display a list of possibly available reactor names. """ for r in reactors.getReactorTypes(): print ' ', r.shortName, '\t', r.description raise SystemExit(0) def opt_reactor(self, shortName): """ Which reactor to use (see --help-reactors for a list of possibilities) """ # Actually actually actually install the reactor right at this very # moment, before any other code (for example, a sub-command plugin) # runs and accidentally imports and installs the default reactor. # # This could probably be improved somehow. 
installReactor(shortName) opt_r = opt_reactor class ServerOptions(usage.Options, ReactorSelectionMixin): optFlags = [['savestats', None, "save the Stats object rather than the text output of " "the profiler."], ['no_save','o', "do not save state on shutdown"], ['encrypted', 'e', "The specified tap/aos/xml file is encrypted."], ['nothotshot', None, "Don't use the 'hotshot' profiler even if it's available."]] optParameters = [['logfile','l', None, "log to a specified file, - for stdout"], ['profile', 'p', None, "Run in profile mode, dumping results to specified file"], ['file','f','twistd.tap', "read the given .tap file"], ['python','y', None, "read an application from within a Python file (implies -o)"], ['xml', 'x', None, "Read an application from a .tax file " "(Marmalade format)."], ['source', 's', None, "Read an application from a .tas file (AOT format)."], ['rundir','d','.', 'Change to a supplied directory before running'], ['report-profile', None, None, 'E-mail address to use when reporting dynamic execution ' 'profiler stats. This should not be combined with ' 'other profiling options. This will only take effect ' 'if the application to be run has an application ' 'name.']] #zsh_altArgDescr = {"foo":"use this description for foo instead"} #zsh_multiUse = ["foo", "bar"] zsh_mutuallyExclusive = [("file", "python", "xml", "source")] zsh_actions = {"file":'_files -g "*.tap"', "python":'_files -g "*.(tac|py)"', "xml":'_files -g "*.tax"', "source":'_files -g "*.tas"', "rundir":"_dirs"} #zsh_actionDescr = {"logfile":"log file name", "random":"random seed"} def __init__(self, *a, **kw): self['debug'] = False usage.Options.__init__(self, *a, **kw) def opt_debug(self): """ run the application in the Python Debugger (implies nodaemon), sending SIGUSR2 will drop into debugger """ defer.setDebugging(True) failure.startDebugMode() self['debug'] = True opt_b = opt_debug def opt_spew(self): """Print an insanely verbose log of everything that happens. 
Useful when debugging freezes or locks in complex code.""" sys.settrace(util.spewer) try: import threading except ImportError: return threading.settrace(util.spewer) def parseOptions(self, options=None): if options is None: options = sys.argv[1:] or ["--help"] usage.Options.parseOptions(self, options) def postOptions(self): if self.subCommand or self['python']: self['no_save'] = True def subCommands(self): from twisted import plugin plugins = plugin.getPlugins(service.IServiceMaker) self.loadedPlugins = {} for plug in plugins: self.loadedPlugins[plug.tapname] = plug yield (plug.tapname, None, lambda: plug.options(), plug.description) subCommands = property(subCommands) def run(runApp, ServerOptions): config = ServerOptions() try: config.parseOptions() except usage.error, ue: print config print "%s: %s" % (sys.argv[0], ue) else: runApp(config) def initialLog(): from twisted.internet import reactor log.msg("twistd %s (%s %s) starting up" % (copyright.version, sys.executable, runtime.shortPythonVersion())) log.msg('reactor class: %s' % reactor.__class__) def convertStyle(filein, typein, passphrase, fileout, typeout, encrypt): application = service.loadApplication(filein, typein, passphrase) sob.IPersistable(application).setStyle(typeout) passphrase = getSavePassphrase(encrypt) if passphrase: fileout = None sob.IPersistable(application).save(filename=fileout, passphrase=passphrase) def startApplication(application, save): from twisted.internet import reactor service.IService(application).startService() if save: p = sob.IPersistable(application) reactor.addSystemEventTrigger('after', 'shutdown', p.save, 'shutdown') reactor.addSystemEventTrigger('before', 'shutdown', service.IService(application).stopService) def getLogFile(logfilename): logPath = os.path.abspath(logfilename) logFile = logfile.LogFile(os.path.basename(logPath), os.path.dirname(logPath)) return logFile
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/application/app.py
app.py
# Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. # """Port description language This module implements a description mini-language for ports, and provides functions to parse it and to use it to directly construct appropriate network server services or to directly listen on them. Here are some examples:: >>> s=service("80", server.Site()) >>> s=service("tcp:80", server.Site()) >>> s=service("tcp:80:interface=127.0.0.1", server.Site()) >>> s=service("ssl:443", server.Site()) >>> s=service("ssl:443:privateKey=mykey.pem", server.Site()) >>> s=service("ssl:443:privateKey=mykey.pem:certKey=cert.pem", server.Site()) >>> s=service("unix:/var/run/finger", FingerFactory()) >>> s=service("unix:/var/run/finger:mode=660", FingerFactory()) >>> p=listen("80", server.Site()) >>> p=listen("tcp:80", server.Site()) >>> p=listen("tcp:80:interface=127.0.0.1", server.Site()) >>> p=listen("ssl:443", server.Site()) >>> p=listen("ssl:443:privateKey=mykey.pem", server.Site()) >>> p=listen("ssl:443:privateKey=mykey.pem:certKey=cert.pem", server.Site()) >>> p=listen("unix:/var/run/finger", FingerFactory()) >>> p=listen("unix:/var/run/finger:mode=660", FingerFactory()) See specific function documentation for more information. 
API Stability: unstable Maintainer: U{Moshe Zadka<mailto:[email protected]>} """ from __future__ import generators def _parseTCP(factory, port, interface="", backlog=50): return (int(port), factory), {'interface': interface, 'backlog': int(backlog)} def _parseUNIX(factory, address, mode='666', backlog=50): return (address, factory), {'mode': int(mode, 8), 'backlog': int(backlog)} def _parseSSL(factory, port, privateKey="server.pem", certKey=None, sslmethod=None, interface='', backlog=50): from twisted.internet import ssl if certKey is None: certKey = privateKey kw = {} if sslmethod is not None: kw['sslmethod'] = getattr(ssl.SSL, sslmethod) cf = ssl.DefaultOpenSSLContextFactory(privateKey, certKey, **kw) return ((int(port), factory, cf), {'interface': interface, 'backlog': int(backlog)}) _funcs = {"tcp": _parseTCP, "unix": _parseUNIX, "ssl": _parseSSL} _OP, _STRING = range(2) def _tokenize(description): current = '' ops = ':=' nextOps = {':': ':=', '=': ':'} description = iter(description) for n in description: if n in ops: yield _STRING, current yield _OP, n current = '' ops = nextOps[n] elif n=='\\': current += description.next() else: current += n yield _STRING, current def _parse(description): args, kw = [], {} def add(sofar): if len(sofar)==1: args.append(sofar[0]) else: kw[sofar[0]] = sofar[1] sofar = () for (type, value) in _tokenize(description): if type is _STRING: sofar += (value,) elif value==':': add(sofar) sofar = () add(sofar) return args, kw def parse(description, factory, default=None): """ Parse the description of a reliable virtual circuit server (that is, a TCP port, a UNIX domain socket or an SSL port) and return the data necessary to call the reactor methods to listen on the given socket with the given factory. An argument with no colons means a default port. Usually the default type is C{tcp}, but passing a non-C{None} value as C{default} will set that as the default. Otherwise, it is a colon-separated string. 
The first part means the type -- currently, it can only be ssl, unix or tcp. After that, comes a list of arguments. Arguments can be positional or keyword, and can be mixed. Keyword arguments are indicated by C{'name=value'}. If a value is supposed to contain a C{':'}, a C{'='} or a C{'\\'}, escape it with a C{'\\'}. For TCP, the arguments are the port (port number) and, optionally the interface (interface on which to listen) and backlog (how many clients to keep in the backlog). For UNIX domain sockets, the arguments are address (the file name of the socket) and optionally the mode (the mode bits of the file, as an octal number) and the backlog (how many clients to keep in the backlog). For SSL sockets, the arguments are the port (port number) and, optionally, the privateKey (file in which the private key is in), certKey (file in which the certification is in), sslmethod (the name of the SSL method to allow), the interface (interface on which to listen) and the backlog (how many clients to keep in the backlog). @type description: C{str} @type factory: L{twisted.internet.interfaces.IProtocolFactory} @type default: C{str} or C{None} @rtype: C{tuple} @return: a tuple of string, tuple and dictionary. The string is the name of the method (sans C{'listen'}) to call, and the tuple and dictionary are the arguments and keyword arguments to the method. @raises ValueError: if the string is formatted incorrectly. @raises KeyError: if the type is other than unix, ssl or tcp. 
""" args, kw = _parse(description) if not args or (len(args)==1 and not kw): args[0:0] = [default or 'tcp'] return (args[0].upper(),)+_funcs[args[0]](factory, *args[1:], **kw) def service(description, factory, default=None): """Return the service corresponding to a description @type description: C{str} @type factory: L{twisted.internet.interfaces.IProtocolFactory} @type default: C{str} or C{None} @rtype: C{twisted.application.service.IService} @return: the service corresponding to a description of a reliable virtual circuit server. See the documentation of the C{parse} function for description of the semantics of the arguments. """ from twisted.application import internet name, args, kw = parse(description, factory, default) return getattr(internet, name+'Server')(*args, **kw) def listen(description, factory, default=None): """Listen on a port corresponding to a description @type description: C{str} @type factory: L{twisted.internet.interfaces.IProtocolFactory} @type default: C{str} or C{None} @rtype: C{twisted.internet.interfaces.IListeningPort} @return: the port corresponding to a description of a reliable virtual circuit server. See the documentation of the C{parse} function for description of the semantics of the arguments. """ from twisted.internet import reactor name, args, kw = parse(description, factory, default) return getattr(reactor, 'listen'+name)(*args, **kw) __all__ = ['parse', 'service', 'listen']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/application/strports.py
strports.py
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Reactor-based Services

Here are services to run clients, servers and periodic services using
the reactor.

This module (dynamically) defines various Service subclasses that let
you represent clients and servers in a Service hierarchy.

They are as follows::

  TCPServer, TCPClient,
  UNIXServer, UNIXClient,
  SSLServer, SSLClient,
  UDPServer, UDPClient,
  UNIXDatagramServer, UNIXDatagramClient,
  MulticastServer

These classes take arbitrary arguments in their constructors and pass
them straight on to their respective reactor.listenXXX or
reactor.connectXXX calls.

For example, the following service starts a web server on port 8080:
C{TCPServer(8080, server.Site(r))}. See the documentation for the
reactor.listen/connect* methods for more information.

API Stability: unstable

Maintainer: U{Moshe Zadka<mailto:[email protected]>}
"""

from twisted.python import log
from twisted.application import service
from twisted.internet import task


class _VolatileDataService(service.Service):
    """Service base class whose 'volatile' attributes are not pickled."""

    # Attribute names to strip from the pickled state.
    volatile = []

    def __getstate__(self):
        d = service.Service.__getstate__(self)
        for attr in self.volatile:
            # 'in' replaces the deprecated dict.has_key() (removed in
            # Python 3, available since Python 2.2).
            if attr in d:
                del d[attr]
        return d


class _AbstractServer(_VolatileDataService):
    """Base for generated *Server services: listen on start, stop on stop."""

    privileged = True
    volatile = ['_port']
    # Suffix of the reactor.listen* method; set by the generated subclass.
    method = None

    _port = None

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def privilegedStartService(self):
        service.Service.privilegedStartService(self)
        self._port = self._getPort()

    def startService(self):
        service.Service.startService(self)
        # privilegedStartService may already have opened the port.
        if self._port is None:
            self._port = self._getPort()

    def stopService(self):
        service.Service.stopService(self)
        # TODO: if startup failed, should shutdown skip stopListening?
        # _port won't exist
        if self._port is not None:
            d = self._port.stopListening()
            del self._port
            return d

    def _getPort(self):
        from twisted.internet import reactor
        return getattr(reactor, 'listen' + self.method)(*self.args,
                                                        **self.kwargs)


class _AbstractClient(_VolatileDataService):
    """Base for generated *Client services: connect on start, drop on stop."""

    volatile = ['_connection']
    # Suffix of the reactor.connect* method; set by the generated subclass.
    method = None

    _connection = None

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def startService(self):
        service.Service.startService(self)
        self._connection = self._getConnection()

    def stopService(self):
        service.Service.stopService(self)
        if self._connection is not None:
            self._connection.disconnect()
            del self._connection

    def _getConnection(self):
        from twisted.internet import reactor
        return getattr(reactor, 'connect' + self.method)(*self.args,
                                                         **self.kwargs)


_doc = {
    'Client':
    """Connect to %(tran)s

    Call reactor.connect%(method)s when the service starts, with the
    arguments given to the constructor.
    """,
    'Server':
    """Serve %(tran)s clients

    Call reactor.listen%(method)s when the service starts, with the
    arguments given to the constructor. When the service stops,
    stop listening. See twisted.internet.interfaces for documentation
    on arguments to the reactor method.
    """,
}

import new
for tran in 'Generic TCP UNIX SSL UDP UNIXDatagram Multicast'.split():
    for side in 'Server Client'.split():
        # There is no reactor.connectMulticast.
        if tran == "Multicast" and side == "Client":
            continue
        base = globals()['_Abstract' + side]
        # GenericServer/Client use listenWith/connectWith.
        method = {'Generic': 'With'}.get(tran, tran)
        doc = _doc[side] % vars()
        klass = new.classobj(tran + side, (base,),
                             {'method': method, '__doc__': doc})
        globals()[tran + side] = klass


class TimerService(_VolatileDataService):

    """Service to periodically call a function

    Every C{step} seconds call the given function with the given arguments.
    The service starts the calls when it starts, and cancels them
    when it stops.
    """

    volatile = ['_loop']

    def __init__(self, step, callable, *args, **kwargs):
        self.step = step
        self.call = (callable, args, kwargs)

    def startService(self):
        service.Service.startService(self)
        callable, args, kwargs = self.call
        # we have to make a new LoopingCall each time we're started, because
        # an active LoopingCall remains active when serialized. If
        # LoopingCall were a _VolatileDataService, we wouldn't need to do
        # this.
        self._loop = task.LoopingCall(callable, *args, **kwargs)
        self._loop.start(self.step, now=True).addErrback(self._failed)

    def _failed(self, why):
        # make a note that the LoopingCall is no longer looping, so we don't
        # try to shut it down a second time in stopService. I think this
        # should be in LoopingCall. -warner
        self._loop.running = False
        log.err(why)

    def stopService(self):
        if self._loop.running:
            self._loop.stop()
        return service.Service.stopService(self)


class CooperatorService(service.Service):
    """
    Simple L{service.IService} which starts and stops a
    L{twisted.internet.task.Cooperator}.
    """
    def __init__(self):
        self.coop = task.Cooperator(started=False)

    def coiterate(self, iterator):
        return self.coop.coiterate(iterator)

    def startService(self):
        self.coop.start()

    def stopService(self):
        self.coop.stop()


__all__ = (['TimerService', 'CooperatorService'] +
           [tran + side
            for tran in 'Generic TCP UNIX SSL UDP UNIXDatagram Multicast'.split()
            for side in 'Server Client'.split()])
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/application/internet.py
internet.py
# System Imports
import urlparse
from zope.interface import implements
import urllib
import warnings

from twisted.internet import address
from twisted.python import log

# Sibling Imports
from twisted.web2 import resource
from twisted.web2 import responsecode
from twisted.web2 import iweb
from twisted.web2 import http


class NameVirtualHost(resource.Resource):
    """Resource in charge of dispatching requests to other resources
    based on the value of the HTTP 'Host' header.

    @param supportNested: If True domain segments will be chopped off
        until the TLD is reached or a matching virtual host is found.
        (In which case the child resource can do its own more specific
        virtual host lookup.)
    """

    supportNested = True

    def __init__(self, default=None):
        """
        @param default: The default resource to be served when encountering an
            unknown hostname.
        @type default: L{twisted.web2.iweb.IResource} or C{None}
        """
        resource.Resource.__init__(self)
        self.hosts = {}
        self.default = default

    def addHost(self, name, resrc):
        """Add a host to this virtual host. - The Fun Stuff(TM)

        This associates a host named 'name' with a resource 'resrc'::

            nvh.addHost('nevow.com', nevowDirectory)
            nvh.addHost('divmod.org', divmodDirectory)
            nvh.addHost('twistedmatrix.com', twistedMatrixDirectory)

        I told you that was fun.

        @param name: The FQDN to be matched to the 'Host' header.
        @type name: C{str}

        @param resrc: The L{twisted.web2.iweb.IResource} to be served as the
            given hostname.
        @type resource: L{twisted.web2.iweb.IResource}
        """
        self.hosts[name] = resrc

    def removeHost(self, name):
        """Remove the given host.

        @param name: The FQDN to remove.
        @type name: C{str}
        """
        del self.hosts[name]

    def locateChild(self, req, segments):
        """It's a NameVirtualHost, do you know where your children are?

        This uses locateChild magic so you don't have to mutate the request.
        """
        host = req.host.lower()

        if self.supportNested:
            # Chop leading domain segments until a match is found or only
            # the TLD remains.  ('in' replaces the deprecated has_key().)
            while host not in self.hosts and len(host.split('.')) > 1:
                host = '.'.join(host.split('.')[1:])

        # Default being None is okay, it'll turn into a 404
        return self.hosts.get(host, self.default), segments


class AutoVHostURIRewrite(object):
    """
    I do request mangling to insure that children know what host they are
    being accessed from behind apache2.

    Usage:
        - Twisted::

            root = MyResource()
            vur = vhost.AutoVHostURIRewrite(root)

        - Apache2::

            <Location /whatever/>
                ProxyPass http://localhost:8538/
                RequestHeader set X-App-Location /whatever/
            </Location>

        If the trailing / is ommitted in the second argument to
        ProxyPass VHostURIRewrite will return a 404 response code.

        If proxying HTTPS, add this to the Apache config::

            RequestHeader set X-App-Scheme https
    """
    implements(iweb.IResource)

    def __init__(self, resource, sendsRealHost=False):
        """
        @param resource: The resource to serve after mutating the request.
        @type resource: L{twisted.web2.iweb.IResource}

        @param sendsRealHost: If True then the proxy will be expected to send the
            HTTP 'Host' header that was sent by the requesting client.
        @type sendsRealHost: C{bool}
        """
        self.resource = resource
        self.sendsRealHost = sendsRealHost

    def renderHTTP(self, req):
        # This resource is never rendered directly; only traversed.
        return http.Response(responsecode.NOT_FOUND)

    def locateChild(self, req, segments):
        # All of these headers arrive as lists of raw values (or None).
        scheme = req.headers.getRawHeaders('x-app-scheme')
        if self.sendsRealHost:
            host = req.headers.getRawHeaders('host')
        else:
            host = req.headers.getRawHeaders('x-forwarded-host')

        app_location = req.headers.getRawHeaders('x-app-location')
        remote_ip = req.headers.getRawHeaders('x-forwarded-for')

        if not (host and remote_ip):
            if not host:
                warnings.warn(
                    ("No host was obtained either from Host or "
                     "X-Forwarded-Host headers.  If your proxy does not "
                     "send either of these headers use VHostURIRewrite. "
                     "If your proxy sends the real host as the Host header "
                     "use "
                     "AutoVHostURIRewrite(resrc, sendsRealHost=True)"))

            # some header unspecified => Error
            raise http.HTTPError(responsecode.BAD_REQUEST)

        host = host[0]
        remote_ip = remote_ip[0]

        if app_location:
            app_location = app_location[0]
        else:
            app_location = '/'

        if scheme:
            scheme = scheme[0]
        else:
            scheme = 'http'

        req.host, req.port = http.splitHostPort(scheme, host)
        req.scheme = scheme
        req.remoteAddr = address.IPv4Address('TCP', remote_ip, 0)

        req.prepath = app_location[1:].split('/')[:-1]
        req.path = '/' + ('/'.join([urllib.quote(s, '') for s in
                                    (req.prepath + segments)]))

        return self.resource, segments


class VHostURIRewrite(object):
    """
    I do request mangling to insure that children know what host they are
    being accessed from behind mod_proxy.

    Usage:
        - Twisted::

            root = MyResource()
            vur = vhost.VHostURIRewrite(uri='http://hostname:port/path',
                                        resource=root)
            server.Site(vur)

        - Apache::

            <VirtualHost hostname:port>
                ProxyPass /path/ http://localhost:8080/
                Servername hostname
            </VirtualHost>

        If the trailing / is ommitted in the second argument to
        ProxyPass VHostURIRewrite will return a 404 response code.

        uri must be a fully specified uri complete with
        scheme://hostname/path/
    """
    implements(iweb.IResource)

    def __init__(self, uri, resource):
        """
        @param uri: The URI to be used for mutating the request.
            This MUST include scheme://hostname/path.
        @type uri: C{str}

        @param resource: The resource to serve after mutating the request.
        @type resource: L{twisted.web2.iweb.IResource}
        """
        self.resource = resource

        (self.scheme, self.host, self.path,
         params, querystring, fragment) = urlparse.urlparse(uri)
        if params or querystring or fragment:
            raise ValueError("Must not specify params, query args, or fragment to VHostURIRewrite")

        # Drop the leading empty segment and the trailing segment; each
        # remaining prepath segment is unquoted.
        self.path = map(urllib.unquote, self.path[1:].split('/'))[:-1]
        self.host, self.port = http.splitHostPort(self.scheme, self.host)

    def renderHTTP(self, req):
        # This resource is never rendered directly; only traversed.
        return http.Response(responsecode.NOT_FOUND)

    def locateChild(self, req, segments):
        req.scheme = self.scheme
        req.host = self.host
        req.port = self.port
        req.prepath = self.path[:]
        req.path = '/' + ('/'.join([urllib.quote(s, '') for s in
                                    (req.prepath + segments)]))
        # print req.prepath, segments, req.postpath, req.path

        return self.resource, segments


__all__ = ['VHostURIRewrite', 'AutoVHostURIRewrite', 'NameVirtualHost']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/vhost.py
vhost.py
# System Imports
import xmlrpclib

# Sibling Imports
from twisted.web2 import resource, stream
from twisted.web2 import responsecode, http, http_headers
from twisted.internet import defer
from twisted.python import log, reflect

# Useful so people don't need to import xmlrpclib directly
Fault = xmlrpclib.Fault
Binary = xmlrpclib.Binary
Boolean = xmlrpclib.Boolean
DateTime = xmlrpclib.DateTime


class NoSuchFunction(Fault):
    """There is no function by the given name."""
    pass


class XMLRPC(resource.Resource):
    """A resource that implements XML-RPC.

    You probably want to connect this to '/RPC2'.

    Methods published can return XML-RPC serializable results, Faults,
    Binary, Boolean, DateTime, Deferreds, or Handler instances.

    By default methods beginning with 'xmlrpc_' are published.

    Sub-handlers for prefixed methods (e.g., system.listMethods)
    can be added with putSubHandler. By default, prefixes are
    separated with a '.'. Override self.separator to change this.
    """

    # Error codes for Twisted, if they conflict with yours then
    # modify them at runtime.
    NOT_FOUND = 8001
    FAILURE = 8002

    separator = '.'

    def __init__(self):
        resource.Resource.__init__(self)
        self.subHandlers = {}

    def putSubHandler(self, prefix, handler):
        """Register handler for method names starting with prefix + separator."""
        self.subHandlers[prefix] = handler

    def getSubHandler(self, prefix):
        """Return the handler for prefix, or None."""
        return self.subHandlers.get(prefix, None)

    def getSubHandlerPrefixes(self):
        """Return the list of registered sub-handler prefixes."""
        return self.subHandlers.keys()

    def render(self, request):
        # For GET/HEAD: Return an error message
        s = ("<html><head><title>XML-RPC responder</title></head>"
             "<body><h1>XML-RPC responder</h1>POST your XML-RPC here.</body></html>")
        return http.Response(responsecode.OK,
            {'content-type': http_headers.MimeType('text', 'html')},
            s)

    def http_POST(self, request):
        # Feed the request body into the XML-RPC parser as it streams in,
        # then dispatch and render the result (or a Fault).
        parser, unmarshaller = xmlrpclib.getparser()
        deferred = stream.readStream(request.stream, parser.feed)
        deferred.addCallback(lambda x: self._cbDispatch(
            request, parser, unmarshaller))
        deferred.addErrback(self._ebRender)
        deferred.addCallback(self._cbRender, request)
        return deferred

    def _cbDispatch(self, request, parser, unmarshaller):
        parser.close()
        args, functionPath = unmarshaller.close(), unmarshaller.getmethodname()

        function = self.getFunction(functionPath)
        return defer.maybeDeferred(function, request, *args)

    def _cbRender(self, result, request):
        # Non-Fault results must be wrapped in a 1-tuple for dumps().
        if not isinstance(result, Fault):
            result = (result,)
        try:
            s = xmlrpclib.dumps(result, methodresponse=1)
        except Exception:
            # Narrowed from a bare 'except:': serialization failures are
            # reported as a Fault, while SystemExit/KeyboardInterrupt
            # still propagate.
            f = Fault(self.FAILURE, "can't serialize output")
            s = xmlrpclib.dumps(f, methodresponse=1)
        return http.Response(responsecode.OK,
            {'content-type': http_headers.MimeType('text', 'xml')},
            s)

    def _ebRender(self, failure):
        if isinstance(failure.value, Fault):
            return failure.value
        log.err(failure)
        return Fault(self.FAILURE, "error")

    def getFunction(self, functionPath):
        """Given a string, return a function, or raise NoSuchFunction.

        This returned function will be called, and should return the result
        of the call, a Deferred, or a Fault instance.

        Override in subclasses if you want your own policy. The default
        policy is that given functionPath 'foo', return the method at
        self.xmlrpc_foo, i.e. getattr(self, "xmlrpc_" + functionPath).
        If functionPath contains self.separator, the sub-handler for
        the initial prefix is used to search for the remaining path.
        """
        if self.separator in functionPath:
            # Delegate 'prefix.rest' lookups to the registered sub-handler.
            prefix, functionPath = functionPath.split(self.separator, 1)
            handler = self.getSubHandler(prefix)
            if handler is None:
                raise NoSuchFunction(self.NOT_FOUND,
                                     "no such subHandler %s" % prefix)
            return handler.getFunction(functionPath)

        f = getattr(self, "xmlrpc_%s" % functionPath, None)
        if not f:
            raise NoSuchFunction(self.NOT_FOUND,
                                 "function %s not found" % functionPath)
        elif not callable(f):
            raise NoSuchFunction(self.NOT_FOUND,
                                 "function %s not callable" % functionPath)
        else:
            return f

    def _listFunctions(self):
        """Return a list of the names of all xmlrpc methods."""
        return reflect.prefixedMethodNames(self.__class__, 'xmlrpc_')


class XMLRPCIntrospection(XMLRPC):
    """Implement the XML-RPC Introspection API.

    By default, the methodHelp method returns the 'help' method attribute,
    if it exists, otherwise the __doc__ method attribute, if it exists,
    otherwise the empty string.

    To enable the methodSignature method, add a 'signature' method attribute
    containing a list of lists. See methodSignature's documentation for the
    format. Note the type strings should be XML-RPC types, not Python types.
    """

    def __init__(self, parent):
        """Implement Introspection support for an XMLRPC server.

        @param parent: the XMLRPC server to add Introspection support to.
        """
        XMLRPC.__init__(self)
        self._xmlrpc_parent = parent

    def xmlrpc_listMethods(self, request):
        """Return a list of the method names implemented by this server."""
        functions = []
        # Breadth-first walk over the parent and all its sub-handlers,
        # accumulating dotted method names.
        todo = [(self._xmlrpc_parent, '')]
        while todo:
            obj, prefix = todo.pop(0)
            functions.extend([prefix + name for name in obj._listFunctions()])
            todo.extend([(obj.getSubHandler(name),
                          prefix + name + obj.separator)
                         for name in obj.getSubHandlerPrefixes()])
        functions.sort()
        return functions

    xmlrpc_listMethods.signature = [['array']]

    def xmlrpc_methodHelp(self, request, method):
        """Return a documentation string describing the use of the given
        method.
        """
        method = self._xmlrpc_parent.getFunction(method)
        return (getattr(method, 'help', None)
                or getattr(method, '__doc__', None) or '')

    xmlrpc_methodHelp.signature = [['string', 'string']]

    def xmlrpc_methodSignature(self, request, method):
        """Return a list of type signatures.

        Each type signature is a list of the form [rtype, type1, type2, ...]
        where rtype is the return type and typeN is the type of the Nth
        argument. If no signature information is available, the empty
        string is returned.
        """
        method = self._xmlrpc_parent.getFunction(method)
        return getattr(method, 'signature', None) or ''

    xmlrpc_methodSignature.signature = [['array', 'string'],
                                        ['string', 'string']]


def addIntrospection(xmlrpc):
    """Add Introspection support to an XMLRPC server.

    @param xmlrpc: The xmlrpc server to add Introspection support to.
    """
    xmlrpc.putSubHandler('system', XMLRPCIntrospection(xmlrpc))


__all__ = ["XMLRPC", "NoSuchFunction", "Fault"]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/xmlrpc.py
xmlrpc.py
from zope.interface import Attribute, Interface, interface

# server.py interfaces

class IResource(Interface):
    """
    An HTTP resource.

    I serve 2 main purposes: one is to provide a standard representation for
    what HTTP specification calls an 'entity', and the other is to provide an
    mechanism for mapping URLs to content.
    """

    def locateChild(req, segments):
        """Locate another object which can be adapted to IResource.

        @return: A 2-tuple of (resource, remaining-path-segments), or a
            deferred which will fire the above.

        Causes the object publishing machinery to continue on with specified
        resource and segments, calling the appropriate method on the specified
        resource.

        If you return (self, L{server.StopTraversal}), this instructs web2 to
        immediately stop the lookup stage, and switch to the rendering stage,
        leaving the remaining path alone for your render function to handle.
        """

    def renderHTTP(req):
        """Return an IResponse or a deferred which will fire an IResponse.
        This response will be written to the web browser which initiated the
        request.
        """

# Is there a better way to do this than this funky extra class?
_default = object()

class SpecialAdaptInterfaceClass(interface.InterfaceClass):
    # A special adapter for IResource to handle the extra step of adapting
    # from IOldNevowResource-providing resources.
    #
    # Lookup order: first try the normal zope.interface adaptation to
    # IResource; if that fails, try adapting to the legacy nevow-style
    # interface and then wrap the result back into an IResource.
    def __call__(self, other, alternate=_default):
        result = super(SpecialAdaptInterfaceClass, self).__call__(other, alternate)
        if result is not alternate:
            return result
        result = IOldNevowResource(other, alternate)
        if result is not alternate:
            result = IResource(result)
            return result
        if alternate is not _default:
            return alternate
        raise TypeError('Could not adapt', other, self)

# Replace IResource's metaclass so that calling IResource(obj) goes through
# the two-step adaptation above.
IResource.__class__ = SpecialAdaptInterfaceClass

class IOldNevowResource(Interface):
    # Shared interface with inevow.IResource
    """
    I am a web resource.
    """

    def locateChild(ctx, segments):
        """Locate another object which can be adapted to IResource.

        Return a tuple of resource, path segments
        """

    def renderHTTP(ctx):
        """Return a string or a deferred which will fire a string. This
        string will be written to the web browser which initiated this
        request.

        Unlike iweb.IResource, this expects the incoming data to have already
        been read and parsed into request.args and request.content, and
        expects to return a string instead of a response object.
        """

class ICanHandleException(Interface):
    # Shared interface with inevow.ICanHandleException
    def renderHTTP_exception(request, failure):
        """Render an exception to the given request object.
        """

    def renderInlineException(request, reason):
        """Return stan representing the exception, to be printed in the page,
        not replacing the page."""


# http.py interfaces

class IResponse(Interface):
    """I'm a response."""
    code = Attribute("The HTTP response code")
    headers = Attribute("A http_headers.Headers instance of headers to send")
    stream = Attribute("A stream.IByteStream of outgoing data, or else None.")

class IRequest(Interface):
    """I'm a request for a web resource.
    """
    method = Attribute("The HTTP method from the request line, e.g. GET")
    uri = Attribute("The raw URI from the request line. May or may not include host.")
    clientproto = Attribute("Protocol from the request line, e.g. HTTP/1.1")
    headers = Attribute("A http_headers.Headers instance of incoming headers.")
    stream = Attribute("A stream.IByteStream of incoming data.")

    def writeResponse(response):
        """Write an IResponse object to the client"""

    chanRequest = Attribute("The ChannelRequest. I wonder if this is public really?")

class IOldRequest(Interface):
    # Shared interface with inevow.ICurrentSegments
    """An old HTTP request.

    Subclasses should override the process() method to determine how the
    request will be processed.

    @ivar method: The HTTP method that was used.
    @ivar uri: The full URI that was requested (includes arguments).
    @ivar path: The path only (arguments not included).
    @ivar args: All of the arguments, including URL and POST arguments.
    @type args: A mapping of strings (the argument names) to lists of values.
                i.e., ?foo=bar&foo=baz&quux=spam results in
                {'foo': ['bar', 'baz'], 'quux': ['spam']}.
    @ivar received_headers: All received headers
    """
    # Methods for received request
    def getHeader(key):
        """Get a header that was sent from the network.
        """

    def getCookie(key):
        """Get a cookie that was sent from the network.
        """

    def getAllHeaders():
        """Return dictionary of all headers the request received."""

    def getRequestHostname():
        """Get the hostname that the user passed in to the request.

        This will either use the Host: header (if it is available) or the
        host we are listening on if the header is unavailable.
        """

    def getHost():
        """Get my originally requesting transport's host.

        Don't rely on the 'transport' attribute, since Request objects may be
        copied remotely.  For information on this method's return value, see
        twisted.internet.tcp.Port.
        """

    # The following accessors are part of the legacy request contract; the
    # interface declares them without documentation.
    def getClientIP():
        pass
    def getClient():
        pass
    def getUser():
        pass
    def getPassword():
        pass
    def isSecure():
        pass
    def getSession(sessionInterface = None):
        pass
    def URLPath():
        pass
    def prePathURL():
        pass

    def rememberRootURL():
        """
        Remember the currently-processed part of the URL for later
        recalling.
        """

    def getRootURL():
        """
        Get a previously-remembered URL.
        """

    # Methods for outgoing request
    def finish():
        """We are finished writing data."""

    def write(data):
        """
        Write some data as a result of an HTTP request.  The first
        time this is called, it writes out response data.
        """

    def addCookie(k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
        """Set an outgoing HTTP cookie.

        In general, you should consider using sessions instead of cookies, see
        twisted.web.server.Request.getSession and the
        twisted.web.server.Session class for details.
        """

    def setResponseCode(code, message=None):
        """Set the HTTP response code.
        """

    def setHeader(k, v):
        """Set an outgoing HTTP header.
        """

    def redirect(url):
        """Utility function that does a redirect.

        The request should have finish() called after this.
        """

    def setLastModified(when):
        """Set the X{Last-Modified} time for the response to this request.

        If I am called more than once, I ignore attempts to set Last-Modified
        earlier, only replacing the Last-Modified time if it is to a later
        value.

        If I am a conditional request, I may modify my response code to
        L{NOT_MODIFIED} if appropriate for the time given.

        @param when: The last time the resource being returned was modified,
            in seconds since the epoch.
        @type when: number

        @return: If I am a X{If-Modified-Since} conditional request and the
            time given is not newer than the condition, I return
            L{http.CACHED<CACHED>} to indicate that you should write no body.
            Otherwise, I return a false value.
        """

    def setETag(etag):
        """Set an X{entity tag} for the outgoing response.

        That's \"entity tag\" as in the HTTP/1.1 X{ETag} header, \"used for
        comparing two or more entities from the same requested resource.\"

        If I am a conditional request, I may modify my response code to
        L{NOT_MODIFIED} or L{PRECONDITION_FAILED}, if appropriate for the tag
        given.

        @param etag: The entity tag for the resource being returned.
        @type etag: string

        @return: If I am a X{If-None-Match} conditional request and the tag
            matches one in the request, I return L{http.CACHED<CACHED>} to
            indicate that you should write no body.  Otherwise, I return a
            false value.
        """

    def setHost(host, port, ssl=0):
        """Change the host and port the request thinks it's using.

        This method is useful for working with reverse HTTP proxies (e.g.
        both Squid and Apache's mod_proxy can do this), when the address the
        HTTP client is using is different than the one we're listening on.

        For example, Apache may be listening on https://www.example.com, and
        then forwarding requests to http://localhost:8080, but we don't want
        HTML produced by Twisted to say 'http://localhost:8080', they should
        say 'https://www.example.com', so we do::

            request.setHost('www.example.com', 443, ssl=1)

        This method is experimental.
        """

class IChanRequestCallbacks(Interface):
    """The bits that are required of a Request for interfacing with a
    IChanRequest object"""

    def __init__(chanRequest, command, path, version, contentLength, inHeaders):
        """Create a new Request object.

        @param chanRequest: the IChanRequest object creating this request
        @param command: the HTTP command e.g. GET
        @param path: the HTTP path e.g. /foo/bar.html
        @param version: the parsed HTTP version e.g. (1,1)
        @param contentLength: how much data to expect, or None if unknown
        @param inHeaders: the request headers"""

    def process():
        """Process the request. Called as soon as it's possibly reasonable to
        return a response. handleContentComplete may or may not have been
        called already."""

    def handleContentChunk(data):
        """Called when a piece of incoming data has been received."""

    def handleContentComplete():
        """Called when the incoming data stream is finished."""

    def connectionLost(reason):
        """Called if the connection was lost."""


class IChanRequest(Interface):
    def writeIntermediateResponse(code, headers=None):
        """Write a non-terminating response.

        Intermediate responses cannot contain data.

        If the channel does not support intermediate responses, do nothing.

        @ivar code: The response code. Should be in the 1xx range.
        @type code: int
        @ivar headers: the headers to send in the response
        @type headers: C{twisted.web.http_headers.Headers}
        """
        pass

    def writeHeaders(code, headers):
        """Write a final response.

        @param code: The response code. Should not be in the 1xx range.
        @type code: int

        @param headers: the headers to send in the response. They will be
            augmented with any connection-oriented headers as necessary for
            the protocol.
        @type headers: C{twisted.web.http_headers.Headers}
        """
        pass

    def write(data):
        """Write some data.

        @param data: the data bytes
        @type data: str
        """
        pass

    def finish():
        """Finish the request, and clean up the connection if necessary.
        """
        pass

    def abortConnection():
        """Forcibly abort the connection without cleanly closing.

        Use if, for example, you can't write all the data you promised.
        """
        pass

    def registerProducer(producer, streaming):
        """Register a producer with the standard API."""
        pass

    def unregisterProducer():
        """Unregister a producer."""
        pass

    def getHostInfo():
        """Returns a tuple of (address, socket user connected to, boolean,
        was it secure).

        Note that this should not necessarily always return the actual local
        socket information from twisted. E.g. in a CGI, it should use the
        variables coming from the invoking script.
        """

    def getRemoteHost():
        """Returns an address of the remote host.

        Like getHostInfo, this information may come from the real socket, or
        may come from additional information, depending on the transport.
        """

    persistent = Attribute("""Whether this request supports HTTP connection persistence. May be set to False. Should not be set to other values.""")


class ISite(Interface):
    pass

__all__ = ['ICanHandleException', 'IChanRequest', 'IChanRequestCallbacks', 'IOldNevowResource', 'IOldRequest', 'IRequest', 'IResource', 'IResponse', 'ISite']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/iweb.py
iweb.py
# System Imports
from zope.interface import implements

from twisted.web2 import iweb, http, server, responsecode

class RenderMixin(object):
    """
    Mix-in class for L{iweb.IResource} which provides a dispatch mechanism for
    handling HTTP methods.
    """
    def allowedMethods(self):
        """
        @return: A tuple of HTTP methods that are allowed to be invoked on
            this resource.
        """
        # Derived lazily from the http_* method names and cached on the
        # instance.
        if not hasattr(self, "_allowed_methods"):
            self._allowed_methods = tuple([name[5:] for name in dir(self) if name.startswith('http_')])
        return self._allowed_methods

    def checkPreconditions(self, request):
        """
        Checks all preconditions imposed by this resource upon a request made
        against it.

        @param request: the request to process.
        @raise http.HTTPError: if any precondition fails.
        @return: C{None} or a deferred whose callback value is C{request}.
        """
        #
        # http.checkPreconditions() gets called by the server after every
        # GET or HEAD request.
        #
        # For other methods, we need to know to bail out before request
        # processing, especially for methods that modify server state (eg. PUT).
        # We also would like to do so even for methods that don't, if those
        # methods might be expensive to process. We're assuming that GET and
        # HEAD are not expensive.
        #
        if request.method not in ("GET", "HEAD"):
            http.checkPreconditions(request)

        # Check per-method preconditions
        method = getattr(self, "preconditions_" + request.method, None)
        if method:
            return method(request)

    def renderHTTP(self, request):
        """
        See L{iweb.IResource.renderHTTP}.

        This implementation will dispatch the given C{request} to another
        method of C{self} named C{http_}METHOD, where METHOD is the HTTP
        method used by C{request} (eg. C{http_GET}, C{http_POST}, etc.).

        Generally, a subclass should implement those methods instead of
        overriding this one.

        C{http_*} methods are expected to provide the same interface and
        return the same results as L{iweb.IResource}C{.renderHTTP} (and
        therefore this method).

        C{etag} and C{last-modified} are added to the response returned by
        the C{http_*} header, if known.

        If an appropriate C{http_*} method is not found, a
        L{responsecode.NOT_ALLOWED}-status response is returned, with an
        appropriate C{allow} header.

        @param request: the request to process.
        @return: an object adaptable to L{iweb.IResponse}.
        """
        method = getattr(self, 'http_' + request.method, None)
        if not method:
            # No handler for this HTTP method: advertise what IS supported.
            response = http.Response(responsecode.NOT_ALLOWED)
            response.headers.setHeader("allow", self.allowedMethods())
            return response

        d = self.checkPreconditions(request)
        if d is None:
            return method(request)
        else:
            # checkPreconditions returned a deferred whose callback value is
            # the request, so the handler can be chained directly onto it.
            return d.addCallback(method)

    def http_OPTIONS(self, request):
        """
        Respond to a OPTIONS request.

        @param request: the request to process.
        @return: an object adaptable to L{iweb.IResponse}.
        """
        response = http.Response(responsecode.OK)
        response.headers.setHeader("allow", self.allowedMethods())
        return response

    def http_TRACE(self, request):
        """
        Respond to a TRACE request.

        @param request: the request to process.
        @return: an object adaptable to L{iweb.IResponse}.
        """
        return server.doTrace(request)

    def http_HEAD(self, request):
        """
        Respond to a HEAD request.

        @param request: the request to process.
        @return: an object adaptable to L{iweb.IResponse}.
        """
        # HEAD is handled as GET; the server layer discards the body.
        return self.http_GET(request)

    def http_GET(self, request):
        """
        Respond to a GET request.

        This implementation validates that the request body is empty and then
        dispatches the given C{request} to L{render} and returns its result.

        @param request: the request to process.
        @return: an object adaptable to L{iweb.IResponse}.
        """
        # GET requests are not expected to carry a body.
        if request.stream.length != 0:
            return responsecode.REQUEST_ENTITY_TOO_LARGE
        return self.render(request)

    def render(self, request):
        """
        Subclasses should implement this method to do page rendering.
        See L{http_GET}.

        @param request: the request to process.
        @return: an object adaptable to L{iweb.IResponse}.
        """
        raise NotImplementedError("Subclass must implement render method.")

class Resource(RenderMixin):
    """
    An L{iweb.IResource} implementation with some convenient mechanisms for
    locating children.
    """
    implements(iweb.IResource)

    # When True, this resource represents a "directory" and expects its URL
    # to end with a trailing slash (see child_ and http_GET below).
    addSlash = False

    def locateChild(self, request, segments):
        """
        Locates a child resource of this resource.

        @param request: the request to process.
        @param segments: a sequence of URL path segments.
        @return: a tuple of C{(child, segments)} containing the child
            of this resource which matches one or more of the given
            C{segments} in sequence, and a list of remaining segments.
        """
        # First look for a statically registered child (child_<name>
        # attribute).  It may either be adaptable to IResource, or be a
        # callable taking the request.
        w = getattr(self, 'child_%s' % (segments[0], ), None)
        if w:
            r = iweb.IResource(w, None)
            if r:
                return r, segments[1:]
            return w(request), segments[1:]

        # Fall back to the dynamic childFactory hook, if defined.
        factory = getattr(self, 'childFactory', None)
        if factory is not None:
            r = factory(request, segments[0])
            if r:
                return r, segments[1:]

        return None, []

    def child_(self, request):
        """
        This method locates a child with a trailing C{"/"} in the URL.

        @param request: the request to process.
        """
        if self.addSlash and len(request.postpath) == 1:
            return self
        return None

    def putChild(self, path, child):
        """
        Register a static child.

        This implementation registers children by assigning them to
        attributes with a C{child_} prefix. C{resource.putChild("foo",
        child)} is therefore same as C{o.child_foo = child}.

        @param path: the name of the child to register. You almost certainly
            don't want C{"/"} in C{path}. If you want to add a "directory"
            resource (e.g. C{/foo/}) specify C{path} as C{""}.
        @param child: an object adaptable to L{iweb.IResource}.
        """
        setattr(self, 'child_%s' % (path, ), child)

    def http_GET(self, request):
        if self.addSlash and request.prepath[-1] != '':
            # If this is a directory-ish resource...
            return http.RedirectResponse(request.unparseURL(path=request.path+'/'))
        return super(Resource, self).http_GET(request)

class PostableResource(Resource):
    """
    A L{Resource} capable of handling the POST request method.
    """
    def http_POST(self, request):
        """
        Respond to a POST request.
        Reads and parses the incoming body data then calls L{render}.

        @param request: the request to process.
        @return: an object adaptable to L{iweb.IResponse}.
        """
        return server.parsePOSTData(request).addCallback(
            lambda res: self.render(request))

class LeafResource(RenderMixin):
    """
    A L{Resource} with no children.
    """
    implements(iweb.IResource)

    def locateChild(self, request, segments):
        # StopTraversal tells the server to stop the lookup stage and render
        # this resource, leaving the remaining segments alone.
        return self, server.StopTraversal

class RedirectResource(LeafResource):
    """
    A L{LeafResource} which always performs a redirect.
    """
    implements(iweb.IResource)

    def __init__(self, *args, **kwargs):
        """
        Parameters are URL components and are the same as those for
        L{urlparse.urlunparse}. URL components which are not specified will
        default to the corresponding component of the URL of the request
        being redirected.
        """
        self._args = args
        self._kwargs = kwargs

    def renderHTTP(self, request):
        return http.RedirectResponse(request.unparseURL(*self._args, **self._kwargs))

class WrapperResource(object):
    """
    An L{iweb.IResource} implementation which wraps a L{RenderMixin} instance
    and provides a hook in which a subclass can implement logic that is called
    before request processing on the contained L{Resource}.
    """
    implements(iweb.IResource)

    def __init__(self, resource):
        self.resource=resource

    def hook(self, request):
        """
        Override this method in order to do something before passing control
        on to the wrapped resource's C{renderHTTP} and C{locateChild} methods.

        @return: None or a L{Deferred}. If a deferred object is returned,
            it's value is ignored, but C{renderHTTP} and C{locateChild} are
            chained onto the deferred as callbacks.
        """
        raise NotImplementedError()

    def locateChild(self, request, segments):
        x = self.hook(request)
        if x is not None:
            # Wait for the hook's deferred, then continue with the wrapped
            # resource; the deferred's result itself is discarded.
            return x.addCallback(lambda data: (self.resource, segments))
        return self.resource, segments

    def renderHTTP(self, request):
        x = self.hook(request)
        if x is not None:
            return x.addCallback(lambda data: self.resource)
        return self.resource


__all__ = ['RenderMixin', 'Resource', 'PostableResource', 'LeafResource', 'WrapperResource']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/resource.py
resource.py
import os, threading import Queue from zope.interface import implements from twisted.internet import defer from twisted.python import log, failure from twisted.web2 import http from twisted.web2 import iweb from twisted.web2 import server from twisted.web2 import stream from twisted.web2.twcgi import createCGIEnvironment class AlreadyStartedResponse(Exception): pass # This isn't a subclass of resource.Resource, because it shouldn't do # any method-specific actions at all. All that stuff is totally up to # the contained wsgi application class WSGIResource(object): """ A web2 Resource which wraps the given WSGI application callable. The WSGI application will be called in a separate thread (using the reactor threadpool) whenever a request for this resource or any lower part of the url hierarchy is received. """ implements(iweb.IResource) def __init__(self, application): self.application = application def renderHTTP(self, req): from twisted.internet import reactor # Do stuff with WSGIHandler. handler = WSGIHandler(self.application, req) # Get deferred d = handler.responseDeferred # Run it in a thread reactor.callInThread(handler.run) return d def locateChild(self, request, segments): return self, server.StopTraversal def callInReactor(__f, *__a, **__kw): from twisted.internet import reactor queue = Queue.Queue() reactor.callFromThread(__callFromThread, queue, __f, __a, __kw) result = queue.get() if isinstance(result, failure.Failure): result.raiseException() return result def __callFromThread(queue, f, a, kw): result = defer.maybeDeferred(f, *a, **kw) result.addBoth(queue.put) class InputStream(object): """ This class implements the 'wsgi.input' object. The methods are expected to have the same behavior as the same-named methods for python's builtin file object. """ def __init__(self, newstream): # Called in IO thread self.stream = stream.BufferedStream(newstream) def read(self, size=None): """ Read at most size bytes from the input, or less if EOF is encountered. 
If size is ommitted or negative, read until EOF. """ # Called in application thread if size < 0: size = None return callInReactor(self.stream.readExactly, size) def readline(self, size=None): """ Read a line, delimited by a newline. If the stream reaches EOF or size bytes have been read before reaching a newline (if size is given), the partial line is returned. COMPATIBILITY NOTE: the size argument is excluded from the WSGI specification, but is provided here anyhow, because useful libraries such as python stdlib's cgi.py assume their input file-like-object supports readline with a size argument. If you use it, be aware your application may not be portable to other conformant WSGI servers. """ # Called in application thread if size < 0: # E.g. -1, which is the default readline size for *some* # other file-like-objects... size = None return callInReactor(self.stream.readline, '\n', size = size) def readlines(self, hint=None): """ Read until EOF, collecting all lines in a list, and returns that list. The hint argument is ignored (as is allowed in the API specification) """ # Called in application thread data = self.read() lines = data.split('\n') last = lines.pop() lines = [s+'\n' for s in lines] if last != '': lines.append(last) return lines def __iter__(self): """ Returns an iterator, each iteration of which returns the result of readline(), and stops when readline() returns an empty string. """ while 1: line = self.readline() if not line: return yield line class ErrorStream(object): """ This class implements the 'wsgi.error' object. 
""" def flush(self): # Called in application thread return def write(self, s): # Called in application thread log.msg("WSGI app error: "+s, isError=True) def writelines(self, seq): # Called in application thread s = ''.join(seq) log.msg("WSGI app error: "+s, isError=True) class WSGIHandler(object): headersSent = False stopped = False stream = None def __init__(self, application, request): # Called in IO thread self.setupEnvironment(request) self.application = application self.request = request self.response = None self.responseDeferred = defer.Deferred() def setupEnvironment(self, request): # Called in IO thread env = createCGIEnvironment(request) env['wsgi.version'] = (1, 0) env['wsgi.url_scheme'] = env['REQUEST_SCHEME'] env['wsgi.input'] = InputStream(request.stream) env['wsgi.errors'] = ErrorStream() env['wsgi.multithread'] = True env['wsgi.multiprocess'] = False env['wsgi.run_once'] = False env['wsgi.file_wrapper'] = FileWrapper self.environment = env def startWSGIResponse(self, status, response_headers, exc_info=None): # Called in application thread if exc_info is not None: try: if self.headersSent: raise exc_info[0], exc_info[1], exc_info[2] finally: exc_info = None elif self.response is not None: raise AlreadyStartedResponse, 'startWSGIResponse(%r)' % status status = int(status.split(' ')[0]) self.response = http.Response(status) for key, value in response_headers: self.response.headers.addRawHeader(key, value) return self.write def run(self): from twisted.internet import reactor # Called in application thread try: result = self.application(self.environment, self.startWSGIResponse) self.handleResult(result) except: if not self.headersSent: reactor.callFromThread(self.__error, failure.Failure()) else: reactor.callFromThread(self.stream.finish, failure.Failure()) def __callback(self): # Called in IO thread self.responseDeferred.callback(self.response) self.responseDeferred = None def __error(self, f): # Called in IO thread self.responseDeferred.errback(f) 
self.responseDeferred = None def write(self, output): # Called in application thread from twisted.internet import reactor if self.response is None: raise RuntimeError( "Application didn't call startResponse before writing data!") if not self.headersSent: self.stream=self.response.stream=stream.ProducerStream() self.headersSent = True # threadsafe event object to communicate paused state. self.unpaused = threading.Event() # After this, we cannot touch self.response from this # thread any more def _start(): # Called in IO thread self.stream.registerProducer(self, True) self.__callback() # Notify application thread to start writing self.unpaused.set() reactor.callFromThread(_start) # Wait for unpaused to be true self.unpaused.wait() reactor.callFromThread(self.stream.write, output) def writeAll(self, result): # Called in application thread from twisted.internet import reactor if not self.headersSent: if self.response is None: raise RuntimeError( "Application didn't call startResponse before writing data!") l = 0 for item in result: l += len(item) self.response.stream=stream.ProducerStream(length=l) self.response.stream.buffer = list(result) self.response.stream.finish() reactor.callFromThread(self.__callback) else: # Has already been started, cannot replace the stream def _write(): # Called in IO thread for s in result: self.stream.write(s) self.stream.finish() reactor.callFromThread(_write) def handleResult(self, result): # Called in application thread try: from twisted.internet import reactor if (isinstance(result, FileWrapper) and hasattr(result.filelike, 'fileno') and not self.headersSent): if self.response is None: raise RuntimeError( "Application didn't call startResponse before writing data!") self.headersSent = True # Make FileStream and output it. We make a new file # object from the fd, just in case the original one # isn't an actual file object. 
self.response.stream = stream.FileStream( os.fdopen(os.dup(result.filelike.fileno()))) reactor.callFromThread(self.__callback) return if type(result) in (list,tuple): # If it's a list or tuple (exactly, not subtype!), # then send the entire thing down to Twisted at once, # and free up this thread to do other work. self.writeAll(result) return # Otherwise, this thread has to keep running to provide the # data. for data in result: if self.stopped: return self.write(data) if not self.headersSent: if self.response is None: raise RuntimeError( "Application didn't call startResponse, and didn't send any data!") self.headersSent = True reactor.callFromThread(self.__callback) else: reactor.callFromThread(self.stream.finish) finally: if hasattr(result,'close'): result.close() def pauseProducing(self): # Called in IO thread self.unpaused.set() def resumeProducing(self): # Called in IO thread self.unpaused.clear() def stopProducing(self): self.stopped = True class FileWrapper(object): """ Wrapper to convert file-like objects to iterables, to implement the optional 'wsgi.file_wrapper' object. """ def __init__(self, filelike, blksize=8192): self.filelike = filelike self.blksize = blksize if hasattr(filelike,'close'): self.close = filelike.close def __iter__(self): return self def next(self): data = self.filelike.read(self.blksize) if data: return data raise StopIteration __all__ = ['WSGIResource']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/wsgi.py
wsgi.py
# import traceback; log.msg(''.join(traceback.format_stack())) # system imports import socket import time import cgi # twisted imports from twisted.internet import interfaces, error from twisted.python import log, components from zope.interface import implements # sibling imports from twisted.web2 import responsecode from twisted.web2 import http_headers from twisted.web2 import iweb from twisted.web2 import stream from twisted.web2.stream import IByteStream defaultPortForScheme = {'http': 80, 'https':443, 'ftp':21} def splitHostPort(scheme, hostport): """Split the host in "host:port" format into host and port fields. If port was not specified, use the default for the given scheme, if known. Returns a tuple of (hostname, portnumber).""" # Split hostport into host and port hostport = hostport.split(':', 1) try: if len(hostport) == 2: return hostport[0], int(hostport[1]) except ValueError: pass return hostport[0], defaultPortForScheme.get(scheme, 0) def parseVersion(strversion): """Parse version strings of the form Protocol '/' Major '.' Minor. E.g. 'HTTP/1.1'. Returns (protocol, major, minor). Will raise ValueError on bad syntax.""" proto, strversion = strversion.split('/') major, minor = strversion.split('.') major, minor = int(major), int(minor) if major < 0 or minor < 0: raise ValueError("negative number") return (proto.lower(), major, minor) class HTTPError(Exception): def __init__(self, codeOrResponse): """An Exception for propagating HTTP Error Responses. @param codeOrResponse: The numeric HTTP code or a complete http.Response object. @type codeOrResponse: C{int} or L{http.Response} """ Exception.__init__(self) self.response = iweb.IResponse(codeOrResponse) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.response) class Response(object): """An object representing an HTTP Response to be sent to the client. 
""" implements(iweb.IResponse) code = responsecode.OK headers = None stream = None def __init__(self, code=None, headers=None, stream=None): """ @param code: The HTTP status code for this Response @type code: C{int} @param headers: Headers to be sent to the client. @type headers: C{dict}, L{twisted.web2.http_headers.Headers}, or C{None} @param stream: Content body to send to the HTTP client @type stream: L{twisted.web2.stream.IByteStream} """ if code is not None: self.code = int(code) if headers is not None: if isinstance(headers, dict): headers = http_headers.Headers(headers) self.headers=headers else: self.headers = http_headers.Headers() if stream is not None: self.stream = IByteStream(stream) def __repr__(self): if self.stream is None: streamlen = None else: streamlen = self.stream.length return "<%s.%s code=%d, streamlen=%s>" % (self.__module__, self.__class__.__name__, self.code, streamlen) class StatusResponse (Response): """ A L{Response} object which simply contains a status code and a description of what happened. """ def __init__(self, code, description, title=None): """ @param code: a response code in L{responsecode.RESPONSES}. @param description: a string description. @param title: the message title. If not specified or C{None}, defaults to C{responsecode.RESPONSES[code]}. 
def checkPreconditions(request, response=None, entityExists=True, etag=None, lastModified=None):
    """Check to see if this request passes the conditional checks specified
    by the client.

    May raise an HTTPError with result codes L{NOT_MODIFIED} or
    L{PRECONDITION_FAILED}, as appropriate.  This function is called
    automatically as an output filter for GET and HEAD requests.

    With GET/HEAD, it is not important for the precondition check to occur
    before doing the action, as the method is non-destructive.  However, if
    you are implementing other request methods, like PUT for your resource,
    you will need to call this after determining the etag and last-modified
    time of the existing resource but before actually doing the requested
    action.  In that case, this examines the appropriate request headers for
    conditionals (If-Modified-Since, If-Unmodified-Since, If-Match,
    If-None-Match), compares them with the given etag and last-modified
    time, and raises HTTPError as necessary.

    @param request: the request whose conditional headers are examined.
    @param response: This should be provided for GET/HEAD methods.  If it is
        specified, the etag and lastModified arguments will be retrieved
        automatically from the response headers and shouldn't be separately
        specified.  Not providing the response with a GET request may cause
        the emitted "Not Modified" responses to be non-conformant.
    @param entityExists: Set to False if the entity in question doesn't yet
        exist.  Necessary for PUT support with 'If-None-Match: *'.
    @param etag: The etag of the resource to check against, or None.
    @param lastModified: The last modified date of the resource to check
        against, or None.
    @raise HTTPError: Raised when the preconditions fail, in order to abort
        processing and emit an error page.
    """
    if response:
        assert etag is None and lastModified is None
        # if the code is some sort of error code, don't do anything
        if not ((response.code >= 200 and response.code <= 299)
                or response.code == responsecode.PRECONDITION_FAILED):
            return False
        etag = response.headers.getHeader("etag")
        lastModified = response.headers.getHeader("last-modified")

    def matchETag(tags, allowWeak):
        # '*' matches any existing entity.
        if entityExists and '*' in tags:
            return True
        if etag is None:
            return False
        # A weak etag may only match when the comparison allows weakness.
        return ((allowWeak or not etag.weak) and
                ([etagmatch for etagmatch in tags
                  if etag.match(etagmatch, strongCompare=not allowWeak)]))

    # First check if-match/if-unmodified-since
    # If either one fails, we return PRECONDITION_FAILED
    match = request.headers.getHeader("if-match")
    if match:
        if not matchETag(match, False):
            raise HTTPError(StatusResponse(responsecode.PRECONDITION_FAILED, "Requested resource does not have a matching ETag."))

    unmod_since = request.headers.getHeader("if-unmodified-since")
    if unmod_since:
        if not lastModified or lastModified > unmod_since:
            raise HTTPError(StatusResponse(responsecode.PRECONDITION_FAILED, "Requested resource has changed."))

    # Now check if-none-match/if-modified-since.
    # This bit is tricky, because of the requirements when both IMS and INM
    # are present. In that case, you can't return a failure code
    # unless *both* checks think it failed.
    # Also, if the INM check succeeds, ignore IMS, because INM is treated
    # as more reliable.

    # I hope I got the logic right here...the RFC is quite poorly written
    # in this area. Someone might want to verify the testcase against
    # RFC wording.

    # If IMS header is later than current time, ignore it.
    notModified = None
    ims = request.headers.getHeader('if-modified-since')
    if ims:
        notModified = (ims < time.time() and lastModified and lastModified <= ims)

    inm = request.headers.getHeader("if-none-match")
    if inm:
        if request.method in ("HEAD", "GET"):
            # If it's a range request, don't allow a weak ETag, as that
            # would break.
            canBeWeak = not request.headers.hasHeader('Range')
            # notModified may be None (no IMS header) or True; only an
            # explicit IMS failure (False) suppresses the INM match.
            if notModified != False and matchETag(inm, canBeWeak):
                raise HTTPError(NotModifiedResponse(response))
        else:
            if notModified != False and matchETag(inm, False):
                raise HTTPError(StatusResponse(responsecode.PRECONDITION_FAILED, "Requested resource has a matching ETag."))
    else:
        if notModified == True:
            if request.method in ("HEAD", "GET"):
                raise HTTPError(NotModifiedResponse(response))
            else:
                # S14.25 doesn't actually say what to do for a failing IMS on
                # non-GET methods. But Precondition Failed makes sense to me.
                raise HTTPError(StatusResponse(responsecode.PRECONDITION_FAILED, "Requested resource has not changed."))
    def __init__(self, chanRequest, command, path, version, contentLength, headers):
        """
        @param chanRequest: the channel request we're associated with.
        @param command: the HTTP method (e.g. "GET"); stored as C{self.method}.
        @param path: the requested URI; stored as C{self.uri}.
        @param version: the client HTTP version; stored as C{self.clientproto}.
        @param contentLength: expected length of the request body, used to
            size the incoming data stream.
        @param headers: the parsed request headers.
        """
        self.chanRequest = chanRequest
        self.method = command
        self.uri = path
        self.clientproto = version

        self.headers = headers

        # If the client sent "Expect: 100-continue", defer emitting the
        # interim 100 response until something actually reads the body.
        if '100-continue' in self.headers.getHeader('expect', ()):
            doStartReading = self._sendContinue
        else:
            doStartReading = None
        self.stream = _NotifyingProducerStream(contentLength, doStartReading)
        # The channel feeds us body data; register it so it can be
        # paused/resumed as a producer.
        self.stream.registerProducer(self.chanRequest, True)
    def writeResponse(self, response):
        """
        Write the given response to the client.

        Sets the content-length header where appropriate, writes the
        response headers, and then streams the response body — unless this
        was a HEAD request or a response code that must not carry a body.

        @param response: the L{Response} to send.
        """
        if self.stream.doStartReading is not None:
            # Expect: 100-continue was requested, but 100 response has not been
            # sent, and there's a possibility that data is still waiting to be
            # sent.
            #
            # Ideally this means the remote side will not send any data.
            # However, because of compatibility requirements, it might timeout,
            # and decide to do so anyways at the same time we're sending back
            # this response. Thus, the read state is unknown after this.
            # We must close the connection.
            self.chanRequest.channel.setReadPersistent(False)
            # Nothing more will be read
            self.chanRequest.allContentReceived()

        if response.code != responsecode.NOT_MODIFIED:
            # Not modified response is *special* and doesn't get a content-length.
            if response.stream is None:
                response.headers.setHeader('content-length', 0)
            elif response.stream.length is not None:
                response.headers.setHeader('content-length', response.stream.length)

        self.chanRequest.writeHeaders(response.code, response.headers)

        # if this is a "HEAD" request, or a special response code,
        # don't return any data.
        if self.method == "HEAD" or response.code in NO_BODY_CODES:
            if response.stream is not None:
                response.stream.close()
            self._finished(None)
            return

        # Stream the body; _finished/_error handle connection cleanup.
        d = stream.StreamProducer(response.stream).beginProducing(self.chanRequest)
        d.addCallback(self._finished).addErrback(self._error)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/http.py
http.py
def dashCapitalize(s):
    """Capitalize each dash-separated word of *s*, keeping the dashes.

    e.g. 'content-type' -> 'Content-Type'.
    """
    words = s.split('-')
    capitalized = [word.capitalize() for word in words]
    return '-'.join(capitalized)
% (name,)) try: for p in parser: # print "Parsing %s: %s(%s)" % (name, repr(p), repr(h)) header = p(header) # if isinstance(h, types.GeneratorType): # h=list(h) except ValueError,v: # print v header=None return header def generate(self, name, header): """ Generate the given header based on its given name. @param name: The header name to generate. @type name: C{str} @param header: A parsed header, such as the output of L{HeaderHandler}.parse. @return: C{list} of C{str} each representing a generated HTTP header. """ generator = self.HTTPGenerators.get(name, None) if generator is None: # print self.generators raise ValueError("No header generator for header '%s', either add one or use setHeaderRaw." % (name,)) for g in generator: header = g(header) #self._raw_headers[name] = h return header def updateParsers(self, parsers): """Update en masse the parser maps. @param parsers: Map of header names to parser chains. @type parsers: C{dict} """ casemappingify(parsers) self.HTTPParsers.update(lowerify(parsers)) def addParser(self, name, value): """Add an individual parser chain for the given header. @param name: Name of the header to add @type name: C{str} @param value: The parser chain @type value: C{str} """ self.updateParsers({name: value}) def updateGenerators(self, generators): """Update en masse the generator maps. @param parsers: Map of header names to generator chains. @type parsers: C{dict} """ casemappingify(generators) self.HTTPGenerators.update(lowerify(generators)) def addGenerators(self, name, value): """Add an individual generator chain for the given header. @param name: Name of the header to add @type name: C{str} @param value: The generator chain @type value: C{str} """ self.updateGenerators({name: value}) def update(self, parsers, generators): """Conveniently update parsers and generators all at once. 
def parseDateTime(dateString):
    """Convert an HTTP date string to seconds since the epoch.

    Accepts the three formats of RFC 2616, sec 3.3.1:
      - Sun, 06 Nov 1994 08:49:37 GMT   (RFC 1123, preferred)
      - Sunday, 06-Nov-94 08:49:37 GMT  (RFC 850, two-digit year)
      - Sun Nov  6 08:49:37 1994        (ANSI C asctime())
    A missing weekday is tolerated by retrying with one prepended.

    @param dateString: the header value to parse.
    @return: C{int} seconds since the epoch (UTC).
    @raise ValueError: if the string matches none of the known formats.
    """
    parts = dateString.split()

    if not parts[0][0:3].lower() in weekdayname_lower:
        # Weekday is stupid. Might have been omitted.
        try:
            return parseDateTime("Sun, "+dateString)
        except ValueError:
            # Guess not.
            pass

    partlen = len(parts)
    if (partlen == 5 or partlen == 6) and parts[1].isdigit():
        # 1st date format: Sun, 06 Nov 1994 08:49:37 GMT
        # (Note: "GMT" is literal, not a variable timezone)
        # (also handles without "GMT")
        # This is the normal format
        day = parts[1]
        month = parts[2]
        year = parts[3]
        # Fix: don't shadow the imported `time` module with a local.
        timestr = parts[4]
    elif (partlen == 3 or partlen == 4) and parts[1].find('-') != -1:
        # 2nd date format: Sunday, 06-Nov-94 08:49:37 GMT
        # (also handles without "GMT")
        # Two digit year, yucko.
        day, month, year = parts[1].split('-')
        timestr = parts[2]
        year = int(year)
        # Window two-digit years into 1969-2068.
        if year < 69:
            year = year + 2000
        elif year < 100:
            year = year + 1900
    elif len(parts) == 5:
        # 3rd date format: Sun Nov  6 08:49:37 1994
        # ANSI C asctime() format.
        day = parts[2]
        month = parts[1]
        year = parts[4]
        timestr = parts[3]
    else:
        raise ValueError("Unknown datetime format %r" % dateString)

    day = int(day)
    # index() already returns an int; the old int() wrapper was redundant.
    month = monthname_lower.index(month.lower())
    year = int(year)
    # Fix: hour/minute/second instead of shadowing the builtin `min`.
    hour, minute, second = map(int, timestr.split(':'))
    return int(timegm((year, month, day, hour, minute, second)))
def split(seq, delim):
    """Like str.split, but for arbitrary sequences: yield sub-lists of
    *seq* delimited by items equal to *delim*.

    Always yields at least one (possibly empty) list.
    """
    group = []
    for element in seq:
        if element == delim:
            yield group
            group = []
        else:
            group.append(element)
    yield group
class MimeType(object):
    """An HTTP MIME type: a media type/subtype pair plus parameters.

    Instances compare equal when type, subtype and parameters all match,
    and are hashable, so they may be used as dict keys or set members.
    """
    def fromString(klass, mimeTypeString):
        """Generate a MimeType object from the given string.

        @param mimeTypeString: The mimetype to parse
        @return: L{MimeType}
        """
        return DefaultHTTPHandler.parse('content-type', [mimeTypeString])
    fromString = classmethod(fromString)

    def __init__(self, mediaType, mediaSubtype, params={}, **kwargs):
        """
        @type mediaType: C{str}
        @type mediaSubtype: C{str}
        @param params: MIME parameters (e.g. charset), as a dict or a
            sequence of (key, value) pairs; keyword arguments are merged
            in as additional parameters.
        @type params: C{dict}
        """
        self.mediaType = mediaType
        self.mediaSubtype = mediaSubtype
        # dict() copies, so the shared default {} is never mutated.
        self.params = dict(params)

        if kwargs:
            self.params.update(kwargs)

    def __eq__(self, other):
        if not isinstance(other, MimeType): return NotImplemented
        return (self.mediaType == other.mediaType and
                self.mediaSubtype == other.mediaSubtype and
                self.params == other.params)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "MimeType(%r, %r, %r)" % (self.mediaType, self.mediaSubtype, self.params)

    def __hash__(self):
        # Bug fix: the old hash used tuple(self.params.iteritems()), which
        # depends on dict iteration order, so two equal MimeTypes could
        # hash differently.  A frozenset of items is order-independent,
        # keeping the eq/hash contract.
        return hash((self.mediaType, self.mediaSubtype,
                     frozenset(self.params.items())))
def parseContentRange(header):
    """Parse a Content-Range header into (kind, start, end, realLength).

    realLength might be None if real length is not known ('*').
    start and end might be None if start,end unspecified (for response
    code 416).

    @param header: the raw header value, e.g. "bytes 0-499/1234".
    @return: C{tuple} of (kind, start, end, realLength).
    @raise ValueError: if the range unit is not "bytes" or the header is
        malformed.
    """
    kind, other = header.strip().split()
    if kind.lower() != "bytes":
        # Bug fix: the %r operand was missing, so the message was emitted
        # with a literal unexpanded "%r" instead of the offending unit.
        raise ValueError("a range of type %r is not supported" % (kind,))
    startend, realLength = other.split("/")
    if startend.strip() == '*':
        start, end = None, None
    else:
        start, end = map(int, startend.split("-"))
    if realLength == "*":
        realLength = None
    else:
        realLength = int(realLength)
    return (kind, start, end, realLength)
def parseAuthorization(header):
    """Split an Authorization header into (scheme, credentials).

    The scheme is lowercased.  The credentials are left as the raw string:
    this header isn't tokenized because tokenizing could eat characters
    in the unquoted base64-encoded credentials.
    """
    scheme, credentials = header.split(' ', 1)
    return scheme.lower(), credentials
def generateContentRange(tup):
    """Generate a Content-Range header value from a tuple.

    @param tup: (kind, start, end, realLength); realLength may be None
        (unknown, rendered as '*'), and start/end may both be None
        (unsatisfied range, rendered as '*').
    @return: C{str}, e.g. "bytes 0-499/1234".
    """
    # Fix: unpack into names that don't shadow the builtins len/type.
    kind, start, end, realLength = tup

    if realLength is None:
        realLength = '*'
    else:
        realLength = int(realLength)

    if start is None and end is None:
        startend = '*'
    else:
        startend = '%d-%d' % (start, end)

    return '%s %s/%s' % (kind, startend, realLength)
def generateWWWAuthenticate(headers):
    """Generate WWW-Authenticate header values.

    @param headers: a sequence of (scheme, challenge) pairs, where the
        challenge is normally a dict (or key/value pairs) of auth
        parameters, but may be another object (e.g. a bare token) when
        it could not be parsed as key/value pairs.
    @return: C{list} of C{str}, one generated header value per challenge.
    """
    _generated = []
    for seq in headers:
        scheme, challenge = seq[0], seq[1]

        # If we're going to parse out to something other than a dict
        # we need to be able to generate from something other than a dict
        try:
            l = []
            for k,v in dict(challenge).iteritems():
                l.append("%s=%s" % (k, quoteString(v)))

            _generated.append("%s %s" % (scheme, ", ".join(l)))
        except ValueError:
            # dict() presumably raises ValueError for a challenge that is
            # not key/value shaped; emit it verbatim in that case.
            _generated.append("%s %s" % (scheme, challenge))
    return _generated
class Cookie(object):
    """An HTTP cookie, in either Netscape (version 0) or RFC 2965
    (version 1) flavour.

    Only C{name} and C{value} are required; the remaining attributes are
    the optional version-1 cookie attributes.
    """
    def __init__(self, name, value, path=None, domain=None, ports=None,
                 expires=None, discard=False, secure=False, comment=None,
                 commenturl=None, version=0):
        self.name = name
        self.value = value
        self.path = path
        self.domain = domain
        self.ports = ports
        self.expires = expires
        self.discard = discard
        self.secure = secure
        self.comment = comment
        self.commenturl = commenturl
        self.version = version

    def __repr__(self):
        s = "Cookie(%r=%r" % (self.name, self.value)
        if self.path is not None: s += ", path=%r" % (self.path,)
        if self.domain is not None: s += ", domain=%r" % (self.domain,)
        if self.ports is not None: s += ", ports=%r" % (self.ports,)
        if self.expires is not None: s += ", expires=%r" % (self.expires,)
        if self.secure is not False: s += ", secure=%r" % (self.secure,)
        if self.comment is not None: s += ", comment=%r" % (self.comment,)
        if self.commenturl is not None: s += ", commenturl=%r" % (self.commenturl,)
        if self.version != 0: s += ", version=%r" % (self.version,)
        s += ")"
        return s

    def __eq__(self, other):
        # Bug fix: name, value and discard were omitted from the old
        # comparison, so cookies with different names or values compared
        # equal whenever their other attributes matched.
        return (isinstance(other, Cookie) and
                other.name == self.name and
                other.value == self.value and
                other.path == self.path and
                other.domain == self.domain and
                other.ports == self.ports and
                other.expires == self.expires and
                other.discard == self.discard and
                other.secure == self.secure and
                other.comment == self.comment and
                other.commenturl == self.commenturl and
                other.version == self.version)

    def __ne__(self, other):
        return not self.__eq__(other)
r_cookies = header.split(';') for r_cookie in r_cookies: name,value = r_cookie.split('=', 1) name=name.strip(' \t') value=value.strip(' \t') cookies.append(Cookie(name, value)) return cookies cookie_validname = "[^"+re.escape(http_tokens+http_ctls)+"]*$" cookie_validname_re = re.compile(cookie_validname) cookie_validvalue = cookie_validname+'|"([^"]|\\\\")*"$' cookie_validvalue_re = re.compile(cookie_validvalue) def generateCookie(cookies): # There's a fundamental problem with the two cookie specifications. # They both use the "Cookie" header, and the RFC Cookie header only allows # one version to be specified. Thus, when you have a collection of V0 and # V1 cookies, you have to either send them all as V0 or send them all as # V1. # I choose to send them all as V1. # You might think converting a V0 cookie to a V1 cookie would be lossless, # but you'd be wrong. If you do the conversion, and a V0 parser tries to # read the cookie, it will see a modified form of the cookie, in cases # where quotes must be added to conform to proper V1 syntax. # (as a real example: "Cookie: cartcontents=oid:94680,qty:1,auto:0,esp:y") # However, that is what we will do, anyways. It has a high probability of # breaking applications that only handle oldstyle cookies, where some other # application set a newstyle cookie that is applicable over for site # (or host), AND where the oldstyle cookie uses a value which is invalid # syntax in a newstyle cookie. # Also, the cookie name *cannot* be quoted in V1, so some cookies just # cannot be converted at all. (e.g. "Cookie: phpAds_capAd[32]=2"). These # are just dicarded during conversion. # As this is an unsolvable problem, I will pretend I can just say # OH WELL, don't do that, or else upgrade your old applications to have # newstyle cookie parsers. # I will note offhandedly that there are *many* sites which send V0 cookies # that are not valid V1 cookie syntax. About 20% for my cookies file. 
def generateCookie(cookies):
    """Generate a single Cookie: header value from a list of Cookie objects.

    There's a fundamental problem with the two cookie specifications: they
    both use the "Cookie" header, and the RFC Cookie header only allows one
    version to be specified.  So when given a mix of V0 and V1 cookies, they
    are all sent as V1 (if any cookie is V1).  Converting a V0 cookie to V1
    is not lossless: quotes may have to be added for valid V1 syntax, and a
    V0 cookie whose name is not a valid V1 token cannot be converted at all
    and is discarded.  Many real-world V0 cookies are not valid V1 syntax,
    but they are not generally mixed with V1 cookies, so this is acceptable.

    Returns '' for an empty cookie list.
    """
    # BUG FIX: the original unconditionally called max() on the version
    # list, which raises ValueError for an empty sequence.
    if not cookies:
        return ''
    max_version = max([cookie.version for cookie in cookies])

    if max_version == 0:
        # All V0: no quoting or anything.
        return ';'.join(["%s=%s" % (cookie.name, cookie.value) for cookie in cookies])
    else:
        str_cookies = ['$Version="1"']
        for cookie in cookies:
            if cookie.version == 0:
                # Version 0 cookie: make sure the name and value are valid
                # V1 syntax.  If they are, use them as is, so in *most*
                # cases the cookie looks literally the same on output as it
                # did on input.
                if cookie_validname_re.match(cookie.name) is None:
                    # Name cannot be expressed in V1 syntax; drop the cookie.
                    continue

                value=cookie.value
                if cookie_validvalue_re.match(cookie.value) is None:
                    # Quote it and hope for the best on the other side.
                    value = quoteString(value)

                str_cookies.append("%s=%s" % (cookie.name, value))
            else:
                # V1 cookie, nice and easy
                str_cookies.append("%s=%s" % (cookie.name, quoteString(cookie.value)))

            if cookie.path:
                str_cookies.append("$Path=%s" % quoteString(cookie.path))
            if cookie.domain:
                str_cookies.append("$Domain=%s" % quoteString(cookie.domain))
            if cookie.ports is not None:
                if len(cookie.ports) == 0:
                    str_cookies.append("$Port")
                else:
                    str_cookies.append("$Port=%s" % quoteString(",".join([str(x) for x in cookie.ports])))
        return ';'.join(str_cookies)
def generateSetCookie(cookies):
    """Render each Cookie as a Netscape-style Set-Cookie header value.

    Only the attributes the V0 spec knows about are emitted: expires,
    path, domain and the bare 'secure' flag.
    """
    rendered = []
    for ck in cookies:
        parts = ["%s=%s" % (ck.name, ck.value)]
        if ck.expires:
            parts.append("expires=%s" % generateDateTime(ck.expires))
        # path and domain are emitted unquoted, as V0 requires.
        for attr in ('path', 'domain'):
            attrval = getattr(ck, attr)
            if attrval:
                parts.append("%s=%s" % (attr, attrval))
        if ck.secure:
            parts.append("secure")
        rendered.append('; '.join(parts))
    return rendered
out.append("Comment=%s" % quoteString(cookie.comment)) if cookie.commenturl: out.append("CommentURL=%s" % quoteString(cookie.commenturl)) if cookie.discard: out.append("Discard") if cookie.domain: out.append("Domain=%s" % quoteString(cookie.domain)) if cookie.expires: out.append("Max-Age=%s" % (cookie.expires - time.time())) if cookie.path: out.append("Path=%s" % quoteString(cookie.path)) if cookie.ports is not None: if len(cookie.ports) == 0: out.append("Port") else: out.append("Port=%s" % quoteString(",".join([str(x) for x in cookie.ports]))) if cookie.secure: out.append("Secure") out.append('Version="1"') setCookies.append('; '.join(out)) return setCookies def parseDepth(depth): if depth not in ("0", "1", "infinity"): raise ValueError("Invalid depth header value: %s" % (depth,)) return depth def parseOverWrite(overwrite): if overwrite == "F": return False elif overwrite == "T": return True raise ValueError("Invalid overwrite header value: %s" % (overwrite,)) def generateOverWrite(overwrite): if overwrite: return "T" else: return "F" ##### Random stuff that looks useful. # def sortMimeQuality(s): # def sorter(item1, item2): # if item1[0] == '*': # if item2[0] == '*': # return 0 # def sortQuality(s): # def sorter(item1, item2): # if item1[1] < item2[1]: # return -1 # if item1[1] < item2[1]: # return 1 # if item1[0] == item2[0]: # return 0 # def getMimeQuality(mimeType, accepts): # type,args = parseArgs(mimeType) # type=type.split(Token('/')) # if len(type) != 2: # raise ValueError, "MIME Type "+s+" invalid." 
# for accept in accepts: # accept,acceptQual=accept # acceptType=accept[0:1] # acceptArgs=accept[2] # if ((acceptType == type or acceptType == (type[0],'*') or acceptType==('*','*')) and # (args == acceptArgs or len(acceptArgs) == 0)): # return acceptQual # def getQuality(type, accepts): # qual = accepts.get(type) # if qual is not None: # return qual # return accepts.get('*') # Headers object class __RecalcNeeded(object): def __repr__(self): return "<RecalcNeeded>" _RecalcNeeded = __RecalcNeeded() class Headers(object): """This class stores the HTTP headers as both a parsed representation and the raw string representation. It converts between the two on demand.""" def __init__(self, headers=None, rawHeaders=None, handler=DefaultHTTPHandler): self._raw_headers = {} self._headers = {} self.handler = handler if headers is not None: for key, value in headers.iteritems(): self.setHeader(key, value) if rawHeaders is not None: for key, value in rawHeaders.iteritems(): self.setRawHeaders(key, value) def _setRawHeaders(self, headers): self._raw_headers = headers self._headers = {} def _toParsed(self, name): r = self._raw_headers.get(name, None) h = self.handler.parse(name, r) if h is not None: self._headers[name] = h return h def _toRaw(self, name): h = self._headers.get(name, None) r = self.handler.generate(name, h) if r is not None: self._raw_headers[name] = r return r def hasHeader(self, name): """Does a header with the given name exist?""" name=name.lower() return self._raw_headers.has_key(name) def getRawHeaders(self, name, default=None): """Returns a list of headers matching the given name as the raw string given.""" name=name.lower() raw_header = self._raw_headers.get(name, default) if raw_header is not _RecalcNeeded: return raw_header return self._toRaw(name) def getHeader(self, name, default=None): """Ret9urns the parsed representation of the given header. The exact form of the return value depends on the header in question. 
    def addRawHeader(self, name, value):
        """
        Add a raw value to a header that may or may not already exist.

        If it exists, add it as a separate header to output; do not
        replace anything.  The parsed representation for this name is
        invalidated so it is recomputed on the next getHeader call.
        """
        name=name.lower()
        raw_header = self._raw_headers.get(name)
        if raw_header is None:
            # No header yet; start a fresh list of raw values.
            raw_header = []
            self._raw_headers[name] = raw_header
        elif raw_header is _RecalcNeeded:
            # Only the parsed form is current; regenerate the raw list
            # (and its cache entry) before appending to it.
            raw_header = self._toRaw(name)
        raw_header.append(value)
        # Parsed cache is now stale.
        self._headers[name] = _RecalcNeeded
    def makeImmutable(self):
        """Make this header set immutable. All mutating operations will
        raise an exception.

        Implemented by shadowing the mutator methods with instance
        attributes pointing at _mutateRaise, so the class itself stays
        unchanged.
        NOTE(review): addRawHeader is not shadowed here, so it can still
        mutate an "immutable" header set — confirm whether that is
        intentional.
        """
        self.setHeader = self.setRawHeaders = self.removeHeader = self._mutateRaise

    def _mutateRaise(self, *args):
        # Stand-in for any mutator once makeImmutable() has been called.
        raise AttributeError("This header object is immutable as the headers have already been sent.")
'Host':(last,), 'If-Match':(tokenize, listParser(parseStarOrETag), list), 'If-Modified-Since':(last, parseIfModifiedSince), 'If-None-Match':(tokenize, listParser(parseStarOrETag), list), 'If-Range':(parseIfRange,), 'If-Unmodified-Since':(last,parseDateTime), 'Max-Forwards':(last,int), # 'Proxy-Authorization':str, # what is "credentials" 'Range':(tokenize, parseRange), 'Referer':(last,str), # TODO: URI object? 'TE':(tokenize, listParser(parseAcceptQvalue), dict), 'User-Agent':(last,str), } generator_request_headers = { 'Accept': (iteritems,listGenerator(generateAccept),singleHeader), 'Accept-Charset': (iteritems, listGenerator(generateAcceptQvalue),singleHeader), 'Accept-Encoding': (iteritems, removeDefaultEncoding, listGenerator(generateAcceptQvalue),singleHeader), 'Accept-Language': (iteritems, listGenerator(generateAcceptQvalue),singleHeader), 'Authorization': (generateAuthorization,), # what is "credentials" 'Cookie':(generateCookie,singleHeader), 'Expect':(iteritems, listGenerator(generateExpect), singleHeader), 'From':(str,singleHeader), 'Host':(str,singleHeader), 'If-Match':(listGenerator(generateStarOrETag), singleHeader), 'If-Modified-Since':(generateDateTime,singleHeader), 'If-None-Match':(listGenerator(generateStarOrETag), singleHeader), 'If-Range':(generateIfRange, singleHeader), 'If-Unmodified-Since':(generateDateTime,singleHeader), 'Max-Forwards':(str, singleHeader), # 'Proxy-Authorization':str, # what is "credentials" 'Range':(generateRange,singleHeader), 'Referer':(str,singleHeader), 'TE': (iteritems, listGenerator(generateAcceptQvalue),singleHeader), 'User-Agent':(str,singleHeader), } parser_response_headers = { 'Accept-Ranges':(tokenize, filterTokens), 'Age':(last,int), 'ETag':(tokenize, ETag.parse), 'Location':(last,), # TODO: URI object? 
# 'Proxy-Authenticate' 'Retry-After':(last, parseRetryAfter), 'Server':(last,), 'Set-Cookie':(parseSetCookie,), 'Set-Cookie2':(tokenize, parseSetCookie2), 'Vary':(tokenize, filterTokens), 'WWW-Authenticate': (lambda h: tokenize(h, foldCase=False), parseWWWAuthenticate,) } generator_response_headers = { 'Accept-Ranges':(generateList, singleHeader), 'Age':(str, singleHeader), 'ETag':(ETag.generate, singleHeader), 'Location':(str, singleHeader), # 'Proxy-Authenticate' 'Retry-After':(generateRetryAfter, singleHeader), 'Server':(str, singleHeader), 'Set-Cookie':(generateSetCookie,), 'Set-Cookie2':(generateSetCookie2,), 'Vary':(generateList, singleHeader), 'WWW-Authenticate':(generateWWWAuthenticate,) } parser_entity_headers = { 'Allow':(lambda str:tokenize(str, foldCase=False), filterTokens), 'Content-Encoding':(tokenize, filterTokens), 'Content-Language':(tokenize, filterTokens), 'Content-Length':(last, int), 'Content-Location':(last,), # TODO: URI object? 'Content-MD5':(last, parseContentMD5), 'Content-Range':(last, parseContentRange), 'Content-Type':(lambda str:tokenize(str, foldCase=False), parseContentType), 'Expires':(last, parseExpires), 'Last-Modified':(last, parseDateTime), } generator_entity_headers = { 'Allow':(generateList, singleHeader), 'Content-Encoding':(generateList, singleHeader), 'Content-Language':(generateList, singleHeader), 'Content-Length':(str, singleHeader), 'Content-Location':(str, singleHeader), 'Content-MD5':(base64.encodestring, lambda x: x.strip("\n"), singleHeader), 'Content-Range':(generateContentRange, singleHeader), 'Content-Type':(generateContentType, singleHeader), 'Expires':(generateDateTime, singleHeader), 'Last-Modified':(generateDateTime, singleHeader), } parser_dav_headers = { 'DAV' : (tokenize, list), 'Depth' : (last, parseDepth), 'Destination' : (last,), # TODO: URI object? 
# Generators for WebDAV response/request headers.  Each value is a tuple of
# operations applied in order, ending with singleHeader, matching the shape
# of every other generator table in this file.
generator_dav_headers = {
    'DAV' : (generateList, singleHeader),
    # BUG FIX: 'Depth' and 'Destination' were bare '(singleHeader)' — a
    # plain function, not a one-tuple — and 'Overwrite' was an empty tuple
    # missing its generator, so none of these headers could be generated.
    # Depth ("0"/"1"/"infinity") and Destination are plain strings;
    # Overwrite is a bool rendered through generateOverWrite.
    'Depth' : (str, singleHeader),
    'Destination' : (str, singleHeader),
    #'If' : (),
    #'Lock-Token' : (),
    'Overwrite' : (generateOverWrite, singleHeader),
    #'Status-URI' : (),
    #'Timeout' : (),
}
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/http_headers.py
http_headers.py
class HeaderAdapter(UserDict.DictMixin):
    """Adapt a twisted.web2 Headers object to the old dict-like header API.

    Keys are header names; values are the raw header values joined with
    ', ' as old-style code expects.
    """
    def __init__(self, headers):
        self._headers = headers

    def __getitem__(self, name):
        raw = self._headers.getRawHeaders(name)
        if raw is None:
            raise KeyError(name)
        return ', '.join(raw)

    def __setitem__(self, name, value):
        # BUG FIX: the original called setRawHeaders([value]) without the
        # header name, so every assignment raised TypeError.
        self._headers.setRawHeaders(name, [value])

    def __delitem__(self, name):
        if not self._headers.hasHeader(name):
            raise KeyError(name)
        self._headers.removeHeader(name)

    def iteritems(self):
        for k,v in self._headers.getAllRawHeaders():
            yield k, ', '.join(v)

    def keys(self):
        return [k for k, _ in self.iteritems()]

    def __iter__(self):
        for k, _ in self.iteritems():
            yield k

    def has_key(self, name):
        return self._headers.hasHeader(name)
def _addressToTuple(addr):
    """Flatten a twisted address object into a plain tuple for PB transfer.

    IPv4 addresses become ('INET', host, port), UNIX sockets become
    ('UNIX', name); anything else is coerced with tuple().
    """
    if isinstance(addr, address.IPv4Address):
        return ('INET', addr.host, addr.port)
    if isinstance(addr, address.UNIXAddress):
        return ('UNIX', addr.name)
    return tuple(addr)
    def __init__(self, request):
        """Wrap a new-style web2 request, creating the Response that the
        old-style API will write into via a ProducerStream."""
        # Local import to avoid a circular dependency with twisted.web2.http.
        from twisted.web2 import http
        components.Componentized.__init__(self)
        self.request = request
        self.response = http.Response(stream=stream.ProducerStream())
        # This deferred will be fired by the first call to write on OldRequestAdapter
        # and will cause the headers to be output.
        self.deferredResponse = defer.Deferred()
return 'twisted.web.server.Request' def registerProducer(self, producer, streaming): self.response.stream.registerProducer(producer, streaming) def unregisterProducer(self): self.response.stream.unregisterProducer() def finish(self): if self.deferredResponse is not None: d = self.deferredResponse self.deferredResponse = None d.callback(self.response) self.response.stream.finish() def write(self, data): if self.deferredResponse is not None: d = self.deferredResponse self.deferredResponse = None d.callback(self.response) self.response.stream.write(data) def getHeader(self, name): raw = self.request.headers.getRawHeaders(name) if raw is None: return None return ', '.join(raw) def setHeader(self, name, value): """Set an outgoing HTTP header. """ self.response.headers.setRawHeaders(name, [value]) def setResponseCode(self, code, message=None): # message ignored self.response.code = code def setLastModified(self, when): # Never returns CACHED -- can it and still be compliant? when = long(math.ceil(when)) self.response.headers.setHeader('last-modified', when) return None def setETag(self, etag): self.response.headers.setRawHeaders('etag', [etag]) return None def getAllHeaders(self): return dict(self.headers.iteritems()) def getRequestHostname(self): return self.request.host def getCookie(self, key): for cookie in self.request.headers.getHeader('cookie', ()): if cookie.name == key: return cookie.value return None def addCookie(self, k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None): if expires is None and max_age is not None: expires=max_age-time.time() cookie = http_headers.Cookie(k,v, expires=expires, domain=domain, path=path, comment=comment, secure=secure) self.response.headers.setHeader('set-cookie', self.request.headers.getHeader('set-cookie', ())+(cookie,)) def notifyFinish(self): ### FIXME return None # return self.request.notifyFinish() def getHost(self): return self.host def setHost(self, host, port, ssl=0): self.request.host = 
host self.request.port = port self.request.scheme = ssl and 'https' or 'http' def isSecure(self): return self.request.scheme == 'https' def getClientIP(self): if isinstance(self.request.chanRequest.getRemoteHost(), address.IPv4Address): return self.client.host else: return None return self.request.chanRequest.getRemoteHost() return "127.0.0.1" def getClient(self): return "127.0.0.1" ### FIXME: def getUser(self): return "" def getPassword(self): return "" # Identical to original methods -- hopefully these don't have to change def sibLink(self, name): "Return the text that links to a sibling of the requested resource." if self.postpath: return (len(self.postpath)*"../") + name else: return name def childLink(self, name): "Return the text that links to a child of the requested resource." lpp = len(self.postpath) if lpp > 1: return ((lpp-1)*"../") + name elif lpp == 1: return name else: # lpp == 0 if len(self.prepath) and self.prepath[-1]: return self.prepath[-1] + '/' + name else: return name def redirect(self, url): """Utility function that does a redirect. The request should have finish() called after this. """ self.setResponseCode(responsecode.FOUND) self.setHeader("location", url) def prePathURL(self): port = self.getHost().port if self.isSecure(): default = 443 else: default = 80 if port == default: hostport = '' else: hostport = ':%d' % port return quote('http%s://%s%s/%s' % ( self.isSecure() and 's' or '', self.getRequestHostname(), hostport, string.join(self.prepath, '/')), "/:") # def URLPath(self): # from twisted.python import urlpath # return urlpath.URLPath.fromRequest(self) # But nevow wants it to look like this... :( def URLPath(self): from nevow import url return url.URL.fromContext(self) def rememberRootURL(self, url=None): """ Remember the currently-processed part of the URL for later recalling. 
""" if url is None: url = self.prePathURL() # remove one segment self.appRootURL = url[:url.rindex("/")] else: self.appRootURL = url def getRootURL(self): """ Get a previously-remembered URL. """ return self.appRootURL session = None def getSession(self, sessionInterface = None): # Session management if not self.session: # FIXME: make sitepath be something cookiename = string.join(['TWISTED_SESSION'] + self.sitepath, "_") sessionCookie = self.getCookie(cookiename) if sessionCookie: try: self.session = self.site.getSession(sessionCookie) except KeyError: pass # if it still hasn't been set, fix it up. if not self.session: self.session = self.site.makeSession() self.addCookie(cookiename, self.session.uid, path='/') self.session.touch() if sessionInterface: return self.session.getComponent(sessionInterface) return self.session class OldNevowResourceAdapter(object): implements(iweb.IResource) def __init__(self, original): # Can't use self.__original= because of __setattr__. self.__dict__['_OldNevowResourceAdapter__original']=original def __getattr__(self, name): return getattr(self.__original, name) def __setattr__(self, name, value): setattr(self.__original, name, value) def __delattr__(self, name): delattr(self.__original, name) def locateChild(self, ctx, segments): from twisted.web2.server import parsePOSTData request = iweb.IRequest(ctx) if request.method == "POST": return parsePOSTData(request).addCallback( lambda x: self.__original.locateChild(ctx, segments)) return self.__original.locateChild(ctx, segments) def renderHTTP(self, ctx): from twisted.web2.server import parsePOSTData request = iweb.IRequest(ctx) if request.method == "POST": return parsePOSTData(request).addCallback(self.__reallyRender, ctx) return self.__reallyRender(None, ctx) def __reallyRender(self, ignored, ctx): # This deferred will be called when our resource is _finished_ # writing, and will make sure we write the rest of our data # and finish the connection. 
defer.maybeDeferred(self.__original.renderHTTP, ctx).addCallback(self.__finish, ctx) # Sometimes the __original.renderHTTP will write() before we # even get this far, and we don't want to return # oldRequest.deferred if it's already been set to None. oldRequest = iweb.IOldRequest(ctx) if oldRequest.deferredResponse is None: return oldRequest.response return oldRequest.deferredResponse def __finish(self, data, ctx): oldRequest = iweb.IOldRequest(ctx) oldRequest.write(data) oldRequest.finish() class OldResourceAdapter(object): implements(iweb.IOldNevowResource) def __init__(self, original): self.original = original def __repr__(self): return "<%s @ 0x%x adapting %r>" % (self.__class__.__name__, id(self), self.original) def locateChild(self, req, segments): import server request = iweb.IOldRequest(req) if self.original.isLeaf: return self, server.StopTraversal name = segments[0] if name == '': res = self else: request.prepath.append(request.postpath.pop(0)) res = self.original.getChildWithDefault(name, request) request.postpath.insert(0, request.prepath.pop()) if isinstance(res, defer.Deferred): return res.addCallback(lambda res: (res, segments[1:])) return res, segments[1:] def _handle_NOT_DONE_YET(self, data, request): from twisted.web.server import NOT_DONE_YET if data == NOT_DONE_YET: # Return a deferred that will never fire, so the finish # callback doesn't happen. This is because, when returning # NOT_DONE_YET, the page is responsible for calling finish. return defer.Deferred() else: return data def renderHTTP(self, req): request = iweb.IOldRequest(req) result = defer.maybeDeferred(self.original.render, request).addCallback( self._handle_NOT_DONE_YET, request) return result __all__ = []
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/compat.py
compat.py
def formatFileSize(size):
    """Return *size* in bytes as a short human-readable string.

    Below 1K the raw byte count is returned; otherwise the size is scaled
    to K, M or G with an integer quotient.
    """
    kb, mb, gb = 1024, 1024 ** 2, 1024 ** 3
    if size < kb:
        return '%i' % size
    if size < mb:
        return '%iK' % (size / kb)
    if size < gb:
        return '%iM' % (size / mb)
    return '%iG' % (size / gb)
#f0d0ef } .odd { background-color: #dedede } .icon { text-align: center } .listing { margin-left: auto; margin-right: auto; width: 50%%; padding: 0.1em; } body { border: 0; padding: 0; margin: 0; background-color: #efefef;} h1 {padding: 0.1em; background-color: #777; color: white; border-bottom: thin white dashed;} </style></head><body><div class="directory-listing"><h1>%s</h1>""" % (title,title) s+="<table>" s+="<tr><th>Filename</th><th>Size</th><th>Last Modified</th><th>File Type</th></tr>" even = False for row in self.data_listing(request, None): s+='<tr class="%s">' % (even and 'even' or 'odd',) s+='<td><a href="%(link)s">%(linktext)s</a></td><td align="right">%(size)s</td><td>%(lastmod)s</td><td>%(type)s</td></tr>' % row even = not even s+="</table></div></body></html>" response = http.Response(200, {}, s) response.headers.setHeader("content-type", http_headers.MimeType('text', 'html')) return response __all__ = ['DirectoryLister']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dirlist.py
dirlist.py
"""Symbolic names for HTTP response status codes (RFC 2616, plus the
WebDAV additions from RFC 2518), and RESPONSES mapping each code to its
standard reason phrase.
"""

# Informational 1xx
CONTINUE = 100
SWITCHING = 101

# Successful 2xx
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207

# Redirection 3xx
MULTIPLE_CHOICE = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307

# Client error 4xx
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTH_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422 # RFC 2518
LOCKED = 423 # RFC 2518
FAILED_DEPENDENCY = 424 # RFC 2518

# Server error 5xx
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE_SPACE = 507
NOT_EXTENDED = 510

# Reason phrase for each code, used when serializing a status line.
RESPONSES = {
    # 100
    CONTINUE: "Continue",
    SWITCHING: "Switching Protocols",

    # 200
    OK: "OK",
    CREATED: "Created",
    ACCEPTED: "Accepted",
    NON_AUTHORITATIVE_INFORMATION: "Non-Authoritative Information",
    NO_CONTENT: "No Content",
    # NOTE(review): trailing '.' is inconsistent with every other phrase;
    # it is a runtime string, so left untouched here -- confirm before fixing.
    RESET_CONTENT: "Reset Content.",
    PARTIAL_CONTENT: "Partial Content",
    MULTI_STATUS: "Multi-Status",

    # 300
    MULTIPLE_CHOICE: "Multiple Choices",
    MOVED_PERMANENTLY: "Moved Permanently",
    FOUND: "Found",
    SEE_OTHER: "See Other",
    NOT_MODIFIED: "Not Modified",
    USE_PROXY: "Use Proxy",
    # 306 unused
    TEMPORARY_REDIRECT: "Temporary Redirect",

    # 400
    BAD_REQUEST: "Bad Request",
    UNAUTHORIZED: "Unauthorized",
    PAYMENT_REQUIRED: "Payment Required",
    FORBIDDEN: "Forbidden",
    NOT_FOUND: "Not Found",
    NOT_ALLOWED: "Method Not Allowed",
    NOT_ACCEPTABLE: "Not Acceptable",
    PROXY_AUTH_REQUIRED: "Proxy Authentication Required",
    REQUEST_TIMEOUT: "Request Time-out",
    CONFLICT: "Conflict",
    GONE: "Gone",
    LENGTH_REQUIRED: "Length Required",
    PRECONDITION_FAILED: "Precondition Failed",
    REQUEST_ENTITY_TOO_LARGE: "Request Entity Too Large",
    REQUEST_URI_TOO_LONG: "Request-URI Too Long",
    UNSUPPORTED_MEDIA_TYPE: "Unsupported Media Type",
    REQUESTED_RANGE_NOT_SATISFIABLE: "Requested Range not satisfiable",
    EXPECTATION_FAILED: "Expectation Failed",
    UNPROCESSABLE_ENTITY: "Unprocessable Entity",
    LOCKED: "Locked",
    FAILED_DEPENDENCY: "Failed Dependency",

    # 500
    INTERNAL_SERVER_ERROR: "Internal Server Error",
    NOT_IMPLEMENTED: "Not Implemented",
    BAD_GATEWAY: "Bad Gateway",
    SERVICE_UNAVAILABLE: "Service Unavailable",
    GATEWAY_TIMEOUT: "Gateway Time-out",
    HTTP_VERSION_NOT_SUPPORTED: "HTTP Version not supported",
    INSUFFICIENT_STORAGE_SPACE: "Insufficient Storage Space",
    NOT_EXTENDED: "Not Extended"
    }

# No __all__ necessary -- everything is exported
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/responsecode.py
responsecode.py
# NOTE(review): this unconditional raise disables the whole module; every
# definition below is unreachable at import time.
raise ImportError("FIXME: this file probably doesn't work.")

# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.

"""Simplistic HTTP proxy support.

This comes in two main variants - the Proxy and the ReverseProxy.

When a Proxy is in use, a browser trying to connect to a server (say,
www.yahoo.com) will be intercepted by the Proxy, and the proxy will covertly
connect to the server, and return the result.

When a ReverseProxy is in use, the client connects directly to the ReverseProxy
(say, www.yahoo.com) which farms off the request to one of a pool of servers,
and returns the result.

Normally, a Proxy is used on the client end of an Internet connection, while a
ReverseProxy is used on the server end.
"""

# twisted imports
from twisted.web2 import http
from twisted.internet import reactor, protocol
from twisted.web2 import resource, server
from zope.interface import implements, Interface

# system imports
import urlparse


class ProxyClient(http.HTTPClient):
    """Used by ProxyClientFactory to implement a simple web proxy."""

    def __init__(self, command, rest, version, headers, data, father):
        # 'father' is the server-side request whose transport we copy the
        # upstream response onto, verbatim.
        self.father = father
        self.command = command
        self.rest = rest
        # Hop-by-hop headers: drop proxy-connection, force connection: close.
        if headers.has_key("proxy-connection"):
            del headers["proxy-connection"]
        headers["connection"] = "close"
        self.headers = headers
        self.data = data

    def connectionMade(self):
        # Replay the client's request (line, headers, body) upstream.
        self.sendCommand(self.command, self.rest)
        for header, value in self.headers.items():
            self.sendHeader(header, value)
        self.endHeaders()
        self.transport.write(self.data)

    def handleStatus(self, version, code, message):
        self.father.transport.write("%s %s %s\r\n" % (version, code, message))

    def handleHeader(self, key, value):
        self.father.transport.write("%s: %s\r\n" % (key, value))

    def handleEndHeaders(self):
        self.father.transport.write("\r\n")

    def handleResponsePart(self, buffer):
        self.father.transport.write(buffer)

    def handleResponseEnd(self):
        # Close both the upstream and the client-facing connections.
        self.transport.loseConnection()
        self.father.channel.transport.loseConnection()


class ProxyClientFactory(protocol.ClientFactory):
    """Used by ProxyRequest to implement a simple web proxy."""

    def __init__(self, command, rest, version, headers, data, father):
        self.father = father
        self.command = command
        self.rest = rest
        self.headers = headers
        self.data = data
        self.version = version

    def buildProtocol(self, addr):
        return ProxyClient(self.command, self.rest, self.version,
                           self.headers, self.data, self.father)

    def clientConnectionFailed(self, connector, reason):
        # Hand-rolled error response written straight to the transport.
        # NOTE(review): 501 is "Not Implemented"; 502 Bad Gateway is the
        # conventional code for an upstream connect failure -- confirm
        # before changing the runtime string.
        self.father.transport.write("HTTP/1.0 501 Gateway error\r\n")
        self.father.transport.write("Content-Type: text/html\r\n")
        self.father.transport.write("\r\n")
        self.father.transport.write('''<H1>Could not connect</H1>''')


class ProxyRequest(http.Request):
    """Used by Proxy to implement a simple web proxy."""

    # Scheme -> client factory / default port; only plain http is supported.
    protocols = {'http': ProxyClientFactory}
    ports = {'http': 80}

    def process(self):
        # Request-URI is absolute for a forward proxy; split out host/port
        # and re-assemble the path+query to send upstream.
        parsed = urlparse.urlparse(self.uri)
        protocol = parsed[0]
        host = parsed[1]
        port = self.ports[protocol]
        if ':' in host:
            host, port = host.split(':')
            port = int(port)
        rest = urlparse.urlunparse(('','')+parsed[2:])
        if not rest:
            rest = rest+'/'
        class_ = self.protocols[protocol]
        headers = self.getAllHeaders().copy()
        if not headers.has_key('host'):
            headers['host'] = host
        self.content.seek(0, 0)
        s = self.content.read()
        clientFactory = class_(self.method, rest, self.clientproto, headers,
                               s, self)
        reactor.connectTCP(host, port, clientFactory)


class Proxy(http.HTTPChannel):
    """This class implements a simple web proxy.

    Since it inherits from twisted.protocols.http.HTTPChannel, to use it you
    should do something like this::

        from twisted.web2 import http
        f = http.HTTPFactory()
        f.protocol = Proxy

    Make the HTTPFactory a listener on a port as per usual, and you have
    a fully-functioning web proxy!
    """

    requestFactory = ProxyRequest


class ReverseProxyRequest(http.Request):
    """Used by ReverseProxy to implement a simple reverse proxy."""

    def process(self):
        # Rewrite the Host header to the configured upstream before relaying.
        self.received_headers['host'] = self.factory.host
        clientFactory = ProxyClientFactory(self.method, self.uri, self.clientproto,
                                           self.getAllHeaders(), self.content.read(), self)
        reactor.connectTCP(self.factory.host, self.factory.port, clientFactory)


class ReverseProxy(http.HTTPChannel):
    """Implements a simple reverse proxy.

    For details of usage, see the file examples/proxy.py"""

    requestFactory = ReverseProxyRequest


class IConnector(Interface):
    """attribute name"""
    def connect(factory):
        """connect ClientFactory"""


class TCPConnector:
    # Connects a factory to a fixed TCP host/port.
    implements(IConnector)

    def __init__(self, host, port):
        self.host = host
        self.name = host
        self.port = port

    def connect(self, factory):
        reactor.connectTCP(self.host, self.port, factory)


class UNIXConnector:
    # Connects a factory to a UNIX-domain socket; no meaningful host name.
    implements(IConnector)

    name = 'n/a'

    def __init__(self, socket):
        self.socket = socket

    def connect(self, factory):
        reactor.connectUNIX(self.socket, factory)


def ReverseProxyResource(host, port, path):
    # Convenience constructor: a reverse-proxy resource over plain TCP.
    return ReverseProxyResourceConnector(TCPConnector(host, port), path)


class ReverseProxyResourceConnector:
    """Resource that renders the results gotten from another server

    Put this resource in the tree to cause everything below it to be relayed
    to a different server.
    """
    isLeaf = True
    implements(resource.IResource)

    def __init__(self, connector, path):
        self.connector = connector
        self.path = path

    def render(self, request):
        request.received_headers['host'] = self.connector.name
        request.content.seek(0, 0)
        # Element 4 of the urlparse 6-tuple is the query string.
        qs = urlparse.urlparse(request.uri)[4]
        path = self.path+'/'.join(request.postpath)
        if qs:
            rest = path + '?' + qs
        else:
            rest = path
        clientFactory = ProxyClientFactory(request.method, rest, request.clientproto,
                                           request.getAllHeaders(), request.content.read(), request)
        self.connector.connect(clientFactory)
        return server.NOT_DONE_YET
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/proxy.py
proxy.py
from twisted.web2 import stream, http_headers
from twisted.web2.responsecode import *

# Default HTML error-body templates, keyed by response code.  Each value is
# a %-format template over the keys 'uri', 'location' and 'method' (already
# HTML-escaped by defaultErrorHandler before substitution).
#
# Codes with extra header requirements (RFC 2616):
# 300 - Should include entity with choices
# 301 -
# 304 - Must include Date, ETag, Content-Location, Expires, Cache-Control, Vary.
#
# 401 - Must include WWW-Authenticate.
# 405 - Must include Allow.
# 406 - Should include entity describing allowable characteristics
# 407 - Must include Proxy-Authenticate
# 413 - May include Retry-After
# 416 - Should include Content-Range
# 503 - Should include Retry-After

ERROR_MESSAGES = {
    # 300
    # no MULTIPLE_CHOICES
    MOVED_PERMANENTLY: 'The document has permanently moved <a href="%(location)s">here</a>.',
    FOUND: 'The document has temporarily moved <a href="%(location)s">here</a>.',
    SEE_OTHER: 'The results are available <a href="%(location)s">here</a>.',
    # no NOT_MODIFIED
    USE_PROXY: "Access to this resource must be through the proxy %(location)s.",
    # 306 unused
    TEMPORARY_REDIRECT: 'The document has temporarily moved <a href="%(location)s">here</a>.',

    # 400
    BAD_REQUEST: "Your browser sent an invalid request.",
    UNAUTHORIZED: "You are not authorized to view the resource at %(uri)s. Perhaps you entered a wrong password, or perhaps your browser doesn't support authentication.",
    PAYMENT_REQUIRED: "Payment Required (useful result code, this...).",
    FORBIDDEN: "You don't have permission to access %(uri)s.",
    NOT_FOUND: "The resource %(uri)s cannot be found.",
    NOT_ALLOWED: "The requested method %(method)s is not supported by %(uri)s.",
    NOT_ACCEPTABLE: "No representation of %(uri)s that is acceptable to your client could be found.",
    PROXY_AUTH_REQUIRED: "You are not authorized to view the resource at %(uri)s. Perhaps you entered a wrong password, or perhaps your browser doesn't support authentication.",
    REQUEST_TIMEOUT: "Server timed out waiting for your client to finish sending the HTTP request.",
    CONFLICT: "Conflict (?)",
    GONE: "The resource %(uri)s has been permanently removed.",
    LENGTH_REQUIRED: "The resource %(uri)s requires a Content-Length header.",
    PRECONDITION_FAILED: "A precondition evaluated to false.",
    # FIX: was "is too longer than" -- garbled grammar in a user-facing message.
    REQUEST_ENTITY_TOO_LARGE: "The provided request entity data is longer than the maximum for the method %(method)s at %(uri)s.",
    REQUEST_URI_TOO_LONG: "The request URL is longer than the maximum on this server.",
    UNSUPPORTED_MEDIA_TYPE: "The provided request data has a format not understood by the resource at %(uri)s.",
    REQUESTED_RANGE_NOT_SATISFIABLE: "None of the ranges given in the Range request header are satisfiable by the resource %(uri)s.",
    # FIX: was "does support" -- the intended negation was missing, inverting
    # the meaning of the 417 explanation.
    EXPECTATION_FAILED: "The server does not support one of the expectations given in the Expect header.",

    # 500
    INTERNAL_SERVER_ERROR: "An internal error occurred trying to process your request. Sorry.",
    NOT_IMPLEMENTED: "Some functionality requested is not implemented on this server.",
    BAD_GATEWAY: "An upstream server returned an invalid response.",
    # FIX: typo "becaues" -> "because".
    SERVICE_UNAVAILABLE: "This server cannot service your request because it is overloaded.",
    GATEWAY_TIMEOUT: "An upstream server is not responding.",
    HTTP_VERSION_NOT_SUPPORTED: "HTTP Version not supported.",
    INSUFFICIENT_STORAGE_SPACE: "There is insufficient storage space available to perform that request.",
    # FIX: was "the a mandatory extension" -- doubled article.
    NOT_EXTENDED: "This server does not support a mandatory extension requested."
    }

# Is there a good place to keep this function?
def _escape(original): if original is None: return None return original.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace("\"", "&quot;") def defaultErrorHandler(request, response): if response.stream is not None: # Already got an error message return response if response.code < 300: # We only do error messages return response message = ERROR_MESSAGES.get(response.code, None) if message is None: # No message specified for that code return response message = message % { 'uri':_escape(request.uri), 'location':_escape(response.headers.getHeader('location')), 'method':_escape(request.method) } title = RESPONSES.get(response.code, "") body = ("<html><head><title>%d %s</title></head>" "<body><h1>%s</h1>%s</body></html>") % ( response.code, title, title, message) response.headers.setHeader("content-type", http_headers.MimeType('text', 'html')) response.stream = stream.MemoryStream(body) return response defaultErrorHandler.handleErrors = True __all__ = ['defaultErrorHandler',]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/error.py
error.py
"""Byte-stream abstractions used throughout twisted.web2.

Defines the IStream/IByteStream interfaces plus implementations backed by
files, in-memory buffers, concatenations of sub-streams, producers,
child processes and generators, along with helpers for reading, copying
and splitting streams.
"""
from __future__ import generators

import copy, os, types, sys
from zope.interface import Interface, Attribute, implements
from twisted.internet.defer import Deferred
from twisted.internet import interfaces as ti_interfaces, defer, reactor, protocol, error as ti_error
from twisted.python import components, log
from twisted.python.failure import Failure

# Python 2.4.2 (only) has a broken mmap that leaks a fd every time you call it.
if sys.version_info[0:3] != (2,4,2):
    try:
        import mmap
    except ImportError:
        mmap = None
else:
    mmap = None

##############################
####      Interfaces      ####
##############################

class IStream(Interface):
    """A stream of arbitrary data."""

    def read():
        """Read some data.

        Returns some object representing the data.
        If there is no more data available, returns None.
        Can also return a Deferred resulting in one of the above.

        Errors may be indicated by exception or by a Deferred of a Failure.
        """

    def close():
        """Prematurely close. Should also cause further reads to
        return None."""

class IByteStream(IStream):
    """A stream which is of bytes."""

    length = Attribute("""How much data is in this stream. Can be None if unknown.""")

    def read():
        """Read some data.

        Returns an object conforming to the buffer interface, or
        if there is no more data available, returns None.
        Can also return a Deferred resulting in one of the above.

        Errors may be indicated by exception or by a Deferred of a Failure.
        """

    def split(point):
        """Split this stream into two, at byte position 'point'.

        Returns a tuple of (before, after). After calling split, no other
        methods should be called on this stream. Doing so will have undefined
        behavior.

        If you cannot implement split easily, you may implement it as::

            return fallbackSplit(self, point)
        """

    def close():
        """Prematurely close this stream. Should also cause further reads to
        return None. Additionally, .length should be set to 0.
        """

class ISendfileableStream(Interface):
    def read(sendfile=False):
        """
        Read some data.
        If sendfile == False, returns an object conforming to the buffer
        interface, or else a Deferred.

        If sendfile == True, returns either the above, or a SendfileBuffer.
        """

class SimpleStream(object):
    """Superclass of simple streams with a single buffer and a offset and length
    into that buffer."""
    implements(IByteStream)

    length = None
    start = None

    def read(self):
        # Subclasses override; the base produces no data.
        return None

    def close(self):
        self.length = 0

    def split(self, point):
        # Shallow-copy self for the second half and adjust both windows.
        if self.length is not None:
            if point > self.length:
                raise ValueError("split point (%d) > length (%d)" % (point, self.length))
        b = copy.copy(self)
        self.length = point
        if b.length is not None:
            b.length -= point
        b.start += point
        return (self, b)

##############################
####      FileStream      ####
##############################

# maximum mmap size
MMAP_LIMIT = 4*1024*1024
# minimum mmap size
MMAP_THRESHOLD = 8*1024

# maximum sendfile length
SENDFILE_LIMIT = 16777216
# minimum sendfile size
SENDFILE_THRESHOLD = 256

def mmapwrapper(*args, **kwargs):
    """
    Python's mmap call sucks and ommitted the "offset" argument for no
    discernable reason. Replace this with a mmap module that has offset.
    """
    # Only offset=None/0 is actually supported; any other offset raises.
    offset = kwargs.get('offset', None)
    if offset in [None, 0]:
        if 'offset' in kwargs:
            del kwargs['offset']
    else:
        raise mmap.error("mmap: Python sucks and does not support offset.")
    return mmap.mmap(*args, **kwargs)

class FileStream(SimpleStream):
    implements(ISendfileableStream)
    """A stream that reads data from a file. File must be a normal
    file that supports seek, (e.g. not a pipe or device or socket)."""
    # 65K, minus some slack
    CHUNK_SIZE = 2 ** 2 ** 2 ** 2 - 32

    f = None
    def __init__(self, f, start=0, length=None, useMMap=bool(mmap)):
        """
        Create the stream from file f. If you specify start and length,
        use only that portion of the file.
        """
        self.f = f
        self.start = start
        if length is None:
            self.length = os.fstat(f.fileno()).st_size
        else:
            self.length = length
        self.useMMap = useMMap

    def read(self, sendfile=False):
        if self.f is None:
            return None

        length = self.length
        if length == 0:
            self.f = None
            return None

        if sendfile and length > SENDFILE_THRESHOLD:
            # XXX: Yay using non-existent sendfile support!
            # FIXME: if we return a SendfileBuffer, and then sendfile
            #        fails, then what? Or, what if file is too short?
            # NOTE(review): SendfileBuffer is not defined anywhere in this
            # module; taking this branch would raise NameError -- confirm.
            readSize = min(length, SENDFILE_LIMIT)
            res = SendfileBuffer(self.f, self.start, readSize)
            self.length -= readSize
            self.start += readSize
            return res

        if self.useMMap and length > MMAP_THRESHOLD:
            # Prefer mmap for large reads; silently fall back on failure.
            readSize = min(length, MMAP_LIMIT)
            try:
                res = mmapwrapper(self.f.fileno(), readSize,
                                  access=mmap.ACCESS_READ, offset=self.start)
                #madvise(res, MADV_SEQUENTIAL)
                self.length -= readSize
                self.start += readSize
                return res
            except mmap.error:
                pass

        # Fall back to standard read.
        readSize = min(length, self.CHUNK_SIZE)

        self.f.seek(self.start)
        b = self.f.read(readSize)
        bytesRead = len(b)
        if not bytesRead:
            raise RuntimeError("Ran out of data reading file %r, expected %d more bytes" % (self.f, length))
        else:
            self.length -= bytesRead
            self.start += bytesRead
            return b

    def close(self):
        self.f = None
        SimpleStream.close(self)

components.registerAdapter(FileStream, file, IByteStream)

##############################
####     MemoryStream     ####
##############################

class MemoryStream(SimpleStream):
    """A stream that reads data from a buffer object."""
    def __init__(self, mem, start=0, length=None):
        """
        Create the stream from buffer object mem. If you specify start and length,
        use only that portion of the buffer.
        """
        self.mem = mem
        self.start = start
        if length is None:
            self.length = len(mem) - start
        else:
            if len(mem) < length:
                raise ValueError("len(mem) < start + length")
            self.length = length

    def read(self):
        # Single-shot: the whole remaining window is returned at once.
        if self.mem is None:
            return None
        if self.length == 0:
            result = None
        else:
            result = buffer(self.mem, self.start, self.length)
        self.mem = None
        self.length = 0
        return result

    def close(self):
        self.mem = None
        SimpleStream.close(self)

components.registerAdapter(MemoryStream, str, IByteStream)
components.registerAdapter(MemoryStream, types.BufferType, IByteStream)

##############################
####    CompoundStream    ####
##############################

class CompoundStream(object):
    """A stream which is composed of many other streams.

    Call addStream to add substreams.
    """

    implements(IByteStream, ISendfileableStream)
    deferred = None
    length = 0

    def __init__(self, buckets=()):
        self.buckets = [IByteStream(s) for s in buckets]

    def addStream(self, bucket):
        """Add a stream to the output"""
        bucket = IByteStream(bucket)
        self.buckets.append(bucket)
        if self.length is not None:
            # One unknown-length bucket makes the whole length unknown.
            if bucket.length is None:
                self.length = None
            else:
                self.length += bucket.length

    def read(self, sendfile=False):
        if self.deferred is not None:
            raise RuntimeError("Call to read while read is already outstanding")

        if not self.buckets:
            return None

        # Only pass sendfile through when the head bucket supports it.
        if sendfile and ISendfileableStream.providedBy(self.buckets[0]):
            try:
                result = self.buckets[0].read(sendfile)
            except:
                return self._gotFailure(Failure())
        else:
            try:
                result = self.buckets[0].read()
            except:
                return self._gotFailure(Failure())

        if isinstance(result, Deferred):
            self.deferred = result
            result.addCallbacks(self._gotRead, self._gotFailure, (sendfile,))
            return result

        return self._gotRead(result, sendfile)

    def _gotFailure(self, f):
        self.deferred = None
        del self.buckets[0]
        self.close()
        return f

    def _gotRead(self, result, sendfile):
        self.deferred = None
        if result is None:
            del self.buckets[0]
            # Next bucket
            return self.read(sendfile)

        if self.length is not None:
            self.length -= len(result)
        return result

    def split(self, point):
        num = 0
        origPoint = point
        for bucket in self.buckets:
            num+=1

            if point == 0:
                # Split lands exactly on a bucket boundary.
                b = CompoundStream()
                b.buckets = self.buckets[num:]
                del self.buckets[num:]
                return self,b

            if bucket.length is None:
                # Indeterminate length bucket.
                # give up and use fallback splitter.
                return fallbackSplit(self, origPoint)

            if point < bucket.length:
                # Split lands inside this bucket; split it and share it
                # between the two halves.
                before,after = bucket.split(point)
                b = CompoundStream()
                b.buckets = self.buckets[num:]
                b.buckets[0] = after

                del self.buckets[num+1:]
                self.buckets[num] = before

                return self,b

            point -= bucket.length

    def close(self):
        for bucket in self.buckets:
            bucket.close()
        self.buckets = []
        self.length = 0


##############################
####      readStream      ####
##############################

class _StreamReader(object):
    """Process a stream's data using callbacks for data and stream finish."""

    def __init__(self, stream, gotDataCallback):
        self.stream = stream
        self.gotDataCallback = gotDataCallback
        self.result = Deferred()

    def run(self):
        # self.result may be del'd in _read()
        result = self.result
        self._read()
        return result

    def _read(self):
        try:
            result = self.stream.read()
        except:
            self._gotError(Failure())
            return
        if isinstance(result, Deferred):
            result.addCallbacks(self._gotData, self._gotError)
        else:
            self._gotData(result)

    def _gotError(self, failure):
        result = self.result
        # Break reference cycles before firing.
        del self.result, self.gotDataCallback, self.stream
        result.errback(failure)

    def _gotData(self, data):
        if data is None:
            result = self.result
            del self.result, self.gotDataCallback, self.stream
            result.callback(None)
            return
        try:
            self.gotDataCallback(data)
        except:
            self._gotError(Failure())
            return
        # Reschedule via the reactor instead of recursing, so long streams
        # don't blow the stack and don't starve other events.
        reactor.callLater(0, self._read)

def readStream(stream, gotDataCallback):
    """Pass a stream's data to a callback.

    Returns Deferred which will be triggered on finish.  Errors in
    reading the stream or in processing it will be returned via this
    Deferred.
    """
    return _StreamReader(stream, gotDataCallback).run()

def readAndDiscard(stream):
    """Read all the data from the given stream, and throw it out.

    Returns Deferred which will be triggered on finish.
    """
    return readStream(stream, lambda _: None)

def readIntoFile(stream, outFile):
    """Read a stream and write it into a file.

    Returns Deferred which will be triggered on finish.
    """
    def done(_):
        # Close on success AND failure, then pass the result through.
        outFile.close()
        return _
    return readStream(stream, outFile.write).addBoth(done)

def connectStream(inputStream, factory):
    """Connect a protocol constructed from a factory to stream.

    Returns an output stream from the protocol.

    The protocol's transport will have a finish() method it should
    call when done writing.
    """
    # XXX deal better with addresses
    p = factory.buildProtocol(None)
    out = ProducerStream()
    out.disconnecting = False # XXX for LineReceiver suckage
    p.makeConnection(out)
    readStream(inputStream, lambda _: p.dataReceived(_)).addCallbacks(
        lambda _: p.connectionLost(ti_error.ConnectionDone()), lambda _: p.connectionLost(_))
    return out

##############################
####    fallbackSplit     ####
##############################

def fallbackSplit(stream, point):
    # Generic split for streams that cannot split natively: the 'before'
    # half truncates at 'point' and hands the remainder to 'after'.
    after = PostTruncaterStream(stream, point)
    before = TruncaterStream(stream, point, after)
    return (before, after)

class TruncaterStream(object):
    # First half of a fallbackSplit: yields at most 'point' bytes, then
    # forwards any overflow to its PostTruncaterStream partner.
    def __init__(self, stream, point, postTruncater):
        self.stream = stream
        self.length = point
        self.postTruncater = postTruncater

    def read(self):
        if self.length == 0:
            if self.postTruncater is not None:
                postTruncater = self.postTruncater
                self.postTruncater = None
                postTruncater.sendInitialSegment(self.stream.read())
            self.stream = None
            return None

        result = self.stream.read()
        if isinstance(result, Deferred):
            return result.addCallback(self._gotRead)
        else:
            return self._gotRead(result)

    def _gotRead(self, data):
        if data is None:
            raise ValueError("Ran out of data for a split of a indeterminate length source")
        if self.length >= len(data):
            self.length -= len(data)
            return data
        else:
            # Chunk straddles the split point: keep 'before', hand 'after'
            # to the second half.
            before = buffer(data, 0, self.length)
            after = buffer(data, self.length)
            self.length = 0
            if self.postTruncater is not None:
                postTruncater = self.postTruncater
                self.postTruncater = None
                postTruncater.sendInitialSegment(after)
                self.stream = None
            return before

    def split(self, point):
        if point > self.length:
            raise ValueError("split point (%d) > length (%d)" % (point, self.length))

        post = PostTruncaterStream(self.stream, point)
        trunc = TruncaterStream(post, self.length - point, self.postTruncater)
        self.length = point
        self.postTruncater = post
        return self, trunc

    def close(self):
        if self.postTruncater is not None:
            self.postTruncater.notifyClosed(self)
        else:
            # Nothing cares about the rest of the stream
            self.stream.close()
            self.stream = None
            self.length = 0

class PostTruncaterStream(object):
    # Second half of a fallbackSplit: waits (via a Deferred) for the
    # TruncaterStream to hand over the first chunk past the split point.
    deferred = None
    sentInitialSegment = False
    truncaterClosed = None
    closed = False

    length = None
    def __init__(self, stream, point):
        self.stream = stream
        self.deferred = Deferred()
        if stream.length is not None:
            self.length = stream.length - point

    def read(self):
        if not self.sentInitialSegment:
            self.sentInitialSegment = True
            if self.truncaterClosed is not None:
                # First half was abandoned; drain it so we reach our data.
                readAndDiscard(self.truncaterClosed)
                self.truncaterClosed = None
            return self.deferred

        return self.stream.read()

    def split(self, point):
        return fallbackSplit(self, point)

    def close(self):
        self.closed = True
        if self.truncaterClosed is not None:
            # have first half close itself
            self.truncaterClosed.postTruncater = None
            self.truncaterClosed.close()
        elif self.sentInitialSegment:
            # first half already finished up
            self.stream.close()

        self.deferred = None

    # Callbacks from TruncaterStream
    def sendInitialSegment(self, data):
        if self.closed:
            # First half finished, we don't want data.
            self.stream.close()
            self.stream = None
        if self.deferred is not None:
            if isinstance(data, Deferred):
                data.chainDeferred(self.deferred)
            else:
                self.deferred.callback(data)

    def notifyClosed(self, truncater):
        if self.closed:
            # we are closed, have first half really close
            truncater.postTruncater = None
            truncater.close()
        elif self.sentInitialSegment:
            # We are trying to read, read up first half
            readAndDiscard(truncater)
        else:
            # Idle, store closed info.
            self.truncaterClosed = truncater

########################################
####  ProducerStream/StreamProducer ####
########################################

class ProducerStream(object):
    """Turns producers into a IByteStream.

    Thus, implements IConsumer and IByteStream."""

    implements(IByteStream, ti_interfaces.IConsumer)
    length = None
    closed = False
    failed = False
    producer = None
    producerPaused = False
    deferred = None

    # Max buffered chunks before pausing a streaming producer.
    bufferSize = 5

    def __init__(self, length=None):
        self.buffer = []
        self.length = length

    # IByteStream implementation
    def read(self):
        if self.buffer:
            return self.buffer.pop(0)
        elif self.closed:
            self.length = 0
            if self.failed:
                f = self.failure
                del self.failure
                return defer.fail(f)
            return None
        else:
            # No data yet: hand out a Deferred and wake the producer.
            deferred = self.deferred = Deferred()
            if self.producer is not None and (not self.streamingProducer
                                              or self.producerPaused):
                self.producerPaused = False
                self.producer.resumeProducing()
            return deferred

    def split(self, point):
        return fallbackSplit(self, point)

    def close(self):
        """Called by reader of stream when it is done reading."""
        self.buffer=[]
        self.closed = True
        if self.producer is not None:
            self.producer.stopProducing()
            self.producer = None
        self.deferred = None

    # IConsumer implementation
    def write(self, data):
        if self.closed:
            return

        if self.deferred:
            # A reader is waiting; deliver directly.
            deferred = self.deferred
            self.deferred = None
            deferred.callback(data)
        else:
            self.buffer.append(data)
            if(self.producer is not None and self.streamingProducer
               and len(self.buffer) > self.bufferSize):
                self.producer.pauseProducing()
                self.producerPaused = True

    def finish(self, failure=None):
        """Called by producer when it is done.

        If the optional failure argument is passed a Failure instance,
        the stream will return it as errback on next Deferred.
        """
        self.closed = True
        if not self.buffer:
            self.length = 0
        if self.deferred is not None:
            deferred = self.deferred
            self.deferred = None
            if failure is not None:
                self.failed = True
                deferred.errback(failure)
            else:
                deferred.callback(None)
        else:
            if failure is not None:
                self.failed = True
                self.failure = failure

    def registerProducer(self, producer, streaming):
        if self.producer is not None:
            raise RuntimeError("Cannot register producer %s, because producer %s was never unregistered." % (producer, self.producer))

        if self.closed:
            producer.stopProducing()
        else:
            self.producer = producer
            self.streamingProducer = streaming
            if not streaming:
                producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

class StreamProducer(object):
    """A push producer which gets its data by reading a stream."""
    implements(ti_interfaces.IPushProducer)

    deferred = None
    finishedCallback = None
    paused = False
    consumer = None

    def __init__(self, stream, enforceStr=True):
        self.stream = stream
        self.enforceStr = enforceStr

    def beginProducing(self, consumer):
        if self.stream is None:
            return defer.succeed(None)

        self.consumer = consumer
        finishedCallback = self.finishedCallback = Deferred()
        self.consumer.registerProducer(self, True)
        self.resumeProducing()
        return finishedCallback

    def resumeProducing(self):
        self.paused = False
        if self.deferred is not None:
            # A read is already pending; its callback will continue.
            return

        try:
            data = self.stream.read()
        except:
            self.stopProducing(Failure())
            return

        if isinstance(data, Deferred):
            self.deferred = data.addCallbacks(self._doWrite, self.stopProducing)
        else:
            self._doWrite(data)

    def _doWrite(self, data):
        if self.consumer is None:
            return
        if data is None:
            # The end.
            if self.consumer is not None:
                self.consumer.unregisterProducer()
            if self.finishedCallback is not None:
                self.finishedCallback.callback(None)
            self.finishedCallback = self.deferred = self.consumer = self.stream = None
            return

        self.deferred = None
        if self.enforceStr:
            # XXX: sucks that we have to do this. make transport.write(buffer) work!
            data = str(buffer(data))
        self.consumer.write(data)

        if not self.paused:
            self.resumeProducing()

    def pauseProducing(self):
        self.paused = True

    # NOTE(review): the default Failure-like instance is evaluated once at
    # definition time and shared across all calls -- confirm intended.
    def stopProducing(self, failure=ti_error.ConnectionLost()):
        if self.consumer is not None:
            self.consumer.unregisterProducer()
        if self.finishedCallback is not None:
            if failure is not None:
                self.finishedCallback.errback(failure)
            else:
                self.finishedCallback.callback(None)
            self.finishedCallback = None
        self.paused = True
        if self.stream is not None:
            self.stream.close()

        self.finishedCallback = self.deferred = self.consumer = self.stream = None

##############################
####    ProcessStreamer   ####
##############################

class _ProcessStreamerProtocol(protocol.ProcessProtocol):
    # Bridges a child process's stdio to the three streams owned by
    # ProcessStreamer.

    def __init__(self, inputStream, outStream, errStream):
        self.inputStream = inputStream
        self.outStream = outStream
        self.errStream = errStream
        self.resultDeferred = defer.Deferred()

    def connectionMade(self):
        p = StreamProducer(self.inputStream)
        # if the process stopped reading from the input stream,
        # this is not an error condition, so it oughtn't result
        # in a ConnectionLost() from the input stream:
        p.stopProducing = lambda err=None: StreamProducer.stopProducing(p, err)

        d = p.beginProducing(self.transport)
        d.addCallbacks(lambda _: self.transport.closeStdin(),
                       self._inputError)

    def _inputError(self, f):
        log.msg("Error in input stream for %r" % self.transport)
        log.err(f)
        self.transport.closeStdin()

    def outReceived(self, data):
        self.outStream.write(data)

    def errReceived(self, data):
        self.errStream.write(data)

    def outConnectionLost(self):
        self.outStream.finish()

    def errConnectionLost(self):
        self.errStream.finish()

    def processEnded(self, reason):
        # Always errback; ProcessStreamer.run traps ProcessDone so a clean
        # exit becomes a callback(None) for callers.
        self.resultDeferred.errback(reason)
        del self.resultDeferred


class ProcessStreamer(object):
    """Runs a process hooked up to streams.

    Requires an input stream, has attributes 'outStream' and 'errStream'
    for stdout and stderr.

    outStream and errStream are public attributes providing streams
    for stdout and stderr of the process.
    """

    # NOTE(review): env={} is a shared mutable default argument; it is only
    # read here, but confirm no caller mutates it.
    def __init__(self, inputStream, program, args, env={}):
        self.outStream = ProducerStream()
        self.errStream = ProducerStream()
        self._protocol = _ProcessStreamerProtocol(IByteStream(inputStream), self.outStream, self.errStream)
        self._program = program
        self._args = args
        self._env = env

    def run(self):
        """Run the process.

        Returns Deferred which will eventually have errback for non-clean (exit code > 0)
        exit, with ProcessTerminated, or callback with None on exit code 0.
        """
        # XXX what happens if spawn fails?
        reactor.spawnProcess(self._protocol, self._program, self._args, env=self._env)
        # 'del self._env' makes run() single-shot: a second call would
        # raise AttributeError.
        del self._env
        return self._protocol.resultDeferred.addErrback(lambda _: _.trap(ti_error.ProcessDone))

##############################
####   generatorToStream  ####
##############################

class _StreamIterator(object):
    # Iterator facade handed to the user's generator; _IteratorStream
    # refreshes .value/.done as the underlying stream is read.
    done=False

    def __iter__(self):
        return self
    def next(self):
        if self.done:
            raise StopIteration
        return self.value
    # Sentinel the generator yields to request more input data.
    wait=object()

class _IteratorStream(object):
    length = None

    def __init__(self, fun, stream, args, kwargs):
        self._stream=stream
        self._streamIterator = _StreamIterator()
        self._gen = fun(self._streamIterator, *args, **kwargs)

    def read(self):
        try:
            val = self._gen.next()
        except StopIteration:
            return None
        else:
            if val is _StreamIterator.wait:
                # Generator wants more input; feed the next chunk in.
                newdata = self._stream.read()
                if isinstance(newdata, defer.Deferred):
                    return newdata.addCallback(self._gotRead)
                else:
                    return self._gotRead(newdata)
            return val

    def _gotRead(self, data):
        if data is None:
            self._streamIterator.done=True
        else:
            self._streamIterator.value=data
        return self.read()

    def close(self):
        self._stream.close()
        del self._gen, self._stream, self._streamIterator

    # NOTE(review): fallbackSplit requires (stream, point); this signature
    # omits 'point' and the call would raise TypeError if ever invoked --
    # looks broken, confirm.
    def split(self):
        return fallbackSplit(self)

def generatorToStream(fun):
    """Converts a generator function into a stream.

    The function should take an iterator as its first argument,
    which will be converted *from* a stream by this wrapper, and
    yield items which are turned *into* the results from the
    stream's 'read' call.

    One important point: before every call to input.next(), you
    *MUST* do a "yield input.wait" first. Yielding this magic value
    takes care of ensuring that the input is not a deferred before
    you see it.

    >>> from twisted.web2 import stream
    >>> from string import maketrans
    >>> alphabet = 'abcdefghijklmnopqrstuvwxyz'
    >>>
    >>> def encrypt(input, key):
    ...     code = alphabet[key:] + alphabet[:key]
    ...     translator = maketrans(alphabet+alphabet.upper(), code+code.upper())
    ...     yield input.wait
    ...     for s in input:
    ...         yield str(s).translate(translator)
    ...         yield input.wait
    ...
    >>> encrypt = stream.generatorToStream(encrypt)
    >>>
    >>> plaintextStream = stream.MemoryStream('SampleSampleSample')
    >>> encryptedStream = encrypt(plaintextStream, 13)
    >>> encryptedStream.read()
    'FnzcyrFnzcyrFnzcyr'
    >>>
    >>> plaintextStream = stream.MemoryStream('SampleSampleSample')
    >>> encryptedStream = encrypt(plaintextStream, 13)
    >>> evenMoreEncryptedStream = encrypt(encryptedStream, 13)
    >>> evenMoreEncryptedStream.read()
    'SampleSampleSample'

    """
    def generatorToStream_inner(stream, *args, **kwargs):
        return _IteratorStream(fun, stream, args, kwargs)
    return generatorToStream_inner

##############################
####    BufferedStream    ####
##############################

class BufferedStream(object):
    """A stream which buffers its data to provide operations like
    readline and readExactly."""

    data = ""
    def __init__(self, stream):
        self.stream = stream

    def _readUntil(self, f):
        """Internal helper function which repeatedly calls f each time
        after more data has been received, until it returns non-None."""
        while True:
            r = f()
            if r is not None:
                yield r; return

            newdata = self.stream.read()
            if isinstance(newdata,
defer.Deferred): newdata = defer.waitForDeferred(newdata) yield newdata; newdata = newdata.getResult() if newdata is None: # End Of File newdata = self.data self.data = '' yield newdata; return self.data += str(newdata) _readUntil = defer.deferredGenerator(_readUntil) def readExactly(self, size=None): """Read exactly size bytes of data, or, if size is None, read the entire stream into a string.""" if size is not None and size < 0: raise ValueError("readExactly: size cannot be negative: %s", size) def gotdata(): data = self.data if size is not None and len(data) >= size: pre,post = data[:size], data[size:] self.data = post return pre return self._readUntil(gotdata) def readline(self, delimiter='\r\n', size=None): """ Read a line of data from the string, bounded by delimiter. The delimiter is included in the return value. If size is specified, read and return at most that many bytes, even if the delimiter has not yet been reached. If the size limit falls within a delimiter, the rest of the delimiter, and the next line will be returned together. 
""" if size is not None and size < 0: raise ValueError("readline: size cannot be negative: %s" % (size, )) def gotdata(): data = self.data if size is not None: splitpoint = data.find(delimiter, 0, size) if splitpoint == -1: if len(data) >= size: splitpoint = size else: splitpoint += len(delimiter) else: splitpoint = data.find(delimiter) if splitpoint != -1: splitpoint += len(delimiter) if splitpoint != -1: pre = data[:splitpoint] self.data = data[splitpoint:] return pre return self._readUntil(gotdata) def pushback(self, pushed): """Push data back into the buffer.""" self.data = pushed + self.data def read(self): data = self.data if data: self.data = "" return data return self.stream.read() def _len(self): l = self.stream.length if l is None: return None return l + len(self.data) length = property(_len) def split(self, offset): off = offset - len(self.data) pre, post = self.stream.split(max(0, off)) pre = BufferedStream(pre) post = BufferedStream(post) if off < 0: pre.data = self.data[:-off] post.data = self.data[-off:] else: pre.data = self.data return pre, post def substream(stream, start, end): if start > end: raise ValueError("start position must be less than end position %r" % ((start, end),)) stream = stream.split(start)[1] return stream.split(end - start)[0] __all__ = ['IStream', 'IByteStream', 'FileStream', 'MemoryStream', 'CompoundStream', 'readAndDiscard', 'fallbackSplit', 'ProducerStream', 'StreamProducer', 'BufferedStream', 'readStream', 'ProcessStreamer', 'readIntoFile', 'generatorToStream']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/stream.py
stream.py
# System Imports import os import sys import urllib # Twisted Imports from twisted.internet import defer, protocol, reactor from twisted.python import log, filepath # Sibling Imports from twisted.web2 import http from twisted.web2 import resource from twisted.web2 import responsecode from twisted.web2 import server from twisted.web2 import static from twisted.web2 import stream headerNameTranslation = ''.join([c.isalnum() and c.upper() or '_' for c in map(chr, range(256))]) def createCGIEnvironment(request): # See http://hoohoo.ncsa.uiuc.edu/cgi/env.html for CGI interface spec # http://cgi-spec.golux.com/draft-coar-cgi-v11-03-clean.html for a better one remotehost = request.remoteAddr python_path = os.pathsep.join(sys.path) env = dict(os.environ) # MUST provide: if request.stream.length: env["CONTENT_LENGTH"] = str(request.stream.length) ctype = request.headers.getRawHeaders('content-type') if ctype: env["CONTENT_TYPE"] = ctype[0] env["GATEWAY_INTERFACE"] = "CGI/1.1" if request.postpath: # Should we raise an exception if this contains "/" chars? env["PATH_INFO"] = '/' + '/'.join(request.postpath) # MUST always be present, even if no query env["QUERY_STRING"] = request.querystring env["REMOTE_ADDR"] = remotehost.host env["REQUEST_METHOD"] = request.method # Should we raise an exception if this contains "/" chars? env["SCRIPT_NAME"] = '/' + '/'.join(request.prepath) env["SERVER_NAME"] = request.host env["SERVER_PORT"] = str(request.port) env["SERVER_PROTOCOL"] = "HTTP/%i.%i" % request.clientproto env["SERVER_SOFTWARE"] = server.VERSION # SHOULD provide # env["AUTH_TYPE"] # FIXME: add this # env["REMOTE_HOST"] # possibly dns resolve? 
# MAY provide # env["PATH_TRANSLATED"] # Completely worthless # env["REMOTE_IDENT"] # Completely worthless # env["REMOTE_USER"] # FIXME: add this # Unofficial, but useful and expected by applications nonetheless env["REMOTE_PORT"] = str(remotehost.port) env["REQUEST_SCHEME"] = request.scheme env["REQUEST_URI"] = request.uri env["HTTPS"] = ("off", "on")[request.scheme=="https"] env["SERVER_PORT_SECURE"] = ("0", "1")[request.scheme=="https"] # Propagate HTTP headers for title, header in request.headers.getAllRawHeaders(): envname = title.translate(headerNameTranslation) # Don't send headers we already sent otherwise, and don't # send authorization headers, because that's a security # issue. if title not in ('content-type', 'content-length', 'authorization', 'proxy-authorization'): envname = "HTTP_" + envname env[envname] = ','.join(header) for k,v in env.items(): if type(k) is not str: print "is not string:",k if type(v) is not str: print k, "is not string:",v return env def runCGI(request, filename, filterscript=None): # Make sure that we don't have an unknown content-length if request.stream.length is None: return http.Response(responsecode.LENGTH_REQUIRED) env = createCGIEnvironment(request) env['SCRIPT_FILENAME'] = filename if '=' in request.querystring: qargs = [] else: qargs = [urllib.unquote(x) for x in request.querystring.split('+')] if filterscript is None: filterscript = filename qargs = [filename] + qargs else: qargs = [filterscript, filename] + qargs d = defer.Deferred() proc = CGIProcessProtocol(request, d) reactor.spawnProcess(proc, filterscript, qargs, env, os.path.dirname(filename)) return d class CGIScript(resource.LeafResource): """I represent a CGI script. My implementation is complex due to the fact that it requires asynchronous IPC with an external process with an unpleasant protocol. """ def __init__(self, filename): """Initialize, with the name of a CGI script file. 
""" self.filename = filename resource.LeafResource.__init__(self) def render(self, request): """Do various things to conform to the CGI specification. I will set up the usual slew of environment variables, then spin off a process. """ return runCGI(request, self.filename) def http_POST(self, request): return self.render(request) class FilteredScript(CGIScript): """ I am a special version of a CGI script, that uses a specific executable (or, the first existing executable in a list of executables). This is useful for interfacing with other scripting languages that adhere to the CGI standard (cf. PHPScript). My 'filters' attribute specifies what executables to try to run, and my 'filename' init parameter describes which script to pass to the first argument of that script. """ filters = '/usr/bin/cat', def __init__(self, filename, filters=None): if filters is not None: self.filters = filters CGIScript.__init__(self, filename) def render(self, request): for filterscript in self.filters: if os.path.exists(filterscript): return runCGI(request, self.filename, filterscript) else: log.err(self.__class__.__name__ + ' could not find any of: ' + ', '.join(self.filters)) return http.Response(responsecode.INTERNAL_SERVER_ERROR) class PHP3Script(FilteredScript): """I am a FilteredScript that uses the default PHP3 command on most systems. """ filters = '/usr/bin/php3', class PHPScript(FilteredScript): """I am a FilteredScript that uses the PHP command on most systems. Sometimes, php wants the path to itself as argv[0]. This is that time. 
""" filters = '/usr/bin/php4-cgi', '/usr/bin/php4' class CGIProcessProtocol(protocol.ProcessProtocol): handling_headers = 1 headers_written = 0 headertext = '' errortext = '' def resumeProducing(self): self.transport.resumeProducing() def pauseProducing(self): self.transport.pauseProducing() def stopProducing(self): self.transport.loseConnection() def __init__(self, request, deferred): self.request = request self.deferred = deferred self.stream = stream.ProducerStream() self.response = http.Response(stream=self.stream) def connectionMade(self): # Send input data over to the CGI script. def _failedProducing(reason): # If you really care. #log.err(reason) pass def _finishedProducing(result): self.transport.closeChildFD(0) s = stream.StreamProducer(self.request.stream) producingDeferred = s.beginProducing(self.transport) producingDeferred.addCallback(_finishedProducing) producingDeferred.addErrback(_failedProducing) def errReceived(self, error): self.errortext = self.errortext + error def outReceived(self, output): """ Handle a chunk of input """ # First, make sure that the headers from the script are sorted # out (we'll want to do some parsing on these later.) if self.handling_headers: fullText = self.headertext + output header_endings = [] for delimiter in '\n\n','\r\n\r\n','\r\r', '\n\r\n': headerend = fullText.find(delimiter) if headerend != -1: header_endings.append((headerend, delimiter)) # Have we noticed the end of our headers in this chunk? if header_endings: header_endings.sort() headerend, delimiter = header_endings[0] # This is a final version of the header text. 
self.headertext = fullText[:headerend] linebreak = delimiter[:len(delimiter)/2] # Write all our headers to self.response for header in self.headertext.split(linebreak): self._addResponseHeader(header) output = fullText[headerend+len(delimiter):] self.handling_headers = 0 # Trigger our callback with a response self._sendResponse() # If we haven't hit the end of our headers yet, then # everything we've seen so far is _still_ headers if self.handling_headers: self.headertext = fullText # If we've stopped handling headers at this point, write # whatever output we've got. if not self.handling_headers: self.stream.write(output) def _addResponseHeader(self, header): """ Save a header until we're ready to write our Response. """ breakpoint = header.find(': ') if breakpoint == -1: log.msg('ignoring malformed CGI header: %s' % header) else: name = header.lower()[:breakpoint] text = header[breakpoint+2:] if name == 'status': try: # "123 <description>" sometimes happens. self.response.code = int(text.split(' ', 1)[0]) except: log.msg("malformed status header: %s" % header) else: self.response.headers.addRawHeader(name, text) def processEnded(self, reason): if reason.value.exitCode != 0: log.msg("CGI %s exited with exit code %s" % (self.request.uri, reason.value.exitCode)) if self.errortext: log.msg("Errors from CGI %s: %s" % (self.request.uri, self.errortext)) if self.handling_headers: log.msg("Premature end of headers in %s: %s" % (self.request.uri, self.headertext)) self.response = http.Response(responsecode.INTERNAL_SERVER_ERROR) self._sendResponse() self.stream.finish() def _sendResponse(self): """ Call our deferred (from CGIScript.render) with a response. 
""" # Fix up location stuff loc = self.response.headers.getHeader('location') if loc and self.response.code == responsecode.OK: if loc[0] == '/': # FIXME: Do internal redirect raise RuntimeError("Sorry, internal redirects not implemented yet.") else: # NOTE: if a script wants to output its own redirect body, # it must specify Status: 302 itself. self.response.code = 302 self.response.stream = None self.deferred.callback(self.response) class CGIDirectory(resource.Resource, filepath.FilePath): """A directory that serves only CGI scripts (to infinite depth) and does not support directory listings. @param pathname: A path to the directory that you wish to serve CGI scripts from, for example /var/www/cgi-bin/ @type pathname: str """ addSlash = True def __init__(self, pathname): resource.Resource.__init__(self) filepath.FilePath.__init__(self, pathname) def locateChild(self, request, segments): fnp = self.child(segments[0]) if not fnp.exists(): raise http.HTTPError(responsecode.NOT_FOUND) elif fnp.isdir(): return CGIDirectory(fnp.path), segments[1:] else: return CGIScript(fnp.path), segments[1:] return None, () def render(self, request): errormsg = 'CGI directories do not support directory listing' return http.Response(responsecode.FORBIDDEN) __all__ = ['createCGIEnvironment', 'CGIDirectory', 'CGIScript', 'FilteredScript', 'PHP3Script', 'PHPScript']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/twcgi.py
twcgi.py
# System Imports import os, time, stat import tempfile import md5 # Sibling Imports from twisted.web2 import http_headers, resource from twisted.web2 import http, iweb, stream, responsecode, server, dirlist # Twisted Imports from twisted.python import filepath from twisted.internet.defer import maybeDeferred from zope.interface import implements class MetaDataMixin(object): """ Mix-in class for L{iweb.IResource} which provides methods for accessing resource metadata specified by HTTP. """ def etag(self): """ @return: The current etag for the resource if available, None otherwise. """ return None def lastModified(self): """ @return: The last modified time of the resource if available, None otherwise. """ return None def creationDate(self): """ @return: The creation date of the resource if available, None otherwise. """ return None def contentLength(self): """ @return: The size in bytes of the resource if available, None otherwise. """ return None def contentType(self): """ @return: The MIME type of the resource if available, None otherwise. """ return None def contentEncoding(self): """ @return: The encoding of the resource if available, None otherwise. """ return None def displayName(self): """ @return: The display name of the resource if available, None otherwise. """ return None def exists(self): """ @return: True if the resource exists on the server, False otherwise. """ return True class StaticRenderMixin(resource.RenderMixin, MetaDataMixin): def checkPreconditions(self, request): # This code replaces the code in resource.RenderMixin if request.method not in ("GET", "HEAD"): http.checkPreconditions( request, entityExists = self.exists(), etag = self.etag(), lastModified = self.lastModified(), ) # Check per-method preconditions method = getattr(self, "preconditions_" + request.method, None) if method: return method(request) def renderHTTP(self, request): """ See L{resource.RenderMixIn.renderHTTP}. 
This implementation automatically sets some headers on the response based on data available from L{MetaDataMixin} methods. """ def setHeaders(response): response = iweb.IResponse(response) # Don't provide additional resource information to error responses if response.code < 400: # Content-* headers refer to the response content, not # (necessarily) to the resource content, so they depend on the # request method, and therefore can't be set here. for (header, value) in ( ("etag", self.etag()), ("last-modified", self.lastModified()), ): if value is not None: response.headers.setHeader(header, value) return response def onError(f): # If we get an HTTPError, run its response through setHeaders() as # well. f.trap(http.HTTPError) return setHeaders(f.value.response) d = maybeDeferred(super(StaticRenderMixin, self).renderHTTP, request) return d.addCallbacks(setHeaders, onError) class Data(resource.Resource): """ This is a static, in-memory resource. """ def __init__(self, data, type): self.data = data self.type = http_headers.MimeType.fromString(type) self.created_time = time.time() def etag(self): lastModified = self.lastModified() return http_headers.ETag("%X-%X" % (lastModified, hash(self.data)), weak=(time.time() - lastModified <= 1)) def lastModified(self): return self.creationDate() def creationDate(self): return self.created_time def contentLength(self): return len(self.data) def contentType(self): return self.type def render(self, req): return http.Response( responsecode.OK, http_headers.Headers({'content-type': self.contentType()}), stream=self.data) class File(StaticRenderMixin): """ File is a resource that represents a plain non-interpreted file (although it can look for an extension like .rpy or .cgi and hand the file to a processor for interpretation if you wish). Its constructor takes a file path. Alternatively, you can give a directory path to the constructor. 
In this case the resource will represent that directory, and its children will be files underneath that directory. This provides access to an entire filesystem tree with a single Resource. If you map the URL 'http://server/FILE' to a resource created as File('/tmp'), then http://server/FILE/ will return an HTML-formatted listing of the /tmp/ directory, and http://server/FILE/foo/bar.html will return the contents of /tmp/foo/bar.html . """ implements(iweb.IResource) def _getContentTypes(self): if not hasattr(File, "_sharedContentTypes"): File._sharedContentTypes = loadMimeTypes() return File._sharedContentTypes contentTypes = property(_getContentTypes) contentEncodings = { ".gz" : "gzip", ".bz2": "bzip2" } processors = {} indexNames = ["index", "index.html", "index.htm", "index.trp", "index.rpy"] type = None def __init__(self, path, defaultType="text/plain", ignoredExts=(), processors=None, indexNames=None): """Create a file with the given path. """ super(File, self).__init__() self.putChildren = {} self.fp = filepath.FilePath(path) # Remove the dots from the path to split self.defaultType = defaultType self.ignoredExts = list(ignoredExts) if processors is not None: self.processors = dict([ (key.lower(), value) for key, value in processors.items() ]) if indexNames is not None: self.indexNames = indexNames def exists(self): return self.fp.exists() def etag(self): if not self.fp.exists(): return None st = self.fp.statinfo # # Mark ETag as weak if it was modified more recently than we can # measure and report, as it could be modified again in that span # and we then wouldn't know to provide a new ETag. 
# weak = (time.time() - st.st_mtime <= 1) return http_headers.ETag( "%X-%X-%X" % (st.st_ino, st.st_size, st.st_mtime), weak=weak ) def lastModified(self): if self.fp.exists(): return self.fp.getmtime() else: return None def creationDate(self): if self.fp.exists(): return self.fp.getmtime() else: return None def contentLength(self): if self.fp.exists(): if self.fp.isfile(): return self.fp.getsize() else: # Computing this would require rendering the resource; let's # punt instead. return None else: return None def _initTypeAndEncoding(self): self._type, self._encoding = getTypeAndEncoding( self.fp.basename(), self.contentTypes, self.contentEncodings, self.defaultType ) # Handle cases not covered by getTypeAndEncoding() if self.fp.isdir(): self._type = "httpd/unix-directory" def contentType(self): if not hasattr(self, "_type"): self._initTypeAndEncoding() return http_headers.MimeType.fromString(self._type) def contentEncoding(self): if not hasattr(self, "_encoding"): self._initTypeAndEncoding() return self._encoding def displayName(self): if self.fp.exists(): return self.fp.basename() else: return None def ignoreExt(self, ext): """Ignore the given extension. Serve file.ext if file is requested """ self.ignoredExts.append(ext) def directoryListing(self): return dirlist.DirectoryLister(self.fp.path, self.listChildren(), self.contentTypes, self.contentEncodings, self.defaultType) def putChild(self, name, child): """ Register a child with the given name with this resource. @param name: the name of the child (a URI path segment) @param child: the child to register """ self.putChildren[name] = child def getChild(self, name): """ Look up a child resource. @return: the child of this resource with the given name. 
""" if name == "": return self child = self.putChildren.get(name, None) if child: return child child_fp = self.fp.child(name) if child_fp.exists(): return self.createSimilarFile(child_fp.path) else: return None def listChildren(self): """ @return: a sequence of the names of all known children of this resource. """ children = self.putChildren.keys() if self.fp.isdir(): children += [c for c in self.fp.listdir() if c not in children] return children def locateChild(self, req, segments): """ See L{IResource}C{.locateChild}. """ # If getChild() finds a child resource, return it child = self.getChild(segments[0]) if child is not None: return (child, segments[1:]) # If we're not backed by a directory, we have no children. # But check for existance first; we might be a collection resource # that the request wants created. self.fp.restat(False) if self.fp.exists() and not self.fp.isdir(): return (None, ()) # OK, we need to return a child corresponding to the first segment path = segments[0] if path: fpath = self.fp.child(path) else: # Request is for a directory (collection) resource return (self, server.StopTraversal) # Don't run processors on directories - if someone wants their own # customized directory rendering, subclass File instead. 
if fpath.isfile(): processor = self.processors.get(fpath.splitext()[1].lower()) if processor: return ( processor(fpath.path), segments[1:]) elif not fpath.exists(): sibling_fpath = fpath.siblingExtensionSearch(*self.ignoredExts) if sibling_fpath is not None: fpath = sibling_fpath return self.createSimilarFile(fpath.path), segments[1:] def renderHTTP(self, req): self.fp.restat(False) return super(File, self).renderHTTP(req) def render(self, req): """You know what you doing.""" if not self.fp.exists(): return responsecode.NOT_FOUND if self.fp.isdir(): if req.uri[-1] != "/": # Redirect to include trailing '/' in URI return http.RedirectResponse(req.unparseURL(path=req.path+'/')) else: ifp = self.fp.childSearchPreauth(*self.indexNames) if ifp: # Render from the index file standin = self.createSimilarFile(ifp.path) else: # Render from a DirectoryLister standin = dirlist.DirectoryLister( self.fp.path, self.listChildren(), self.contentTypes, self.contentEncodings, self.defaultType ) return standin.render(req) try: f = self.fp.open() except IOError, e: import errno if e[0] == errno.EACCES: return responsecode.FORBIDDEN elif e[0] == errno.ENOENT: return responsecode.NOT_FOUND else: raise response = http.Response() response.stream = stream.FileStream(f, 0, self.fp.getsize()) for (header, value) in ( ("content-type", self.contentType()), ("content-encoding", self.contentEncoding()), ): if value is not None: response.headers.setHeader(header, value) return response def createSimilarFile(self, path): return self.__class__(path, self.defaultType, self.ignoredExts, self.processors, self.indexNames[:]) class FileSaver(resource.PostableResource): allowedTypes = (http_headers.MimeType('text', 'plain'), http_headers.MimeType('text', 'html'), http_headers.MimeType('text', 'css')) def __init__(self, destination, expectedFields=[], allowedTypes=None, maxBytes=1000000, permissions=0644): self.destination = destination self.allowedTypes = allowedTypes or self.allowedTypes self.maxBytes = 
maxBytes self.expectedFields = expectedFields self.permissions = permissions def makeUniqueName(self, filename): """Called when a unique filename is needed. filename is the name of the file as given by the client. Returns the fully qualified path of the file to create. The file must not yet exist. """ return tempfile.mktemp(suffix=os.path.splitext(filename)[1], dir=self.destination) def isSafeToWrite(self, filename, mimetype, filestream): """Returns True if it's "safe" to write this file, otherwise it raises an exception. """ if filestream.length > self.maxBytes: raise IOError("%s: File exceeds maximum length (%d > %d)" % (filename, filestream.length, self.maxBytes)) if mimetype not in self.allowedTypes: raise IOError("%s: File type not allowed %s" % (filename, mimetype)) return True def writeFile(self, filename, mimetype, fileobject): """Does the I/O dirty work after it calls isWriteable to make sure it's safe to write this file. """ filestream = stream.FileStream(fileobject) if self.isSafeToWrite(filename, mimetype, filestream): outname = self.makeUniqueName(filename) fileobject = os.fdopen(os.open(outname, os.O_WRONLY | os.O_CREAT | os.O_EXCL, self.permissions), 'w', 0) stream.readIntoFile(filestream, fileobject) return outname def render(self, req): content = ["<html><body>"] if req.files: for fieldName in req.files: if fieldName in self.expectedFields: for finfo in req.files[fieldName]: try: outname = self.writeFile(*finfo) content.append("Saved file %s<br />" % outname) except IOError, err: content.append(str(err) + "<br />") else: content.append("%s is not a valid field" % fieldName) else: content.append("No files given") content.append("</body></html>") return http.Response(responsecode.OK, {}, stream='\n'.join(content)) # FIXME: hi there I am a broken class # """I contain AsIsProcessor, which serves files 'As Is' # Inspired by Apache's mod_asis # """ # # class ASISProcessor: # implements(iweb.IResource) # # def __init__(self, path): # self.path = path # # 
def renderHTTP(self, request): # request.startedWriting = 1 # return File(self.path) # # def locateChild(self, request): # return None, () ## # Utilities ## dangerousPathError = http.HTTPError(responsecode.NOT_FOUND) #"Invalid request URL." def isDangerous(path): return path == '..' or '/' in path or os.sep in path def addSlash(request): return "http%s://%s%s/" % ( request.isSecure() and 's' or '', request.getHeader("host"), (request.uri.split('?')[0])) def loadMimeTypes(mimetype_locations=['/etc/mime.types']): """ Multiple file locations containing mime-types can be passed as a list. The files will be sourced in that order, overriding mime-types from the files sourced beforehand, but only if a new entry explicitly overrides the current entry. """ import mimetypes # Grab Python's built-in mimetypes dictionary. contentTypes = mimetypes.types_map # Update Python's semi-erroneous dictionary with a few of the # usual suspects. contentTypes.update( { '.conf': 'text/plain', '.diff': 'text/plain', '.exe': 'application/x-executable', '.flac': 'audio/x-flac', '.java': 'text/plain', '.ogg': 'application/ogg', '.oz': 'text/x-oz', '.swf': 'application/x-shockwave-flash', '.tgz': 'application/x-gtar', '.wml': 'text/vnd.wap.wml', '.xul': 'application/vnd.mozilla.xul+xml', '.py': 'text/plain', '.patch': 'text/plain', } ) # Users can override these mime-types by loading them out configuration # files (this defaults to ['/etc/mime.types']). 
for location in mimetype_locations: if os.path.exists(location): contentTypes.update(mimetypes.read_mime_types(location)) return contentTypes def getTypeAndEncoding(filename, types, encodings, defaultType): p, ext = os.path.splitext(filename) ext = ext.lower() if encodings.has_key(ext): enc = encodings[ext] ext = os.path.splitext(p)[1].lower() else: enc = None type = types.get(ext, defaultType) return type, enc ## # Test code ## if __name__ == '__builtin__': # Running from twistd -y from twisted.application import service, strports from twisted.web2 import server res = File('/') application = service.Application("demo") s = strports.service('8080', server.Site(res)) s.setServiceParent(application)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/static.py
static.py
from zope.interface import implements

from twisted.internet import defer, protocol, reactor
from twisted.protocols import basic
from twisted.web2 import http, iweb, resource, responsecode, stream, twcgi


class SCGIClientResource(resource.LeafResource):
    """A resource that connects to an SCGI server and relays the server's
    response to the browser.

    This resource connects to a SCGI server on a known host ('localhost',
    by default) and port. It has no responsibility for starting the SCGI
    server. If the server is not running when a client connects then a
    BAD_GATEWAY response will be returned immediately.
    """

    def __init__(self, port, host='localhost'):
        """Initialise a SCGI client resource.

        @param port: port the SCGI server listens on.
        @param host: host the SCGI server listens on (default 'localhost').
        """
        resource.LeafResource.__init__(self)
        self.host = host
        self.port = port

    def renderHTTP(self, request):
        # Delegate the whole exchange to doSCGI; returns a Deferred that
        # fires with the relayed response.
        return doSCGI(request, self.host, self.port)


def doSCGI(request, host, port):
    """Relay ``request`` to the SCGI server at (host, port).

    Returns a Deferred firing with the server's response, or an immediate
    LENGTH_REQUIRED response when the request body length is unknown (the
    SCGI protocol requires CONTENT_LENGTH up front).
    """
    if request.stream.length is None:
        return http.Response(responsecode.LENGTH_REQUIRED)
    factory = SCGIClientProtocolFactory(request)
    reactor.connectTCP(host, port, factory)
    return factory.deferred


class SCGIClientProtocol(basic.LineReceiver):
    """Protocol for talking to a SCGI server.
    """

    def __init__(self, request, deferred):
        # deferred fires with the http.Response once headers have arrived.
        self.request = request
        self.deferred = deferred
        # ProducerStream lets us push body data into the response as it
        # arrives from the SCGI server.
        self.stream = stream.ProducerStream()
        self.response = http.Response(stream=self.stream)

    def connectionMade(self):
        # Ooh, look someone did all the hard work for me :).
        env = twcgi.createCGIEnvironment(self.request)
        # Send the headers. The Content-Length header should always be sent
        # first and must be 0 if not present.
        # The whole lot is sent as one big netstring with each name and value
        # separated by a '\0'.
        contentLength = str(env.pop('CONTENT_LENGTH', 0))
        env['SCGI'] = '1'
        scgiHeaders = []
        # NOTE(review): contentLength is already a str here, so the inner
        # str() call is redundant (harmless).
        scgiHeaders.append('%s\x00%s\x00'%('CONTENT_LENGTH', str(contentLength)))
        scgiHeaders.append('SCGI\x001\x00')
        for name, value in env.iteritems():
            if name in ('CONTENT_LENGTH', 'SCGI'):
                continue
            scgiHeaders.append('%s\x00%s\x00'%(name,value))
        scgiHeaders = ''.join(scgiHeaders)
        # Netstring framing: "<length>:<payload>,".
        self.transport.write('%d:%s,' % (len(scgiHeaders), scgiHeaders))
        # Stream the request body straight through to the SCGI server.
        stream.StreamProducer(self.request.stream).beginProducing(self.transport)

    def lineReceived(self, line):
        # Look for end of headers
        if line == '':
            # Switch into raw mode to receive data and callback the deferred
            # with the response instance. The data will be streamed as it
            # arrives.
            # Callback the deferred and set self.response to None, because
            # there are no promises that the response will not be mutated by
            # a resource higher in the tree, such as
            # log.LogWrapperResource
            self.setRawMode()
            self.deferred.callback(self.response)
            self.response = None
            return

        # Split the header into name and value. The 'Status' header is handled
        # specially; all other headers are simply passed onto the response I'm
        # building.
        name, value = line.split(':',1)
        value = value.strip()
        if name.lower() == 'status':
            # Keep only the numeric code, e.g. "200" from "200 OK".
            value = value.split(None,1)[0]
            self.response.code = int(value)
        else:
            self.response.headers.addRawHeader(name, value)

    def rawDataReceived(self, data):
        # Body bytes from the SCGI server: forward into the response stream.
        self.stream.write(data)

    def connectionLost(self, reason):
        # The connection is closed and all data has been streamed via the
        # response. Tell the response stream it's over.
        self.stream.finish()


class SCGIClientProtocolFactory(protocol.ClientFactory):
    """SCGI client protocol factory.

    I am created by a SCGIClientResource to connect to an SCGI server. When
    I connect I create a SCGIClientProtocol instance to do all the talking
    with the server.

    The ``deferred`` attribute is passed on to the protocol and is fired
    with the HTTP response from the server once it has been received.
    """
    protocol = SCGIClientProtocol
    noisy = False # Make Factory shut up

    def __init__(self, request):
        self.request = request
        self.deferred = defer.Deferred()

    def buildProtocol(self, addr):
        # Hand the request and the shared deferred to the new protocol.
        return self.protocol(self.request, self.deferred)

    def clientConnectionFailed(self, connector, reason):
        # SCGI server unreachable: report BAD_GATEWAY to the browser.
        self.sendFailureResponse(reason)

    def sendFailureResponse(self, reason):
        response = http.Response(code=responsecode.BAD_GATEWAY,
                                 stream=str(reason.value))
        self.deferred.callback(response)


__all__ = ['SCGIClientResource']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/twscgi.py
twscgi.py
from __future__ import generators import re from zope.interface import implements import urllib import tempfile from twisted.internet import defer from twisted.web2.stream import IStream, FileStream, BufferedStream, readStream from twisted.web2.stream import generatorToStream, readAndDiscard from twisted.web2 import http_headers from cStringIO import StringIO ################################### ##### Multipart MIME Reader ##### ################################### class MimeFormatError(Exception): pass # parseContentDispositionFormData is absolutely horrible, but as # browsers don't seem to believe in sensible quoting rules, it's # really the only way to handle the header. (Quotes can be in the # filename, unescaped) cd_regexp = re.compile( ' *form-data; *name="([^"]*)"(?:; *filename="(.*)")?$', re.IGNORECASE) def parseContentDispositionFormData(value): match = cd_regexp.match(value) if not match: # Error parsing. raise ValueError("Unknown content-disposition format.") name=match.group(1) filename=match.group(2) return name, filename #@defer.deferredGenerator def _readHeaders(stream): """Read the MIME headers. 
Assumes we've just finished reading in the boundary string.""" ctype = fieldname = filename = None headers = [] # Now read headers while 1: line = stream.readline(size=1024) if isinstance(line, defer.Deferred): line = defer.waitForDeferred(line) yield line line = line.getResult() #print "GOT", line if not line.endswith('\r\n'): if line == "": raise MimeFormatError("Unexpected end of stream.") else: raise MimeFormatError("Header line too long") line = line[:-2] # strip \r\n if line == "": break # End of headers parts = line.split(':', 1) if len(parts) != 2: raise MimeFormatError("Header did not have a :") name, value = parts name = name.lower() headers.append((name, value)) if name == "content-type": ctype = http_headers.parseContentType(http_headers.tokenize((value,), foldCase=False)) elif name == "content-disposition": fieldname, filename = parseContentDispositionFormData(value) if ctype is None: ctype == http_headers.MimeType('application', 'octet-stream') if fieldname is None: raise MimeFormatError('Content-disposition invalid or omitted.') # End of headers, return (field name, content-type, filename) yield fieldname, filename, ctype return _readHeaders = defer.deferredGenerator(_readHeaders) class _BoundaryWatchingStream(object): def __init__(self, stream, boundary): self.stream = stream self.boundary = boundary self.data = '' self.deferred = defer.Deferred() length = None # unknown def read(self): if self.stream is None: if self.deferred is not None: deferred = self.deferred self.deferred = None deferred.callback(None) return None newdata = self.stream.read() if isinstance(newdata, defer.Deferred): return newdata.addCallbacks(self._gotRead, self._gotError) return self._gotRead(newdata) def _gotRead(self, newdata): if not newdata: raise MimeFormatError("Unexpected EOF") # BLECH, converting buffer back into string. 
self.data += str(newdata) data = self.data boundary = self.boundary off = data.find(boundary) if off == -1: # No full boundary, check for the first character off = data.rfind(boundary[0], max(0, len(data)-len(boundary))) if off != -1: # We could have a partial boundary, store it for next time self.data = data[off:] return data[:off] else: self.data = '' return data else: self.stream.pushback(data[off+len(boundary):]) self.stream = None return data[:off] def _gotError(self, err): # Propogate error back to MultipartMimeStream also if self.deferred is not None: deferred = self.deferred self.deferred = None deferred.errback(err) return err def close(self): # Assume error will be raised again and handled by MMS? readAndDiscard(self).addErrback(lambda _: None) class MultipartMimeStream(object): implements(IStream) def __init__(self, stream, boundary): self.stream = BufferedStream(stream) self.boundary = "--"+boundary self.first = True def read(self): """ Return a deferred which will fire with a tuple of: (fieldname, filename, ctype, dataStream) or None when all done. Format errors will be sent to the errback. Returns None when all done. IMPORTANT: you *must* exhaust dataStream returned by this call before calling .read() again! 
""" if self.first: self.first = False d = self._readFirstBoundary() else: d = self._readBoundaryLine() d.addCallback(self._doReadHeaders) d.addCallback(self._gotHeaders) return d def _readFirstBoundary(self): #print "_readFirstBoundary" line = self.stream.readline(size=1024) if isinstance(line, defer.Deferred): line = defer.waitForDeferred(line) yield line line = line.getResult() if line != self.boundary + '\r\n': raise MimeFormatError("Extra data before first boundary: %r looking for: %r" % (line, self.boundary + '\r\n')) self.boundary = "\r\n"+self.boundary yield True return _readFirstBoundary = defer.deferredGenerator(_readFirstBoundary) def _readBoundaryLine(self): #print "_readBoundaryLine" line = self.stream.readline(size=1024) if isinstance(line, defer.Deferred): line = defer.waitForDeferred(line) yield line line = line.getResult() if line == "--\r\n": # THE END! yield False return elif line != "\r\n": raise MimeFormatError("Unexpected data on same line as boundary: %r" % (line,)) yield True return _readBoundaryLine = defer.deferredGenerator(_readBoundaryLine) def _doReadHeaders(self, morefields): #print "_doReadHeaders", morefields if not morefields: return None return _readHeaders(self.stream) def _gotHeaders(self, headers): if headers is None: return None bws = _BoundaryWatchingStream(self.stream, self.boundary) self.deferred = bws.deferred ret=list(headers) ret.append(bws) return tuple(ret) def readIntoFile(stream, outFile, maxlen): """Read the stream into a file, but not if it's longer than maxlen. Returns Deferred which will be triggered on finish. """ curlen = [0] def done(_): return _ def write(data): curlen[0] += len(data) if curlen[0] > maxlen: raise MimeFormatError("Maximum length of %d bytes exceeded." 
% maxlen) outFile.write(data) return readStream(stream, write).addBoth(done) #@defer.deferredGenerator def parseMultipartFormData(stream, boundary, maxMem=100*1024, maxFields=1024, maxSize=10*1024*1024): # If the stream length is known to be too large upfront, abort immediately if stream.length is not None and stream.length > maxSize: raise MimeFormatError("Maximum length of %d bytes exceeded." % maxSize) mms = MultipartMimeStream(stream, boundary) numFields = 0 args = {} files = {} while 1: datas = mms.read() if isinstance(datas, defer.Deferred): datas = defer.waitForDeferred(datas) yield datas datas = datas.getResult() if datas is None: break numFields+=1 if numFields == maxFields: raise MimeFormatError("Maximum number of fields %d exceeded"%maxFields) # Parse data fieldname, filename, ctype, stream = datas if filename is None: # Not a file outfile = StringIO() maxBuf = min(maxSize, maxMem) else: outfile = tempfile.NamedTemporaryFile() maxBuf = maxSize x = readIntoFile(stream, outfile, maxBuf) if isinstance(x, defer.Deferred): x = defer.waitForDeferred(x) yield x x = x.getResult() if filename is None: # Is a normal form field outfile.seek(0) data = outfile.read() args.setdefault(fieldname, []).append(data) maxMem -= len(data) maxSize -= len(data) else: # Is a file upload maxSize -= outfile.tell() outfile.seek(0) files.setdefault(fieldname, []).append((filename, ctype, outfile)) yield args, files return parseMultipartFormData = defer.deferredGenerator(parseMultipartFormData) ################################### ##### x-www-urlencoded reader ##### ################################### def parse_urlencoded_stream(input, maxMem=100*1024, keep_blank_values=False, strict_parsing=False): lastdata = '' still_going=1 while still_going: try: yield input.wait data = input.next() except StopIteration: pairs = [lastdata] still_going=0 else: maxMem -= len(data) if maxMem < 0: raise MimeFormatError("Maximum length of %d bytes exceeded." 
% maxMem) pairs = str(data).split('&') pairs[0] = lastdata + pairs[0] lastdata=pairs.pop() for name_value in pairs: nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise MimeFormatError("bad query field: %s") % `name_value` continue if len(nv[1]) or keep_blank_values: name = urllib.unquote(nv[0].replace('+', ' ')) value = urllib.unquote(nv[1].replace('+', ' ')) yield name, value parse_urlencoded_stream = generatorToStream(parse_urlencoded_stream) def parse_urlencoded(stream, maxMem=100*1024, maxFields=1024, keep_blank_values=False, strict_parsing=False): d = {} numFields = 0 s=parse_urlencoded_stream(stream, maxMem, keep_blank_values, strict_parsing) while 1: datas = s.read() if isinstance(datas, defer.Deferred): datas = defer.waitForDeferred(datas) yield datas datas = datas.getResult() if datas is None: break name, value = datas numFields += 1 if numFields == maxFields: raise MimeFormatError("Maximum number of fields %d exceeded"%maxFields) if name in d: d[name].append(value) else: d[name] = [value] yield d return parse_urlencoded = defer.deferredGenerator(parse_urlencoded) if __name__ == '__main__': d = parseMultipartFormData( FileStream(open("upload.txt")), "----------0xKhTmLbOuNdArY") from twisted.python import log d.addErrback(log.err) def pr(s): print s d.addCallback(pr) __all__ = ['parseMultipartFormData', 'parse_urlencoded', 'parse_urlencoded_stream', 'MultipartMimeStream', 'MimeFormatError']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/fileupload.py
fileupload.py
import time

from twisted.python import log as tlog
from twisted.internet import defer
from twisted.web2 import iweb, stream, resource
from zope.interface import implements, Attribute, Interface


class _LogByteCounter(object):
    """Wrap a response stream, counting bytes as they pass through and
    invoking ``done(success, byteCount)`` exactly once when the stream
    finishes or is closed early."""
    implements(stream.IByteStream)

    def __init__(self, stream, done):
        self.stream = stream
        self.done = done
        self.len = 0

    length = property(lambda self: self.stream.length)

    def _callback(self, data):
        if data is None:
            # End of stream: report success, guarding against double-fire.
            if self.done:
                done = self.done; self.done = None
                done(True, self.len)
        else:
            self.len += len(data)
        return data

    def read(self):
        data = self.stream.read()
        if isinstance(data, defer.Deferred):
            return data.addCallback(self._callback)
        return self._callback(data)

    def close(self):
        # Closed before completion: report failure with bytes so far.
        if self.done:
            done = self.done; self.done = None
            done(False, self.len)
        self.stream.close()


class ILogInfo(Interface):
    """Auxilliary information about the response useful for logging."""

    bytesSent = Attribute("Number of bytes sent.")
    responseCompleted = Attribute("Whether or not the response was completed.")
    secondsTaken = Attribute("Number of seconds taken to serve the request.")
    startTime = Attribute("Time at which the request started")


class LogInfo(object):
    implements(ILogInfo)

    responseCompleted = None
    secondsTaken = None
    bytesSent = None
    startTime = None


def logFilter(request, response, startTime=None):
    """Response filter that emits a log event (via twisted.python.log)
    once the response body has been fully sent."""
    if startTime is None:
        startTime = time.time()

    def _log(success, length):
        loginfo = LogInfo()
        loginfo.bytesSent = length
        loginfo.responseCompleted = success
        loginfo.secondsTaken = time.time() - startTime

        tlog.msg(interface=iweb.IRequest, request=request, response=response,
                 loginfo=loginfo)
        # Or just...
        # ILogger(ctx).log(...) ?

    if response.stream:
        response.stream = _LogByteCounter(response.stream, _log)
    else:
        # No body: log immediately with a zero byte count.
        _log(True, 0)

    return response

logFilter.handleErrors = True


class LogWrapperResource(resource.WrapperResource):
    def hook(self, request):
        # Insert logger
        request.addResponseFilter(logFilter, atEnd=True)

monthname = [None,
             'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']


class BaseCommonAccessLoggingObserver(object):
    """An abstract Twisted-based logger for creating access logs.

    Derived implementations of this class *must* implement the
    ``logMessage(message)`` method, which will send the message to an
    actual log/file or stream.
    """

    logFormat = '%s - %s [%s] "%s" %s %d "%s" "%s"'

    def logMessage(self, message):
        # BUGFIX: previously ``raise NotImplemented, '...'`` --
        # NotImplemented is a singleton, not an exception class, so the
        # raise itself failed with a TypeError rather than signalling
        # "abstract method" to the caller.
        raise NotImplementedError('You must provide an implementation.')

    def computeTimezoneForLog(self, tz):
        # tz is seconds west of UTC (time.timezone convention); convert to
        # the +HHMM/-HHMM offset form used by Common Log Format.
        if tz > 0:
            neg = 1
        else:
            neg = 0
            tz = -tz
        h, rem = divmod(tz, 3600)
        m, rem = divmod(rem, 60)
        if neg:
            return '-%02d%02d' % (h, m)
        else:
            return '+%02d%02d' % (h, m)

    tzForLog = None
    tzForLogAlt = None

    def logDateString(self, when):
        logtime = time.localtime(when)
        Y, M, D, h, m, s = logtime[:6]

        if not time.daylight:
            tz = self.tzForLog
            if tz is None:
                tz = self.computeTimezoneForLog(time.timezone)
                self.tzForLog = tz
        else:
            tz = self.tzForLogAlt
            if tz is None:
                tz = self.computeTimezoneForLog(time.altzone)
                self.tzForLogAlt = tz

        return '%02d/%s/%02d:%02d:%02d:%02d %s' % (
            D, monthname[M], Y, h, m, s, tz)

    def emit(self, eventDict):
        # Only handle the structured events emitted by logFilter above.
        if eventDict.get('interface') is not iweb.IRequest:
            return

        request = eventDict['request']
        response = eventDict['response']
        loginfo = eventDict['loginfo']
        firstLine = '%s %s HTTP/%s' % (
            request.method,
            request.uri,
            '.'.join([str(x) for x in request.clientproto]))

        # CONSISTENCY: use the class's logFormat attribute instead of
        # duplicating the identical format string inline.
        self.logMessage(
            self.logFormat % (
                request.remoteAddr.host,
                # XXX: Where to get user from?
                "-",
                self.logDateString(
                    response.headers.getHeader('date', 0)),
                firstLine,
                response.code,
                loginfo.bytesSent,
                request.headers.getHeader('referer', '-'),
                request.headers.getHeader('user-agent', '-')
                )
            )

    def start(self):
        """Start observing log events."""
        tlog.addObserver(self.emit)

    def stop(self):
        """Stop observing log events."""
        tlog.removeObserver(self.emit)


class FileAccessLoggingObserver(BaseCommonAccessLoggingObserver):
    """I log requests to a single logfile
    """

    def __init__(self, logpath):
        self.logpath = logpath

    def logMessage(self, message):
        self.f.write(message + '\n')

    def start(self):
        super(FileAccessLoggingObserver, self).start()
        # Line-buffered append so entries hit disk promptly.
        self.f = open(self.logpath, 'a', 1)

    def stop(self):
        super(FileAccessLoggingObserver, self).stop()
        self.f.close()


class DefaultCommonAccessLoggingObserver(BaseCommonAccessLoggingObserver):
    """Log requests to default twisted logfile."""

    def logMessage(self, message):
        tlog.msg(message)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/log.py
log.py
import os

from zope.interface import implements

from twisted.python import usage, reflect
from twisted.application import internet, service, strports
from twisted.scripts.mktap import IServiceMaker
from twisted.plugin import IPlugin

from twisted.web2 import static, iweb, log, server, channel, vhost
from twisted.web2.dav import static as dav_static


class Options(usage.Options):
    """Command-line options for the mktap/twistd 'web2' plugin.

    Exactly one root-resource option (--path, --dav, --class or
    --vhost-path) may be used; the --vhost-* options may be combined with
    each other.
    """

    optParameters = [["port", "p", "8080",
                      "Port to start the server on."],
                     ["logfile", "l", None,
                      ("Common Access Logging Format file to write to "
                       "if unspecified access log information will be "
                       "written to the standard twisted log file.")],
                     ["https", None, None,
                      "Port to listen on for Secure HTTP."],
                     ["certificate", "c", "server.pem",
                      "SSL certificate to use for HTTPS."],
                     ["privkey", "k", "server.pem",
                      "SSL certificate to use for HTTPS."]]

    zsh_actions = {"certificate" : "_files -g '*.pem'",
                   "privkey" : "_files -g '*.pem'"}

    longdesc = """\
This creates a web2.tap file that can be used by twistd.

Basic Examples:

To serve a static directory or file:

    mktap web2 --path=/tmp/

To serve a dav collection:

    mktap web2 --dav=/tmp/

To serve a dynamic resource:

    mktap web2 --class=fully.qualified.ClassName

To serve a directory of the form:

    /var/www/domain1/
    /var/www/domain2/

    mktap web2 --vhost-path=/var/www/

All the above options are incompatible as they all specify the root
resource.  However you can use the following options in conjunction
with --vhost-path

To serve a specific host name as a static file:

    mktap web2 --vhost-static=domain3=/some/other/root/domain3

Or to serve a specific host name as a dynamic resource:

    mktap web2 --vhost-class=domain4=fully.qualified.ClassName
"""

    def __init__(self):
        usage.Options.__init__(self)
        self['indexes'] = []
        self['root'] = None

    def opt_index(self, indexName):
        """Add the name of a file used to check for directory indexes.
        [default: index, index.html]
        """
        self['indexes'].append(indexName)

    opt_i = opt_index

    def opt_path(self, path):
        """A path that will be used to serve the root resource as a raw
        file or directory.
        """
        if self['root']:
            raise usage.UsageError("You may only have one root resource.")
        self['root'] = static.File(os.path.abspath(path))

    def opt_processor(self, proc):
        """`ext=class' where `class' is added as a Processor for files
        ending with `ext'.
        """
        if not isinstance(self['root'], static.File):
            raise usage.UsageError(
                "You can only use --processor after --path.")
        ext, klass = proc.split('=', 1)
        self['root'].processors[ext] = reflect.namedClass(klass)

    def opt_class(self, className):
        """A class that will be used to serve the root resource.  Must
        implement twisted.web2.iweb.IResource and take no arguments.
        """
        if self['root']:
            raise usage.UsageError("You may only have one root resource.")
        classObj = reflect.namedClass(className)
        self['root'] = iweb.IResource(classObj())

    def opt_allow_ignore_ext(self):
        """Specify whether or not a request for 'foo' should return
        'foo.ext'"""
        if not isinstance(self['root'], static.File):
            raise usage.UsageError("You can only use --allow_ignore_ext "
                                   "after --path.")
        self['root'].ignoreExt('*')

    def opt_ignore_ext(self, ext):
        """Specify an extension to ignore.  These will be processed in
        order.
        """
        if not isinstance(self['root'], static.File):
            raise usage.UsageError("You can only use --ignore_ext "
                                   "after --path.")
        self['root'].ignoreExt(ext)

    def opt_mimetype(self, mimetype):
        """Mapping from file extension to MIME Type in the form of
        'ext=type'.  Example: html=text/html
        """
        if not isinstance(self['root'], static.File):
            raise usage.UsageError("You can only use --mimetype "
                                   "after --path.")
        ext, mimetype = mimetype.split('=', 1)
        # this is really gross, there should be a public api for this.
        self['root']._sharedContentTypes.update({ext: mimetype})

    def opt_vhost_path(self, path):
        """Specify a directory to use for automatic named virtual hosts.
        It is assumed that this directory contains a series of
        subdirectories each representing a virtual host domain name and
        containing the files to be served at that domain.
        """
        if self['root']:
            # BUGFIX: was ``isintance`` -- a NameError whenever a root
            # resource had already been configured.
            if not isinstance(self['root'], vhost.NameVirtualHost):
                raise usage.UsageError("You may only have one root resource")
        else:
            self['root'] = vhost.NameVirtualHost()
        path = os.path.abspath(path)
        for name in os.listdir(path):
            fullname = os.path.join(path, name)
            self['root'].addHost(name, static.File(fullname))

    def opt_vhost_static(self, virtualHost):
        """Specify a virtual host in the form of domain=path to be served
        as raw directory or file.
        """
        if (self['root'] and not
                isinstance(self['root'], vhost.NameVirtualHost)):
            raise usage.UsageError("You can only use --vhost-static alone "
                                   "or with --vhost-class and --vhost-path")
        domain, path = virtualHost.split('=', 1)
        if not self['root']:
            self['root'] = vhost.NameVirtualHost()
        self['root'].addHost(domain, static.File(os.path.abspath(path)))

    def opt_vhost_class(self, virtualHost):
        """Specify a virtual host in the form of domain=class, where class
        can be adapted to an iweb.IResource and has a zero-argument
        constructor.
        """
        if (self['root'] and not
                isinstance(self['root'], vhost.NameVirtualHost)):
            raise usage.UsageError("You can not use --vhost-class with "
                                   "--path or --class.")
        domain, className = virtualHost.split('=', 1)
        if not self['root']:
            self['root'] = vhost.NameVirtualHost()
        classObj = reflect.namedClass(className)
        self['root'].addHost(domain, iweb.IResource(classObj()))

    def opt_vhost_dav(self, virtualHost):
        """Specify a virtual host in the form of domain=path, to have path
        served as a DAV collection at the root of domain
        """
        if (self['root'] and not
                isinstance(self['root'], vhost.NameVirtualHost)):
            # BUGFIX: message previously referred to --vhost-static
            # (copy-paste from opt_vhost_static).
            raise usage.UsageError("You can only use --vhost-dav alone "
                                   "or with --vhost-class and --vhost-path")
        domain, path = virtualHost.split('=', 1)
        if not self['root']:
            self['root'] = vhost.NameVirtualHost()
        self['root'].addHost(domain,
                             dav_static.DAVFile(os.path.abspath(path)))

    def opt_dav(self, path):
        """A path that will be used to serve the root resource as a DAV
        Collection.
        """
        if self['root']:
            raise usage.UsageError("You may only have one root resource")
        self['root'] = dav_static.DAVFile(os.path.abspath(path))

    def postOptions(self):
        # Fail early (with a usable message) when HTTPS was requested but
        # pyOpenSSL is not available.
        if self['https']:
            try:
                from twisted.internet.ssl import DefaultOpenSSLContextFactory
            except ImportError:
                raise usage.UsageError("SSL support not installed")


class Web2Service(service.MultiService):
    """MultiService that also starts/stops the access-log observer."""

    def __init__(self, logObserver):
        self.logObserver = logObserver
        service.MultiService.__init__(self)

    def startService(self):
        service.MultiService.startService(self)
        self.logObserver.start()

    def stopService(self):
        service.MultiService.stopService(self)
        self.logObserver.stop()


def makeService(config):
    """Build the web2 service tree from parsed Options.

    @param config: an Options instance (or equivalent mapping).
    """
    if config['logfile']:
        logObserver = log.FileAccessLoggingObserver(config['logfile'])
    else:
        logObserver = log.DefaultCommonAccessLoggingObserver()

    # NOTE(review): if no root resource was configured, ``root`` is unbound
    # below and server.Site(root) raises NameError -- presumably Options
    # validation guarantees a root; verify against callers.
    if config['root']:
        if config['indexes']:
            config['root'].indexNames = config['indexes']
        root = log.LogWrapperResource(config['root'])

    s = Web2Service(logObserver)

    site = server.Site(root)
    chan = channel.HTTPFactory(site)

    if config['https']:
        from twisted.internet.ssl import DefaultOpenSSLContextFactory
        i = internet.SSLServer(int(config['https']), chan,
                               DefaultOpenSSLContextFactory(
                                   config['privkey'],
                                   config['certificate']))
        i.setServiceParent(s)

    strports.service(config['port'], chan
                     ).setServiceParent(s)

    return s
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/tap.py
tap.py
# System Imports import cStringIO as StringIO import cgi, time, urlparse from urllib import quote, unquote from urlparse import urlsplit import weakref from zope.interface import implements # Twisted Imports from twisted.internet import defer from twisted.python import log, failure # Sibling Imports from twisted.web2 import http, iweb, fileupload, responsecode from twisted.web2 import http_headers from twisted.web2.filter.range import rangefilter from twisted.web2 import error from twisted.web2 import version as web2_version from twisted import __version__ as twisted_version VERSION = "Twisted/%s TwistedWeb/%s" % (twisted_version, web2_version) _errorMarker = object() def defaultHeadersFilter(request, response): if not response.headers.hasHeader('server'): response.headers.setHeader('server', VERSION) if not response.headers.hasHeader('date'): response.headers.setHeader('date', time.time()) return response defaultHeadersFilter.handleErrors = True def preconditionfilter(request, response): if request.method in ("GET", "HEAD"): http.checkPreconditions(request, response) return response def doTrace(request): request = iweb.IRequest(request) txt = "%s %s HTTP/%d.%d\r\n" % (request.method, request.uri, request.clientproto[0], request.clientproto[1]) l=[] for name, valuelist in request.headers.getAllRawHeaders(): for value in valuelist: l.append("%s: %s\r\n" % (name, value)) txt += ''.join(l) return http.Response( responsecode.OK, {'content-type': http_headers.MimeType('message', 'http')}, txt) def parsePOSTData(request): if request.stream.length == 0: return defer.succeed(None) parser = None ctype = request.headers.getHeader('content-type') if ctype is None: return defer.succeed(None) def updateArgs(data): args = data request.args.update(args) def updateArgsAndFiles(data): args, files = data request.args.update(args) request.files.update(files) def error(f): f.trap(fileupload.MimeFormatError) raise http.HTTPError(responsecode.BAD_REQUEST) if ctype.mediaType == 
'application' and ctype.mediaSubtype == 'x-www-form-urlencoded': d = fileupload.parse_urlencoded(request.stream) d.addCallbacks(updateArgs, error) return d elif ctype.mediaType == 'multipart' and ctype.mediaSubtype == 'form-data': boundary = ctype.params.get('boundary') if boundary is None: return failure.Failure(fileupload.MimeFormatError("Boundary not specified in Content-Type.")) d = fileupload.parseMultipartFormData(request.stream, boundary) d.addCallbacks(updateArgsAndFiles, error) return d else: raise http.HTTPError(responsecode.BAD_REQUEST) class StopTraversal(object): """ Indicates to Request._handleSegment that it should stop handling path segments. """ pass class Request(http.Request): """ vars: site remoteAddr scheme host port path params querystring args files prepath postpath @ivar path: The path only (arguments not included). @ivar args: All of the arguments, including URL and POST arguments. @type args: A mapping of strings (the argument names) to lists of values. i.e., ?foo=bar&foo=baz&quux=spam results in {'foo': ['bar', 'baz'], 'quux': ['spam']}. """ implements(iweb.IRequest) site = None _initialprepath = None responseFilters = [rangefilter, preconditionfilter, error.defaultErrorHandler, defaultHeadersFilter] def __init__(self, *args, **kw): if kw.has_key('site'): self.site = kw['site'] del kw['site'] if kw.has_key('prepathuri'): self._initialprepath = kw['prepathuri'] del kw['prepathuri'] # Copy response filters from the class self.responseFilters = self.responseFilters[:] self.files = {} self.resources = [] http.Request.__init__(self, *args, **kw) def addResponseFilter(self, f, atEnd=False): if atEnd: self.responseFilters.append(f) else: self.responseFilters.insert(0, f) def unparseURL(self, scheme=None, host=None, port=None, path=None, params=None, querystring=None, fragment=None): """Turn the request path into a url string. For any pieces of the url that are not specified, use the value from the request. 
The arguments have the same meaning as the same named attributes of Request.""" if scheme is None: scheme = self.scheme if host is None: host = self.host if port is None: port = self.port if path is None: path = self.path if params is None: params = self.params if querystring is None: query = self.querystring if fragment is None: fragment = '' if port == http.defaultPortForScheme.get(scheme, 0): hostport = host else: hostport = host + ':' + str(port) return urlparse.urlunparse(( scheme, hostport, path, params, querystring, fragment)) def _parseURL(self): if self.uri[0] == '/': # Can't use urlparse for request_uri because urlparse # wants to be given an absolute or relative URI, not just # an abs_path, and thus gets '//foo' wrong. self.scheme = self.host = self.path = self.params = self.querystring = '' if '?' in self.uri: self.path, self.querystring = self.uri.split('?', 1) else: self.path = self.uri if ';' in self.path: self.path, self.params = self.path.split(';', 1) else: # It is an absolute uri, use standard urlparse (self.scheme, self.host, self.path, self.params, self.querystring, fragment) = urlparse.urlparse(self.uri) if self.querystring: self.args = cgi.parse_qs(self.querystring, True) else: self.args = {} path = map(unquote, self.path[1:].split('/')) if self._initialprepath: # We were given an initial prepath -- this is for supporting # CGI-ish applications where part of the path has already # been processed prepath = map(unquote, self._initialprepath[1:].split('/')) if path[:len(prepath)] == prepath: self.prepath = prepath self.postpath = path[len(prepath):] else: self.prepath = [] self.postpath = path else: self.prepath = [] self.postpath = path #print "_parseURL", self.uri, (self.uri, self.scheme, self.host, self.path, self.params, self.querystring) def _fixupURLParts(self): hostaddr, secure = self.chanRequest.getHostInfo() if not self.scheme: self.scheme = ('http', 'https')[secure] if self.host: self.host, self.port = http.splitHostPort(self.scheme, 
self.host) else: # If GET line wasn't an absolute URL host = self.headers.getHeader('host') if host: self.host, self.port = http.splitHostPort(self.scheme, host) else: # When no hostname specified anywhere, either raise an # error, or use the interface hostname, depending on # protocol version if self.clientproto >= (1,1): raise http.HTTPError(responsecode.BAD_REQUEST) self.host = hostaddr.host self.port = hostaddr.port def process(self): "Process a request." try: self.checkExpect() resp = self.preprocessRequest() if resp is not None: self._cbFinishRender(resp).addErrback(self._processingFailed) return self._parseURL() self._fixupURLParts() self.remoteAddr = self.chanRequest.getRemoteHost() except: failedDeferred = self._processingFailed(failure.Failure()) return d = defer.Deferred() d.addCallback(self._getChild, self.site.resource, self.postpath) d.addCallback(lambda res, req: res.renderHTTP(req), self) d.addCallback(self._cbFinishRender) d.addErrback(self._processingFailed) d.callback(None) def preprocessRequest(self): """Do any request processing that doesn't follow the normal resource lookup procedure. "OPTIONS *" is handled here, for example. 
This would also be the place to do any CONNECT processing.""" if self.method == "OPTIONS" and self.uri == "*": response = http.Response(responsecode.OK) response.headers.setHeader('allow', ('GET', 'HEAD', 'OPTIONS', 'TRACE')) return response # This is where CONNECT would go if we wanted it return None def _getChild(self, _, res, path, updatepaths=True): """Call res.locateChild, and pass the result on to _handleSegment.""" self.resources.append(res) if not path: return res result = res.locateChild(self, path) if isinstance(result, defer.Deferred): return result.addCallback(self._handleSegment, res, path, updatepaths) else: return self._handleSegment(result, res, path, updatepaths) def _handleSegment(self, result, res, path, updatepaths): """Handle the result of a locateChild call done in _getChild.""" newres, newpath = result # If the child resource is None then display a error page if newres is None: raise http.HTTPError(responsecode.NOT_FOUND) # If we got a deferred then we need to call back later, once the # child is actually available. if isinstance(newres, defer.Deferred): return newres.addCallback( lambda actualRes: self._handleSegment( (actualRes, newpath), res, path, updatepaths) ) if path: url = quote("/" + "/".join(path)) else: url = "/" if newpath is StopTraversal: # We need to rethink how to do this. #if newres is res: self._rememberResource(res, url) return res #else: # raise ValueError("locateChild must not return StopTraversal with a resource other than self.") newres = iweb.IResource(newres) if newres is res: assert not newpath is path, "URL traversal cycle detected when attempting to locateChild %r from resource %r." % (path, res) assert len(newpath) < len(path), "Infinite loop impending..." if updatepaths: # We found a Resource... 
update the request.prepath and postpath for x in xrange(len(path) - len(newpath)): self.prepath.append(self.postpath.pop(0)) child = self._getChild(None, newres, newpath, updatepaths=updatepaths) self._rememberResource(child, url) return child _urlsByResource = weakref.WeakKeyDictionary() def _rememberResource(self, resource, url): """ Remember the URL of a visited resource. """ self._urlsByResource[resource] = url return resource def urlForResource(self, resource): """ Looks up the URL of the given resource if this resource was found while processing this request. Specifically, this includes the requested resource, and resources looked up via L{locateResource}. Note that a resource may be found at multiple URIs; if the same resource is visited at more than one location while processing this request, this method will return one of those URLs, but which one is not defined, nor whether the same URL is returned in subsequent calls. @param resource: the resource to find a URI for. This resource must have been obtained from the request (ie. via its C{uri} attribute, or through its C{locateResource} or C{locateChildResource} methods). @return: a valid URL for C{resource} in this request. @raise NoURLForResourceError: if C{resource} has no URL in this request (because it was not obtained from the request). """ resource = self._urlsByResource.get(resource, None) if resource is None: raise NoURLForResourceError(resource) return resource def locateResource(self, url): """ Looks up the resource with the given URL. @param uri: The URL of the desired resource. @return: a L{Deferred} resulting in the L{IResource} at the given URL or C{None} if no such resource can be located. @raise HTTPError: If C{url} is not a URL on the site that this request is being applied to. The contained response will have a status code of L{responsecode.BAD_GATEWAY}. @raise HTTPError: If C{url} contains a query or fragment. The contained response will have a status code of L{responsecode.BAD_REQUEST}. 
""" if url is None: return None # # Parse the URL # (scheme, host, path, query, fragment) = urlsplit(url) if query or fragment: raise http.HTTPError(http.StatusResponse( responsecode.BAD_REQUEST, "URL may not contain a query or fragment: %s" % (url,) )) # The caller shouldn't be asking a request on one server to lookup a # resource on some other server. if (scheme and scheme != self.scheme) or (host and host != self.headers.getHeader("host")): raise http.HTTPError(http.StatusResponse( responsecode.BAD_GATEWAY, "URL is not on this site (%s://%s/): %s" % (scheme, self.headers.getHeader("host"), url) )) segments = path.split("/") assert segments[0] == "", "URL path didn't begin with '/': %s" % (path,) segments = map(unquote, segments[1:]) def notFound(f): f.trap(http.HTTPError) if f.value.response.code != responsecode.NOT_FOUND: return f return None d = defer.maybeDeferred(self._getChild, None, self.site.resource, segments, updatepaths=False) d.addCallback(self._rememberResource, path) d.addErrback(notFound) return d def locateChildResource(self, parent, childName): """ Looks up the child resource with the given name given the parent resource. This is similar to locateResource(), but doesn't have to start the lookup from the root resource, so it is potentially faster. @param parent: the parent of the resource being looked up. This resource must have been obtained from the request (ie. via its C{uri} attribute, or through its C{locateResource} or C{locateChildResource} methods). @param childName: the name of the child of C{parent} to looked up. to C{parent}. @return: a L{Deferred} resulting in the L{IResource} at the given URL or C{None} if no such resource can be located. @raise NoURLForResourceError: if C{resource} was not obtained from the request. 
""" if parent is None or childName is None: return None assert "/" not in childName, "Child name may not contain '/': %s" % (childName,) parentURL = self.urlForResource(parent) if not parentURL.endswith("/"): parentURL += "/" url = parentURL + quote(childName) segment = childName def notFound(f): f.trap(http.HTTPError) if f.value.response.code != responsecode.NOT_FOUND: return f return None d = defer.maybeDeferred(self._getChild, None, parent, [segment], updatepaths=False) d.addCallback(self._rememberResource, url) d.addErrback(notFound) return d def _processingFailed(self, reason): if reason.check(http.HTTPError) is not None: # If the exception was an HTTPError, leave it alone d = defer.succeed(reason.value.response) else: # Otherwise, it was a random exception, so give a # ICanHandleException implementer a chance to render the page. def _processingFailed_inner(reason): handler = iweb.ICanHandleException(self, self) return handler.renderHTTP_exception(self, reason) d = defer.maybeDeferred(_processingFailed_inner, reason) d.addCallback(self._cbFinishRender) d.addErrback(self._processingReallyFailed, reason) return d def _processingReallyFailed(self, reason, origReason): log.msg("Exception rendering error page:", isErr=1) log.err(reason) log.msg("Original exception:", isErr=1) log.err(origReason) body = ("<html><head><title>Internal Server Error</title></head>" "<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. 
Additionally, an error occured rendering the error page.</body></html>") response = http.Response( responsecode.INTERNAL_SERVER_ERROR, {'content-type': http_headers.MimeType('text','html')}, body) self.writeResponse(response) def _cbFinishRender(self, result): def filterit(response, f): if (hasattr(f, 'handleErrors') or (response.code >= 200 and response.code < 300 and response.code != 204)): return f(self, response) else: return response response = iweb.IResponse(result, None) if response: d = defer.Deferred() for f in self.responseFilters: d.addCallback(filterit, f) d.addCallback(self.writeResponse) d.callback(response) return d resource = iweb.IResource(result, None) if resource: self.resources.append(resource) d = defer.maybeDeferred(resource.renderHTTP, self) d.addCallback(self._cbFinishRender) return d raise TypeError("html is not a resource or a response") def renderHTTP_exception(self, req, reason): log.msg("Exception rendering:", isErr=1) log.err(reason) body = ("<html><head><title>Internal Server Error</title></head>" "<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. More information is available in the server log.</body></html>") return http.Response( responsecode.INTERNAL_SERVER_ERROR, {'content-type': http_headers.MimeType('text','html')}, body) class Site(object): def __init__(self, resource): """Initialize. """ self.resource = iweb.IResource(resource) def __call__(self, *args, **kwargs): return Request(site=self, *args, **kwargs) class NoURLForResourceError(RuntimeError): def __init__(self, resource): RuntimeError.__init__(self, "Resource %r has no URL in this request." % (resource,)) self.resource = resource __all__ = ['Request', 'Site', 'StopTraversal', 'VERSION', 'defaultHeadersFilter', 'doTrace', 'parsePOSTData', 'preconditionfilter', 'NoURLForResourceError']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/server.py
server.py
from zope.interface import implements

from twisted.cred import error
from twisted.web2 import resource
from twisted.web2 import responsecode
from twisted.web2 import http
from twisted.web2 import iweb


class UnauthorizedResponse(http.StatusResponse):
    """A specialized response class for generating www-authenticate headers
    from the given L{CredentialFactory} instances
    """

    def __init__(self, factories, remoteAddr=None):
        super(UnauthorizedResponse, self).__init__(
            responsecode.UNAUTHORIZED,
            "You are not authorized to access this resource.")

        # One challenge per configured factory, in the factory's scheme.
        challenges = [(fact.scheme, fact.getChallenge(remoteAddr))
                      for fact in factories.itervalues()]
        self.headers.setHeader('www-authenticate', challenges)


class UnauthorizedResource(resource.LeafResource):
    """Returned by locateChild or render to generate an http Unauthorized
    response.
    """

    def __init__(self, factories):
        """
        @param factories: sequence of ICredentialFactory implementations
            for which to generate a WWW-Authenticate header.
        """
        self.factories = factories

    def render(self, req):
        return UnauthorizedResponse(self.factories, req.remoteAddr)


class HTTPAuthResource(object):
    """I wrap a resource to prevent it being accessed unless the
    authentication can be completed using the credential factory, portal,
    and interfaces specified.
    """

    implements(iweb.IResource)

    def __init__(self, wrappedResource, credentialFactories, portal, interfaces):
        """
        @param wrappedResource: A L{twisted.web2.iweb.IResource} to be
            returned from locateChild and render upon successful
            authentication.
        @param credentialFactories: A list of instances that implement
            L{ICredentialFactory}.
        @type credentialFactories: L{list}
        @param portal: Portal to handle logins for this resource.
        @type portal: L{twisted.cred.portal.Portal}
        @param interfaces: the interfaces that are allowed to log in via
            the given portal
        @type interfaces: L{tuple}
        """
        self.wrappedResource = wrappedResource
        # Index the factories by authentication scheme for quick lookup.
        self.credentialFactories = dict([(fact.scheme, fact)
                                         for fact in credentialFactories])
        self.portal = portal
        self.interfaces = interfaces

    def login(self, factory, response, req):
        """Decode the client's response and attempt a portal login.

        Returns the wrapped resource on success, an UnauthorizedResource
        otherwise.
        """
        def onSuccess(avatar):
            return self.wrappedResource

        def onFailure(failure):
            return UnauthorizedResource(self.credentialFactories)

        try:
            creds = factory.decode(response, req)
        except error.LoginFailed:
            return UnauthorizedResource(self.credentialFactories)

        d = self.portal.login(creds, None, *self.interfaces)
        return d.addCallbacks(onSuccess, onFailure)

    def authenticate(self, req):
        """Check the Authorization header and dispatch to the matching
        credential factory, or demand authentication."""
        authHeader = req.headers.getHeader('authorization')

        # Missing header, or a scheme we have no factory for: challenge.
        if authHeader is None or authHeader[0] not in self.credentialFactories:
            return UnauthorizedResource(self.credentialFactories)

        scheme, response = authHeader[0], authHeader[1]
        return self.login(self.credentialFactories[scheme], response, req)

    def locateChild(self, req, seg):
        return self.authenticate(req), seg[1:]

    def renderHTTP(self, req):
        return self.authenticate(req)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/auth/wrapper.py
wrapper.py
from twisted.cred import credentials, error
from zope.interface import implements

from twisted.web2.auth.interfaces import ICredentialFactory

# The md5/sha modules are deprecated since Python 2.5 (and removed in 3.x);
# prefer hashlib, which produces identical digests, but fall back so older
# interpreters keep working.
try:
    from hashlib import md5, sha1
except ImportError:
    from md5 import md5
    from sha import sha as sha1

import random, sys

# The digest math

algorithms = {
    'md5': md5,
    'md5-sess': md5,
    'sha': sha1,
}

# DigestCalcHA1
def calcHA1(
    pszAlg,
    pszUserName,
    pszRealm,
    pszPassword,
    pszNonce,
    pszCNonce,
):
    """Compute the HA1 hex digest per RFC 2617 (including the md5-sess
    variant, which re-hashes HA1 with the nonces)."""
    m = algorithms[pszAlg]()
    m.update(pszUserName)
    m.update(":")
    m.update(pszRealm)
    m.update(":")
    m.update(pszPassword)
    HA1 = m.digest()
    if pszAlg == "md5-sess":
        m = algorithms[pszAlg]()
        m.update(HA1)
        m.update(":")
        m.update(pszNonce)
        m.update(":")
        m.update(pszCNonce)
        HA1 = m.digest()
    return HA1.encode('hex')

# DigestCalcResponse
def calcResponse(
    HA1,
    algo,
    pszNonce,
    pszNonceCount,
    pszCNonce,
    pszQop,
    pszMethod,
    pszDigestUri,
    pszHEntity,
):
    """Compute the request-digest ("response") per RFC 2617 from HA1 and
    the per-request fields."""
    m = algorithms[algo]()
    m.update(pszMethod)
    m.update(":")
    m.update(pszDigestUri)
    if pszQop == "auth-int":
        m.update(":")
        m.update(pszHEntity)
    HA2 = m.digest().encode('hex')

    m = algorithms[algo]()
    m.update(HA1)
    m.update(":")
    m.update(pszNonce)
    m.update(":")
    if pszNonceCount and pszCNonce:  # pszQop:
        m.update(pszNonceCount)
        m.update(":")
        m.update(pszCNonce)
        m.update(":")
        m.update(pszQop)
        m.update(":")
    m.update(HA2)
    hash = m.digest().encode('hex')
    return hash


class DigestedCredentials:
    """Yet Another Simple HTTP Digest authentication scheme"""

    implements(credentials.IUsernameHashedPassword)

    def __init__(self, username, method, realm, fields):
        self.username = username
        self.method = method
        self.realm = realm
        self.fields = fields

    def checkPassword(self, password):
        """Recompute the expected digest from the cleartext password and
        compare it with the response the client sent."""
        response = self.fields.get('response')
        uri = self.fields.get('uri')
        nonce = self.fields.get('nonce')
        cnonce = self.fields.get('cnonce')
        nc = self.fields.get('nc')
        algo = self.fields.get('algorithm', 'md5').lower()
        qop = self.fields.get('qop', 'auth')

        expected = calcResponse(
            calcHA1(algo, self.username, self.realm, password, nonce, cnonce),
            algo, nonce, nc, cnonce, qop, self.method, uri, None
        )

        return expected == response


class DigestCredentialFactory:
    implements(ICredentialFactory)

    # NOTE(review): declared but never consulted in this module -- presumably
    # intended as a challenge expiry (seconds?); confirm before relying on it.
    CHALLENGE_LIFETIME = 15

    scheme = "digest"

    def __init__(self, algorithm, realm):
        """@type algorithm: C{str}
           @param algorithm: case insensitive string that specifies
              the hash algorithm used, should be either, md5, md5-sess
              or sha

           @type realm: C{str}
           @param realm: case sensitive string that specifies the realm
              portion of the challenge
        """
        self.outstanding = {}
        self.algorithm = algorithm
        self.realm = realm

    def generateNonce(self):
        c = tuple([random.randrange(sys.maxint) for _ in range(3)])
        c = '%d%d%d' % c
        return c

    def generateOpaque(self):
        return str(random.randrange(sys.maxint))

    def getChallenge(self, peer):
        """Build the WWW-Authenticate challenge fields, remembering the
        nonce under the opaque token so decode() can validate it."""
        c = self.generateNonce()
        o = self.generateOpaque()
        self.outstanding[o] = c
        return {'nonce': c,
                'opaque': o,
                'qop': 'auth',
                'algorithm': self.algorithm,
                'realm': self.realm}

    def decode(self, response, request):
        """Parse the Authorization response into DigestedCredentials.

        @raise error.LoginFailed: if no username was supplied or the
            opaque token does not match an outstanding challenge.
        """
        def unq(s):
            if s[0] == s[-1] == '"':
                return s[1:-1]
            return s
        response = ' '.join(response.splitlines())
        parts = response.split(',')
        auth = dict([(k.strip(), unq(v.strip())) for (k, v) in
                     [p.split('=', 1) for p in parts]])

        username = auth.get('username')
        if not username:
            raise error.LoginFailed('Invalid response, no username given')

        if auth.get('opaque') not in self.outstanding:
            raise error.LoginFailed('Invalid response, opaque not outstanding')

        del self.outstanding[auth['opaque']]

        return DigestedCredentials(username, request.method, self.realm, auth)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/auth/digest.py
digest.py
import warnings import socket from cStringIO import StringIO from zope.interface import implements from twisted.python import log from twisted.internet import interfaces, protocol, reactor from twisted.protocols import policies, basic from twisted.web2 import responsecode from twisted.web2 import http_headers from twisted.web2 import http PERSIST_NO_PIPELINE, PERSIST_PIPELINE = (1,2) _cachedHostNames = {} def _cachedGetHostByAddr(hostaddr): hostname = _cachedHostNames.get(hostaddr) if hostname is None: try: hostname = socket.gethostbyaddr(hostaddr)[0] except socket.herror: hostname = hostaddr _cachedHostNames[hostaddr]=hostname return hostname class StringTransport(object): """ I am a StringIO wrapper that conforms for the transport API. I support the 'writeSequence' method. """ def __init__(self): self.s = StringIO() def writeSequence(self, seq): self.s.write(''.join(seq)) def __getattr__(self, attr): return getattr(self.__dict__['s'], attr) class AbortedException(Exception): pass class HTTPParser(object): """This class handles the parsing side of HTTP processing. With a suitable subclass, it can parse either the client side or the server side of the connection. """ # Class config: parseCloseAsEnd = False # Instance vars chunkedIn = False headerlen = 0 length = None inHeaders = None partialHeader = '' connHeaders = None finishedReading = False channel = None # For subclassing... 
# Needs attributes: # version # Needs functions: # createRequest() # processRequest() # _abortWithError() # handleContentChunk(data) # handleContentComplete() # Needs functions to exist on .channel # channel.maxHeaderLength # channel.requestReadFinished(self) # channel.setReadPersistent(self, persistent) # (from LineReceiver): # channel.setRawMode() # channel.setLineMode(extraneous) # channel.pauseProducing() # channel.resumeProducing() # channel.stopProducing() def __init__(self, channel): self.inHeaders = http_headers.Headers() self.channel = channel def lineReceived(self, line): if self.chunkedIn: # Parsing a chunked input if self.chunkedIn == 1: # First we get a line like "chunk-size [';' chunk-extension]" # (where chunk extension is just random crap as far as we're concerned) # RFC says to ignore any extensions you don't recognize -- that's all of them. chunksize = line.split(';', 1)[0] try: self.length = int(chunksize, 16) except: self._abortWithError(responsecode.BAD_REQUEST, "Invalid chunk size, not a hex number: %s!" % chunksize) if self.length < 0: self._abortWithError(responsecode.BAD_REQUEST, "Invalid chunk size, negative.") if self.length == 0: # We're done, parse the trailers line self.chunkedIn = 3 else: # Read self.length bytes of raw data self.channel.setRawMode() elif self.chunkedIn == 2: # After we got data bytes of the appropriate length, we end up here, # waiting for the CRLF, then go back to get the next chunk size. if line != '': self._abortWithError(responsecode.BAD_REQUEST, "Excess %d bytes sent in chunk transfer mode" % len(line)) self.chunkedIn = 1 elif self.chunkedIn == 3: # TODO: support Trailers (maybe! but maybe not!) # After getting the final "0" chunk we're here, and we *EAT MERCILESSLY* # any trailer headers sent, and wait for the blank line to terminate the # request. 
if line == '': self.allContentReceived() # END of chunk handling elif line == '': # Empty line => End of headers if self.partialHeader: self.headerReceived(self.partialHeader) self.partialHeader = '' self.allHeadersReceived() # can set chunkedIn self.createRequest() if self.chunkedIn: # stay in linemode waiting for chunk header pass elif self.length == 0: # no content expected self.allContentReceived() else: # await raw data as content self.channel.setRawMode() # Should I do self.pauseProducing() here? self.processRequest() else: self.headerlen += len(line) if self.headerlen > self.channel.maxHeaderLength: self._abortWithError(responsecode.BAD_REQUEST, 'Headers too long.') if line[0] in ' \t': # Append a header continuation self.partialHeader += line else: if self.partialHeader: self.headerReceived(self.partialHeader) self.partialHeader = line def rawDataReceived(self, data): """Handle incoming content.""" datalen = len(data) if datalen < self.length: self.handleContentChunk(data) self.length = self.length - datalen else: self.handleContentChunk(data[:self.length]) extraneous = data[self.length:] channel = self.channel # could go away from allContentReceived. if not self.chunkedIn: self.allContentReceived() else: # NOTE: in chunked mode, self.length is the size of the current chunk, # so we still have more to read. self.chunkedIn = 2 # Read next chunksize channel.setLineMode(extraneous) def headerReceived(self, line): """Store this header away. Check for too much header data (> channel.maxHeaderLength) and abort the connection if so. 
""" nameval = line.split(':', 1) if len(nameval) != 2: self._abortWithError(responsecode.BAD_REQUEST, "No ':' in header.") name, val = nameval val = val.lstrip(' \t') self.inHeaders.addRawHeader(name, val) def allHeadersReceived(self): # Split off connection-related headers connHeaders = self.splitConnectionHeaders() # Set connection parameters from headers self.setConnectionParams(connHeaders) self.connHeaders = connHeaders def allContentReceived(self): self.finishedReading = True self.channel.requestReadFinished(self) self.handleContentComplete() def splitConnectionHeaders(self): # Split off headers for the connection from headers for the request. def move(name): h = inHeaders.getRawHeaders(name, None) if h is not None: inHeaders.removeHeader(name) connHeaders.setRawHeaders(name, h) # NOTE: According to HTTP spec, we're supposed to eat the # 'Proxy-Authenticate' and 'Proxy-Authorization' headers also, but that # doesn't sound like a good idea to me, because it makes it impossible # to have a non-authenticating transparent proxy in front of an # authenticating proxy. An authenticating proxy can eat them itself. # 'Proxy-Connection' is an undocumented HTTP 1.0 abomination. connHeaderNames = ['connection', 'content-length', 'keep-alive', 'te', 'trailers', 'transfer-encoding', 'upgrade', 'proxy-connection'] inHeaders = self.inHeaders connHeaders = http_headers.Headers() move('connection') if self.version < (1,1): # Remove all headers mentioned in Connection, because a HTTP 1.0 # proxy might have erroneously forwarded it from a 1.1 client. 
for name in connHeaders.getHeader('connection', ()): if inHeaders.hasHeader(name): inHeaders.removeHeader(name) else: # Otherwise, just add the headers listed to the list of those to move connHeaderNames.extend(connHeaders.getHeader('connection', ())) for headername in connHeaderNames: move(headername) return connHeaders def setConnectionParams(self, connHeaders): # Figure out persistent connection stuff if self.version >= (1,1): if 'close' in connHeaders.getHeader('connection', ()): readPersistent = False else: readPersistent = PERSIST_PIPELINE elif 'keep-alive' in connHeaders.getHeader('connection', ()): readPersistent = PERSIST_NO_PIPELINE else: readPersistent = False # Okay, now implement section 4.4 Message Length to determine # how to find the end of the incoming HTTP message. transferEncoding = connHeaders.getHeader('transfer-encoding') if transferEncoding: if transferEncoding[-1] == 'chunked': # Chunked self.chunkedIn = 1 # Cut off the chunked encoding (cause it's special) transferEncoding = transferEncoding[:-1] elif not self.parseCloseAsEnd: # Would close on end of connection, except this can't happen for # client->server data. (Well..it could actually, since TCP has half-close # but the HTTP spec says it can't, so we'll pretend it's right.) self._abortWithError(responsecode.BAD_REQUEST, "Transfer-Encoding received without chunked in last position.") # TODO: support gzip/etc encodings. # FOR NOW: report an error if the client uses any encodings. # They shouldn't, because we didn't send a TE: header saying it's okay. if transferEncoding: self._abortWithError(responsecode.NOT_IMPLEMENTED, "Transfer-Encoding %s not supported." % transferEncoding) else: # No transfer-coding. self.chunkedIn = 0 if self.parseCloseAsEnd: # If no Content-Length, indeterminate length data # (unless the responsecode was one of the special ones, or # the request method was HEAD. 
# If the request was HEAD, self.length has been set to 0 by # HTTPClientRequest.submit) if self.code in http.NO_BODY_CODES: self.length = 0 else: self.length = connHeaders.getHeader('content-length', self.length) # If it's an indeterminate stream without transfer encoding, it must be # the last request. if self.length is None: readPersistent = False else: # If no Content-Length either, assume no content. self.length = connHeaders.getHeader('content-length', 0) # Set the calculated persistence self.channel.setReadPersistent(readPersistent) def abortParse(self): # If we're erroring out while still reading the request if not self.finishedReading: self.finishedReading = True self.channel.setReadPersistent(False) self.channel.requestReadFinished(self) # producer interface def pauseProducing(self): if not self.finishedReading: self.channel.pauseProducing() def resumeProducing(self): if not self.finishedReading: self.channel.resumeProducing() def stopProducing(self): if not self.finishedReading: self.channel.stopProducing() class HTTPChannelRequest(HTTPParser): """This class handles the state and parsing for one HTTP request. It is responsible for all the low-level connection oriented behavior. 
Thus, it takes care of keep-alive, de-chunking, etc., and passes the non-connection headers on to the user-level Request object.""" command = path = version = None queued = 0 request = None out_version = "HTTP/1.1" def __init__(self, channel, queued=0): HTTPParser.__init__(self, channel) self.queued=queued # Buffer writes to a string until we're first in line # to write a response if queued: self.transport = StringTransport() else: self.transport = self.channel.transport # set the version to a fallback for error generation self.version = (1,0) def gotInitialLine(self, initialLine): parts = initialLine.split() # Parse the initial request line if len(parts) != 3: if len(parts) == 1: parts.append('/') if len(parts) == 2 and parts[1][0] == '/': parts.append('HTTP/0.9') else: self._abortWithError(responsecode.BAD_REQUEST, 'Bad request line: %s' % initialLine) self.command, self.path, strversion = parts try: protovers = http.parseVersion(strversion) if protovers[0] != 'http': raise ValueError() except ValueError: self._abortWithError(responsecode.BAD_REQUEST, "Unknown protocol: %s" % strversion) self.version = protovers[1:3] # Ensure HTTP 0 or HTTP 1. if self.version[0] > 1: self._abortWithError(responsecode.HTTP_VERSION_NOT_SUPPORTED, 'Only HTTP 0.9 and HTTP 1.x are supported.') if self.version[0] == 0: # simulate end of headers, as HTTP 0 doesn't have headers. 
self.lineReceived('') def lineLengthExceeded(self, line, wasFirst=False): code = wasFirst and responsecode.REQUEST_URI_TOO_LONG or responsecode.BAD_REQUEST self._abortWithError(code, 'Header line too long.') def createRequest(self): self.request = self.channel.requestFactory(self, self.command, self.path, self.version, self.length, self.inHeaders) del self.inHeaders def processRequest(self): self.request.process() def handleContentChunk(self, data): self.request.handleContentChunk(data) def handleContentComplete(self): self.request.handleContentComplete() ############## HTTPChannelRequest *RESPONSE* methods ############# producer = None chunkedOut = False finished = False ##### Request Callbacks ##### def writeIntermediateResponse(self, code, headers=None): if self.version >= (1,1): self._writeHeaders(code, headers, False) def writeHeaders(self, code, headers): self._writeHeaders(code, headers, True) def _writeHeaders(self, code, headers, addConnectionHeaders): # HTTP 0.9 doesn't have headers. if self.version[0] == 0: return l = [] code_message = responsecode.RESPONSES.get(code, "Unknown Status") l.append('%s %s %s\r\n' % (self.out_version, code, code_message)) if headers is not None: for name, valuelist in headers.getAllRawHeaders(): for value in valuelist: l.append("%s: %s\r\n" % (name, value)) if addConnectionHeaders: # if we don't have a content length, we send data in # chunked mode, so that we can support persistent connections. 
if (headers.getHeader('content-length') is None and self.command != "HEAD" and code not in http.NO_BODY_CODES): if self.version >= (1,1): l.append("%s: %s\r\n" % ('Transfer-Encoding', 'chunked')) self.chunkedOut = True else: # Cannot use persistent connections if we can't do chunking self.channel.dropQueuedRequests() if self.channel.isLastRequest(self): l.append("%s: %s\r\n" % ('Connection', 'close')) elif self.version < (1,1): l.append("%s: %s\r\n" % ('Connection', 'Keep-Alive')) l.append("\r\n") self.transport.writeSequence(l) def write(self, data): if not data: return elif self.chunkedOut: self.transport.writeSequence(("%X\r\n" % len(data), data, "\r\n")) else: self.transport.write(data) def finish(self): """We are finished writing data.""" if self.finished: warnings.warn("Warning! request.finish called twice.", stacklevel=2) return if self.chunkedOut: # write last chunk and closing CRLF self.transport.write("0\r\n\r\n") self.finished = True if not self.queued: self._cleanup() def abortConnection(self, closeWrite=True): """Abort the HTTP connection because of some kind of unrecoverable error. If closeWrite=False, then only abort reading, but leave the writing side alone. This is mostly for internal use by the HTTP request parsing logic, so that it can call an error page generator. Otherwise, completely shut down the connection. 
""" self.abortParse() if closeWrite: if self.producer: self.producer.stopProducing() self.unregisterProducer() self.finished = True if self.queued: self.transport.reset() self.transport.truncate() else: self._cleanup() def getHostInfo(self): t=self.channel.transport secure = interfaces.ISSLTransport(t, None) is not None host = t.getHost() host.host = _cachedGetHostByAddr(host.host) return host, secure def getRemoteHost(self): return self.channel.transport.getPeer() ##### End Request Callbacks ##### def _abortWithError(self, errorcode, text=''): """Handle low level protocol errors.""" headers = http_headers.Headers() headers.setHeader('content-length', len(text)+1) self.abortConnection(closeWrite=False) self.writeHeaders(errorcode, headers) self.write(text) self.write("\n") self.finish() raise AbortedException def _cleanup(self): """Called when have finished responding and are no longer queued.""" if self.producer: log.err(RuntimeError("Producer was not unregistered for %s" % self)) self.unregisterProducer() self.channel.requestWriteFinished(self) del self.transport # methods for channel - end users should not use these def noLongerQueued(self): """Notify the object that it is no longer queued. We start writing whatever data we have to the transport, etc. This method is not intended for users. """ if not self.queued: raise RuntimeError, "noLongerQueued() got called unnecessarily." self.queued = 0 # set transport to real one and send any buffer data data = self.transport.getvalue() self.transport = self.channel.transport if data: self.transport.write(data) # if we have producer, register it with transport if (self.producer is not None) and not self.finished: self.transport.registerProducer(self.producer, True) # if we're finished, clean up if self.finished: self._cleanup() # consumer interface def registerProducer(self, producer, streaming): """Register a producer. 
""" if self.producer: raise ValueError, "registering producer %s before previous one (%s) was unregistered" % (producer, self.producer) self.producer = producer if self.queued: producer.pauseProducing() else: self.transport.registerProducer(producer, streaming) def unregisterProducer(self): """Unregister the producer.""" if not self.queued: self.transport.unregisterProducer() self.producer = None def connectionLost(self, reason): """connection was lost""" if self.queued and self.producer: self.producer.stopProducing() self.producer = None if self.request: self.request.connectionLost(reason) class HTTPChannel(basic.LineReceiver, policies.TimeoutMixin, object): """A receiver for HTTP requests. Handles splitting up the connection for the multiple HTTPChannelRequests that may be in progress on this channel. @ivar timeOut: number of seconds to wait before terminating an idle connection. @ivar maxPipeline: number of outstanding in-progress requests to allow before pausing the input. @ivar maxHeaderLength: number of bytes of header to accept from the client. """ implements(interfaces.IHalfCloseableProtocol) ## Configuration parameters. Set in instances or subclasses. # How many simultaneous requests to handle. maxPipeline = 4 # Timeout when between two requests betweenRequestsTimeOut = 15 # Timeout between lines or bytes while reading a request inputTimeOut = 60 * 4 # maximum length of headers (10KiB) maxHeaderLength = 10240 # Allow persistent connections? 
    # Whether a second request may ever be read on this connection.
    allowPersistentConnections = True

    # ChannelRequest
    chanRequestFactory = HTTPChannelRequest
    requestFactory = http.Request

    # Parsing state for the next incoming request line:
    #   2 = expecting a request line, 1 = expecting a request line but eat
    #   one leading blank line (IE quirk, see lineReceived), 0 = in headers.
    _first_line = 2
    readPersistent = PERSIST_PIPELINE

    # Set when the read / write half of the connection has gone away.
    _readLost = False
    _writeLost = False

    # DelayedCall used by lingeringClose; non-None while lingering.
    _lingerTimer = None
    # The request currently being parsed from the wire (head of pipeline).
    chanRequest = None

    def _callLater(self, secs, fun):
        """Thin wrapper around reactor.callLater (single override point)."""
        reactor.callLater(secs, fun)

    def __init__(self):
        # the request queue
        self.requests = []

    def connectionMade(self):
        """Start the idle timeout and count this connection in the factory."""
        self.setTimeout(self.inputTimeOut)
        self.factory.outstandingRequests+=1

    def lineReceived(self, line):
        """Dispatch one header-section line to the in-progress request,
        creating a new chanRequest when this is the start of a request."""
        if self._first_line:
            self.setTimeout(self.inputTimeOut)

            # if this connection is not persistent, drop any data which
            # the client (illegally) sent after the last request.
            if not self.readPersistent:
                self.dataReceived = self.lineReceived = lambda *args: None
                return

            # IE sends an extraneous empty line (\r\n) after a POST request;
            # eat up such a line, but only ONCE
            if not line and self._first_line == 1:
                self._first_line = 2
                return

            self._first_line = 0

            if not self.allowPersistentConnections:
                # Don't allow a second request
                self.readPersistent = False

            try:
                self.chanRequest = self.chanRequestFactory(self, len(self.requests))
                self.requests.append(self.chanRequest)
                self.chanRequest.gotInitialLine(line)
            except AbortedException:
                # The request already responded with an error and aborted;
                # nothing more to do for this line.
                pass
        else:
            try:
                self.chanRequest.lineReceived(line)
            except AbortedException:
                pass

    def lineLengthExceeded(self, line):
        """Called by LineReceiver when a header line exceeds the limit."""
        if self._first_line:
            # Fabricate a request object to respond to the line length violation.
            self.chanRequest = self.chanRequestFactory(self, len(self.requests))
            self.requests.append(self.chanRequest)
            self.chanRequest.gotInitialLine("GET fake HTTP/1.0")
        try:
            self.chanRequest.lineLengthExceeded(line, self._first_line)
        except AbortedException:
            pass

    def rawDataReceived(self, data):
        """Forward raw body bytes to the request currently being read."""
        self.setTimeout(self.inputTimeOut)
        try:
            self.chanRequest.rawDataReceived(data)
        except AbortedException:
            pass

    def requestReadFinished(self, request):
        """The head-of-line request has fully read its input; reset the
        parser for the next pipelined request."""
        if(self.readPersistent is PERSIST_NO_PIPELINE or
           len(self.requests) >= self.maxPipeline):
            # Either pipelining is forbidden or the pipeline is full:
            # stop reading until responses drain.
            self.pauseProducing()

        # reset state variables
        self._first_line = 1
        self.chanRequest = None
        self.setLineMode()

        # Disable the idle timeout, in case this request takes a long
        # time to finish generating output.
        if len(self.requests) > 0:
            self.setTimeout(None)

    def _startNextRequest(self):
        # notify next request, if present, it can start writing
        del self.requests[0]

        if self._writeLost:
            self.transport.loseConnection()
        elif self.requests:
            self.requests[0].noLongerQueued()

            # resume reading if allowed to
            if(not self._readLost and
               self.readPersistent is not PERSIST_NO_PIPELINE and
               len(self.requests) < self.maxPipeline):
                self.resumeProducing()
        elif self._readLost:
            # No more incoming data, they already closed!
            self.transport.loseConnection()
        else:
            # no requests in queue, resume reading
            self.setTimeout(self.betweenRequestsTimeOut)
            self.resumeProducing()

    def setReadPersistent(self, persistent):
        """Lower (never raise) the connection's read-persistence level."""
        if self.readPersistent:
            # only allow it to be set if it's not currently False
            self.readPersistent = persistent

    def dropQueuedRequests(self):
        """Called when a response is written that forces a connection close."""
        self.readPersistent = False
        # Tell all requests but first to abort.
        for request in self.requests[1:]:
            request.connectionLost(None)
        del self.requests[1:]

    def isLastRequest(self, request):
        # Is this channel handling the last possible request
        return not self.readPersistent and self.requests[-1] == request

    def requestWriteFinished(self, request):
        """Called by first request in queue when it is done."""
        # NOTE(review): a bare TypeError with no message; only reachable if a
        # non-head request calls this, which would be a programming error.
        if request != self.requests[0]: raise TypeError

        # Don't del because we haven't finished cleanup, so,
        # don't want queue len to be 0 yet.
        self.requests[0] = None

        if self.readPersistent or len(self.requests) > 1:
            # Do this in the next reactor loop so as to
            # not cause huge call stacks with fast
            # incoming requests.
            self._callLater(0, self._startNextRequest)
        else:
            self.lingeringClose()

    def timeoutConnection(self):
        #log.msg("Timing out client: %s" % str(self.transport.getPeer()))
        policies.TimeoutMixin.timeoutConnection(self)

    def lingeringClose(self):
        """
        This is a bit complicated. This process is necessary to ensure proper
        workingness when HTTP pipelining is in use.

        Here is what it wants to do:

            1. Finish writing any buffered data, then close our write side.
               While doing so, read and discard any incoming data.

            2. When that happens (writeConnectionLost called), wait up to 20
               seconds for the remote end to close their write side (our read
               side).

            3. - If they do (readConnectionLost called), close the socket,
                 and cancel the timeout.

               - If that doesn't happen, the timer fires, and makes the
                 socket close anyways.
        """
        # Close write half
        self.transport.loseWriteConnection()

        # Throw out any incoming data
        self.dataReceived = self.lineReceived = lambda *args: None
        self.transport.resumeProducing()

    def writeConnectionLost(self):
        # Okay, all data has been written
        # In 20 seconds, actually close the socket
        self._lingerTimer = reactor.callLater(20, self._lingerClose)
        self._writeLost = True

    def _lingerClose(self):
        self._lingerTimer = None
        self.transport.loseConnection()

    def readConnectionLost(self):
        """Read connection lost"""
        # If in the lingering-close state, lose the socket.
        if self._lingerTimer:
            self._lingerTimer.cancel()
            self._lingerTimer = None
            self.transport.loseConnection()
            return

        # If between requests, drop connection
        # when all current requests have written their data.
        self._readLost = True
        if not self.requests:
            # No requests in progress, lose now.
            self.transport.loseConnection()

        # If currently in the process of reading a request, this is
        # probably a client abort, so lose the connection.
        if self.chanRequest:
            self.transport.loseConnection()

    def connectionLost(self, reason):
        """The socket is fully gone: update factory count and abort all
        outstanding requests."""
        self.factory.outstandingRequests-=1

        self._writeLost = True
        self.readConnectionLost()
        self.setTimeout(None)

        # Tell all requests to abort.
        for request in self.requests:
            if request is not None:
                request.connectionLost(reason)

class OverloadedServerProtocol(protocol.Protocol):
    """Minimal protocol that immediately answers 503 and disconnects.

    Used by HTTPFactory when too many requests are outstanding.
    """
    def connectionMade(self):
        self.transport.write("HTTP/1.0 503 Service Unavailable\r\n"
                             "Content-Type: text/html\r\n"
                             "Connection: close\r\n\r\n"
                             "<html><head><title>503 Service Unavailable</title></head>"
                             "<body><h1>Service Unavailable</h1>"
                             "The server is currently overloaded, "
                             "please try again later.</body></html>")
        self.transport.loseConnection()

class HTTPFactory(protocol.ServerFactory):
    """Factory for HTTP server."""

    protocol = HTTPChannel

    # Extra attributes copied onto every protocol instance in buildProtocol.
    protocolArgs = None

    # Number of currently connected channels (shared across the factory).
    outstandingRequests = 0

    def __init__(self, requestFactory, maxRequests=600, **kwargs):
        self.maxRequests=maxRequests
        self.protocolArgs = kwargs
        self.protocolArgs['requestFactory']=requestFactory

    def buildProtocol(self, addr):
        # Over the connection limit: serve a canned 503 instead.
        if self.outstandingRequests >= self.maxRequests:
            return OverloadedServerProtocol()

        p = protocol.ServerFactory.buildProtocol(self, addr)

        # Copy the configured attributes (including requestFactory) onto
        # the new channel instance.
        for arg,value in self.protocolArgs.iteritems():
            setattr(p, arg, value)
        return p

__all__ = ['HTTPFactory', ]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/channel/http.py
http.py
Okay, FastCGI is a pretty stupid protocol. Let me count some reasons: 1) Specifies ability to multiplex streams of data over a single socket, but has no form of flow control. This is fine for multiplexing stderr, but serving more than one request over a channel with no flow control is just *asking* for trouble. I avoid this and enforce one outstanding request per connection. This basically means the whole "requestId" field is worthless. 2) Has variable length packet padding. If you want padding, just make it always pad to 8 bytes fercrissake! 3) Why does every packet need to specify the version. How about just sending it once. 4) Name/value pair format. Come *on*. Is it *possible* to come up with a more complex format to send them with?? Even if you think you've gotten it down, you probably forgot that it's a stream, and the name/values can be split between two packets. (Yes, this means *you*. Don't even try to pretend you didn't miss this detail.) """ from twisted.internet import protocol from twisted.web2 import responsecode from twisted.web2.channel import cgi class FastCGIError(Exception): pass # Values for type component of FCGI_Header FCGI_BEGIN_REQUEST = 1 FCGI_ABORT_REQUEST = 2 FCGI_END_REQUEST = 3 FCGI_PARAMS = 4 FCGI_STDIN = 5 FCGI_STDOUT = 6 FCGI_STDERR = 7 FCGI_DATA = 8 FCGI_GET_VALUES = 9 FCGI_GET_VALUES_RESULT = 10 FCGI_UNKNOWN_TYPE = 11 typeNames = { FCGI_BEGIN_REQUEST : 'fcgi_begin_request', FCGI_ABORT_REQUEST : 'fcgi_abort_request', FCGI_END_REQUEST : 'fcgi_end_request', FCGI_PARAMS : 'fcgi_params', FCGI_STDIN : 'fcgi_stdin', FCGI_STDOUT : 'fcgi_stdout', FCGI_STDERR : 'fcgi_stderr', FCGI_DATA : 'fcgi_data', FCGI_GET_VALUES : 'fcgi_get_values', FCGI_GET_VALUES_RESULT: 'fcgi_get_values_result', FCGI_UNKNOWN_TYPE : 'fcgi_unknown_type'} # Mask for flags component of FCGI_BeginRequestBody FCGI_KEEP_CONN = 1 # Values for role component of FCGI_BeginRequestBody FCGI_RESPONDER = 1 FCGI_AUTHORIZER = 2 FCGI_FILTER = 3 # Values for protocolStatus component of 
# FCGI_EndRequestBody.
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3

# Largest content payload expressible in a record's 16-bit length field.
FCGI_MAX_PACKET_LEN = 0xFFFF

class Record(object):
    """A single FastCGI record: 8-byte header, content, and padding."""

    def __init__(self, type, reqId, content, version=1):
        self.version = version
        self.type = type
        self.reqId = reqId
        self.content = content
        self.length = len(content)
        if self.length > FCGI_MAX_PACKET_LEN:
            raise ValueError("Record length too long: %d > %d" % (self.length, FCGI_MAX_PACKET_LEN))
        # Pad content out to an 8-byte boundary.  BUGFIX: mask with & 7 so an
        # already-aligned record gets 0 padding bytes instead of 8 (8 was
        # protocol-legal but wasteful on every aligned record).
        self.padding = (8 - (self.length & 7)) & 7
        self.reserved = 0

    def fromHeaderString(clz, rec):
        """Parse an 8-byte wire header into a Record; content is left None
        until the caller has buffered the full payload."""
        self = object.__new__(clz)
        self.version = ord(rec[0])
        self.type = ord(rec[1])
        self.reqId = (ord(rec[2])<<8)|ord(rec[3])
        self.length = (ord(rec[4])<<8)|ord(rec[5])
        self.padding = ord(rec[6])
        self.reserved = ord(rec[7])
        self.content = None
        return self
    fromHeaderString = classmethod(fromHeaderString)

    def toOutputString(self):
        """Serialize header + content + padding into a wire-format string."""
        return "%c%c%c%c%c%c%c%c" % (self.version, self.type,
                                     (self.reqId&0xFF00)>>8, self.reqId&0xFF,
                                     (self.length&0xFF00)>>8, self.length & 0xFF,
                                     self.padding, self.reserved) \
               + self.content + '\0'*self.padding

    def totalLength(self):
        """Number of bytes this record occupies on the wire."""
        return 8 + self.length + self.padding

    def __repr__(self):
        return "<FastCGIRecord version=%d type=%d(%s) reqId=%d content=%r>" % (
            self.version, self.type, typeNames.get(self.type), self.reqId, self.content)

def parseNameValues(s):
    '''
    @param s: String containing valid name/value data, of the form:
              'namelength + valuelength + name + value' repeated 0 or more
              times. See C{fastcgi.writeNameValue} for how to create this
              string.
    @return: Generator of tuples of the form (name, value)
    '''
    off = 0
    while off < len(s):
        nameLen = ord(s[off])
        off += 1
        if nameLen&0x80:
            # High bit set: the length is a 4-byte big-endian value.
            nameLen=(nameLen&0x7F)<<24 | ord(s[off])<<16 | ord(s[off+1])<<8 | ord(s[off+2])
            off += 3
        valueLen=ord(s[off])
        off += 1
        if valueLen&0x80:
            valueLen=(valueLen&0x7F)<<24 | ord(s[off])<<16 | ord(s[off+1])<<8 | ord(s[off+2])
            off += 3
        yield (s[off:off+nameLen], s[off+nameLen:off+nameLen+valueLen])
        off += nameLen + valueLen

def getLenBytes(length):
    """Encode a name/value length: one byte below 0x80, else four bytes
    with the high bit set.

    @raise ValueError: if length does not fit in 31 bits.
    """
    if length<0x80:
        return chr(length)
    elif 0 < length <= 0x7FFFFFFF:
        return (chr(0x80|(length>>24)&0x7F) + chr((length>>16)&0xFF) +
                chr((length>>8)&0xFF) + chr(length&0xFF))
    else:
        raise ValueError("Name length too long.")

def writeNameValue(name, value):
    """Encode one name/value pair in FastCGI name-value format."""
    return getLenBytes(len(name)) + getLenBytes(len(value)) + name + value

class FastCGIChannelRequest(cgi.BaseCGIChannelRequest):
    """Protocol handling one FastCGI connection.

    Deliberately supports only a single outstanding request per connection
    (see the module docstring); multiplexed requests are rejected with
    FCGI_CANT_MPX_CONN.
    """
    maxConnections = 100
    # Id of the request currently in progress; 0 means "no request".
    reqId = 0
    request = None

    ## High level protocol
    def packetReceived(self, packet):
        '''
        @param packet: instance of C{fastcgi.Record}.
        @raise: FastCGIError on invalid version or where the type does not
                exist in funName
        '''
        if packet.version != 1:
            raise FastCGIError("FastCGI packet received with version != 1")

        funName = typeNames.get(packet.type)
        if funName is None:
            raise FastCGIError("Unknown FastCGI packet type: %d" % packet.type)
        # Dispatch to the fcgi_* handler named in typeNames.
        getattr(self, funName)(packet)

    def fcgi_get_values(self, packet):
        """Answer a management FCGI_GET_VALUES query (always reqId 0)."""
        if packet.reqId != 0:
            raise ValueError("Should be 0!")

        content = ""
        for name,value in parseNameValues(packet.content):
            outval = None
            if name == "FCGI_MAX_CONNS":
                outval = str(self.maxConnections)
            elif name == "FCGI_MAX_REQS":
                outval = str(self.maxConnections)
            elif name == "FCGI_MPXS_CONNS":
                # We never multiplex; advertise that.
                outval = "0"
            if outval:
                content += writeNameValue(name, outval)
        self.writePacket(Record(FCGI_GET_VALUES_RESULT, 0, content))

    def fcgi_unknown_type(self, packet):
        # Unused, reserved for future expansion
        pass

    def fcgi_begin_request(self, packet):
        """Accept (or reject) the start of a new request."""
        role = ord(packet.content[0])<<8 | ord(packet.content[1])
        flags = ord(packet.content[2])
        if packet.reqId == 0:
            raise ValueError("ReqId shouldn't be 0!")
        if self.reqId != 0:
            # Already serving a request on this connection; refuse to
            # multiplex.  BUGFIX: address the rejection to the *new*
            # request's id and return instead of falling through and
            # accepting the request anyway.
            self.writePacket(Record(FCGI_END_REQUEST, packet.reqId,
                                    "\0\0\0\0"+chr(FCGI_CANT_MPX_CONN)+"\0\0\0"))
            return
        if role != FCGI_RESPONDER:
            # Only the responder role is implemented.  BUGFIX: as above,
            # use packet.reqId and stop processing after rejecting.
            self.writePacket(Record(FCGI_END_REQUEST, packet.reqId,
                                    "\0\0\0\0"+chr(FCGI_UNKNOWN_ROLE)+"\0\0\0"))
            return

        self.reqId = packet.reqId
        self.keepalive = flags & FCGI_KEEP_CONN
        self.params = ""

    def fcgi_abort_request(self, packet):
        """The web server asked us to abort the current request."""
        if packet.reqId != self.reqId:
            return
        # BUGFIX: connectionLost requires a reason argument, and the request
        # object does not exist until the params stream has completed.
        if self.request is not None:
            self.request.connectionLost(None)

    def fcgi_params(self, packet):
        if packet.reqId != self.reqId:
            return

        # I don't feel like doing the work to incrementally parse this stupid
        # protocol, so we'll just buffer all the params data before parsing.
        if not packet.content:
            # Empty params record marks end-of-stream: parse and start.
            self.makeRequest(dict(parseNameValues(self.params)))
            self.request.process()
        self.params += packet.content

    def fcgi_stdin(self, packet):
        """Request body data; an empty record marks end of body."""
        if packet.reqId != self.reqId:
            return
        if not packet.content:
            self.request.handleContentComplete()
        else:
            self.request.handleContentChunk(packet.content)

    def fcgi_data(self, packet):
        # For filter roles only, which is currently unsupported.
        pass

    def write(self, data):
        """Send response body data as one or more FCGI_STDOUT records."""
        if len(data) > FCGI_MAX_PACKET_LEN:
            # Split oversized payloads into max-size chunks.
            n = 0
            while 1:
                d = data[n*FCGI_MAX_PACKET_LEN:(n+1)*FCGI_MAX_PACKET_LEN]
                if not d:
                    break
                self.write(d)
                # BUGFIX: n was never incremented, so this loop resent the
                # first chunk forever for any payload larger than 64KiB-1.
                n += 1
            return
        self.writePacket(Record(FCGI_STDOUT, self.reqId, data))

    def writeHeaders(self, code, headers):
        """Send the CGI-style response header block on stdout."""
        l = []
        code_message = responsecode.RESPONSES.get(code, "Unknown Status")
        l.append("Status: %s %s\n" % (code, code_message))
        if headers is not None:
            for name, valuelist in headers.getAllRawHeaders():
                for value in valuelist:
                    l.append("%s: %s\n" % (name, value))
        l.append('\n')
        self.write(''.join(l))

    def finish(self):
        """End the current request and, unless keepalive, the connection."""
        if self.request is None:
            raise RuntimeError("Request.finish called when no request was outstanding.")
        self.writePacket(Record(FCGI_END_REQUEST, self.reqId,
                                "\0\0\0\0"+chr(FCGI_REQUEST_COMPLETE)+"\0\0\0"))
        # Fall back to the class attributes (reqId=0, request=None).
        del self.reqId, self.request
        if not self.keepalive:
            self.transport.loseConnection()

    ## Low level protocol
    paused = False
    # Partially received record whose payload hasn't fully arrived yet.
    _lastRecord = None
    # Accumulated unparsed bytes.
    recvd = ""

    def writePacket(self, packet):
        #print "Writing record", packet
        self.transport.write(packet.toOutputString())

    def dataReceived(self, recd):
        """Buffer incoming bytes and dispatch every complete record."""
        self.recvd = self.recvd + recd
        record = self._lastRecord
        self._lastRecord = None
        while len(self.recvd) >= 8 and not self.paused:
            if record is None:
                record = Record.fromHeaderString(self.recvd[:8])
            if len(self.recvd) < record.totalLength():
                # Payload (incl. padding) not all here yet; stash and wait.
                self._lastRecord = record
                break
            record.content = self.recvd[8:record.length+8]
            self.recvd = self.recvd[record.totalLength():]
            self.packetReceived(record)
            record = None

    def pauseProducing(self):
        self.paused = True
        self.transport.pauseProducing()

    def resumeProducing(self):
        self.paused = False
        self.transport.resumeProducing()
        # Reprocess anything already buffered while we were paused.
        self.dataReceived('')

    def stopProducing(self):
        self.paused = True
        self.transport.stopProducing()

class FastCGIFactory(protocol.ServerFactory):
    """Factory producing one FastCGIChannelRequest per connection."""
    protocol = FastCGIChannelRequest
    def __init__(self, requestFactory):
        self.requestFactory=requestFactory

    def buildProtocol(self, addr):
        p = protocol.ServerFactory.buildProtocol(self, addr)
        p.requestFactory=self.requestFactory
        return p

# import socket
# import fcntl
# from twisted.web2 import tcp
# class ExistingFDTCPPort(tcp.Port):
#     def __init__(self, socknum, factory):
#         tcp.Port.__init__(self, 0, factory)
#         # Part of base.createInternetSocket
#         skt = socket.fromfd(socknum, self.addressFamily, self.socketType)
#         skt.setblocking(0)
#         if fcntl and hasattr(fcntl, 'FD_CLOEXEC'):
#             old = fcntl.fcntl(skt.fileno(), fcntl.F_GETFD)
#             fcntl.fcntl(skt.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
#         # Part of tcp.startListening
#         self._realPortNumber = skt.getsockname()[1]
#         log.msg("%s starting on %s" % (self.factory.__class__, self._realPortNumber))
#         # The order of the next 6 lines is kind of bizarre.  If no one
#         # can explain it, perhaps we should re-arrange them.
#         self.factory.doStart()
#         skt.listen(self.backlog)
#         self.connected = 1
#         self.socket = skt
#         self.fileno = self.socket.fileno
#         self.numberAccepts = 100
#         self.startReading()
#     def startListening(self):
#         raise NotImplementedError("Cannot startListening on an ExistingFDTCPPort")
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/channel/fastcgi.py
fastcgi.py
import warnings
import os
import urllib

from zope.interface import implements

from twisted.internet import protocol, address
from twisted.internet import reactor, interfaces
from twisted.web2 import http, http_headers, server, responsecode

def _abortWithError(errorcode, text):
    """Fail loudly on a malformed gateway environment.

    BUGFIX: this helper was called below but never defined anywhere, so a
    bad SERVER_PROTOCOL / GATEWAY_INTERFACE died with a NameError.  CGI has
    no way to send an error response before the request object exists, so
    raising with a readable message is the best we can do.
    """
    raise RuntimeError("%s: %s" % (errorcode, text))

class BaseCGIChannelRequest(protocol.Protocol):
    """Common transport glue for CGI-style gateways (CGI, SCGI, FastCGI).

    Translates the gateway's environment-variable request description into
    a twisted.web2 request object, and proxies the request's output back to
    the transport.
    """
    implements(interfaces.IHalfCloseableProtocol)

    finished = False
    requestFactory = http.Request
    request = None
    # Bytes of request body still expected; default lets readConnectionLost
    # run safely even if the peer disconnects before makeRequest.
    _dataRemaining = 0

    def makeRequest(self, vars):
        """Build self.request from a dict of CGI meta-variables."""
        headers = http_headers.Headers()
        http_vers = http.parseVersion(vars['SERVER_PROTOCOL'])
        if http_vers[0] != 'http' or http_vers[1] > 1:
            # BUGFIX: the format string was missing its '%s', which made this
            # error path itself raise a TypeError.
            _abortWithError(responsecode.INTERNAL_SERVER_ERROR,
                            "Twisted.web CGITransport: Unknown HTTP version: %s" %
                            vars['SERVER_PROTOCOL'])

        secure = vars.get("HTTPS") in ("1", "on") # apache extension?
        # BUGFIX: environment values are strings; IPv4Address ports are ints.
        port = int(vars.get('SERVER_PORT') or 80)
        server_host = vars.get('SERVER_NAME') or vars.get('SERVER_ADDR') or 'localhost'

        self.hostinfo = address.IPv4Address('TCP', server_host, port), bool(secure)
        self.remoteinfo = address.IPv4Address(
            'TCP', vars.get('REMOTE_ADDR', ''), int(vars.get('REMOTE_PORT') or 0))

        uri = vars.get('REQUEST_URI') # apache extension?
        if not uri:
            qstr = vars.get('QUERY_STRING', '')
            if qstr:
                qstr = "?"+urllib.quote(qstr, safe="")
            uri = urllib.quote(vars['SCRIPT_NAME'])+urllib.quote(vars.get('PATH_INFO', ''))+qstr

        # Recover HTTP headers from the HTTP_* meta-variables (plus the two
        # specially-named content headers CGI defines).
        for name,val in vars.iteritems():
            if name.startswith('HTTP_'):
                name = name[5:].replace('_', '-')
            elif name == 'CONTENT_TYPE':
                name = 'content-type'
            else:
                continue
            headers.setRawHeaders(name, (val,))

        # ROBUSTNESS: some servers set CONTENT_LENGTH to '' for bodyless
        # requests; int('') would raise.
        self._dataRemaining = int(vars.get('CONTENT_LENGTH') or 0)
        self.request = self.requestFactory(self, vars['REQUEST_METHOD'], uri,
                                           http_vers[1:3], self._dataRemaining,
                                           headers, prepathuri=vars['SCRIPT_NAME'])

    def writeIntermediateResponse(self, code, headers=None):
        """Ignore, CGI doesn't support."""
        pass

    def write(self, data):
        self.transport.write(data)

    def finish(self):
        if self.finished:
            warnings.warn("Warning! request.finish called twice.", stacklevel=2)
            return
        self.finished = True
        self.transport.loseConnection()

    def getHostInfo(self):
        return self.hostinfo

    def getRemoteHost(self):
        return self.remoteinfo

    def abortConnection(self, closeWrite=True):
        self.transport.loseConnection()

    def registerProducer(self, producer, streaming):
        self.transport.registerProducer(producer, streaming)

    def unregisterProducer(self):
        self.transport.unregisterProducer()

    def writeConnectionLost(self):
        # BUGFIX: was self.loseConnection(), a method this class never had
        # (AttributeError); the transport owns the connection.
        self.transport.loseConnection()

    def readConnectionLost(self):
        if self._dataRemaining > 0:
            # content-length was wrong, abort
            # BUGFIX: was self.loseConnection() (nonexistent method).
            self.transport.loseConnection()

class CGIChannelRequest(BaseCGIChannelRequest):
    """A channel fed by a plain CGI gateway (environment + stdin/stdout)."""
    cgi_vers = (1, 0)

    def __init__(self, requestFactory, vars):
        self.requestFactory=requestFactory
        cgi_vers = http.parseVersion(vars['GATEWAY_INTERFACE'])
        if cgi_vers[0] != 'cgi' or cgi_vers[1] != 1:
            _abortWithError(responsecode.INTERNAL_SERVER_ERROR,
                            "Twisted.web CGITransport: Unknown CGI version %s" %
                            vars['GATEWAY_INTERFACE'])
        self.makeRequest(vars)

    def writeHeaders(self, code, headers):
        """Emit a CGI response header block ('Status:' plus HTTP headers)."""
        l = []
        code_message = responsecode.RESPONSES.get(code, "Unknown Status")
        l.append("Status: %s %s\n" % (code, code_message))
        if headers is not None:
            for name, valuelist in headers.getAllRawHeaders():
                for value in valuelist:
                    l.append("%s: %s\n" % (name, value))
        l.append('\n')
        self.transport.writeSequence(l)

    def dataReceived(self, data):
        """Feed stdin body bytes to the request, clamped to Content-Length."""
        if self._dataRemaining <= 0:
            return
        if self._dataRemaining < len(data):
            data = data[:self._dataRemaining]
        self._dataRemaining -= len(data)
        self.request.handleContentChunk(data)
        if self._dataRemaining == 0:
            self.request.handleContentComplete()

    def connectionMade(self):
        self.request.process()
        if self._dataRemaining == 0:
            self.request.handleContentComplete()

    def connectionLost(self, reason):
        # One request per CGI process: when stdio goes away, we're done.
        if reactor.running:
            reactor.stop()

def startCGI(site):
    """Call this as the last thing in your CGI python script in order to
    hook up your site object with the incoming request.

    E.g.:
    >>> from twisted.web2 import channel, server
    >>> if __name__ == '__main__':
    ...     channel.startCGI(server.Site(myToplevelResource))
    """
    from twisted.internet.stdio import StandardIO
    StandardIO(CGIChannelRequest(site, os.environ))
    reactor.run()

__all__ = ['startCGI']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/channel/cgi.py
cgi.py
from twisted.internet import protocol
from twisted.web2 import responsecode
from twisted.web2.channel import cgi as cgichannel

def _abortWithError(errorcode, text):
    """Fail loudly on a malformed SCGI environment.

    BUGFIX: this helper was called below but never defined anywhere in the
    module, so an unknown SCGI version died with a NameError.  No response
    can be sent before the request object exists, so raise instead.
    """
    raise RuntimeError("%s: %s" % (errorcode, text))

class SCGIChannelRequest(cgichannel.BaseCGIChannelRequest):
    """Protocol handling one SCGI connection (one request per connection)."""
    scgi_vers = "1"

    # Buffered header bytes not yet parsed.
    _data = ""
    # Total header block length (netstring length prefix + trailing ","),
    # or None while the length prefix is still incomplete.
    headerLen = None

    def __init__(self):
        pass

    def writeHeaders(self, code, headers):
        """Emit the CGI-style response header block."""
        l = []
        code_message = responsecode.RESPONSES.get(code, "Unknown Status")
        # BUGFIX: the status line used a bare "\n" while every other line
        # used CRLF; terminate all lines consistently with "\r\n".
        l.append("Status: %s %s\r\n" % (code, code_message))
        if headers is not None:
            for name, valuelist in headers.getAllRawHeaders():
                for value in valuelist:
                    l.append("%s: %s\r\n" % (name, value))
        l.append('\r\n')
        self.transport.writeSequence(l)

    def makeRequest(self, vars):
        scgi_vers = vars['SCGI']
        if scgi_vers != self.scgi_vers:
            _abortWithError(responsecode.INTERNAL_SERVER_ERROR,
                            "Twisted.web SCGITransport: Unknown SCGI version %s" %
                            vars['SCGI'])
        cgichannel.BaseCGIChannelRequest.makeRequest(self, vars)

    def dataReceived(self, data):
        """Parse the netstring-framed header block, then stream the body."""
        if self.request is None:
            # Reading headers
            self._data += data
            if self.headerLen is None:
                # Haven't gotten a length prefix yet.  BUGFIX: split the
                # *accumulated* buffer, not just this chunk — the decimal
                # prefix may itself arrive split across reads, and splitting
                # only `data` silently discarded the earlier digits.
                datas = self._data.split(':', 1)
                if len(datas) == 1:
                    return
                self.headerLen = int(datas[0]) + 1 # +1 for the "," at the end
                self._data = datas[1]
            if len(self._data) >= self.headerLen:
                # Got all headers
                headerdata = self._data[:self.headerLen]
                data = self._data[self.headerLen:]
                items = headerdata.split('\0')
                assert (len(items) % 2) == 1, "malformed headers"
                assert items[-1] == ','
                env = {}
                for i in range(0, len(items) - 1, 2):
                    env[items[i]] = items[i+1]
                self.makeRequest(env)
                self.request.process()
                if self._dataRemaining == 0:
                    self.request.handleContentComplete()
                    return
                if not data:
                    return # no extra data in this packet
                # Fall through, self.request is now set, handle data
            else:
                return
        if self._dataRemaining <= 0:
            return
        if self._dataRemaining < len(data):
            data = data[:self._dataRemaining]
        self._dataRemaining -= len(data)
        self.request.handleContentChunk(data)
        if self._dataRemaining == 0:
            self.request.handleContentComplete()

    def connectionLost(self, reason):
        if self.request is not None:
            self.request.connectionLost(reason)

class SCGIFactory(protocol.ServerFactory):
    """Factory producing one SCGIChannelRequest per connection."""
    protocol = SCGIChannelRequest
    def __init__(self, requestFactory):
        self.requestFactory=requestFactory

    def buildProtocol(self, addr):
        p = protocol.ServerFactory.buildProtocol(self, addr)
        p.requestFactory=self.requestFactory
        return p

__all__ = ['SCGIFactory']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/channel/scgi.py
scgi.py
__all__ = [
    "allDataFromStream",
    "davXMLFromStream",
    "noDataFromStream",
    "normalizeURL",
    "joinURL",
    "parentForURL",
    "bindMethods",
]

import urllib
from urlparse import urlsplit, urlunsplit
import posixpath # Careful; this module is not documented as public API

from twisted.python import log
from twisted.python.failure import Failure
from twisted.internet.defer import succeed
from twisted.web2.stream import readStream
from twisted.web2.dav import davxml

##
# Reading request body
##

def allDataFromStream(stream, filter=None):
    """
    Read an entire stream into one string.

    @param stream: the stream to read.
    @param filter: optional callable applied to the collected data before
        it is returned.
    @return: a deferred firing with the stream contents (filtered if a
        filter was given), or C{None} if the stream was empty.
    """
    data = []
    def gotAllData(_):
        if not data:
            return None
        result = "".join([str(x) for x in data])
        if filter is None:
            return result
        else:
            return filter(result)
    return readStream(stream, data.append).addCallback(gotAllData)

def davXMLFromStream(stream):
    """
    Parse a stream's contents as a WebDAV XML document.

    @return: a deferred firing with a L{davxml.WebDAVDocument}, or C{None}
        if C{stream} is C{None}.
    """
    # FIXME:
    #   This reads the request body into a string and then parses it.
    #   A better solution would parse directly and incrementally from the
    #   request stream.
    if stream is None:
        return succeed(None)

    def parse(xml):
        try:
            return davxml.WebDAVDocument.fromString(xml)
        except ValueError:
            log.err("Bad XML:\n%s" % (xml,))
            raise
    return allDataFromStream(stream, parse)

def noDataFromStream(stream):
    """
    Consume a stream, raising ValueError if it contains any data.
    """
    def gotData(data):
        if data:
            raise ValueError("Stream contains unexpected data.")
    return readStream(stream, gotData)

##
# URLs
##

def normalizeURL(url):
    """
    Normalized a URL.
    @param url: a URL.
    @return: the normalized representation of C{url}.  The returned URL will
        never contain a trailing C{"/"}; it is up to the caller to determine
        whether the resource referred to by the URL is a collection and add a
        trailing C{"/"} if so.
    """
    def cleanup(path):
        # For some silly reason, posixpath.normpath doesn't clean up '//' at the
        # start of a filename, so let's clean it up here.
        # BUGFIX: guard against the empty string, which used to raise
        # IndexError on path[0].
        if path and path[0] == "/":
            count = 0
            for char in path:
                if char != "/":
                    break
                count += 1
            # Keep exactly one of the leading slashes.
            path = path[count-1:]
        return path

    (scheme, host, path, query, fragment) = urlsplit(cleanup(url))
    path = cleanup(posixpath.normpath(urllib.unquote(path)))
    return urlunsplit((scheme, host, urllib.quote(path), query, fragment))

def joinURL(*urls):
    """
    Appends URLs in series.
    @param urls: URLs to join.
    @return: the normalized URL formed by combining each URL in C{urls}.  The
        returned URL will contain a trailing C{"/"} if and only if the last
        given URL contains a trailing C{"/"}.
    """
    if len(urls) > 0 and len(urls[-1]) > 0 and urls[-1][-1] == "/":
        trailing = "/"
    else:
        trailing = ""

    url = normalizeURL("/".join(urls))
    if url == "/":
        return "/"
    else:
        return url + trailing

def parentForURL(url):
    """
    Extracts the URL of the containing collection resource for the resource
    corresponding to a given URL.
    @param url: an absolute (server-relative is OK) URL.
    @return: the normalized URL of the collection resource containing the
        resource corresponding to C{url}.  The returned URL will always
        contain a trailing C{"/"}.
    """
    (scheme, host, path, query, fragment) = urlsplit(normalizeURL(url))

    index = path.rfind("/")
    # BUGFIX: compare integers with ==, not "is"; identity comparison of
    # ints is a CPython small-int accident, not a language guarantee.
    if index == 0:
        if path == "/":
            # The root has no parent.
            return None
        else:
            path = "/"
    else:
        if index == -1:
            raise ValueError("Invalid URL: %s" % (url,))
        else:
            path = path[:index] + "/"

    return urlunsplit((scheme, host, path, query, fragment))

##
# Python magic
##

def unimplemented(obj):
    """
    Throw an exception signifying that the current method is unimplemented
    and should not have been invoked.
    """
    import inspect
    caller = inspect.getouterframes(inspect.currentframe())[1][3]
    raise NotImplementedError("Method %s is unimplemented in subclass %s" % (caller, obj.__class__))

def bindMethods(module, clazz, prefixes=("preconditions_", "http_", "report_")):
    """
    Binds all functions in the given module (as defined by that module's
    C{__all__} attribute) which start with any of the given prefixes as methods
    of the given class.
    @param module: the module in which to search for functions.
    @param clazz: the class to bind found functions to as methods.
    @param prefixes: a sequence of prefixes to match found functions against.
    """
    for submodule_name in module.__all__:
        try:
            __import__(module.__name__ + "." + submodule_name)
        except ImportError:
            log.err("Unable to import module %s" % (module.__name__ + "." + submodule_name,))
            # Re-raise the original import failure with its traceback.
            Failure().raiseException()
        submodule = getattr(module, submodule_name)
        for method_name in submodule.__all__:
            for prefix in prefixes:
                if method_name.startswith(prefix):
                    method = getattr(submodule, method_name)
                    setattr(clazz, method_name, method)
                    break
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/util.py
util.py
__all__ = [ "DAVPropertyMixIn", "DAVResource", "DAVLeafResource" ] import urllib from zope.interface import implements from twisted.python import log from twisted.internet.defer import maybeDeferred, succeed from twisted.web2 import responsecode from twisted.web2.dav import davxml from twisted.web2.dav.davxml import dav_namespace, lookupElement from twisted.web2.dav.idav import IDAVResource from twisted.web2.dav.noneprops import NonePropertyStore from twisted.web2.dav.util import unimplemented from twisted.web2.http import HTTPError, RedirectResponse, StatusResponse from twisted.web2.http_headers import generateContentType from twisted.web2.iweb import IResponse from twisted.web2.resource import LeafResource from twisted.web2.static import MetaDataMixin, StaticRenderMixin twisted_dav_namespace = "http://twistedmatrix.com/xml_namespace/dav/" twisted_private_namespace = "http://twistedmatrix.com/xml_namespace/dav/private/" class DAVPropertyMixIn (MetaDataMixin): """ Mix-in class which implements the DAV property access API in L{IDAVResource}. There are three categories of DAV properties, for the purposes of how this class manages them. A X{property} is either a X{live property} or a X{dead property}, and live properties are split into two categories: 1. Dead properties. There are properties that the server simply stores as opaque data. These are store in the X{dead property store}, which is provided by subclasses via the L{deadProperties} method. 2. Live properties which are always computed. These properties aren't stored anywhere (by this class) but instead are derived from the resource state or from data that is persisted elsewhere. These are listed in the L{liveProperties} attribute and are handled explicitly by the L{readProperty} method. 3. Live properties may be acted on specially and are stored in the X{dead property store}. These are not listed in the L{liveProperties} attribute, but may be handled specially by the property access methods. 
For example, L{writeProperty} might validate the data and refuse to write data it deems inappropriate for a given property. There are two sets of property access methods. The first group (L{hasProperty}, etc.) provides access to all properties. They automatically figure out which category a property falls into and act accordingly. The second group (L{hasDeadProperty}, etc.) accesses the dead property store directly and bypasses any live property logic that exists in the first group of methods. These methods are used by the first group of methods, and there are cases where they may be needed by other methods. I{Accessing dead properties directly should be done with caution.} Bypassing the live property logic means that values may not be the correct ones for use in DAV requests such as PROPFIND, and may be bypassing security checks. In general, one should never bypass the live property logic as part of a client request for property data. Properties in the L{twisted_private_namespace} namespace are internal to the server and should not be exposed to clients. They can only be accessed via the dead property store. """ # Note: # The DAV:owner and DAV:group live properties are only meaningful if you # are using ACL semantics (ie. Unix-like) which use them. This (generic) # class does not. 
    # Live (server-computed) properties supported by this resource, as
    # (namespace, name) qname tuples.  Commented-out entries are defined by
    # the cited RFCs but are not implemented by this class.
    liveProperties = (
        (dav_namespace, "resourcetype"),
        (dav_namespace, "getetag"),
        (dav_namespace, "getcontenttype"),
        (dav_namespace, "getcontentlength"),
        (dav_namespace, "getlastmodified"),
        (dav_namespace, "creationdate"),
        (dav_namespace, "displayname"),
        (dav_namespace, "supportedlock"),
        #(dav_namespace, "supported-report-set"),        # RFC 3253, section 3.1.5
        #(dav_namespace, "owner"),                       # RFC 3744, section 5.1
        #(dav_namespace, "group"),                       # RFC 3744, section 5.2
        #(dav_namespace, "supported-privilege-set"),     # RFC 3744, section 5.3
        #(dav_namespace, "current-user-privilege-set"),  # RFC 3744, section 5.4
        #(dav_namespace, "acl"),                         # RFC 3744, section 5.5
        (dav_namespace, "acl-restrictions"),             # RFC 3744, section 5.6
        #(dav_namespace, "inherited-acl-set"),           # RFC 3744, section 5.7
        #(dav_namespace, "principal-collection-set"),    # RFC 3744, section 5.8

        (twisted_dav_namespace, "resource-class"),
    )

    def deadProperties(self):
        """
        Provides internal access to the WebDAV dead property store.  You
        probably shouldn't be calling this directly if you can use the property
        accessors in the L{IDAVResource} API instead.  However, a subclass must
        override this method to provide its own dead property store.

        This implementation returns an instance of L{NonePropertyStore}, which
        cannot store dead properties.  Subclasses must override this method if
        they wish to store dead properties.

        @return: a dict-like object from which one can read and to which one
            can write dead properties.  Keys are qname tuples (ie.
            C{(namespace, name)}) as returned by
            L{davxml.WebDAVElement.qname()} and values are
            L{davxml.WebDAVElement} instances.
        """
        # Lazily create the (no-op) store and cache it on the instance so that
        # repeated property accesses share one store object.
        if not hasattr(self, "_dead_properties"):
            self._dead_properties = NonePropertyStore(self)
        return self._dead_properties

    def hasProperty(self, property, request):
        """
        See L{IDAVResource.hasProperty}.
        """
        # Accept either a qname tuple or a WebDAVElement instance.
        if type(property) is tuple:
            qname = property
        else:
            qname = property.qname()

        # Properties in the server-private namespace are never visible to
        # clients.
        if qname[0] == twisted_private_namespace:
            return succeed(False)

        return succeed(qname in self.liveProperties or self.deadProperties().contains(qname))

    def readProperty(self, property, request):
        """
        See L{IDAVResource.readProperty}.
        """
        def defer():
            if type(property) is tuple:
                qname = property
                sname = "{%s}%s" % property
            else:
                qname = property.qname()
                sname = property.sname()

            namespace, name = qname

            if namespace == dav_namespace:
                if name == "resourcetype":
                    # Allow live property to be overriden by dead property
                    if self.deadProperties().contains(qname):
                        return self.deadProperties().get(qname)
                    if self.isCollection():
                        return davxml.ResourceType.collection
                    return davxml.ResourceType.empty

                if name == "getetag":
                    return davxml.GETETag(self.etag().generate())

                if name == "getcontenttype":
                    mimeType = self.contentType()
                    mimeType.params = None # WebDAV getcontenttype property does not include parameters
                    return davxml.GETContentType(generateContentType(mimeType))

                if name == "getcontentlength":
                    return davxml.GETContentLength(str(self.contentLength()))

                if name == "getlastmodified":
                    return davxml.GETLastModified.fromDate(self.lastModified())

                if name == "creationdate":
                    return davxml.CreationDate.fromDate(self.creationDate())

                if name == "displayname":
                    return davxml.DisplayName(self.displayName())

                if name == "supportedlock":
                    # Advertised lock capabilities; locking itself is
                    # implemented elsewhere.
                    return davxml.SupportedLock(
                        davxml.LockEntry(davxml.LockScope.exclusive, davxml.LockType.write),
                        davxml.LockEntry(davxml.LockScope.shared   , davxml.LockType.write),
                    )

                if name == "acl-restrictions":
                    return davxml.ACLRestrictions()

            if namespace == twisted_dav_namespace:
                if name == "resource-class":
                    # Report the Python class name of this resource as a
                    # Twisted-private live property.
                    class ResourceClass (davxml.WebDAVTextElement):
                        namespace = twisted_dav_namespace
                        name = "resource-class"
                        hidden = False
                    return ResourceClass(self.__class__.__name__)

            if namespace == twisted_private_namespace:
                raise HTTPError(StatusResponse(
                    responsecode.FORBIDDEN,
                    "Properties in the %s namespace are private to the server." % (sname,)
                ))

            # Not a live property; fall through to the dead property store.
            return self.deadProperties().get(qname)

        return maybeDeferred(defer)

    def writeProperty(self, property, request):
        """
        See L{IDAVResource.writeProperty}.
        """
        assert isinstance(property, davxml.WebDAVElement)

        def defer():
            # Protected properties (eg. DAV:getetag) may never be set by
            # clients.
            if property.protected:
                raise HTTPError(StatusResponse(
                    responsecode.FORBIDDEN,
                    "Protected property %s may not be set." % (property.sname(),)
                ))

            if property.namespace == twisted_private_namespace:
                raise HTTPError(StatusResponse(
                    responsecode.FORBIDDEN,
                    "Properties in the %s namespace are private to the server." % (property.sname(),)
                ))

            return self.deadProperties().set(property)

        return maybeDeferred(defer)

    def removeProperty(self, property, request):
        """
        See L{IDAVResource.removeProperty}.
        """
        def defer():
            if type(property) is tuple:
                qname = property
                sname = "{%s}%s" % property
            else:
                qname = property.qname()
                sname = property.sname()

            # Live properties are computed; there is nothing to delete.
            if qname in self.liveProperties:
                raise HTTPError(StatusResponse(
                    responsecode.FORBIDDEN,
                    "Live property %s cannot be deleted." % (sname,)
                ))

            if qname[0] == twisted_private_namespace:
                raise HTTPError(StatusResponse(
                    responsecode.FORBIDDEN,
                    "Properties in the %s namespace are private to the server." % (sname,)
                ))

            return self.deadProperties().delete(qname)

        return maybeDeferred(defer)

    def listProperties(self, request):
        """
        See L{IDAVResource.listProperties}.
        """
        # FIXME: A set would be better here, but that's a python 2.4+ feature.
        qnames = list(self.liveProperties)

        for qname in self.deadProperties().list():
            if (qname not in qnames) and (qname[0] != twisted_private_namespace):
                qnames.append(qname)

        return succeed(qnames)

    def listAllprop(self, request):
        """
        Some DAV properties should not be returned to a C{DAV:allprop} query.
        RFC 3253 defines several such properties.  This method computes a subset
        of the property qnames returned by L{listProperties} by filtering out
        elements whose class have the C{.hidden} attribute set to C{True}.

        @return: a list of qnames of properties which are defined and are
            appropriate for use in response to a C{DAV:allprop} query.
        """
        def doList(allnames):
            qnames = []

            for qname in allnames:
                try:
                    if not lookupElement(qname).hidden:
                        qnames.append(qname)
                except KeyError:
                    # Unknown element classes are assumed visible.
                    qnames.append(qname)

            return qnames

        d = self.listProperties(request)
        d.addCallback(doList)
        return d

    def hasDeadProperty(self, property):
        """
        Same as L{hasProperty}, but bypasses the live property store and checks
        directly from the dead property store.
        """
        if type(property) is tuple:
            qname = property
        else:
            qname = property.qname()

        return self.deadProperties().contains(qname)

    def readDeadProperty(self, property):
        """
        Same as L{readProperty}, but bypasses the live property store and reads
        directly from the dead property store.
        """
        if type(property) is tuple:
            qname = property
        else:
            qname = property.qname()

        return self.deadProperties().get(qname)

    def writeDeadProperty(self, property):
        """
        Same as L{writeProperty}, but bypasses the live property store and
        writes directly to the dead property store.
        Note that this should not be used unless you know that you are writing
        to an overrideable live property, as this bypasses the logic which
        protects protected properties.  The result of writing to a
        non-overrideable live property with this method is undefined; the value
        in the dead property store may or may not be ignored when reading the
        property with L{readProperty}.
        """
        self.deadProperties().set(property)

    def removeDeadProperty(self, property):
        """
        Same as L{removeProperty}, but bypasses the live property store and
        acts directly on the dead property store.
        """
        if self.hasDeadProperty(property):
            if type(property) is tuple:
                qname = property
            else:
                qname = property.qname()

            self.deadProperties().delete(qname)

    #
    # Overrides some methods in MetaDataMixin in order to allow DAV properties
    # to override the values of some HTTP metadata.
    #
    def contentType(self):
        # A dead DAV:getcontenttype property, if present, wins over the value
        # computed by MetaDataMixin.
        if self.hasDeadProperty((davxml.dav_namespace, "getcontenttype")):
            return self.readDeadProperty((davxml.dav_namespace, "getcontenttype")).mimeType()
        else:
            return super(DAVPropertyMixIn, self).contentType()

    def displayName(self):
        # A dead DAV:displayname property, if present, wins over the value
        # computed by MetaDataMixin.
        if self.hasDeadProperty((davxml.dav_namespace, "displayname")):
            return str(self.readDeadProperty((davxml.dav_namespace, "displayname")))
        else:
            return super(DAVPropertyMixIn, self).displayName()

class DAVResource (DAVPropertyMixIn, StaticRenderMixin):
    """
    Base class for WebDAV-aware resources: combines DAV property handling
    with HTTP rendering, and declares the DAV/ACL hooks subclasses override.
    """
    implements(IDAVResource)

    ##
    # DAV
    ##

    def davComplianceClasses(self):
        """
        This implementation raises L{NotImplementedError}.
        @return: a sequence of strings denoting WebDAV compliance classes.  For
            example, a DAV level 2 server might return ("1", "2").
        """
        unimplemented(self)

    def isCollection(self):
        """
        See L{IDAVResource.isCollection}.
        This implementation raises L{NotImplementedError}; a subclass must
        override this method.
        """
        unimplemented(self)

    def findChildren(self, depth):
        """
        See L{IDAVResource.findChildren}.
        This implementation returns C{()} if C{depth} is C{0} and this resource
        is a collection.  Otherwise, it raises L{NotImplementedError}; a
        subclass must override this method.
        """
        assert depth in ("0", "1", "infinity"), "Invalid depth: %s" % (depth,)
        if depth == "0" or not self.isCollection():
            return ()
        else:
            unimplemented(self)

    ##
    # ACL
    ##

    def principalCollections(self):
        """
        See L{IDAVResource.principalCollections}.
        This implementation returns C{()}.
        """
        return ()

    def accessControlList(self):
        """
        See L{IDAVResource.accessControlList}.
        This implementation returns an ACL granting all privileges to all
        principals.
        """
        return allACL

    def supportedPrivileges(self):
        """
        See L{IDAVResource.supportedPrivileges}.
        This implementation returns a supported privilege set containing only
        the DAV:all privilege.
        """
        return allPrivilegeSet

    ##
    # HTTP
    ##

    def renderHTTP(self, request):
        # FIXME: This is for testing with litmus; comment out when not in use
        #litmus = request.headers.getRawHeaders("x-litmus")
        #if litmus: log.msg("*** Litmus test: %s ***" % (litmus,))

        # FIXME: Learn how to use twisted logging facility, wsanchez
        protocol = "HTTP/%s.%s" % request.clientproto
        log.msg("%s %s %s" % (request.method, urllib.unquote(request.uri), protocol))

        #
        # If this is a collection and the URI doesn't end in "/", redirect.
        #
        if self.isCollection() and request.uri[-1:] != "/":
            return RedirectResponse(request.uri + "/")

        def setHeaders(response):
            response = IResponse(response)

            response.headers.setHeader("dav", self.davComplianceClasses())

            #
            # If this is a collection and the URI doesn't end in "/", add a
            # Content-Location header.  This is needed even if we redirect such
            # requests (as above) in the event that this resource was created or
            # modified by the request.
            #
            if self.isCollection() and request.uri[-1:] != "/":
                response.headers.setHeader("content-location", request.uri + "/")

            return response

        def onError(f):
            # If we get an HTTPError, run its response through setHeaders() as
            # well.
            f.trap(HTTPError)
            return setHeaders(f.value.response)

        d = maybeDeferred(super(DAVResource, self).renderHTTP, request)
        return d.addCallbacks(setHeaders, onError)

class DAVLeafResource (DAVResource, LeafResource):
    """
    DAV resource with no children.
    """
    def findChildren(self, depth):
        return ()

##
# Utilities
##

# Default ACL: grant every privilege to every principal, protected from
# modification.
allACL = davxml.ACL(
    davxml.ACE(
        davxml.Principal(davxml.All()),
        davxml.Grant(davxml.Privilege(davxml.All())),
        davxml.Protected()
    )
)

# Default supported privilege set: only DAV:all.
allPrivilegeSet = davxml.SupportedPrivilegeSet(
    davxml.SupportedPrivilege(
        davxml.Privilege(davxml.All()),
        davxml.Description("all privileges", **{"xml:lang": "en"})
    )
)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/resource.py
resource.py
__all__ = [
    "ErrorResponse",
    "MultiStatusResponse",
    "ResponseQueue",
    "PropertyStatusResponseQueue",
    "statusForFailure",
]

import errno

from twisted.python import log
from twisted.python.failure import Failure
from twisted.web2 import responsecode
from twisted.web2.iweb import IResponse
from twisted.web2.http import Response, HTTPError, StatusResponse
from twisted.web2.http_headers import MimeType
from twisted.web2.dav import davxml

##
# Generating and tweaking responses
##

class ErrorResponse (Response):
    """
    A L{Response} object which contains a status code and a L{davxml.Error}
    element.
    Renders itself as a DAV:error XML document.
    """
    error = None

    def __init__(self, code, error):
        """
        @param code: a response code.
        @param error: an L{davxml.WebDAVElement} identifying the error, or a
            tuple C{(namespace, name)} with which to create an empty element
            denoting the error.  (The latter is useful in the case of
            preconditions and postconditions, not all of which have defined
            XML element classes.)
        """
        if type(error) is tuple:
            xml_namespace, xml_name = error
            # Synthesize an empty element class for errors with no
            # predefined davxml class.
            class EmptyError (davxml.WebDAVEmptyElement):
                namespace = xml_namespace
                name = xml_name
            error = EmptyError()

        output = davxml.Error(error).toxml()

        Response.__init__(self, code=code, stream=output)

        self.headers.setHeader("content-type", MimeType("text", "xml"))

        self.error = error

    def __repr__(self):
        return "<%s %s %s>" % (self.__class__.__name__, self.code, self.error.sname())

class MultiStatusResponse (Response):
    """
    Multi-status L{Response} object.
    Renders itself as a DAV:multi-status XML document.
    """
    def __init__(self, xml_responses):
        """
        @param xml_responses: an iterable of davxml.Response objects.
        """
        # Build and serialize the multi-status document exactly once.  The
        # original code expanded C{xml_responses} twice (once into a discarded
        # local, once into the stream argument); with a one-shot iterator the
        # second expansion was empty and the response body was silently wrong.
        output = davxml.MultiStatus(*xml_responses).toxml()

        Response.__init__(self, code=responsecode.MULTI_STATUS, stream=output)

        self.headers.setHeader("content-type", MimeType("text", "xml"))

class ResponseQueue (object):
    """
    Stores a list of (typically error) responses for use in a
    L{MultiStatusResponse}.
    """
    def __init__(self, path_basename, method, success_response):
        """
        @param path_basename: the base path for all responses to be added to
            the queue.  All paths for responses added to the queue must start
            with C{path_basename}, which will be stripped from the beginning of
            each path to determine the response's URI.
        @param method: the name of the method generating the queue.
        @param success_response: the response to return in lieu of a
            L{MultiStatusResponse} if no responses are added to this queue.
        """
        self.responses         = []
        self.path_basename     = path_basename
        self.path_basename_len = len(path_basename)
        self.method            = method
        self.success_response  = success_response

    def add(self, path, what):
        """
        Add a response.
        @param path: a path, which must be a subpath of C{path_basename} as
            provided to L{__init__}.
        @param what: a status code or a L{Failure} for the given path.
        """
        assert path.startswith(self.path_basename), "%s does not start with %s" % (path, self.path_basename)

        if type(what) is int:
            code    = what
            error   = None
            message = responsecode.RESPONSES[code]
        elif isinstance(what, Failure):
            code    = statusForFailure(what)
            error   = errorForFailure(what)
            message = messageForFailure(what)
        else:
            raise AssertionError("Unknown data type: %r" % (what,))

        # 4xx/5xx codes are errors; the original ">" incorrectly skipped
        # logging for 400 (Bad Request) itself.
        if code >= 400:
            # Error codes only
            log.err("Error during %s for %s: %s" % (self.method, path, message))

        uri = path[self.path_basename_len:]

        children = []
        children.append(davxml.HRef(uri))
        children.append(davxml.Status.fromResponseCode(code))
        if error is not None:
            children.append(error)
        if message is not None:
            children.append(davxml.ResponseDescription(message))
        self.responses.append(davxml.StatusResponse(*children))

    def response(self):
        """
        Generate a L{MultiStatusResponse} with the responses contained in the
        queue or, if no such responses, return the C{success_response} provided
        to L{__init__}.
        @return: the response.
        """
        if self.responses:
            return MultiStatusResponse(self.responses)
        else:
            return self.success_response

class PropertyStatusResponseQueue (object):
    """
    Stores a list of propstat elements for use in a L{Response} in a
    L{MultiStatusResponse}.
    """
    def __init__(self, method, uri, success_response):
        """
        @param method: the name of the method generating the queue.
        @param uri: the href for the response.
        @param success_response: the status to return if no
            L{PropertyStatus} are added to this queue.
        """
        self.method           = method
        self.propstats        = [davxml.HRef(uri)]
        self.success_response = success_response

    def add(self, what, property):
        """
        Add a response.
        @param what: a status code or a L{Failure} for the given path.
        @param property: the property whose status is being reported.
        """
        if type(what) is int:
            code    = what
            error   = None
            message = responsecode.RESPONSES[code]
        elif isinstance(what, Failure):
            code    = statusForFailure(what)
            error   = errorForFailure(what)
            message = messageForFailure(what)
        else:
            raise AssertionError("Unknown data type: %r" % (what,))

        if len(property.children) > 0:
            # Re-instantiate as empty element.
            property = property.__class__()

        # 4xx/5xx codes are errors; see ResponseQueue.add.
        if code >= 400:
            # Error codes only
            log.err("Error during %s for %s: %s" % (self.method, property, message))

        children = []
        children.append(davxml.PropertyContainer(property))
        children.append(davxml.Status.fromResponseCode(code))
        if error is not None:
            children.append(error)
        if message is not None:
            children.append(davxml.ResponseDescription(message))
        self.propstats.append(davxml.PropertyStatus(*children))

    def error(self):
        """
        Convert any 2xx codes in the propstat responses to 424 Failed
        Dependency.
        """
        for propstat in self.propstats:
            # Check the status
            changed_status = False
            for index, child in enumerate(propstat.children):
                if isinstance(child, davxml.Status) and (child.code / 100 == 2):
                    # Change the code
                    propstat.children[index] = davxml.Status.fromResponseCode(
                        responsecode.FAILED_DEPENDENCY
                    )
                    changed_status = True
                elif changed_status and isinstance(child, davxml.ResponseDescription):
                    propstat.children[index] = davxml.ResponseDescription(
                        responsecode.RESPONSES[responsecode.FAILED_DEPENDENCY]
                    )

    def response(self):
        """
        Generate a response from the responses contained in the queue or, if
        there are no such responses, return the C{success_response} provided to
        L{__init__}.
        @return: a L{davxml.PropertyStatusResponse}.
        """
        if len(self.propstats) == 1:
            # Only the HRef is present: report the overall success status.
            self.propstats.append(davxml.Status.fromResponseCode(self.success_response))
        return davxml.PropertyStatusResponse(*self.propstats)

##
# Exceptions and response codes
##

def statusForFailure(failure, what=None):
    """
    @param failure: a L{Failure}.
    @param what: a description of what was going on when the failure occurred.
        If what is not C{None}, emit a corresponding message via L{log.err}.
    @return: a response code corresponding to the given C{failure}.
    """
    def msg(err):
        if what is not None:
            log.msg("%s while %s" % (err, what))

    if failure.check(IOError, OSError):
        # Map common OS errnos onto HTTP status codes; anything else
        # propagates as the original exception.
        e = failure.value[0]
        if e == errno.EACCES or e == errno.EPERM:
            msg("Permission denied")
            return responsecode.FORBIDDEN
        elif e == errno.ENOSPC:
            msg("Out of storage space")
            return responsecode.INSUFFICIENT_STORAGE_SPACE
        elif e == errno.ENOENT:
            msg("Not found")
            return responsecode.NOT_FOUND
        else:
            failure.raiseException()
    elif failure.check(NotImplementedError):
        msg("Unimplemented error")
        return responsecode.NOT_IMPLEMENTED
    elif failure.check(HTTPError):
        code = IResponse(failure.value.response).code
        msg("%d response" % (code,))
        return code
    else:
        failure.raiseException()

def errorForFailure(failure):
    """
    @return: the L{davxml.Error} carried by an L{ErrorResponse}-bearing
        L{HTTPError} failure, or C{None}.
    """
    if failure.check(HTTPError) and isinstance(failure.value.response, ErrorResponse):
        return davxml.Error(failure.value.response.error)
    else:
        return None

def messageForFailure(failure):
    """
    @return: a human-readable message for C{failure}, or C{None} when the
        failure carries an L{ErrorResponse} (which is self-describing).
    """
    if failure.check(HTTPError):
        if isinstance(failure.value.response, ErrorResponse):
            return None
        if isinstance(failure.value.response, StatusResponse):
            return failure.value.response.description
    return str(failure)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/http.py
http.py
__all__ = [
    "IDAVResource",
    "IDAVPrincipalResource",
]

from twisted.web2.iweb import IResource

class IDAVResource(IResource):
    """
    WebDAV resource.
    """
    def isCollection():
        """
        Checks whether this resource is a collection resource.
        @return: C{True} if this resource is a collection resource, C{False}
            otherwise.
        """

    def findChildren(depth):
        """
        Returns an iterable of child resources for the given depth.
        Because resources do not know their request URIs, children are returned
        as tuples C{(resource, uri)}, where C{resource} is the child resource
        and C{uri} is a URL path relative to this resource.
        @param depth: the search depth (one of C{"0"}, C{"1"}, or C{"infinity"})
        @return: an iterable of tuples C{(resource, uri)}.
        """

    def hasProperty(property, request):
        """
        Checks whether the given property is defined on this resource.
        @param property: an empty L{davxml.WebDAVElement} instance or a qname
            tuple.
        @param request: the request being processed.
        @return: a deferred value of C{True} if the given property is set on
            this resource, or C{False} otherwise.
        """

    def readProperty(property, request):
        """
        Reads the given property on this resource.
        @param property: an empty L{davxml.WebDAVElement} class or instance, or
            a qname tuple.
        @param request: the request being processed.
        @return: a deferred L{davxml.WebDAVElement} instance containing the
            value of the given property.
        @raise HTTPError: (containing a response with a status code of
            L{responsecode.CONFLICT}) if C{property} is not set on this
            resource.
        """

    def writeProperty(property, request):
        """
        Writes the given property on this resource.
        @param property: a L{davxml.WebDAVElement} instance.
        @param request: the request being processed.
        @return: an empty deferred which fires when the operation is completed.
        @raise HTTPError: (containing a response with a status code of
            L{responsecode.CONFLICT}) if C{property} is a read-only property.
        """

    def removeProperty(property, request):
        """
        Removes the given property from this resource.
        @param property: a L{davxml.WebDAVElement} instance or a qname tuple.
        @param request: the request being processed.
        @return: an empty deferred which fires when the operation is completed.
        @raise HTTPError: (containing a response with a status code of
            L{responsecode.CONFLICT}) if C{property} is a read-only property or
            if the property does not exist.
        """

    def listProperties(request):
        """
        @param request: the request being processed.
        @return: a deferred iterable of qnames for all properties defined for
            this resource.
        """

    def principalCollections():
        """
        Provides the URIs of collection resources which contain principal
        resources which may be used in access control entries on this resource.
        (RFC 3744, section 5.8)
        @return: a sequence of URIs referring to collection resources which
            implement the C{DAV:principal-property-search} C{REPORT}.
        """

    def accessControlList():
        """
        @return: the L{davxml.ACL} element containing the access control list
            for this resource.
        """

    def supportedPrivileges():
        """
        @return: a sequence of the access control privileges which are
            supported by this resource.
        """

class IDAVPrincipalResource (IDAVResource):
    """
    WebDAV principal resource.  (RFC 3744, section 2)
    """
    def alternateURIs():
        """
        Provides the URIs of network resources with additional descriptive
        information about the principal, for example, a URI to an LDAP record.
        (RFC 3744, section 4.1)
        @return: a iterable of URIs.
        """

    def principalURL():
        """
        Provides the URL which must be used to identify this principal in ACL
        requests.  (RFC 3744, section 4.2)
        @return: a URL.
        """

    def groupMembers():
        """
        Provides the principal URLs of principals that are direct members of
        this (group) principal.  (RFC 3744, section 4.3)
        @return: a iterable of principal URLs.
        """

    def groupMemberships():
        """
        Provides the URLs of the group principals in which the principal is
        directly a member.  (RFC 3744, section 4.4)
        @return: a iterable of group principal URLs.
        """
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/idav.py
idav.py
__all__ = ["DAVPrincipalResource"]

from zope.interface import implements

from twisted.internet.defer import maybeDeferred
from twisted.web2.dav import davxml
from twisted.web2.dav.davxml import dav_namespace
from twisted.web2.dav.idav import IDAVPrincipalResource
from twisted.web2.dav.resource import DAVLeafResource
from twisted.web2.dav.util import unimplemented

class DAVPrincipalResource (DAVLeafResource):
    """
    Resource representing a WebDAV principal.  (RFC 3744, section 2)
    """
    implements(IDAVPrincipalResource)

    ##
    # WebDAV
    ##

    # Extend the inherited live properties with the principal properties
    # defined by RFC 3744, section 4.
    liveProperties = DAVLeafResource.liveProperties + (
        (dav_namespace, "alternate-uri-set"),
        (dav_namespace, "principal-url"),
        (dav_namespace, "group-member-set"),
        (dav_namespace, "group-membership"),
    )

    def davComplianceClasses(self):
        return ("1",)

    def isCollection(self):
        return False

    def findChildren(self, depth):
        # Principals are leaf resources; they never have children.
        return ()

    def readProperty(self, property, request):
        """
        Handle the RFC 3744 principal live properties here; defer everything
        else to the superclass implementation.
        """
        def defer():
            if type(property) is tuple:
                qname = property
            else:
                qname = property.qname()

            namespace, name = qname

            if namespace == dav_namespace:
                if name == "alternate-uri-set":
                    return davxml.AlternateURISet(*[davxml.HRef(u) for u in self.alternateURIs()])

                if name == "principal-url":
                    return davxml.PrincipalURL(davxml.HRef(self.principalURL()))

                if name == "group-member-set":
                    return davxml.GroupMemberSet(*[davxml.HRef(p) for p in self.groupMembers()])

                if name == "group-membership":
                    # Bug fix: this property must render as a
                    # DAV:group-membership element (RFC 3744, section 4.4);
                    # the original code incorrectly emitted a
                    # DAV:group-member-set element here.
                    return davxml.GroupMembership(*[davxml.HRef(g) for g in self.groupMemberships()])

            return super(DAVPrincipalResource, self).readProperty(qname, request)

        return maybeDeferred(defer)

    ##
    # ACL
    ##

    def alternateURIs(self):
        """
        See L{IDAVPrincipalResource.alternateURIs}.
        This implementation returns C{()}.  Subclasses should override this
        method to provide alternate URIs for this resource if appropriate.
        """
        return ()

    def principalURL(self):
        """
        See L{IDAVPrincipalResource.principalURL}.
        This implementation raises L{NotImplementedError}.  Subclasses must
        override this method to provide the principal URL for this resource.
        """
        unimplemented(self)

    def groupMembers(self):
        """
        See L{IDAVPrincipalResource.groupMembers}.
        This implementation returns C{()}, which is appropriate for non-group
        principals.  Subclasses should override this method to provide member
        URLs for this resource if appropriate.
        """
        return ()

    def groupMemberships(self):
        """
        See L{IDAVPrincipalResource.groupMemberships}.
        This implementation raises L{NotImplementedError}.  Subclasses must
        override this method to provide the group URLs for this resource.
        """
        unimplemented(self)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/acl.py
acl.py
__all__ = ["xattrPropertyStore"]

import urllib
import sys

import xattr

if getattr(xattr, 'xattr', None) is None:
    raise ImportError("wrong xattr package imported")

from twisted.python import log
from twisted.web2 import responsecode
from twisted.web2.http import HTTPError, StatusResponse
from twisted.web2.dav import davxml

class xattrPropertyStore (object):
    """
    Dead property store backed by filesystem extended attributes.

    This implementation uses Bob Ippolito's xattr package, available from:
        http://undefined.org/python/#xattr
    Note that the Bob's xattr package is specific to Linux and Darwin, at
    least presently.
    """
    #
    # Dead properties are stored as extended attributes on disk.  In order to
    # avoid conflicts with other attributes, prefix dead property names.
    #
    deadPropertyXattrPrefix = "WebDAV:"

    # Linux seems to require that attribute names use a "user." prefix.
    # FIXME: Is is a system-wide thing, or a per-filesystem thing?
    #   If the latter, how to we detect the file system?
    if sys.platform == "linux2":
        deadPropertyXattrPrefix = "user."

    def _encode(clazz, name):
        """
        Map a property qname tuple C{(namespace, name)} to an extended
        attribute name.
        """
        #
        # FIXME: The xattr API in Mac OS 10.4.2 breaks if you have "/" in an
        # attribute name (radar://4202440).  We'll quote the strings to get rid
        # of "/" characters for now.
        #
        # NOTE: only "%" and "/" are quoted, deliberately; quoting more
        # characters would change on-disk attribute names and break
        # round-tripping of previously stored properties.
        result = list("{%s}%s" % name)
        for i in range(len(result)):
            c = result[i]
            if c in "%/":
                result[i] = "%%%02X" % (ord(c),)
        r = clazz.deadPropertyXattrPrefix + ''.join(result)
        return r

    def _decode(clazz, name):
        """
        Inverse of L{_encode}: map an extended attribute name back to a
        property qname tuple.

        @raise ValueError: if C{name} is not a validly encoded name.
        """
        name = urllib.unquote(name[len(clazz.deadPropertyXattrPrefix):])

        index = name.find("}")

        # Bug fix: the original tested "index is -1", an identity comparison
        # on an int which only happens to work because of CPython's small
        # integer caching; compare by value instead.
        if (index == -1 or len(name) <= index or name[0] != "{"):
            raise ValueError("Invalid encoded name: %r" % (name,))

        return (name[1:index], name[index+1:])

    _encode = classmethod(_encode)
    _decode = classmethod(_decode)

    def __init__(self, resource):
        self.resource = resource
        self.attrs = xattr.xattr(self.resource.fp.path)

    def get(self, qname):
        """
        @raise HTTPError: (404) if the property is not stored.
        """
        try:
            value = self.attrs[self._encode(qname)]
        except KeyError:
            raise HTTPError(StatusResponse(
                responsecode.NOT_FOUND,
                "No such property: {%s}%s" % qname
            ))

        doc = davxml.WebDAVDocument.fromString(value)

        return doc.root_element

    def set(self, property):
        #log.msg("Writing property %s on file %s"
        #        % (property.sname(), self.resource.fp.path))

        self.attrs[self._encode(property.qname())] = property.toxml()

        # Update the resource because we've modified it
        self.resource.fp.restat()

    def delete(self, qname):
        #log.msg("Deleting property {%s}%s on file %s"
        #        % (qname[0], qname[1], self.resource.fp.path))

        try:
            del self.attrs[self._encode(qname)]
        except KeyError:
            # RFC 2518 Section 12.13.1 says that removal of
            # non-existing property is not an error.
            pass

    def contains(self, qname):
        try:
            return self._encode(qname) in self.attrs
        except TypeError:
            return False

    def list(self):
        # Only attributes carrying our prefix are dead properties.
        prefix = self.deadPropertyXattrPrefix
        prefix_len = len(prefix)

        return [self._decode(name) for name in self.attrs if name.startswith(prefix)]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/xattrprops.py
xattrprops.py
__all__ = ["DAVFile"]

from twisted.python import log
from twisted.web2.static import File
from twisted.web2.dav import davxml
from twisted.web2.dav.idav import IDAVResource
from twisted.web2.dav.resource import DAVResource
from twisted.web2.dav.util import bindMethods

try:
    from twisted.web2.dav.xattrprops import xattrPropertyStore as DeadPropertyStore
except ImportError:
    # Fall back to a store that rejects all dead property writes.
    log.msg("No dead property store available; using nonePropertyStore.")
    log.msg("Setting of dead properties will not be allowed.")
    from twisted.web2.dav.noneprops import NonePropertyStore as DeadPropertyStore

class DAVFile (DAVResource, File):
    """
    WebDAV-accessible File resource.

    Extends twisted.web2.static.File to handle WebDAV methods.
    """
    def __init__(self, path, defaultType="text/plain", indexNames=None):
        """
        @param path: the path of the file backing this resource.
        @param defaultType: the default mime type (as a string) for this
            resource and (eg. child) resources derived from it.
        @param indexNames: a sequence of index file names.
        @param acl: an L{IDAVAccessControlList} with the .
        """
        super(DAVFile, self).__init__(path,
                                      defaultType = defaultType,
                                      ignoredExts = (),
                                      processors  = None,
                                      indexNames  = indexNames)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.fp.path)

    ##
    # WebDAV
    ##

    def davComplianceClasses(self):
        return ("1",) # Add "2" when we have locking

    def deadProperties(self):
        # Lazily create the dead property store and cache it on the instance.
        if not hasattr(self, "_dead_properties"):
            self._dead_properties = DeadPropertyStore(self)
        return self._dead_properties

    def isCollection(self):
        """
        See L{IDAVResource.isCollection}.
        """
        # NOTE(review): a File with any registered (putChild-style) children
        # is reported as a collection even if it is not backed by a directory
        # — presumably intentional for virtual children; confirm.
        for child in self.listChildren(): return True
        return self.fp.isdir()

    def findChildren(self, depth):
        """
        See L{IDAVResource.findChildren}.
        """
        assert depth in ("0", "1", "infinity"), "Invalid depth: %s" % (depth,)
        if depth != "0" and self.isCollection():
            for name in self.listChildren():
                try:
                    child = IDAVResource(self.getChild(name))
                except TypeError:
                    # Child is not a DAV resource; skip it.
                    child = None

                if child is not None:
                    if child.isCollection():
                        yield (child, name + "/")
                        if depth == "infinity":
                            # Recurse, prefixing grandchild URIs with this
                            # child's name.
                            for grandchild in child.findChildren(depth):
                                yield (grandchild[0], name + "/" + grandchild[1])
                    else:
                        yield (child, name)

    ##
    # ACL
    ##

    def supportedPrivileges(self):
        # Build the (static) supported privilege tree once and cache it on the
        # class, since it is identical for every DAVFile instance.
        if not hasattr(DAVFile, "_supportedPrivilegeSet"):
            DAVFile._supportedPrivilegeSet = davxml.SupportedPrivilegeSet(
                davxml.SupportedPrivilege(
                    davxml.Privilege(davxml.All()),
                    davxml.Description("all privileges", **{"xml:lang": "en"}),
                    davxml.SupportedPrivilege(
                        davxml.Privilege(davxml.Read()),
                        davxml.Description("read resource", **{"xml:lang": "en"}),
                    ),
                    davxml.SupportedPrivilege(
                        davxml.Privilege(davxml.Write()),
                        davxml.Description("write resource", **{"xml:lang": "en"}),
                        davxml.SupportedPrivilege(
                            davxml.Privilege(davxml.WriteProperties()),
                            davxml.Description("write resource properties", **{"xml:lang": "en"}),
                        ),
                        davxml.SupportedPrivilege(
                            davxml.Privilege(davxml.WriteContent()),
                            davxml.Description("write resource content", **{"xml:lang": "en"}),
                        ),
                        davxml.SupportedPrivilege(
                            davxml.Privilege(davxml.Bind()),
                            davxml.Description("add child resource", **{"xml:lang": "en"}),
                        ),
                        davxml.SupportedPrivilege(
                            davxml.Privilege(davxml.Unbind()),
                            davxml.Description("remove child resource", **{"xml:lang": "en"}),
                        ),
                    ),
                    davxml.SupportedPrivilege(
                        davxml.Privilege(davxml.Unlock()),
                        davxml.Description("unlock resource without ownership", **{"xml:lang": "en"}),
                    ),
                    davxml.SupportedPrivilege(
                        davxml.Privilege(davxml.ReadACL()),
                        davxml.Description("read resource access control list", **{"xml:lang": "en"}),
                    ),
                    davxml.SupportedPrivilege(
                        davxml.Privilege(davxml.WriteACL()),
                        davxml.Description("write resource access control list", **{"xml:lang": "en"}),
                    ),
                    davxml.SupportedPrivilege(
                        davxml.Privilege(davxml.ReadCurrentUserPrivilegeSet()),
                        davxml.Description("read privileges for current principal", **{"xml:lang": "en"}),
                    ),
                ),
            )
        return DAVFile._supportedPrivilegeSet

    ##
    # Workarounds for issues with File
    ##

    def ignoreExt(self, ext):
        """
        Does nothing; doesn't apply to this subclass.
        """
        pass

    def createSimilarFile(self, path):
        # Preserve configuration when static.File creates child resources.
        return self.__class__(path, defaultType=self.defaultType, indexNames=self.indexNames[:])

#
# Attach method handlers to DAVFile
#

import twisted.web2.dav.method

bindMethods(twisted.web2.dav.method, DAVFile)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/static.py
static.py
__all__ = [
    "delete",
    "copy",
    "move",
    "put",
    "mkcollection",
]

import os
import urllib
from urlparse import urlsplit

from twisted.python import log
from twisted.python.filepath import FilePath
from twisted.python.failure import Failure
from twisted.internet.defer import succeed, deferredGenerator, waitForDeferred
from twisted.web2 import responsecode
from twisted.web2.http import StatusResponse, HTTPError
from twisted.web2.stream import FileStream, readIntoFile
from twisted.web2.dav.http import ResponseQueue, statusForFailure

def delete(uri, filepath, depth="infinity"):
    """
    Perform a X{DELETE} operation on the given URI, which is backed by the given
    filepath.
    @param filepath: the L{FilePath} to delete.
    @param depth: the recursion X{Depth} for the X{DELETE} operation, which must
        be "infinity".
    @raise HTTPError: (containing a response with a status code of
        L{responsecode.BAD_REQUEST}) if C{depth} is not "infinity".
    @raise HTTPError: (containing an appropriate response) if the delete
        operation fails.  If C{filepath} is a directory, the response will be a
        L{MultiStatusResponse}.
    @return: a deferred response with a status code of
        L{responsecode.NO_CONTENT} if the X{DELETE} operation succeeds.
    """
    #
    # Remove the file(s)
    #
    # FIXME: defer
    if filepath.isdir():
        #
        # RFC 2518, section 8.6 says that we must act as if the Depth header is
        # set to infinity, and that the client must omit the Depth header or set
        # it to infinity, meaning that for collections, we will delete all
        # members.
        #
        # This seems somewhat at odds with the notion that a bad request should
        # be rejected outright; if the client sends a bad depth header, the
        # client is broken, and RFC 2518, section 8 suggests that a bad request
        # should be rejected...
        #
        # Let's play it safe for now and ignore broken clients.
        #
        if depth != "infinity":
            msg = ("Client sent illegal depth header value for DELETE: %s" % (depth,))
            log.err(msg)
            raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))

        #
        # Recursive delete
        #
        # RFC 2518, section 8.6 says that if we get an error deleting a resource
        # other than the collection in the request-URI, that we must respond
        # with a multi-status response containing error statuses for each
        # resource that we fail to delete.  It also says we should not return
        # no-content (success) status, which means that we should continue after
        # errors, rather than aborting right away.  This is interesting in that
        # it's different from how most operating system tools act (eg. rm) when
        # recursive filsystem deletes fail.
        #
        uri_path = urllib.unquote(urlsplit(uri)[2])
        if uri_path[-1] == "/":
            uri_path = uri_path[:-1]

        log.msg("Deleting directory %s" % (filepath.path,))

        # Derive the filesystem prefix that corresponds to the request URI,
        # so that per-file error responses can be reported by URI.
        # NOTE: len(uri_path) is wrong if os.sep is not one byte long... meh.
        request_basename = filepath.path[:-len(uri_path)]
        request_basename_len = len(request_basename)  # NOTE(review): unused

        errors = ResponseQueue(request_basename, "DELETE", responsecode.NO_CONTENT)

        # FIXME: defer this
        # Walk bottom-up so directories are emptied before being removed;
        # collect per-path failures rather than aborting.
        for dir, subdirs, files in os.walk(filepath.path, topdown=False):
            for filename in files:
                path = os.path.join(dir, filename)
                try:
                    os.remove(path)
                except:
                    errors.add(path, Failure())

            for subdir in subdirs:
                path = os.path.join(dir, subdir)
                # Symlinks to directories must be unlinked, not rmdir'ed.
                if os.path.islink(path):
                    try:
                        os.remove(path)
                    except:
                        errors.add(path, Failure())
                else:
                    try:
                        os.rmdir(path)
                    except:
                        errors.add(path, Failure())

        try:
            os.rmdir(filepath.path)
        except:
            raise HTTPError(statusForFailure(
                Failure(),
                "deleting directory: %s" % (filepath.path,)
            ))

        response = errors.response()

    else:
        #
        # Delete a file; much simpler, eh?
        #
        log.msg("Deleting file %s" % (filepath.path,))
        try:
            os.remove(filepath.path)
        except:
            raise HTTPError(statusForFailure(
                Failure(),
                "deleting file: %s" % (filepath.path,)
            ))

        response = responsecode.NO_CONTENT

    # Restat filepath since we deleted the backing file
    filepath.restat(False)

    return succeed(response)

def copy(source_filepath, destination_filepath, destination_uri, depth):
    """
    Perform a X{COPY} from the given source and destination filepaths.
    This will perform a X{DELETE} on the destination if necessary; the caller
    should check and handle the X{overwrite} header before calling L{copy} (as
    in L{COPYMOVE.prepareForCopy}).
    @param source_filepath: a L{FilePath} for the file to copy from.
    @param destination_filepath: a L{FilePath} for the file to copy to.
    @param destination_uri: the URI of the destination resource.
    @param depth: the recursion X{Depth} for the X{COPY} operation, which must
        be one of "0", "1", or "infinity".
    @raise HTTPError: (containing a response with a status code of
        L{responsecode.BAD_REQUEST}) if C{depth} is not "0", "1" or "infinity".
    @raise HTTPError: (containing an appropriate response) if the operation
        fails.  If C{source_filepath} is a directory, the response will be a
        L{MultiStatusResponse}.
    @return: a deferred response with a status code of L{responsecode.CREATED}
        if the destination already exists, or L{responsecode.NO_CONTENT} if the
        destination was created by the X{COPY} operation.
    """
    if source_filepath.isfile():
        #
        # Copy the file
        #
        log.msg("Copying file %s to %s" % (source_filepath.path, destination_filepath.path))

        try:
            source_file = source_filepath.open()
        except:
            raise HTTPError(statusForFailure(
                Failure(),
                "opening file for reading: %s" % (source_filepath.path,)
            ))

        source_stream = FileStream(source_file)
        response = waitForDeferred(put(source_stream, destination_filepath, destination_uri))
        yield response
        try:
            response = response.getResult()
        finally:
            source_stream.close()
            source_file.close()

        checkResponse(response, "put", responsecode.NO_CONTENT, responsecode.CREATED)

        yield response
        return

    elif source_filepath.isdir():
        if destination_filepath.exists():
            #
            # Delete the destination
            #
            response = waitForDeferred(delete(destination_uri, destination_filepath))
            yield response
            response = response.getResult()

            checkResponse(response, "delete", responsecode.NO_CONTENT)

            success_code = responsecode.NO_CONTENT
        else:
            success_code = responsecode.CREATED

        #
        # Copy the directory
        #
        log.msg("Copying directory %s to %s" % (source_filepath.path, destination_filepath.path))

        source_basename = source_filepath.path
        destination_basename = destination_filepath.path

        errors = ResponseQueue(source_basename, "COPY", success_code)

        if destination_filepath.parent().isdir():
            if os.path.islink(source_basename):
                link_destination = os.readlink(source_basename)
                if link_destination[0] != os.path.sep:
                    link_destination = os.path.join(source_basename, link_destination)
                try:
                    # NOTE(review): os.symlink(src, dst) creates dst pointing
                    # at src; these arguments look swapped (this would create
                    # a link inside the *source* tree) — confirm.
                    os.symlink(destination_basename, link_destination)
                except:
                    errors.add(source_basename, Failure())
            else:
                try:
                    os.mkdir(destination_basename)
                except:
                    raise HTTPError(statusForFailure(
                        Failure(),
                        "creating directory %s" % (destination_basename,)
                    ))

                if depth == "0":
                    yield success_code
                    return
        else:
            raise HTTPError(StatusResponse(
                responsecode.CONFLICT,
                "Parent collection for destination %s does not exist" % (destination_uri,)
            ))

        #
        # Recursive copy
        #
        # FIXME: When we report errors, do we report them on the source URI
        # or
on the destination URI? We're using the source URI here. # # FIXME: defer the walk? source_basename_len = len(source_basename) def paths(basepath, subpath): source_path = os.path.join(basepath, subpath) assert source_path.startswith(source_basename) destination_path = os.path.join(destination_basename, source_path[source_basename_len+1:]) return source_path, destination_path for dir, subdirs, files in os.walk(source_filepath.path, topdown=True): for filename in files: source_path, destination_path = paths(dir, filename) if not os.path.isdir(os.path.dirname(destination_path)): errors.add(source_path, responsecode.NOT_FOUND) else: response = waitForDeferred(copy(FilePath(source_path), FilePath(destination_path), destination_uri, depth)) yield response response = response.getResult() checkResponse(response, "copy", responsecode.NO_CONTENT) for subdir in subdirs: source_path, destination_path = paths(dir, subdir) log.msg("Copying directory %s to %s" % (source_path, destination_path)) if not os.path.isdir(os.path.dirname(destination_path)): errors.add(source_path, responsecode.CONFLICT) else: if os.path.islink(source_path): link_destination = os.readlink(source_path) if link_destination[0] != os.path.sep: link_destination = os.path.join(source_path, link_destination) try: os.symlink(destination_path, link_destination) except: errors.add(source_path, Failure()) else: try: os.mkdir(destination_path) except: errors.add(source_path, Failure()) yield errors.response() return else: log.err("Unable to COPY to non-file: %s" % (source_filepath.path,)) raise HTTPError(StatusResponse( responsecode.FORBIDDEN, "The requested resource exists but is not backed by a regular file." )) raise AssertionError("We shouldn't be here.") copy = deferredGenerator(copy) def move(source_filepath, source_uri, destination_filepath, destination_uri, depth): """ Perform a X{MOVE} from the given source and destination filepaths. 
This will perform a X{DELETE} on the destination if necessary; the caller should check and handle the X{overwrite} header before calling L{copy} (as in L{COPYMOVE.prepareForCopy}). Following the X{DELETE}, this will attempt an atomic filesystem move. If that fails, a X{COPY} operation followed by a X{DELETE} on the source will be attempted instead. @param source_filepath: a L{FilePath} for the file to copy from. @param destination_filepath: a L{FilePath} for the file to copy to. @param destination_uri: the URI of the destination resource. @param depth: the recursion X{Depth} for the X{MOVE} operation, which must be "infinity". @raise HTTPError: (containing a response with a status code of L{responsecode.BAD_REQUEST}) if C{depth} is not "infinity". @raise HTTPError: (containing an appropriate response) if the operation fails. If C{source_filepath} is a directory, the response will be a L{MultiStatusResponse}. @return: a deferred response with a status code of L{responsecode.CREATED} if the destination already exists, or L{responsecode.NO_CONTENT} if the destination was created by the X{MOVE} operation. 
""" log.msg("Moving %s to %s" % (source_filepath.path, destination_filepath.path)) # # Choose a success status # if destination_filepath.exists(): # # Delete the destination # response = waitForDeferred(delete(destination_uri, destination_filepath)) yield response response = response.getResult() checkResponse(response, "delete", responsecode.NO_CONTENT) success_code = responsecode.NO_CONTENT else: success_code = responsecode.CREATED # # See if rename (which is atomic, and fast) works # try: os.rename(source_filepath.path, destination_filepath.path) except OSError: pass else: # Restat source filepath since we moved it source_filepath.restat(False) yield success_code return # # Do a copy, then delete the source # response = waitForDeferred(copy(source_filepath, destination_filepath, destination_uri, depth)) yield response response = response.getResult() checkResponse(response, "copy", responsecode.CREATED, responsecode.NO_CONTENT) response = waitForDeferred(delete(source_uri, source_filepath)) yield response response = response.getResult() checkResponse(response, "delete", responsecode.NO_CONTENT) yield success_code move = deferredGenerator(move) def put(stream, filepath, uri=None): """ Perform a PUT of the given data stream into the given filepath. @param stream: the stream to write to the destination. @param filepath: the L{FilePath} of the destination file. @param uri: the URI of the destination resource. If the destination exists, if C{uri} is not C{None}, perform a X{DELETE} operation on the destination, but if C{uri} is C{None}, delete the destination directly. Note that whether a L{put} deletes the destination directly vs. performing a X{DELETE} on the destination affects the response returned in the event of an error during deletion. Specifically, X{DELETE} on collections must return a L{MultiStatusResponse} under certain circumstances, whereas X{PUT} isn't required to do so. 
Therefore, if the caller expects X{DELETE} semantics, it must provide a valid C{uri}. @raise HTTPError: (containing an appropriate response) if the operation fails. @return: a deferred response with a status code of L{responsecode.CREATED} if the destination already exists, or L{responsecode.NO_CONTENT} if the destination was created by the X{PUT} operation. """ log.msg("Writing to file %s" % (filepath.path,)) if filepath.exists(): if uri is None: try: if filepath.isdir(): rmdir(filepath.path) else: os.remove(filepath.path) except: raise HTTPError(statusForFailure( Failure(), "writing to file: %s" % (filepath.path,) )) else: response = waitForDeferred(delete(uri, filepath)) yield response response = response.getResult() checkResponse(response, "delete", responsecode.NO_CONTENT) success_code = responsecode.NO_CONTENT else: success_code = responsecode.CREATED # # Write the contents of the request stream to resource's file # try: resource_file = filepath.open("w") except: raise HTTPError(statusForFailure( Failure(), "opening file for writing: %s" % (filepath.path,) )) try: x = waitForDeferred(readIntoFile(stream, resource_file)) yield x x.getResult() except: raise HTTPError(statusForFailure( Failure(), "writing to file: %s" % (filepath.path,) )) # Restat filepath since we modified the backing file filepath.restat(False) yield success_code put = deferredGenerator(put) def mkcollection(filepath): """ Perform a X{MKCOL} on the given filepath. @param filepath: the L{FilePath} of the collection resource to create. @raise HTTPError: (containing an appropriate response) if the operation fails. @return: a deferred response with a status code of L{responsecode.CREATED} if the destination already exists, or L{responsecode.NO_CONTENT} if the destination was created by the X{MKCOL} operation. 
""" try: os.mkdir(filepath.path) # Restat filepath because we modified it filepath.restat(False) except: raise HTTPError(statusForFailure( Failure(), "creating directory in MKCOL: %s" % (filepath.path,) )) return succeed(responsecode.CREATED) def rmdir(dirname): """ Removes the directory with the given name, as well as its contents. @param dirname: the path to the directory to remove. """ for dir, subdirs, files in os.walk(dirname, topdown=False): for filename in files: os.remove(os.path.join(dir, filename)) for subdir in subdirs: path = os.path.join(dir, subdir) if os.path.islink(path): os.remove(path) else: os.rmdir(path) os.rmdir(dirname) def checkResponse(response, method, *codes): assert ( response in codes, "%s() should have raised, but returned one of %r instead" % (method, codes) )
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/fileop.py
fileop.py
import os from urllib import quote as url_quote from filecmp import dircmp as DirCompare from tempfile import mkdtemp from shutil import copy from random import randrange, choice from twisted.python import log from twisted.trial import unittest from twisted.internet.defer import Deferred from twisted.web2.dav.fileop import rmdir from twisted.web2.dav.util import joinURL from twisted.web2.dav.static import DAVFile class TodoTest(Exception): pass class TestCase(unittest.TestCase): docroot = property(lambda(self): self.site.resource.fp.path) resource_class = DAVFile def setUp(self): log.msg("Setting up %s" % (self.__class__,)) docroot = self.mktemp() os.mkdir(docroot) dirs = ( os.path.join(docroot, "dir1"), os.path.join(docroot, "dir2"), os.path.join(docroot, "dir2", "subdir1"), os.path.join(docroot, "dir3"), os.path.join(docroot, "dir4"), os.path.join(docroot, "dir4", "subdir1"), os.path.join(docroot, "dir4", "subdir1", "subsubdir1"), os.path.join(docroot, "dir4", "subdir2"), os.path.join(docroot, "dir4", "subdir2", "dir1"), os.path.join(docroot, "dir4", "subdir2", "dir2"), ) for dir in dirs: os.mkdir(dir) src = os.path.dirname(__file__) files = [ os.path.join(src, f) for f in os.listdir(src) if os.path.isfile(os.path.join(src, f)) ] dc = randrange(len(dirs)) while dc: dc -= 1 dir = choice(dirs) fc = randrange(len(files)) while fc: fc -= 1 copy(choice(files), dir) for path in files[:8]: copy(path, docroot) self.site = Site(self.resource_class(docroot)) def tearDown(self): log.msg("Tearing down %s" % (self.__class__,)) rmdir(self.docroot) def mkdtemp(self, prefix): """ Creates a new directory in the document root and returns its path and URI. 
""" path = mkdtemp(prefix=prefix + "_", dir=self.docroot) uri = joinURL("/", url_quote(os.path.basename(path))) + "/" return (path, uri) def send(self, request, callback): log.msg("Sending %s request for URI %s" % (request.method, request.uri)) d = request.locateResource(request.uri) d.addCallback(lambda resource: resource.renderHTTP(request)) d.addCallback(request._cbFinishRender) if type(callback) is tuple: d.addCallbacks(*callback) else: d.addCallback(callback) return d def _ebDeferTestMethod(self, f, result): if f.check(TodoTest): result.addExpectedFailure(self, f, unittest.makeTodo(f.getErrorMessage())) else: return unittest.TestCase._ebDeferTestMethod(self, f, result) class Site: # FIXME: There is no ISite interface; there should be. # implements(ISite) def __init__(self, resource): self.resource = resource def dircmp(dir1, dir2): dc = DirCompare(dir1, dir2) return bool( dc.left_only or dc.right_only or dc.diff_files or dc.common_funny or dc.funny_files ) def serialize(f, work): d = Deferred() def oops(error): d.errback(error) def do_serialize(_): try: args = work.next() except StopIteration: d.callback(None) else: r = f(*args) r.addCallback(do_serialize) r.addErrback(oops) do_serialize(None) return d
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/test/util.py
util.py
from twisted.web2.dav.element.base import * ## # Section 3 (Privileges) ## class Read (WebDAVEmptyElement): """ Privilege which controls methods that return information about the state of a resource, including the resource's properties. (RFC 3744, section 3.1) """ name = "read" # For DAV:write element (RFC 3744, section 3.2) see Write class above. class WriteProperties (WebDAVEmptyElement): """ Privilege which controls methods that modify the dead properties of a resource. (RFC 3744, section 3.3) """ name = "write-properties" class WriteContent (WebDAVEmptyElement): """ Privilege which controls methods that modify the content of an existing resource. (RFC 3744, section 3.4) """ name = "write-content" class Unlock (WebDAVEmptyElement): """ Privilege which controls the use of the UNLOCK method by a principal other than the lock owner. (RFC 3744, section 3.5) """ name = "unlock" class ReadACL (WebDAVEmptyElement): """ Privilege which controls the use of the PROPFIND method to retrieve the DAV:acl property of a resource. (RFC 3744, section 3.6) """ name = "read-acl" class ReadCurrentUserPrivilegeSet (WebDAVEmptyElement): """ Privilege which controls the use of the PROPFIND method to retrieve the DAV:current-user-privilege-set property of a resource. (RFC 3744, section 3.7) """ name = "read-current-user-privilege-set" class WriteACL (WebDAVEmptyElement): """ Privilege which controls the use of the ACL method to modify the DAV:acl property of a resource. (RFC 3744, section 3.8) """ name = "write-acl" class Bind (WebDAVEmptyElement): """ Privilege which allows a method to add a new member URL from the a collection resource. (RFC 3744, section 3.9) """ name = "bind" class Unbind (WebDAVEmptyElement): """ Privilege which allows a method to remove a member URL from the a collection resource. (RFC 3744, section 3.10) """ name = "unbind" class All (WebDAVEmptyElement): """ Aggregate privilege that contains the entire set of privileges that can be applied to a resource. 
(RFC 3744, section 3.11) Principal which matches all users. (RFC 3744, section 5.5.1) """ name = "all" ## # Section 4 (Principal Properties) ## class Principal (WebDAVElement): """ Indicates a principal resource type. (RFC 3744, section 4) Identifies the principal to which an ACE applies. (RFC 3744, section 5.5.1) """ name = "principal" allowed_children = { (dav_namespace, "href" ): (0, 1), (dav_namespace, "all" ): (0, 1), (dav_namespace, "authenticated" ): (0, 1), (dav_namespace, "unauthenticated"): (0, 1), (dav_namespace, "property" ): (0, 1), (dav_namespace, "self" ): (0, 1), } def __init__(self, *children, **attributes): super(Principal, self).__init__(*children, **attributes) if len(self.children) > 1: raise ValueError( "Exactly one of DAV:href, DAV:all, DAV:authenticated, " "DAV:unauthenticated, DAV:property or DAV:self is required for " "%s, got: %r" % (self.sname(), self.children) ) class AlternateURISet (WebDAVElement): """ Property which contains the URIs of network resources with additional descriptive information about the principal. (RFC 3744, section 4.1) """ name = "alternate-uri-set" hidden = True protected = True allowed_children = { (dav_namespace, "href"): (0, None) } class PrincipalURL (WebDAVElement): """ Property which contains the URL that must be used to identify this principal in an ACL request. (RFC 3744, section 4.2) """ name = "principal-url" hidden = True protected = True allowed_children = { (dav_namespace, "href"): (0, 1) } class GroupMemberSet (WebDAVElement): """ Property which identifies the principals that are direct members of a group principal. (RFC 3744, section 4.3) """ name = "group-member-set" hidden = True allowed_children = { (dav_namespace, "href"): (0, None) } class GroupMembership (WebDAVElement): """ Property which identifies the group principals in which a principal is directly a member. 
(RFC 3744, section 4.4) """ name = "group-membership" hidden = True protected = True allowed_children = { (dav_namespace, "href"): (0, None) } ## # Section 5 (Access Control Properties) ## # For DAV:owner element (RFC 3744, section 5.1) see Owner class above. class Group (WebDAVElement): """ Property which identifies a particular principal as being the group principal of a resource. (RFC 3744, section 5.2) """ name = "group" hidden = True #protected = True # may be protected, per RFC 3744, section 5.2 allowed_children = { (dav_namespace, "href"): (0, 1) } class SupportedPrivilegeSet (WebDAVElement): """ Property which identifies the privileges defined for a resource. (RFC 3744, section 5.3) """ name = "supported-privilege-set" hidden = True protected = True allowed_children = { (dav_namespace, "supported-privilege"): (0, None) } class SupportedPrivilege (WebDAVElement): """ Identifies a privilege defined for a resource. (RFC 3744, section 5.3) """ name = "supported-privilege" allowed_children = { (dav_namespace, "privilege" ): (1, 1), (dav_namespace, "abstract" ): (0, 1), (dav_namespace, "description" ): (1, 1), (dav_namespace, "supported-privilege"): (0, None), } class Privilege (WebDAVElement): """ Identifies a privilege. (RFC 3744, sections 5.3 and 5.5.1) """ name = "privilege" allowed_children = { WebDAVElement: (0, None) } class Abstract (WebDAVElement): """ Identifies a privilege as abstract. (RFC 3744, section 5.3) """ name = "abstract" class Description (WebDAVTextElement): """ A human-readable description of what privilege controls access to. (RFC 3744, sections 5.3 and 9.5) """ name = "description" allowed_attributes = { "xml:lang": True } class CurrentUserPrivilegeSet (WebDAVElement): """ Property which contains the exact set of privileges (as computer by the server) granted to the currently authenticated HTTP user. 
(RFC 3744, section 5.4) """ name = "current-user-privilege-set" hidden = True protected = True allowed_children = { (dav_namespace, "privilege"): (0, None) } # For DAV:privilege element (RFC 3744, section 5.4) see Privilege class above. class ACL (WebDAVElement): """ Property which specifies the list of access control entries which define what privileges are granted to which users for a resource. (RFC 3744, section 5.5) """ name = "acl" hidden = True protected = True allowed_children = { (dav_namespace, "ace"): (0, None) } class ACE (WebDAVElement): """ Specifies the list of access control entries which define what privileges are granted to which users for a resource. (RFC 3744, section 5.5) """ name = "ace" allowed_children = { (dav_namespace, "principal"): (0, 1), (dav_namespace, "invert" ): (0, 1), (dav_namespace, "grant" ): (0, 1), (dav_namespace, "deny" ): (0, 1), (dav_namespace, "protected"): (0, 1), (dav_namespace, "inherited"): (0, 1), } def __init__(self, *children, **attributes): super(ACE, self).__init__(*children, **attributes) self.principal = None self.invert = None self.allow = None self.privileges = None self.inherited = None self.protected = False for child in self.children: namespace, name = child.qname() assert namespace == dav_namespace if name in ("principal", "invert"): if self.principal is not None: raise ValueError( "Only one of DAV:principal or DAV:invert allowed in %s, got: %s" % (self.sname(), self.children) ) if name == "invert": self.invert = True self.principal = child.children[0] else: self.invert = False self.principal = child elif name in ("grant", "deny"): if self.allow is not None: raise ValueError( "Only one of DAV:grant or DAV:deny allowed in %s, got: %s" % (self.sname(), self.children) ) self.allow = (name == "grant") self.privileges = child.children elif name == "inherited": self.inherited = str(child.children[0]) elif name == "protected": self.protected = True if self.principal is None: raise ValueError( "One of DAV:principal 
or DAV:invert is required in %s, got: %s" % (self.sname(), self.children) ) assert self.invert is not None if self.allow is None: raise ValueError( "One of DAV:grant or DAV:deny is required in %s, got: %s" % (self.sname(), self.children) ) assert self.privileges is not None # For DAV:principal element (RFC 3744, section 5.5.1) see Principal class above. # For DAV:all element (RFC 3744, section 5.5.1) see All class above. class Authenticated (WebDAVEmptyElement): """ Principal which matches authenticated users. (RFC 3744, section 5.5.1) """ name = "authenticated" class Unauthenticated (WebDAVEmptyElement): """ Principal which matches unauthenticated users. (RFC 3744, section 5.5.1) """ name = "unauthenticated" # For DAV:property element (RFC 3744, section 5.5.1) see Property class above. class Self (WebDAVEmptyElement): """ Principal which matches a user if a resource is a principal and the user matches the resource. (RFC 3744, sections 5.5.1 and 9.3) """ name = "self" class Invert (WebDAVEmptyElement): """ Principal which matches a user if the user does not match the principal contained by this principal. (RFC 3744, section 5.5.1) """ name = "invert" allowed_children = { (dav_namespace, "principal"): (1, 1) } class Grant (WebDAVElement): """ Grants the contained privileges to a principal. (RFC 3744, section 5.5.2) """ name = "grant" allowed_children = { (dav_namespace, "privilege"): (1, None) } class Deny (WebDAVElement): """ Denies the contained privileges to a principal. (RFC 3744, section 5.5.2) """ name = "deny" allowed_children = { (dav_namespace, "privilege"): (1, None) } # For DAV:privilege element (RFC 3744, section 5.5.2) see Privilege class above. class Protected (WebDAVEmptyElement): """ Identifies an ACE as protected. (RFC 3744, section 5.5.3) """ name = "protected" class Inherited (WebDAVElement): """ Indicates that an ACE is inherited from the resource indentified by the contained DAV:href element. 
(RFC 3744, section 5.5.4) """ name = "inherited" allowed_children = { (dav_namespace, "href"): (1, 1) } class ACLRestrictions (WebDAVElement): """ Property which defines the types of ACLs supported by this server, to avoid clients needlessly getting errors. (RFC 3744, section 5.6) """ name = "acl-restrictions" hidden = True protected = True allowed_children = { (dav_namespace, "grant-only" ): (0, 1), (dav_namespace, "no-invert" ): (0, 1), (dav_namespace, "deny-before-grant" ): (0, 1), (dav_namespace, "required-principal"): (0, 1), } class GrantOnly (WebDAVEmptyElement): """ Indicates that ACEs with deny clauses are not allowed. (RFC 3744, section 5.6.1) """ name = "grant-only" class NoInvert (WebDAVEmptyElement): """ Indicates that ACEs with the DAV:invert element are not allowed. (RFC 3744, section 5.6.2) """ name = "no-invert" class DenyBeforeGrant (WebDAVEmptyElement): """ Indicates that all deny ACEs must precede all grant ACEs. (RFC 3744, section 5.6.3) """ name = "deny-before-grant" class RequiredPrincipal (WebDAVElement): """ Indicates which principals must have an ACE defined in an ACL. (RFC 3744, section 5.6.4) """ name = "required-principal" allowed_children = { (dav_namespace, "all" ): (0, 1), (dav_namespace, "authenticated" ): (0, 1), (dav_namespace, "unauthenticated"): (0, 1), (dav_namespace, "self" ): (0, 1), (dav_namespace, "href" ): (0, None), (dav_namespace, "property" ): (0, None), } def __init__(self, *children, **attributes): super(RequiredPrincipal, self).__init__(*children, **attributes) type = None for child in self.children: if type is None: type = child.qname() elif child.qname() != type: raise ValueError( "Only one of DAV:all, DAV:authenticated, DAV:unauthenticated, " "DAV:self, DAV:href or DAV:property allowed for %s, got: %s" % (self.sname(), self.children) ) class InheritedACLSet (WebDAVElement): """ Property which contains a set of URLs that identify other resources that also control the access to this resource. 
(RFC 3744, section 5.7) """ name = "inherited-acl-set" hidden = True protected = True allowed_children = { (dav_namespace, "href"): (0, None) } class PrincipalCollectionSet (WebDAVElement): """ Property which contains a set of URLs that identify the root collections that contain the principals that are available on the server that implements a resource. (RFC 3744, section 5.8) """ name = "principal-collection-set" hidden = True protected = True allowed_children = { (dav_namespace, "href"): (0, None) } ## # Section 7 (Access Control and existing methods) ## class NeedPrivileges (WebDAVElement): """ Error which indicates insufficient privileges. (RFC 3744, section 7.1.1) """ name = "need-privileges" allowed_children = { (dav_namespace, "resource"): (0, None) } class Resource (WebDAVElement): """ Identifies which resource had insufficient privileges. (RFC 3744, section 7.1.1) """ name = "resource" allowed_children = { (dav_namespace, "href" ): (1, 1), (dav_namespace, "privilege"): (1, 1), } ## # Section 9 (Access Control Reports) ## class ACLPrincipalPropSet (WebDAVElement): """ Report which returns, for all principals in the DAV:acl property (of the resource identified by the Request-URI) that are identified by http(s) URLs or by a DAV:property principal, the value of the properties specified in the REPORT request body. (RFC 3744, section 9.2) """ name = "acl-principal-prop-set" allowed_children = { WebDAVElement: (0, None) } def __init__(self, *children, **attributes): super(ACLPrincipalPropSet, self).__init__(*children, **attributes) prop = False for child in self.children: if child.qname() == (dav_namespace, "prop"): if prop: raise ValueError( "Only one DAV:prop allowed for %s, got: %s" % (self.sname(), self.children) ) prop = True class PrincipalMatch (WebDAVElement): """ Report used to identify all members (at any depth) of the collection identified by the Request-URI that are principals and that match the current user. 
(RFC 3744, section 9.3) """ name = "principal-match" allowed_children = { (dav_namespace, "principal-property"): (0, 1), (dav_namespace, "self" ): (0, 1), (dav_namespace, "prop" ): (0, 1), } def __init__(self, *children, **attributes): super(PrincipalMatch, self).__init__(*children, **attributes) principalPropertyOrSelf = False for child in self.children: namespace, name = child.qname() if name in ("principal-property", "self"): if principalPropertyOrSelf: raise ValueError( "Only one of DAV:principal-property or DAV:self allowed in %s, got: %s" % (self.sname(), self.children) ) principalPropertyOrSelf = True if not principalPropertyOrSelf: raise ValueError( "One of DAV:principal-property or DAV:self is required in %s, got: %s" % (self.sname(), self.children) ) class PrincipalProperty (WebDAVElement): """ Identifies a property. (RFC 3744, section 9.3) """ name = "principal-property" allowed_children = { WebDAVElement: (0, None) } # For DAV:self element (RFC 3744, section 9.3) see Self class above. class PrincipalPropertySearch (WebDAVElement): """ Report which performs a search for all principals whose properties contain character data that matches the search criteria specified in the request. (RFC 3744, section 9.4) """ name = "principal-property-search" allowed_children = { (dav_namespace, "property-search" ): (1, None), (dav_namespace, "prop" ): (0, 1), (dav_namespace, "apply-to-principal-collection-set"): (0, 1), } class PropertySearch (WebDAVElement): """ Contains a DAV:prop element enumerating the properties to be searched and a DAV:match element, containing the search string. (RFC 3744, section 9.4) """ name = "property-search" allowed_children = { (dav_namespace, "prop" ): (1, 1), (dav_namespace, "match"): (1, 1), } class Match (WebDAVTextElement): """ Contains a search string. 
(RFC 3744, section 9.4) """ name = "match" class PrincipalSearchPropertySet (WebDAVElement): """ Report which identifies those properties that may be searched using the DAV:principal-property-search report. (RFC 3744, section 9.5) """ name = "principal-search-property-set" allowed_children = { (dav_namespace, "principal-search-property"): (0, None) } class PrincipalSearchProperty (WebDAVElement): """ Contains exactly one searchable property, and a description of the property. (RFC 3744, section 9.5) """ name = "principal-search-property" allowed_children = { (dav_namespace, "prop" ): (1, 1), (dav_namespace, "description"): (1, 1), } # For DAV:description element (RFC 3744, section 9.5) see Description class above.
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/element/rfc3744.py
rfc3744.py
from twisted.web2 import responsecode
from twisted.web2.dav.element.base import *
from twisted.web2.http_headers import MimeType

##
# Section 12
##

class ActiveLock (WebDAVElement):
    """
    Describes a lock on a resource. (RFC 2518, section 12.1)
    """
    name = "activelock"

    allowed_children = {
        (dav_namespace, "lockscope"): (1, 1),
        (dav_namespace, "locktype" ): (1, 1),
        (dav_namespace, "depth"    ): (1, 1),
        (dav_namespace, "owner"    ): (0, 1),
        (dav_namespace, "timeout"  ): (0, 1),
        (dav_namespace, "locktoken"): (0, 1),
    }

class Depth (WebDAVTextElement):
    """
    The value of the depth header. (RFC 2518, section 12.1.1)
    """
    name = "depth"

    def __init__(self, *children, **attributes):
        super(Depth, self).__init__(*children, **attributes)

        # RFC 2518 defines exactly these three legal depth values.
        depth = str(self)
        if depth not in ("0", "1", "infinity"):
            raise ValueError("Invalid depth: %s" % (depth,))

class LockToken (WebDAVElement):
    """
    The lock token associated with a lock. (RFC 2518, section 12.1.2)
    """
    name = "locktoken"

    allowed_children = { (dav_namespace, "href"): (1, None) }

class Timeout (WebDAVTextElement):
    """
    The timeout associated with a lock. (RFC 2518, section 12.1.3)
    """
    name = "timeout"

class Collection (WebDAVEmptyElement):
    """
    Identifies the associated resource as a collection. (RFC 2518, section 12.2)
    """
    name = "collection"

class HRef (WebDAVTextElement):
    """
    Identifies the content of the element as a URI. (RFC 2518, section 12.3)
    """
    name = "href"

class Link (WebDAVElement):
    """
    Identifies the property as a link and contains the source and destination
    of that link. (RFC 2518, section 12.4)
    """
    name = "link"

    allowed_children = {
        (dav_namespace, "src"): (1, None),
        (dav_namespace, "dst"): (1, None),
    }

class LinkDestination (WebDAVTextElement):
    """
    Indicates the destination of a link. (RFC 2518, section 12.4.1)
    """
    name = "dst"

class LinkSource (WebDAVTextElement):
    """
    Indicates the source of a link. (RFC 2518, section 12.4.2)
    """
    name = "src"

class LockEntry (WebDAVElement):
    """
    Defines the types of lock that can be used with the resource.
    (RFC 2518, section 12.5)
    """
    name = "lockentry"

    allowed_children = {
        (dav_namespace, "lockscope"): (1, 1),
        (dav_namespace, "locktype" ): (1, 1),
    }

class LockInfo (WebDAVElement):
    """
    Used with a LOCK method to specify the type of lock that the client wishes
    to have created. (RFC 2518, section 12.6)
    """
    name = "lockinfo"

    allowed_children = {
        (dav_namespace, "lockscope"): (1, 1),
        (dav_namespace, "locktype" ): (1, 1),
        (dav_namespace, "owner"    ): (0, 1),
    }

class LockScope (WebDAVOneShotElement):
    """
    Specifies whether a lock is an exclusive lock or a shared lock.
    (RFC 2518, section 12.7)
    """
    name = "lockscope"

    allowed_children = {
        (dav_namespace, "exclusive"): (0, 1),
        (dav_namespace, "shared"   ): (0, 1),
    }

class Exclusive (WebDAVEmptyElement):
    """
    Indicates an exclusive lock. (RFC 2518, section 12.7.1)
    """
    name = "exclusive"

# Shared one-shot instance for the exclusive scope.
LockScope.exclusive = LockScope(Exclusive())

class Shared (WebDAVEmptyElement):
    """
    Indicates a shared lock. (RFC 2518, section 12.7.2)
    """
    name = "shared"

# Shared one-shot instance for the shared scope.
LockScope.shared = LockScope(Shared())

class LockType (WebDAVOneShotElement):
    """
    Specifies the access type of a lock. (RFC 2518, section 12.8)
    """
    name = "locktype"

    allowed_children = { (dav_namespace, "write"): (0, 1) }

class Write (WebDAVEmptyElement):
    """
    Indicates a write lock. (RFC 2518, section 12.8.1)
    Controls methods that lock a resource or modify the content, dead
    properties, or (in the case of a collection) membership of a resource.
    (RFC 3744, section 3.2)
    """
    name = "write"

# Shared one-shot instance for the write lock type.
LockType.write = LockType(Write())

class MultiStatus (WebDAVElement):
    """
    Contains multiple Responses. (RFC 2518, section 12.9)
    """
    name = "multistatus"

    allowed_children = {
        (dav_namespace, "response"           ): (0, None),
        (dav_namespace, "responsedescription"): (0, 1),
    }

class Response (WebDAVElement):
    """
    Holds a single response describing the effect of a method on a resource
    and/or its properties. (RFC 2518, section 12.9.1)

    Instantiating Response directly actually produces one of its two
    specialized subclasses: a StatusResponse if a DAV:status child is given,
    or a PropertyStatusResponse if DAV:propstat children are given.
    """
    name = "response"

    allowed_children = {
        (dav_namespace, "href"               ): (1, None),
        (dav_namespace, "status"             ): (1, 1),
        (dav_namespace, "propstat"           ): (1, None),
        (dav_namespace, "error"              ): (0, 1),    # 2518bis
        (dav_namespace, "responsedescription"): (0, 1),
    }

    def __new__(clazz, *children):
        # Subclasses construct normally; only direct Response() dispatches.
        if clazz is not Response:
            return WebDAVElement.__new__(clazz)

        resource_count = 0
        status_count   = 0
        propstat_count = 0

        for child in children:
            if   isinstance(child, HRef          ): resource_count += 1
            elif isinstance(child, Status        ): status_count   += 1
            elif isinstance(child, PropertyStatus): propstat_count += 1

        if resource_count < 1:
            raise ValueError("%s element must have at least one %s."
                             % (clazz.sname(), HRef.sname()))

        # Fixed: previously used "is 0", an identity test on ints, which is
        # an implementation accident (small-int caching) rather than a
        # numeric comparison.
        if status_count == 0:
            if propstat_count == 0:
                raise ValueError("%s element must have one of %s or %s"
                                 % (clazz.sname(), Status.sname(), PropertyStatus.sname()))

            if resource_count > 1:
                raise ValueError("%s element with %s may only have one %s"
                                 % (clazz.sname(), PropertyStatus.sname(), HRef.sname()))

            return PropertyStatusResponse.__new__(PropertyStatusResponse, *children)

        if status_count > 1:
            raise ValueError("%s element may only have one %s"
                             % (clazz.sname(), Status.sname()))

        return StatusResponse.__new__(StatusResponse, *children)

class StatusResponse (Response):
    """
    Specialized derivative of Response for resource status.
    """
    unregistered = True

    allowed_children = {
        (dav_namespace, "href"               ): (1, None),
        (dav_namespace, "status"             ): (1, 1),
        (dav_namespace, "error"              ): (0, 1),    # 2518bis
        (dav_namespace, "responsedescription"): (0, 1),
    }

class PropertyStatusResponse (Response):
    """
    Specialized derivative of Response for property status.
    """
    unregistered = True

    allowed_children = {
        (dav_namespace, "href"               ): (1, 1),
        (dav_namespace, "propstat"           ): (1, None),
        (dav_namespace, "error"              ): (0, 1),    # 2518bis
        (dav_namespace, "responsedescription"): (0, 1),
    }

class PropertyStatus (WebDAVElement):
    """
    Groups together a Property and Status element that is associated with a
    particular DAV:href element. (RFC 2518, section 12.9.1.1)
    """
    name = "propstat"

    allowed_children = {
        (dav_namespace, "prop"               ): (1, 1),
        (dav_namespace, "status"             ): (1, 1),
        (dav_namespace, "error"              ): (0, 1),    # 2518bis
        (dav_namespace, "responsedescription"): (0, 1),
    }

class Status (WebDAVTextElement):
    """
    Holds a single HTTP status line. (RFC 2518, section 12.9.1.2)
    """
    name = "status"

    def fromResponseCode(clazz, code):
        """
        code must be an integer response code in
        twisted.web2.responsecode.RESPONSES.keys()
        """
        if code not in responsecode.RESPONSES:
            raise ValueError("Invalid response code: %r" % (code,))

        return clazz(PCDATAElement("HTTP/1.1 %d %s" % (code, responsecode.RESPONSES[code])))

    fromResponseCode = classmethod(fromResponseCode)

    def __init__(self, *children, **attributes):
        super(Status, self).__init__(*children, **attributes)

        status = str(self)
        if not status.startswith("HTTP/1.1 "):
            raise ValueError("Invalid WebDAV status: %s" % (status,))

        # Characters 9-11 of the status line are the three-digit status code.
        code = int(status[9:12])
        if code not in responsecode.RESPONSES:
            raise ValueError("Invalid status code: %s" % (code,))

        self.code = code

class ResponseDescription (WebDAVTextElement):
    """
    Contains a message that can be displayed to the user explaining the nature
    of the response. (RFC 2518, section 12.9.2)
    """
    name = "responsedescription"

class Owner (WebDAVElement):
    """
    Property which provides information about the principal taking out a lock.
    (RFC 2518, section 12.10)
    Property which identifies a principal as being the owner principal of a
    resource. (RFC 3744, section 5.1)
    Note that RFC 2518 allows any content, while RFC 3744 expect zero or one
    DAV:href element.
    """
    name = "owner"
    hidden = True
    #protected = True # may be protected, per RFC 3744, section 5.2

    allowed_children = { WebDAVElement: (0, None) }

class PropertyContainer (WebDAVElement):
    """
    Contains properties related to a resource. (RFC 2518, section 12.11)
    """
    name = "prop"

    allowed_children = { WebDAVElement: (0, None) }

class PropertyBehavior (WebDAVElement):
    """
    Specifies how properties are handled during a COPY or MOVE.
    (RFC 2518, section 12.12)
    """
    name = "propertybehavior"

    allowed_children = {
        (dav_namespace, "omit"     ): (0, 1),
        (dav_namespace, "keepalive"): (0, 1),
    }

    def __init__(self, *children, **attributes):
        super(PropertyBehavior, self).__init__(*children, **attributes)

        if len(self.children) != 1:
            raise ValueError(
                "Exactly one of DAV:omit, DAV:keepalive required for %s, got: %s"
                % (self.sname(), self.children)
            )

        # Convenience handle on the single DAV:omit or DAV:keepalive child.
        self.behavior = children[0]

class KeepAlive (WebDAVElement):
    """
    Specifies requirements for the copying/moving of live properties.
    (RFC 2518, section 12.12.1)
    """
    name = "keepalive"

    allowed_children = {
        (dav_namespace, "href"): (0, None),
        PCDATAElement: (0, 1),
    }

    def __init__(self, *children, **attributes):
        super(KeepAlive, self).__init__(*children, **attributes)

        # Children must be homogeneous: all DAV:href elements, or all PCDATA.
        child_type = None
        for child in self.children:
            if child_type is None:
                child_type = child.qname()
            elif child.qname() != child_type:
                raise ValueError(
                    "Only one of DAV:href or PCDATA allowed for %s, got: %s"
                    % (self.sname(), self.children)
                )

        if child_type == "#PCDATA":
            # The only legal PCDATA content is "*" (keep all live properties
            # alive).  Fixed: the original passed the value as a second
            # argument to ValueError instead of %-formatting the message.
            if str(self) != "*":
                raise ValueError("Invalid keepalive value: %r" % (str(self),))

class Omit (WebDAVEmptyElement):
    """
    Instructs the server that it should use best effort to copy properties.
    (RFC 2518, section 12.12.2)
    """
    name = "omit"

class PropertyUpdate (WebDAVElement):
    """
    Contains a request to alter the properties on a resource.
    (RFC 2518, section 12.13)
    """
    name = "propertyupdate"

    allowed_children = {
        (dav_namespace, "remove"): (0, None),
        (dav_namespace, "set"   ): (0, None),
    }

class Remove (WebDAVElement):
    """
    Lists the DAV properties to be removed from a resource.
    (RFC 2518, section 12.13.1)
    """
    name = "remove"

    allowed_children = { (dav_namespace, "prop"): (1, 1) }

class Set (WebDAVElement):
    """
    Lists the DAV properties to be set for a resource.
    (RFC 2518, section 12.13.2)
    """
    name = "set"

    allowed_children = { (dav_namespace, "prop"): (1, 1) }

class PropertyFind (WebDAVElement):
    """
    Specifies the properties to be returned from a PROPFIND method.
    (RFC 2518, section 12.14)
    """
    name = "propfind"

    allowed_children = {
        (dav_namespace, "allprop" ): (0, 1),
        (dav_namespace, "propname"): (0, 1),
        (dav_namespace, "prop"    ): (0, 1),
    }

    def __init__(self, *children, **attributes):
        super(PropertyFind, self).__init__(*children, **attributes)

        if len(self.children) != 1:
            raise ValueError(
                "Exactly one of DAV:allprop, DAV:propname or DAV:prop is required for %s, got: %r"
                % (self.sname(), self.children)
            )

class AllProperties (WebDAVEmptyElement):
    """
    Specifies that all property names and values on the resource are to be
    returned. (RFC 2518, section 12.14.1)
    """
    name = "allprop"

class PropertyName (WebDAVEmptyElement):
    """
    Specifies that only a list of property names on the resource are to be
    returned. (RFC 2518, section 12.14.2)
    """
    name = "propname"

##
# Section 13
##

class CreationDate (WebDAVDateTimeElement):
    """
    Records the time and date that the resource was created.
    (RFC 2518, section 13.1)
    """
    name = "creationdate"
    # MAY be protected as per RFC2518bis. We may make this more flexible later.
    protected = True

class DisplayName (WebDAVTextElement):
    """
    Provides a name for the resource that is suitable for presentation to a
    user. (RFC 2518, section 13.2)
    """
    name = "displayname"

class GETContentLanguage (WebDAVTextElement):
    """
    Contains the Content-Language header returned by a GET without accept
    headers. (RFC 2518, section 13.3)
    """
    name = "getcontentlanguage"

class GETContentLength (WebDAVTextElement):
    """
    Contains the Content-Length header returned by a GET without accept
    headers. (RFC 2518, section 13.4)
    """
    name = "getcontentlength"
    protected = True

class GETContentType (WebDAVTextElement):
    """
    Contains the Content-Type header returned by a GET without accept headers.
    (RFC 2518, section 13.5)
    """
    name = "getcontenttype"

    def mimeType(self):
        """Return the element's text parsed as a MimeType."""
        return MimeType.fromString(str(self))

class GETETag (WebDAVTextElement):
    """
    Contains the ETag header returned by a GET without accept headers.
    (RFC 2518, section 13.6)
    """
    name = "getetag"
    protected = True

class GETLastModified (DateTimeHeaderElement):
    """
    Contains the Last-Modified header returned by a GET without accept
    headers. (RFC 2518, section 13.7)
    """
    name = "getlastmodified"
    protected = True

class LockDiscovery (WebDAVElement):
    """
    Describes the active locks on a resource. (RFC 2518, section 13.8)
    """
    name = "lockdiscovery"
    protected = True

    allowed_children = { (dav_namespace, "activelock"): (0, None) }

class ResourceType (WebDAVElement):
    """
    Specifies the nature of the resource. (RFC 2518, section 13.9)
    """
    name = "resourcetype"
    protected = True

    allowed_children = { WebDAVElement: (0, None) }

# Common shared instances.
ResourceType.collection = ResourceType(Collection())
ResourceType.empty      = ResourceType()

class Source (WebDAVElement):
    """
    The destination of the source link identifies the resource that contains
    the unprocessed source of the link's source. (RFC 2518, section 13.10)
    """
    name = "source"

    allowed_children = { (dav_namespace, "link"): (0, None) }

class SupportedLock (WebDAVElement):
    """
    Provides a listing of the lock capabilities supported by the resource.
    (RFC 2518, section 13.11)
    """
    name = "supportedlock"
    protected = True

    allowed_children = { (dav_namespace, "lockentry"): (0, None) }
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/element/rfc2518.py
rfc2518.py
__all__ = [
    "dav_namespace",
    "WebDAVElement",
    "PCDATAElement",
    "WebDAVOneShotElement",
    "WebDAVUnknownElement",
    "WebDAVEmptyElement",
    "WebDAVTextElement",
    "WebDAVDateTimeElement",
    "DateTimeHeaderElement",
]

import string
import StringIO
import xml.dom.minidom
import datetime
import re

from twisted.python import log
from twisted.web2.http_headers import parseDateTime
from twisted.web2.dav.element.util import PrintXML, decodeXMLName

##
# Base XML elements
##

dav_namespace = "DAV:"

class WebDAVElement (object):
    """
    WebDAV XML element. (RFC 2518, section 12)
    """
    namespace          = dav_namespace # Element namespace (class variable)
    name               = None          # Element name (class variable)
    allowed_children   = None          # Types & count limits on child elements
    allowed_attributes = None          # Allowed attribute names
    hidden             = False         # Don't list in PROPFIND with <allprop>
    protected          = False         # See RFC 3253 section 1.4.1
    unregistered       = False         # Subclass of factory; doesn't register

    def qname(self):
        """Return the (namespace, name) qualified-name tuple."""
        return (self.namespace, self.name)

    def sname(self):
        """Return the Clark-notation name, e.g. "{DAV:}href"."""
        return "{%s}%s" % (self.namespace, self.name)

    qname = classmethod(qname)
    sname = classmethod(sname)

    def __init__(self, *children, **attributes):
        """
        Validate and store children and attributes.

        Children may be WebDAVElement or PCDATAElement instances; bare
        strings are wrapped in PCDATAElement.  Children and attributes not
        allowed by the class declaration are logged and dropped; missing
        required children or attributes raise ValueError.
        """
        super(WebDAVElement, self).__init__()

        if self.allowed_children is None:
            raise NotImplementedError("WebDAVElement subclass %s is not implemented."
                                      % (self.__class__.__name__,))

        #
        # Validate that children are of acceptable types.  Work on a mutable
        # copy of the declared limits, decrementing counts as children match.
        #
        allowed_children = dict([
            (child_type, list(limits))
            for child_type, limits in self.allowed_children.items()
        ])

        my_children = []

        for child in children:
            if child is None:
                continue

            if isinstance(child, (str, unicode)):
                child = PCDATAElement(child)

            assert isinstance(child, (WebDAVElement, PCDATAElement)), \
                "Not an element: %r" % (child,)

            # A child may match either by class (key is a type) or by
            # qualified name (key is a (namespace, name) tuple).
            for allowed, (min_count, max_count) in allowed_children.items():
                if isinstance(allowed, type) and isinstance(child, allowed):
                    matched = allowed
                elif child.qname() == allowed:
                    matched = allowed
                else:
                    continue

                if min_count is not None and min_count > 0:
                    min_count -= 1
                if max_count is not None:
                    assert max_count > 0, "Too many children of type %s for %s" \
                        % (child.sname(), self.sname())
                    max_count -= 1
                allowed_children[matched] = (min_count, max_count)
                my_children.append(child)
                break
            else:
                # Whitespace between elements is expected; anything else
                # unexpected is logged and ignored.
                if not (isinstance(child, PCDATAElement) and child.isWhitespace()):
                    log.msg("Child of type %s is unexpected and therefore ignored in %s element"
                            % (child.sname(), self.sname()))

        # Any remaining positive minimum means a required child was missing.
        for qname, (min_count, max_count) in allowed_children.items():
            if min_count != 0:
                raise ValueError("Not enough children of type {%s}%s for %s"
                                 % (qname[0], qname[1], self.sname()))

        self.children = tuple(my_children)

        #
        # Validate that attributes have known names.
        #
        my_attributes = {}

        if self.allowed_attributes:
            for name in attributes:
                if name in self.allowed_attributes:
                    my_attributes[name] = attributes[name]
                else:
                    log.msg("Attribute %s is unexpected and therefore ignored in %s element"
                            % (name, self.sname()))

            for name, required in self.allowed_attributes.items():
                if required and name not in my_attributes:
                    raise ValueError("Attribute %s is required in %s element"
                                     % (name, self.sname()))

        elif not isinstance(self, WebDAVUnknownElement):
            if attributes:
                log.msg("Attributes %s are unexpected and therefore ignored in %s element"
                        % (attributes.keys(), self.sname()))

        self.attributes = my_attributes

    def __str__(self):
        return self.sname()

    def __repr__(self):
        if hasattr(self, "attributes") and hasattr(self, "children"):
            return "<%s %r: %r>" % (self.sname(), self.attributes, self.children)
        else:
            return "<%s>" % (self.sname())

    def __eq__(self, other):
        if isinstance(other, WebDAVElement):
            return (
                self.name       == other.name       and
                self.namespace  == other.namespace  and
                self.attributes == other.attributes and
                self.children   == other.children
            )
        else:
            return NotImplemented

    def __ne__(self, other):
        return not self.__eq__(other)

    def __contains__(self, child):
        return child in self.children

    def writeXML(self, output):
        """Serialize this element as an XML document to the output stream."""
        document = xml.dom.minidom.Document()
        self.addToDOM(document, None)
        PrintXML(document, stream=output)

    def toxml(self):
        """Return this element serialized as an XML document string."""
        output = StringIO.StringIO()
        self.writeXML(output)
        return output.getvalue()

    def element(self, document):
        """Create (but do not attach) the DOM element for this node."""
        element = document.createElementNS(self.namespace, self.name)
        if hasattr(self, "attributes"):
            for name, value in self.attributes.items():
                namespace, name = decodeXMLName(name)
                attribute = document.createAttributeNS(namespace, name)
                attribute.nodeValue = value
                element.setAttributeNodeNS(attribute)
        return element

    def addToDOM(self, document, parent):
        """Attach this element (and, recursively, its children) to the DOM."""
        element = self.element(document)

        if parent is None:
            document.appendChild(element)
        else:
            parent.appendChild(element)

        for child in self.children:
            if child:
                try:
                    child.addToDOM(document, element)
                except:
                    log.err("Unable to add child %r of element %s to DOM"
                            % (child, self))
                    raise

    def childrenOfType(self, child_type):
        """
        Returns a list of children with the same qname as the given type.
        """
        if type(child_type) is tuple:
            qname = child_type
        else:
            qname = child_type.qname()

        return [c for c in self.children if c.qname() == qname]

    def childOfType(self, child_type):
        """
        Returns a child of the given type, if any, or None.
        Raises ValueError if more than one is found.
        """
        found = None
        for child in self.childrenOfType(child_type):
            if found:
                raise ValueError("Multiple %s elements found in %s"
                                 % (child_type.sname(), self.toxml()))
            found = child
        return found

class PCDATAElement (object):
    """
    Pseudo-element holding character data.
    """
    def sname(self):
        return "#PCDATA"

    qname = classmethod(sname)
    sname = classmethod(sname)

    def __init__(self, data):
        super(PCDATAElement, self).__init__()

        # Normalize to a UTF-8 byte string.
        if data is None:
            data = ""
        elif type(data) is unicode:
            data = data.encode("utf-8")
        else:
            assert type(data) is str, ("PCDATA must be a string: %r" % (data,))

        self.data = data

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.data)

    def __add__(self, other):
        if isinstance(other, PCDATAElement):
            return self.__class__(self.data + other.data)
        else:
            return self.__class__(self.data + other)

    def __eq__(self, other):
        if isinstance(other, PCDATAElement):
            return self.data == other.data
        elif type(other) in (str, unicode):
            return self.data == other
        else:
            return NotImplemented

    def __ne__(self, other):
        return not self.__eq__(other)

    def isWhitespace(self):
        """Return True if the data is empty or all whitespace."""
        for char in str(self):
            if char not in string.whitespace:
                return False
        return True

    def element(self, document):
        return document.createTextNode(self.data)

    def addToDOM(self, document, parent):
        try:
            parent.appendChild(self.element(document))
        except TypeError:
            log.err("Invalid PCDATA: %r" % (self.data,))
            raise

class WebDAVOneShotElement (WebDAVElement):
    """
    Element with exactly one WebDAVEmptyElement child and no attributes.

    Instances are interned: constructing the same (class, child) pair twice
    returns the same object.
    """
    __singletons = {}

    def __new__(clazz, *children):
        child = None
        for candidate in children:
            if isinstance(candidate, WebDAVEmptyElement):
                if child is not None:
                    raise ValueError("%s must have exactly one child, not %r"
                                     % (clazz.__name__, children))
                child = candidate
            elif isinstance(candidate, PCDATAElement):
                pass
            else:
                raise ValueError("%s child is not a WebDAVEmptyElement instance: %s"
                                 % (clazz.__name__, candidate))

        # Fixed: object.__new__ must not be given extra arguments (the
        # original passed the children tuple, a TypeError on Python >= 2.6).
        if clazz not in WebDAVOneShotElement.__singletons:
            WebDAVOneShotElement.__singletons[clazz] = {
                child: WebDAVElement.__new__(clazz)
            }
        elif child not in WebDAVOneShotElement.__singletons[clazz]:
            WebDAVOneShotElement.__singletons[clazz][child] = (
                WebDAVElement.__new__(clazz)
            )

        return WebDAVOneShotElement.__singletons[clazz][child]

class WebDAVUnknownElement (WebDAVElement):
    """
    Placeholder for unknown element tag names.
    """
    allowed_children = {
        WebDAVElement: (0, None),
        PCDATAElement: (0, None),
    }

class WebDAVEmptyElement (WebDAVElement):
    """
    WebDAV element with no contents.

    Attribute-less instances are interned per class.
    """
    __singletons = {}

    def __new__(clazz, *args, **kwargs):
        assert not args

        if kwargs:
            # Attribute-carrying instances are not interned.
            # Fixed: object.__new__ must not be given keyword arguments
            # (a TypeError on Python >= 2.6).
            return WebDAVElement.__new__(clazz)
        else:
            if clazz not in WebDAVEmptyElement.__singletons:
                WebDAVEmptyElement.__singletons[clazz] = (WebDAVElement.__new__(clazz))
            return WebDAVEmptyElement.__singletons[clazz]

    allowed_children = {}
    children = ()

class WebDAVTextElement (WebDAVElement):
    """
    WebDAV element containing PCDATA.
    """
    def fromString(clazz, string):
        """Build an instance from a string (or any str()-able value)."""
        if string is None:
            return clazz()
        elif isinstance(string, (str, unicode)):
            return clazz(PCDATAElement(string))
        else:
            return clazz(PCDATAElement(str(string)))

    fromString = classmethod(fromString)

    allowed_children = { PCDATAElement: (0, None) }

    def __str__(self):
        return "".join([c.data for c in self.children])

    def __repr__(self):
        content = str(self)
        if content:
            return "<%s: %r>" % (self.sname(), content)
        else:
            return "<%s>" % (self.sname(),)

    def __eq__(self, other):
        if isinstance(other, WebDAVTextElement):
            return str(self) == str(other)
        elif type(other) in (str, unicode):
            return str(self) == other
        else:
            return NotImplemented

class WebDAVDateTimeElement (WebDAVTextElement):
    """
    WebDAV date-time element. (RFC 2518, section 23.2)
    """
    def fromDate(clazz, date):
        """
        date may be a datetime.datetime instance, a POSIX timestamp (integer
        value, such as returned by time.time()), or an ISO 8601-formatted
        (eg. "2005-06-13T16:14:11Z") date/time string.
        """
        def isoformat(date):
            if date.utcoffset() is None:
                return date.isoformat() + "Z"
            else:
                return date.isoformat()

        if type(date) is int:
            date = isoformat(datetime.datetime.fromtimestamp(date))
        elif type(date) is str:
            pass
        elif type(date) is unicode:
            date = date.encode("utf-8")
        elif isinstance(date, datetime.datetime):
            date = isoformat(date)
        else:
            raise ValueError("Unknown date type: %r" % (date,))

        return clazz(PCDATAElement(date))

    fromDate = classmethod(fromDate)

    def __init__(self, *children, **attributes):
        super(WebDAVDateTimeElement, self).__init__(*children, **attributes)
        self.datetime() # Raise ValueError if the format is wrong

    def __eq__(self, other):
        if isinstance(other, WebDAVDateTimeElement):
            return self.datetime() == other.datetime()
        else:
            return NotImplemented

    def datetime(self):
        """Return the content parsed as a datetime, or None if empty."""
        s = str(self)
        if not s:
            return None
        else:
            return parse_date(s)

class DateTimeHeaderElement (WebDAVTextElement):
    """
    WebDAV date-time element for elements that substitute for HTTP headers.
    (RFC 2068, section 3.3.1)
    """
    def fromDate(clazz, date):
        """
        date may be a datetime.datetime instance, a POSIX timestamp (integer
        value, such as returned by time.time()), or an RFC 2068 Full Date
        (eg. "Mon, 23 May 2005 04:52:22 GMT") string.
        """
        def format(date):
            #
            # FIXME: strftime() is subject to localization nonsense; we need to
            # ensure that we're using the correct localization, or don't use
            # strftime().
            #
            return date.strftime("%a, %d %b %Y %H:%M:%S GMT")

        if type(date) is int:
            date = format(datetime.datetime.fromtimestamp(date))
        elif type(date) is str:
            pass
        elif type(date) is unicode:
            date = date.encode("utf-8")
        elif isinstance(date, datetime.datetime):
            if date.tzinfo:
                raise NotImplementedError("I need to normalize to UTC")
            date = format(date)
        else:
            raise ValueError("Unknown date type: %r" % (date,))

        return clazz(PCDATAElement(date))

    fromDate = classmethod(fromDate)

    def __init__(self, *children, **attributes):
        super(DateTimeHeaderElement, self).__init__(*children, **attributes)
        self.datetime() # Raise ValueError if the format is wrong

    def __eq__(self, other):
        # Fixed: previously only accepted WebDAVDateTimeElement, so two
        # DateTimeHeaderElement instances never compared by value.
        if isinstance(other, (DateTimeHeaderElement, WebDAVDateTimeElement)):
            return self.datetime() == other.datetime()
        else:
            return NotImplemented

    def datetime(self):
        """Return the content parsed as an HTTP date, or None if empty."""
        s = str(self)
        if not s:
            return None
        else:
            return parseDateTime(s)

##
# Utilities
##

class FixedOffset (datetime.tzinfo):
    """
    Fixed offset in minutes east from UTC.
    """
    def __init__(self, offset, name=None):
        super(FixedOffset, self).__init__()

        self._offset = datetime.timedelta(minutes=offset)
        self._name   = name

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        return self._name

    def dst(self, dt):
        return datetime.timedelta(0)

def parse_date(date):
    """
    Parse an ISO 8601 date and return a corresponding datetime.datetime
    object.  Raises ValueError on malformed input.
    """
    # See http://www.iso.org/iso/en/prods-services/popstds/datesandtime.html

    global regex_date

    if regex_date is None:
        # Fixed: the subsecond group previously used an unescaped "." (any
        # character) and "*" (repeatable) where "\." and "?" were intended.
        regex_date = re.compile(
            "^"
            r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T"
            r"(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})"
            r"(?:\.(?P<subsecond>\d+))?"
            r"(?:Z|(?P<offset_sign>\+|-)(?P<offset_hour>\d{2}):(?P<offset_minute>\d{2}))"
            "$"
        )

    match = regex_date.match(date)
    if match is None:
        raise ValueError("Invalid ISO 8601 date format: %r" % (date,))

    subsecond = match.group("subsecond")
    if subsecond is None:
        subsecond = 0
    else:
        # NOTE(review): the digits are taken verbatim as microseconds, so
        # ".5" yields 5us rather than 500000us.  Preserved as-is — confirm
        # against callers before changing.
        subsecond = int(subsecond)

    offset_sign = match.group("offset_sign")
    if offset_sign is None:
        offset = FixedOffset(0)
    else:
        offset_hour   = int(match.group("offset_hour"))
        offset_minute = int(match.group("offset_minute"))

        delta = (offset_hour * 60) + offset_minute

        # A "+hh:mm" suffix means the local time is east of (ahead of) UTC,
        # so the tzinfo offset is +delta.  Fixed: the signs were inverted.
        if offset_sign == "+":
            offset = FixedOffset(delta)
        else:
            offset = FixedOffset(-delta)

    return datetime.datetime(
        int(match.group("year"  )),
        int(match.group("month" )),
        int(match.group("day"   )),
        int(match.group("hour"  )),
        int(match.group("minute")),
        int(match.group("second")),
        subsecond,
        offset,
    )

# Lazily-compiled pattern used by parse_date().
regex_date = None
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/element/base.py
base.py
"""Some Helper functions: 4DOM/PyXML-specific Extensions to the DOM, and DOM-related utilities.""" __all__ = [ "Print", "PrettyPrint" ] import sys,string import re from xml.dom import Node from xml.dom import XML_NAMESPACE, XMLNS_NAMESPACE, DOMException def Print(root, stream=sys.stdout, encoding='UTF-8'): if not hasattr(root, "nodeType"): return nss = SeekNss(root) visitor = PrintVisitor(stream, encoding, nsHints=nss) PrintWalker(visitor, root).run() return def PrettyPrint(root, stream=sys.stdout, encoding='UTF-8', indent=' ', preserveElements=None): if not hasattr(root, "nodeType"): return nss_hints = SeekNss(root) preserveElements = preserveElements or [] owner_doc = root.ownerDocument or root if hasattr(owner_doc, 'getElementsByName'): #We don't want to insert any whitespace into HTML inline elements preserveElements = preserveElements + HTML_4_TRANSITIONAL_INLINE visitor = PrintVisitor(stream, encoding, indent, preserveElements, nss_hints) PrintWalker(visitor, root).run() stream.write('\n') return def GetAllNs(node): #The xml namespace is implicit nss = {'xml': XML_NAMESPACE} if node.nodeType == Node.ATTRIBUTE_NODE and node.ownerElement: return GetAllNs(node.ownerElement) if node.nodeType == Node.ELEMENT_NODE: if node.namespaceURI: nss[node.prefix] = node.namespaceURI for attr in node.attributes.values(): if attr.namespaceURI == XMLNS_NAMESPACE: if attr.localName == 'xmlns': nss[None] = attr.value else: nss[attr.localName] = attr.value elif attr.namespaceURI: nss[attr.prefix] = attr.namespaceURI if node.parentNode: #Inner NS/Prefix mappings take precedence over outer ones parent_nss = GetAllNs(node.parentNode) parent_nss.update(nss) nss = parent_nss return nss def SeekNss(node, nss=None): '''traverses the tree to seek an approximate set of defined namespaces''' nss = nss or {} for child in node.childNodes: if child.nodeType == Node.ELEMENT_NODE: if child.namespaceURI: nss[child.prefix] = child.namespaceURI for attr in child.attributes.values(): if 
# NOTE(review): the fragment below is the tail of SeekNss(), whose definition
# begins before this chunk; code left untouched.
             attr.namespaceURI == XMLNS_NAMESPACE:
                if attr.localName == 'xmlns':
                    nss[None] = attr.value
                else:
                    nss[attr.localName] = attr.value
            elif attr.namespaceURI:
                nss[attr.prefix] = attr.namespaceURI
        SeekNss(child, nss)
    return nss


class PrintVisitor:
    """
    DOM visitor which serializes a node tree to a stream as (X)HTML/XML.

    State tracked across the walk: a stack of in-scope namespace mappings,
    the current pretty-print depth, and whether the last thing written was
    text (which suppresses indentation).
    """
    def __init__(self, stream, encoding, indent='', plainElements=None,
                 nsHints=None, isXhtml=0, force8bit=0):
        self.stream = stream
        self.encoding = encoding
        # Namespaces: stack of prefix -> URI dicts, one frame per open element
        self._namespaces = [{}]
        self._nsHints = nsHints or {}
        # PrettyPrint
        self._indent = indent
        self._depth = 0
        self._inText = 0
        self._plainElements = plainElements or []
        # HTML support: None means "not yet detected" (see visit())
        self._html = None
        self._isXhtml = isXhtml
        self.force8bit = force8bit
        return

    def _write(self, text):
        # All output funnels through here so encoding is applied uniformly.
        if self.force8bit:
            obj = strobj_to_utf8str(text, self.encoding)
        else:
            obj = utf8_to_code(text, self.encoding)
        self.stream.write(obj)
        return

    def _tryIndent(self):
        # Emit a newline + indentation, but never inside mixed text content.
        if not self._inText and self._indent:
            self._write('\n' + self._indent*self._depth)
        return

    def visit(self, node):
        """Dispatch on nodeType to the matching visitXxx method."""
        if self._html is None:
            # Set HTMLDocument flag here for speed
            self._html = hasattr(node.ownerDocument, 'getElementsByName')

        nodeType = node.nodeType  # NOTE(review): computed but unused below
        if node.nodeType == Node.ELEMENT_NODE:
            return self.visitElement(node)
        elif node.nodeType == Node.ATTRIBUTE_NODE:
            return self.visitAttr(node)
        elif node.nodeType == Node.TEXT_NODE:
            return self.visitText(node)
        elif node.nodeType == Node.CDATA_SECTION_NODE:
            return self.visitCDATASection(node)
        elif node.nodeType == Node.ENTITY_REFERENCE_NODE:
            return self.visitEntityReference(node)
        elif node.nodeType == Node.ENTITY_NODE:
            return self.visitEntity(node)
        elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
            return self.visitProcessingInstruction(node)
        elif node.nodeType == Node.COMMENT_NODE:
            return self.visitComment(node)
        elif node.nodeType == Node.DOCUMENT_NODE:
            return self.visitDocument(node)
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            return self.visitDocumentType(node)
        elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
            return self.visitDocumentFragment(node)
        elif node.nodeType == Node.NOTATION_NODE:
            return self.visitNotation(node)

        # It has a node type, but we don't know how to handle it
        raise Exception("Unknown node type: %s" % repr(node))

    def visitNodeList(self, node, exclude=None):
        # Visit each child; `exclude` lets visitDocument skip the doctype,
        # which it prints separately.
        for curr in node:
            curr is not exclude and self.visit(curr)
        return

    def visitNamedNodeMap(self, node):
        for item in node.values():
            self.visit(item)
        return

    def visitAttr(self, node):
        if node.namespaceURI == XMLNS_NAMESPACE:
            # Skip namespace declarations
            return
        self._write(' ' + node.name)
        value = node.value
        # In HTML mode, a valueless attribute is printed bare (e.g. "checked")
        if value or not self._html:
            text = TranslateCdata(value, self.encoding)
            text, delimiter = TranslateCdataAttr(text)
            # NOTE(review): writes straight to the stream, bypassing _write's
            # encoding step — presumably intentional since TranslateCdata
            # already encoded; confirm before changing.
            self.stream.write("=%s%s%s" % (delimiter, text, delimiter))
        return

    def visitProlog(self):
        self._write("<?xml version='1.0' encoding='%s'?>" % (
            self.encoding or 'utf-8'
            ))
        self._inText = 0
        return

    def visitDocument(self, node):
        not self._html and self.visitProlog()
        node.doctype and self.visitDocumentType(node.doctype)
        self.visitNodeList(node.childNodes, exclude=node.doctype)
        return

    def visitDocumentFragment(self, node):
        self.visitNodeList(node.childNodes)
        return

    def visitElement(self, node):
        # Push a copy of the namespace scope for this element's subtree.
        self._namespaces.append(self._namespaces[-1].copy())
        inline = node.tagName in self._plainElements
        not inline and self._tryIndent()
        self._write('<%s' % node.tagName)
        if self._isXhtml or not self._html:
            namespaces = ''
            if self._isXhtml:
                nss = {'xml': XML_NAMESPACE, None: XHTML_NAMESPACE}
            else:
                nss = GetAllNs(node)
            if self._nsHints:
                self._nsHints.update(nss)
                nss = self._nsHints
                self._nsHints = {}
            # The 'xml' prefix is implicit and must never be declared.
            del nss['xml']
            for prefix in nss.keys():
                # Only declare namespaces new or changed in this scope.
                if not self._namespaces[-1].has_key(prefix) or self._namespaces[-1][prefix] != nss[prefix]:
                    nsuri, delimiter = TranslateCdataAttr(nss[prefix])
                    if prefix:
                        xmlns = " xmlns:%s=%s%s%s" % (prefix, delimiter,nsuri,delimiter)
                    else:
                        xmlns = " xmlns=%s%s%s" % (delimiter,nsuri,delimiter)
                    namespaces = namespaces + xmlns

                self._namespaces[-1][prefix] = nss[prefix]
            self._write(namespaces)
        for attr in node.attributes.values():
            self.visitAttr(attr)
        if len(node.childNodes):
            self._write('>')
            self._depth = self._depth + 1
            self.visitNodeList(node.childNodes)
            self._depth = self._depth - 1
            if not self._html or (node.tagName not in HTML_FORBIDDEN_END):
                not (self._inText and inline) and self._tryIndent()
                self._write('</%s>' % node.tagName)
        elif not self._html:
            self._write('/>')
        elif node.tagName not in HTML_FORBIDDEN_END:
            # Empty HTML element that still requires a close tag.
            self._write('></%s>' % node.tagName)
        else:
            self._write('>')
        del self._namespaces[-1]
        self._inText = 0
        return

    def visitText(self, node):
        text = node.data
        if self._indent:
            # When pretty-printing, collapse whitespace-only text to ''.
            text = string.strip(text) and text
        if text:
            if self._html:
                text = TranslateHtmlCdata(text, self.encoding)
            else:
                text = TranslateCdata(text, self.encoding)
            self.stream.write(text)
            self._inText = 1
        return

    def visitDocumentType(self, doctype):
        if not doctype.systemId and not doctype.publicId: return
        self._tryIndent()
        self._write('<!DOCTYPE %s' % doctype.name)
        # Pick the quote style that does not collide with the id's contents.
        if doctype.systemId and '"' in doctype.systemId:
            system = "'%s'" % doctype.systemId
        else:
            system = '"%s"' % doctype.systemId
        if doctype.publicId and '"' in doctype.publicId:
            # We should probably throw an error
            # Valid characters:  <space> | <newline> | <linefeed> |
            #                    [a-zA-Z0-9] | [-'()+,./:=?;!*#@$_%]
            public = "'%s'" % doctype.publicId
        else:
            public = '"%s"' % doctype.publicId
        if doctype.publicId and doctype.systemId:
            self._write(' PUBLIC %s %s' % (public, system))
        elif doctype.systemId:
            self._write(' SYSTEM %s' % system)
        if doctype.entities or doctype.notations:
            self._write(' [')
            self._depth = self._depth + 1
            self.visitNamedNodeMap(doctype.entities)
            self.visitNamedNodeMap(doctype.notations)
            self._depth = self._depth - 1
            self._tryIndent()
            self._write(']>')
        else:
            self._write('>')
        self._inText = 0
        return

    def visitEntity(self, node):
        """Visited from a NamedNodeMap in DocumentType"""
        self._tryIndent()
        self._write('<!ENTITY %s' % (node.nodeName))
        node.publicId and self._write(' PUBLIC %s' % node.publicId)
        node.systemId and self._write(' SYSTEM %s' % node.systemId)
        node.notationName and self._write(' NDATA %s' % node.notationName)
        self._write('>')
        return

    def visitNotation(self, node):
        """Visited from a NamedNodeMap in DocumentType"""
        self._tryIndent()
        self._write('<!NOTATION %s' % node.nodeName)
        node.publicId and self._write(' PUBLIC %s' % node.publicId)
        node.systemId and self._write(' SYSTEM %s' % node.systemId)
        self._write('>')
        return

    def visitCDATASection(self, node):
        self._tryIndent()
        self._write('<![CDATA[%s]]>' % (node.data))
        self._inText = 0
        return

    def visitComment(self, node):
        self._tryIndent()
        self._write('<!--%s-->' % (node.data))
        self._inText = 0
        return

    def visitEntityReference(self, node):
        self._write('&%s;' % node.nodeName)
        self._inText = 1
        return

    def visitProcessingInstruction(self, node):
        self._tryIndent()
        self._write('<?%s %s?>' % (node.target, node.data))
        self._inText = 0
        return


class PrintWalker:
    """Trivial driver: hands the start node to a PrintVisitor."""
    def __init__(self, visitor, startNode):
        self.visitor = visitor
        self.start_node = startNode
        return

    def step(self):
        """There is really no step to printing.  It prints the whole thing"""
        self.visitor.visit(self.start_node)
        return

    def run(self):
        return self.step()


# Byte patterns (UTF-8 encoded) of characters illegal in XML output.
ILLEGAL_LOW_CHARS = '[\x01-\x08\x0B-\x0C\x0E-\x1F]'
SURROGATE_BLOCK = '[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]'
ILLEGAL_HIGH_CHARS = '\xEF\xBF[\xBE\xBF]'
#Note: Prolly fuzzy on this, but it looks as if characters from the
#surrogate block are allowed if in scalar form, which is encoded in UTF8 the
#same was as in surrogate block form
XML_ILLEGAL_CHAR_PATTERN = re.compile('%s|%s'%(ILLEGAL_LOW_CHARS, ILLEGAL_HIGH_CHARS))

g_utf8TwoBytePattern = re.compile('([\xC0-\xC3])([\x80-\xBF])')  # NOTE(review): unused in this chunk
g_cdataCharPattern = re.compile('[&<]|]]>')
g_charToEntity = {
    '&': '&amp;',
    '<': '&lt;',
    ']]>': ']]&gt;',
    }

# Slightly modified to not use types.Unicode
import codecs

def utf8_to_code(text, encoding):
    """Re-encode `text` (UTF-8 str or unicode) into the target encoding."""
    encoder = codecs.lookup(encoding)[0] # encode,decode,reader,writer
    if type(text) is not unicode:
        text = unicode(text, "utf-8")
    return encoder(text)[0] # result,size

def strobj_to_utf8str(text, encoding):
    """Like utf8_to_code but coerces the result to a byte str; only a
    whitelist of encodings is accepted."""
    if string.upper(encoding) not in ["UTF-8", "ISO-8859-1", "LATIN-1"]:
        raise ValueError("Invalid encoding: %s"%encoding)
    encoder = codecs.lookup(encoding)[0] # encode,decode,reader,writer
    if type(text) is not unicode:
        text = unicode(text, "utf-8")
    #FIXME
    return str(encoder(text)[0])

def TranslateCdataAttr(characters):
    '''Handles normalization and some intelligence about quoting'''
    if not characters:
        return '', "'"
    if "'" in characters:
        delimiter = '"'
        new_chars = re.sub('"', '&quot;', characters)
    else:
        delimiter = "'"
        new_chars = re.sub("'", '&apos;', characters)
    #FIXME: There's more to normalization
    #Convert attribute new-lines to character entity
    # characters is possibly shorter than new_chars (no entities)
    if "\n" in characters:
        new_chars = re.sub('\n', '&#10;', new_chars)
    return new_chars, delimiter

#Note: Unicode object only for now
def TranslateCdata(characters, encoding='UTF-8', prev_chars='', markupSafe=0,
                   charsetHandler=utf8_to_code):
    """
    charsetHandler is a function that takes a string or unicode object as the
    first argument, representing the string to be processed, and an encoding
    specifier as the second argument.  It must return a string or unicode
    object
    """
    if not characters:
        return ''
    if not markupSafe:
        # Entity-escape markup-significant characters (&, <, ]]>).
        if g_cdataCharPattern.search(characters):
            new_string = g_cdataCharPattern.subn(
                lambda m, d=g_charToEntity: d[m.group()],
                characters)[0]
        else:
            new_string = characters
        # A ']]>' split across two calls must still be escaped.
        if prev_chars[-2:] == ']]' and characters[0] == '>':
            new_string = '&gt;' + new_string[1:]
    else:
        new_string = characters
    #Note: use decimal char entity rep because some browsers are broken
    #FIXME: This will bomb for high characters.  Should, for instance, detect
    #The UTF-8 for 0xFFFE and put out &#xFFFE;
    if XML_ILLEGAL_CHAR_PATTERN.search(new_string):
        new_string = XML_ILLEGAL_CHAR_PATTERN.subn(
            lambda m: '&#%i;' % ord(m.group()),
            new_string)[0]
    new_string = charsetHandler(new_string, encoding)
    return new_string
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/element/xmlext.py
xmlext.py
from twisted.web2.dav.element.base import *

# Each class below is a declarative description of one RFC 3253 XML element.
# Class attributes have the meanings defined by WebDAVElement:
#   name               -- the element's local name
#   hidden             -- property not returned by an allprop PROPFIND
#   protected          -- property may not be modified via PROPPATCH
#   allowed_children   -- {child qname or class: (min, max)} occurrence map
#   allowed_attributes -- {attribute name: required?}

##
# Section 1
##

class Error (WebDAVElement):
    """
    Specifies an error condition. (RFC 3253, section 1.6)
    """
    # FIXME: RFC 3253 doesn't quite seem to define this element...
    # FIXME: Move when we update to RFC 2518bis
    name = "error"

    allowed_children = { WebDAVElement: (0, None) }

##
# Section 3
##

class Comment (WebDAVTextElement):
    """
    Property used to track a brief comment about a resource that is suitable
    for presentation to a user. On a version, can be used to indicate why that
    version was created. (RFC 3253, section 3.1.1)
    """
    name = "comment"
    hidden = True

class CreatorDisplayName (WebDAVTextElement):
    """
    Property which contains a description of the creator of the resource that
    is suitable for presentation to a user. (RFC 3253, section 3.1.2)
    """
    name = "creator-displayname"
    hidden = True

class SupportedMethod (WebDAVElement):
    """
    Property which identifies a method that is supported by a resource. A
    method is supported by a resource if there is some state of that resource
    for which an application of that method will successfully satisfy all
    postconditions of that method, including any additional postconditions
    added by the features supported by that resource.
    (RFC 3253, section 3.1.3)
    """
    name = "supported-method"
    hidden = True

    allowed_children = { WebDAVElement: (0, None) }
    allowed_attributes = { "name": True }

class SupportedMethodSet (WebDAVElement):
    """
    Property which identifies the methods that are supported by a resource.
    (RFC 3253, section 3.1.3)
    """
    name = "supported-method-set"
    protected = True
    hidden = True

    allowed_children = { (dav_namespace, "supported-method"): (0, None) }

class SupportedLiveProperty (WebDAVElement):
    """
    Property which identifies a live property that is supported by a resource.
    A live property is supported by a resource if that property has the
    semantics defined for that property.
    The value of this property must identify all live properties defined by
    this document that are supported by the resource and should identify all
    live properties that are supported by the resource.
    (RFC 3253, section 3.1.4)
    """
    name = "supported-live-property"

    # FIXME: Where is the name element defined?
    allowed_children = { (dav_namespace, "name"): (1, 1) }

class SupportedLivePropertySet (WebDAVElement):
    """
    Property which identifies the live properties that are supported by a
    resource. (RFC 3253, section 3.1.4)
    """
    name = "supported-live-property-set"
    hidden = True
    protected = True

    allowed_children = { (dav_namespace, "supported-live-property"): (0, None) }

class Report (WebDAVElement):
    """
    A report. (RFC 3253, section 3.1.5)
    """
    # FIXME: Section 3.1.5 is pretty low on information.  Where else do we look?
    name = "report"

    allowed_children = { WebDAVElement: (0, None) }

class SupportedReport (WebDAVElement):
    """
    Identifies a report that is supported by the resource.
    (RFC 3253, section 3.1.5)
    """
    name = "supported-report"

    #
    # FIXME:
    #
    # RFC 3253, section 3.1.5 defines supported-report as:
    #
    #   <!ELEMENT supported-report report>
    #
    # Which means that a report child element is required.  However, section
    # 3.6 defined a precondition with the same name (DAV:supported-report),
    # which means that, according to section 1.6.1, this XML must be issued if
    # the precondition fails:
    #
    #   <?xml version="1.0"?>
    #   <D:error xmlns:D="DAV:">
    #     <D:supported-report/>
    #   </D:error>
    #
    # Which is a problem because here we use supported-report with no
    # children.
    #
    # Absent any better guidance, we'll allow no children for this element for
    # the time being.
    #
    allowed_children = { (dav_namespace, "report"): (0, 1) }

class SupportedReportSet (WebDAVElement):
    """
    Property which identifies the reports that are supported by the resource.
    (RFC 3253, section 3.1.5)
    """
    name = "supported-report-set"
    hidden = True
    protected = True

    allowed_children = { (dav_namespace, "supported-report"): (0, None) }

class ExpandProperty (WebDAVElement):
    """
    Report which provides a mechanism for retrieving in one request the
    properties from resources identified by DAV:href property values.
    (RFC 3253, section 3.8)
    """
    name = "expand-property"

    allowed_children = { (dav_namespace, "property"): (0, None) }

class Property (WebDAVElement):
    """
    Identifies a property by name. (RFC 3253, section 3.8)
    Principal which matches a user if the value of the identified property of
    a resource contains at most one DAV:href element, the value of that
    element identifies a principal, and the user matches that principal.
    (RFC 3744, section 5.5.1)
    """
    name = "property"

    allowed_children = { (dav_namespace, "property"): (0, None) }
    allowed_attributes = {
        "name"      : True,
        "namespace" : False,
    }
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/element/rfc3253.py
rfc3253.py
__all__ = [ "registerElements", "WebDAVContentHandler", "WebDAVDocument", ] import StringIO import xml.dom.minidom import xml.sax from twisted.web2.dav.element.base import * from twisted.web2.dav.element.util import PrintXML, encodeXMLName ## # Parsing ## def registerElements(module): """ Register XML elements defined in the given module with the parser. """ element_names = [] for element_class_name in dir(module): element_class = getattr(module, element_class_name) if type(element_class) is type and issubclass(element_class, WebDAVElement): if element_class.namespace is None: continue if element_class.name is None: continue if element_class.unregistered: continue qname = element_class.namespace, element_class.name if qname in elements_by_tag_name: raise AssertionError( "Attempting to register qname %s multiple times: (%r, %r)" % (qname, elements_by_tag_name[qname], element_class) ) if not (qname in elements_by_tag_name and issubclass(element_class, elements_by_tag_name[qname])): elements_by_tag_name[qname] = element_class element_names.append(element_class.__name__) return element_names def lookupElement(qname): """ Return the element class for the element with the given qname. 
""" return elements_by_tag_name[qname] elements_by_tag_name = {} class WebDAVContentHandler (xml.sax.handler.ContentHandler): def setDocumentLocator(self, locator): self.locator = locator locator = None def location(self): return "line %d, column %d" % (self.locator.getLineNumber(), self.locator.getColumnNumber()) def startDocument(self): self.stack = [{ "name" : None, "class" : None, "attributes" : None, "children" : [], }] def endDocument(self): top = self.stack[-1] assert top["name"] is None assert top["class"] is None assert top["attributes"] is None assert len(top["children"]) is 1, "Must have exactly one root element, got %d" % len(top["children"]) self.dom = WebDAVDocument(top["children"][0]) def startElementNS(self, name, qname, attributes): attributes_dict = {} if attributes.getLength() is not 0: for attr_name in attributes.getNames(): attributes_dict[encodeXMLName(attr_name)] = attributes.getValue(attr_name) tag_namespace, tag_name = name if (name not in elements_by_tag_name): class UnknownElement (WebDAVUnknownElement): namespace = tag_namespace name = tag_name element_class = UnknownElement else: element_class = elements_by_tag_name[name] self.stack.append({ "name" : name, "class" : element_class, "attributes" : attributes_dict, "children" : [], }) def endElementNS(self, name, qname): # Pop the current element from the stack... top = self.stack[-1] del(self.stack[-1]) assert top["name"] == name, "Last item on stack is %s while closing %s" % (top["name"], name) # ...then instantiate the element and add it to the parent's list of # children. 
try: element = top["class"](*top["children"], **top["attributes"]) except ValueError, e: e.args = ("%s at %s" % (e.args[0], self.location()),) + e.args[1:] raise # Re-raises modified e, but preserves traceback self.stack[-1]["children"].append(element) def characters(self, content): self.stack[-1]["children"].append(PCDATAElement(content)) def ignorableWhitespace(self, whitespace): self.characters(self, whitespace) def startElement(self, name, attributes): raise AssertionError("startElement() should not be called by namespace-aware parser") def endElement(self, name): raise AssertionError("endElement() should not be called by namespace-aware parser") def processingInstruction(self, target, data): raise AssertionError("processing instructions are not allowed") def skippedEntity(self, name): raise AssertionError("skipped entities are not allowed") class WebDAVDocument (object): """ WebDAV XML document. """ def _parse(source_is_string): def parse(source): handler = WebDAVContentHandler() parser = xml.sax.make_parser() parser.setContentHandler(handler) parser.setFeature(xml.sax.handler.feature_namespaces, True) if source_is_string: source = StringIO.StringIO(source) try: parser.parse(source) except xml.sax.SAXParseException, e: raise ValueError(e) return handler.dom return parse fromStream = staticmethod(_parse(False)) fromString = staticmethod(_parse(True )) def __init__(self, root_element): """ root_element must be a WebDAVElement instance. 
""" super(WebDAVDocument, self).__init__() if not isinstance(root_element, WebDAVElement): raise ValueError("Not a WebDAVElement: %r" % (obj,)) self.root_element = root_element def __str__(self): output = StringIO.StringIO() self.writeXML(output) return output.getvalue() def __eq__(self, other): if isinstance(other, WebDAVDocument): return self.root_element == other.root_element else: return NotImplemented def writeXML(self, output): document = xml.dom.minidom.Document() self.root_element.addToDOM(document, None) PrintXML(document, stream=output) def toxml(self): output = StringIO.StringIO() self.writeXML(output) return output.getvalue()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/element/parser.py
parser.py
__all__ = ["http_COPY", "http_MOVE"]

from twisted.python import log
from twisted.web2 import responsecode
from twisted.web2.http import HTTPError, StatusResponse
from twisted.web2.filter.location import addLocation
from twisted.web2.dav.idav import IDAVResource
from twisted.web2.dav.fileop import copy, move

# FIXME: This is circular
import twisted.web2.dav.static

def http_COPY(self, request):
    """
    Respond to a COPY request. (RFC 2518, section 8.8)

    Returns a Deferred: prepareForCopy() validates the request and locates
    the destination, then doCopy() performs the filesystem copy.
    """
    def doCopy(r):
        # r is the (destination, destination_uri, depth) tuple produced by
        # _prepareForCopy.
        destination, destination_uri, depth = r

        # May need to add a location header
        addLocation(request, destination_uri)

        return copy(self.fp, destination.fp, destination_uri, depth)

    d = prepareForCopy(self, request)
    d.addCallback(doCopy)
    return d

def http_MOVE(self, request):
    """
    Respond to a MOVE request. (RFC 2518, section 8.9)

    Returns a Deferred, structured like http_COPY but additionally rejecting
    a non-"infinity" depth on a collection move.
    """
    def doMove(r):
        destination, destination_uri, depth = r

        #
        # RFC 2518, section 8.9 says that we must act as if the Depth header is set
        # to infinity, and that the client must omit the Depth header or set it to
        # infinity.
        #
        # This seems somewhat at odds with the notion that a bad request should be
        # rejected outright; if the client sends a bad depth header, the client is
        # broken, and section 8 suggests that a bad request should be rejected...
        #
        # Let's play it safe for now and ignore broken clients.
        #
        if self.fp.isdir() and depth != "infinity":
            msg = "Client sent illegal depth header value for MOVE: %s" % (depth,)
            log.err(msg)
            raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))

        # May need to add a location header
        addLocation(request, destination_uri)

        return move(self.fp, request.uri, destination.fp, destination_uri, depth)

    d = prepareForCopy(self, request)
    d.addCallback(doMove)
    return d

def prepareForCopy(self, request):
    """
    Shared COPY/MOVE validation: check the Depth header, that the source
    exists, and that a Destination header is present; then locate the
    destination resource.  Returns a Deferred firing with the result of
    _prepareForCopy.
    """
    #
    # Get the depth
    #

    depth = request.headers.getHeader("depth", "infinity")

    if depth not in ("0", "infinity"):
        msg = ("Client sent illegal depth header value: %s" % (depth,))
        log.err(msg)
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))

    #
    # Verify this resource exists
    #

    if not self.exists():
        log.err("File not found: %s" % (self.fp.path,))
        raise HTTPError(StatusResponse(
            responsecode.NOT_FOUND,
            "Source resource %s not found." % (request.uri,)
        ))

    #
    # Get the destination
    #

    destination_uri = request.headers.getHeader("destination")

    if not destination_uri:
        msg = "No destination header in %s request." % (request.method,)
        log.err(msg)
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))

    d = request.locateResource(destination_uri)
    d.addCallback(_prepareForCopy, destination_uri, request, depth)

    return d

def _prepareForCopy(destination, destination_uri, request, depth):
    """
    Validate the located destination resource and return the
    (destination, destination_uri, depth) tuple consumed by doCopy/doMove.
    """
    #
    # Destination must be a DAV resource
    #

    try:
        destination = IDAVResource(destination)
    except TypeError:
        log.err("Attempt to %s to a non-DAV resource: (%s) %s"
                % (request.method, destination.__class__, destination_uri))
        raise HTTPError(StatusResponse(
            responsecode.FORBIDDEN,
            "Destination %s is not a WebDAV resource." % (destination_uri,)
        ))

    #
    # FIXME: Right now we don't know how to copy to a non-DAVFile resource.
    # We may need some more API in IDAVResource.
    # So far, we need: .exists(), .fp.parent()
    #

    if not isinstance(destination, twisted.web2.dav.static.DAVFile):
        log.err("DAV copy between non-DAVFile DAV resources isn't implemented")
        raise HTTPError(StatusResponse(
            responsecode.NOT_IMPLEMENTED,
            "Destination %s is not a DAVFile resource." % (destination_uri,)
        ))

    #
    # Check for existing destination resource
    #

    overwrite = request.headers.getHeader("overwrite", True)

    if destination.exists() and not overwrite:
        log.err("Attempt to %s onto existing file without overwrite flag enabled: %s"
                % (request.method, destination.fp.path))
        raise HTTPError(StatusResponse(
            responsecode.PRECONDITION_FAILED,
            "Destination %s already exists." % (destination_uri,)
        ))

    #
    # Make sure destination's parent exists
    #

    if not destination.fp.parent().isdir():
        log.err("Attempt to %s to a resource with no parent: %s"
                % (request.method, destination.fp.path))
        raise HTTPError(StatusResponse(responsecode.CONFLICT, "No parent collection."))

    return destination, destination_uri, depth
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/method/copymove.py
copymove.py
__all__ = ["preconditions_PUT", "http_PUT"]

from twisted.python import log
from twisted.web2 import responsecode
from twisted.web2.http import HTTPError, StatusResponse
from twisted.web2.dav.fileop import put

def preconditions_PUT(self, request):
    """
    Verify that a PUT to this resource may proceed.

    @raise HTTPError: FORBIDDEN if the target exists but is not a regular
        file; CONFLICT if the parent collection does not exist;
        NOT_IMPLEMENTED if the request carries an unrecognized Content-*
        header.
    """
    # FIX: removed the dead local `resource_is_new`, which was assigned on
    # both branches and never read.
    if self.fp.exists():
        if not self.fp.isfile():
            log.err("Unable to PUT to non-file: %s" % (self.fp.path,))
            raise HTTPError(StatusResponse(
                responsecode.FORBIDDEN,
                "The requested resource exists but is not backed by a regular file."
            ))
    else:
        if not self.fp.parent().isdir():
            log.err("No such directory: %s" % (self.fp.path,))
            raise HTTPError(StatusResponse(
                responsecode.CONFLICT,
                "Parent collection resource does not exist."
            ))

    #
    # HTTP/1.1 (RFC 2068, section 9.6) requires that we respond with a Not
    # Implemented error if we get a Content-* header which we don't
    # recognize and handle properly.
    #
    recognized_content_headers = (
        #"Content-Base",     # Doesn't make sense in PUT?
        #"Content-Encoding", # Requires that we decode it?
        "Content-Language",
        "Content-Length",
        #"Content-Location", # Doesn't make sense in PUT?
        "Content-MD5",
        #"Content-Range",    # FIXME: Need to implement this
        "Content-Type",
    )
    for header, value in request.headers.getAllRawHeaders():
        if header.startswith("Content-") and header not in recognized_content_headers:
            log.err("Client sent unrecognized content header in PUT request: %s"
                    % (header,))
            raise HTTPError(StatusResponse(
                responsecode.NOT_IMPLEMENTED,
                "Unrecognized content header %r in request." % (header,)
            ))

def http_PUT(self, request):
    """
    Respond to a PUT request. (RFC 2518, section 8.7)

    Returns the Deferred from fileop.put(), which writes the request body
    stream to this resource's file.
    """
    log.msg("Writing request stream to %s" % (self.fp.path,))

    #
    # Don't pass in the request URI, since PUT isn't specified to be able
    # to return a MULTI_STATUS response, which is WebDAV-specific (and PUT is
    # not).
    #
    return put(request.stream, self.fp)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/method/put.py
put.py
__all__ = ["report_DAV__expand_property"]

from twisted.python import log
from twisted.python.failure import Failure
from twisted.internet.defer import deferredGenerator, waitForDeferred
from twisted.web2 import responsecode
from twisted.web2.dav import davxml
from twisted.web2.dav.http import statusForFailure
from twisted.web2.dav.davxml import dav_namespace

def report_DAV__expand_property(self, request, expand_property):
    """
    Generate an expand-property REPORT. (RFC 3253, section 3.8)

    NOTE(review): this generator is unfinished — every successful property
    read path ends in NotImplementedError, and the response assembly is
    commented out below.
    """
    # FIXME: Handle depth header

    if not isinstance(expand_property, davxml.ExpandProperty):
        raise ValueError("%s expected as root element, not %s."
                         % (davxml.ExpandProperty.sname(), expand_property.sname()))

    #
    # Expand DAV:allprop
    #
    properties = {}

    for property in expand_property.children:
        namespace = property.getAttribute("namespace")
        name      = property.getAttribute("name")

        if not namespace: namespace = dav_namespace

        if (namespace, name) == (dav_namespace, "allprop"):
            all_properties = waitForDeferred(self.listAllProp(request))
            yield all_properties
            all_properties = all_properties.getResult()

            for all_property in all_properties:
                # NOTE(review): maps every expanded qname to the *request*
                # element `property`, not to `all_property` — looks
                # suspicious; confirm intent before changing.
                properties[all_property.qname()] = property
        else:
            properties[(namespace, name)] = property

    #
    # Look up the requested properties
    #
    properties_by_status = {
        responsecode.OK        : [],
        responsecode.NOT_FOUND : [],
    }

    for property in properties:
        # NOTE(review): listProperties is re-queried on every iteration;
        # presumably it could be hoisted out of the loop — verify.
        my_properties = waitForDeferred(self.listProperties(request))
        yield my_properties
        my_properties = my_properties.getResult()

        if property in my_properties:
            try:
                value = waitForDeferred(self.readProperty(property, request))
                yield value
                value = value.getResult()

                if isinstance(value, davxml.HRef):
                    raise NotImplementedError()
                else:
                    raise NotImplementedError()
            except:
                f = Failure()

                log.err("Error reading property %r for resource %s: %s"
                        % (property, self, f.value))

                status = statusForFailure(f, "getting property: %s" % (property,))
                if status not in properties_by_status:
                    properties_by_status[status] = []
                raise NotImplementedError()
                #properties_by_status[status].append(
                #    ____propertyName(property)
                #)
        else:
            log.err("Can't find property %r for resource %s" % (property, self))
            properties_by_status[responsecode.NOT_FOUND].append(property)

    raise NotImplementedError()

report_DAV__expand_property = deferredGenerator(report_DAV__expand_property)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/method/report_expand.py
report_expand.py
__all__ = ["http_PROPFIND"] from twisted.python import log from twisted.python.failure import Failure from twisted.internet.defer import deferredGenerator, waitForDeferred from twisted.web2.http import HTTPError from twisted.web2 import responsecode from twisted.web2.http import StatusResponse from twisted.web2.dav import davxml from twisted.web2.dav.http import MultiStatusResponse, statusForFailure from twisted.web2.dav.util import normalizeURL, joinURL, davXMLFromStream def http_PROPFIND(self, request): """ Respond to a PROPFIND request. (RFC 2518, section 8.1) """ if not self.exists(): log.err("File not found: %s" % (self.fp.path,)) raise HTTPError(responsecode.NOT_FOUND) # # Read request body # try: doc = waitForDeferred(davXMLFromStream(request.stream)) yield doc doc = doc.getResult() except ValueError, e: log.err("Error while handling PROPFIND body: %s" % (e,)) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e))) if doc is None: # No request body means get all properties. 
search_properties = "all" else: # # Parse request # find = doc.root_element if not isinstance(find, davxml.PropertyFind): error = ("Non-%s element in PROPFIND request body: %s" % (davxml.PropertyFind.sname(), find)) log.err(error) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error)) container = find.children[0] if isinstance(container, davxml.AllProperties): # Get all properties search_properties = "all" elif isinstance(container, davxml.PropertyName): # Get names only search_properties = "names" elif isinstance(container, davxml.PropertyContainer): properties = container.children search_properties = [(p.namespace, p.name) for p in properties] else: raise AssertionError("Unexpected element type in %s: %s" % (davxml.PropertyFind.sname(), container)) # # Generate XML output stream # request_uri = request.uri depth = request.headers.getHeader("depth", "infinity") xml_responses = [] resources = [(self, None)] resources.extend(self.findChildren(depth)) for resource, uri in resources: if uri is None: uri = normalizeURL(request_uri) if self.isCollection() and not uri.endswith("/"): uri += "/" else: uri = joinURL(request_uri, uri) resource_properties = waitForDeferred(resource.listProperties(request)) yield resource_properties resource_properties = resource_properties.getResult() if search_properties is "names": properties_by_status = { responsecode.OK: [propertyName(p) for p in resource_properties] } else: properties_by_status = { responsecode.OK : [], responsecode.NOT_FOUND : [], } if search_properties is "all": properties_to_enumerate = waitForDeferred(resource.listAllprop(request)) yield properties_to_enumerate properties_to_enumerate = properties_to_enumerate.getResult() else: properties_to_enumerate = search_properties for property in properties_to_enumerate: if property in resource_properties: try: resource_property = waitForDeferred(resource.readProperty(property, request)) yield resource_property resource_property = resource_property.getResult() 
except: f = Failure() log.err("Error reading property %r for resource %s: %s" % (property, uri, f.value)) status = statusForFailure(f, "getting property: %s" % (property,)) if status not in properties_by_status: properties_by_status[status] = [] properties_by_status[status].append(propertyName(property)) else: properties_by_status[responsecode.OK].append(resource_property) else: log.err("Can't find property %r for resource %s" % (property, uri)) properties_by_status[responsecode.NOT_FOUND].append(propertyName(property)) propstats = [] for status in properties_by_status: properties = properties_by_status[status] if not properties: continue xml_status = davxml.Status.fromResponseCode(status) xml_container = davxml.PropertyContainer(*properties) xml_propstat = davxml.PropertyStatus(xml_container, xml_status) propstats.append(xml_propstat) xml_resource = davxml.HRef(uri) xml_response = davxml.PropertyStatusResponse(xml_resource, *propstats) xml_responses.append(xml_response) # # Return response # yield MultiStatusResponse(xml_responses) http_PROPFIND = deferredGenerator(http_PROPFIND) ## # Utilities ## def propertyName(name): property_namespace, property_name = name class PropertyName (davxml.WebDAVEmptyElement): namespace = property_namespace name = property_name return PropertyName()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/method/propfind.py
propfind.py
__all__ = ["http_REPORT"] import string from twisted.python import log from twisted.internet.defer import deferredGenerator, waitForDeferred from twisted.web2 import responsecode from twisted.web2.http import HTTPError, StatusResponse from twisted.web2.dav import davxml from twisted.web2.dav.http import ErrorResponse from twisted.web2.dav.util import davXMLFromStream def http_REPORT(self, request): """ Respond to a REPORT request. (RFC 3253, section 3.6) """ if not self.fp.exists(): log.err("File not found: %s" % (self.fp.path,)) raise HTTPError(responsecode.NOT_FOUND) # # Read request body # try: doc = waitForDeferred(davXMLFromStream(request.stream)) yield doc doc = doc.getResult() except ValueError, e: log.err("Error while handling REPORT body: %s" % (e,)) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e))) if doc is None: raise HTTPError(StatusResponse( responsecode.BAD_REQUEST, "REPORT request body may not be empty" )) # # Parse request # namespace = doc.root_element.namespace name = doc.root_element.name def to_method(s): ok = string.ascii_letters + string.digits + "_" out = [] for c in s: if c in ok: out.append(c) else: out.append("_") return "report_" + "".join(out) if namespace: method_name = to_method(namespace + "_" + name) else: method_name = to_method(name) try: method = getattr(self, method_name) except AttributeError: # # Requested report is not supported. # log.err("Unsupported REPORT {%s}%s for resource %s (no method %s)" % (namespace, name, self, method_name)) raise HTTPError(ErrorResponse( responsecode.FORBIDDEN, davxml.SupportedReport() )) yield method(request, doc.root_element) http_REPORT = deferredGenerator(http_REPORT)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/method/report.py
report.py
__all__ = ["http_PROPPATCH"] from twisted.python import log from twisted.python.failure import Failure from twisted.internet.defer import deferredGenerator, waitForDeferred from twisted.web2 import responsecode from twisted.web2.http import HTTPError, StatusResponse from twisted.web2.dav import davxml from twisted.web2.dav.http import MultiStatusResponse, PropertyStatusResponseQueue from twisted.web2.dav.util import davXMLFromStream def http_PROPPATCH(self, request): """ Respond to a PROPPATCH request. (RFC 2518, section 8.2) """ if not self.fp.exists(): log.err("File not found: %s" % (self.fp.path,)) raise HTTPError(responsecode.NOT_FOUND) # # Read request body # try: doc = waitForDeferred(davXMLFromStream(request.stream)) yield doc doc = doc.getResult() except ValueError, e: log.err("Error while handling PROPPATCH body: %s" % (e,)) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e))) if doc is None: error = "Request XML body is required." log.err(error) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error)) # # Parse request # update = doc.root_element if not isinstance(update, davxml.PropertyUpdate): error = ("Request XML body must be a propertyupdate element." % (davxml.PropertyUpdate.sname(),)) log.err(error) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error)) responses = PropertyStatusResponseQueue("PROPPATCH", request.uri, responsecode.NO_CONTENT) undoActions = [] gotError = False try: # # Update properties # for setOrRemove in update.children: assert len(setOrRemove.children) == 1 container = setOrRemove.children[0] assert isinstance(container, davxml.PropertyContainer) properties = container.children def do(action, property): """ Perform action(property, request) while maintaining an undo queue. 
""" has = waitForDeferred(self.hasProperty(property, request)) yield has has = has.getResult() if has: oldProperty = waitForDeferred(self.readProperty(property, request)) yield oldProperty oldProperty.getResult() def undo(): return self.writeProperty(oldProperty, request) else: def undo(): return self.removeProperty(property, request) try: x = waitForDeferred(action(property, request)) yield x x.getResult() except ValueError, e: # Convert ValueError exception into HTTPError responses.add( Failure(exc_value=HTTPError(StatusResponse(responsecode.FORBIDDEN, str(e)))), property ) yield False return except: responses.add(Failure(), property) yield False return else: responses.add(responsecode.OK, property) # Only add undo action for those that succeed because those that fail will not have changed undoActions.append(undo) yield True return do = deferredGenerator(do) if isinstance(setOrRemove, davxml.Set): for property in properties: ok = waitForDeferred(do(self.writeProperty, property)) yield ok ok = ok.getResult() if not ok: gotError = True elif isinstance(setOrRemove, davxml.Remove): for property in properties: ok = waitForDeferred(do(self.removeProperty, property)) yield ok ok = ok.getResult() if not ok: gotError = True else: raise AssertionError("Unknown child of PropertyUpdate: %s" % (setOrRemove,)) except: # # If there is an error, we have to back out whatever we have # operations we have done because PROPPATCH is an # all-or-nothing request. # We handle the first one here, and then re-raise to handle the # rest in the containing scope. # for action in undoActions: x = waitForDeferred(action()) yield x x.getResult() raise # # If we had an error we need to undo any changes that did succeed and change status of # those to 424 Failed Dependency. 
# if gotError: for action in undoActions: x = waitForDeferred(action()) yield x x.getResult() responses.error() # # Return response # yield MultiStatusResponse([responses.response()]) http_PROPPATCH = deferredGenerator(http_PROPPATCH)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/dav/method/proppatch.py
proppatch.py
from zope.interface import implements from twisted.internet import defer, protocol from twisted.protocols import basic, policies from twisted.web2 import stream as stream_mod, http, http_headers, responsecode from twisted.web2.channel import http as httpchan from twisted.web2.channel.http import PERSIST_NO_PIPELINE, PERSIST_PIPELINE from twisted.web2.client import interfaces #from twisted.python.util import tracer class ProtocolError(Exception): pass class ClientRequest(object): """A class for describing an HTTP request to be sent to the server. """ def __init__(self, method, uri, headers, stream): """ @param method: The HTTP method to for this request, ex: 'GET', 'HEAD', 'POST', etc. @type method: C{str} @param uri: The URI of the resource to request, this may be absolute or relative, however the interpretation of this URI is left up to the remote server. @type uri: C{str} @param headers: Headers to be sent to the server. It is important to note that this object does not create any implicit headers. So it is up to the HTTP Client to add required headers such as 'Host'. @type headers: C{dict}, L{twisted.web2.http_headers.Headers}, or C{None} @param stream: Content body to send to the remote HTTP server. 
@type stream: L{twisted.web2.stream.IByteStream} """ self.method = method self.uri = uri if isinstance(headers, http_headers.Headers): self.headers = headers else: self.headers = http_headers.Headers(headers or {}) if stream is not None: self.stream = stream_mod.IByteStream(stream) else: self.stream = None class HTTPClientChannelRequest(httpchan.HTTPParser): parseCloseAsEnd = True outgoing_version = "HTTP/1.1" chunkedOut = False finished = False closeAfter = False def __init__(self, channel, request, closeAfter): httpchan.HTTPParser.__init__(self, channel) self.request = request self.closeAfter = closeAfter self.transport = self.channel.transport self.responseDefer = defer.Deferred() def submit(self): l = [] request = self.request if request.method == "HEAD": # No incoming data will arrive. self.length = 0 l.append('%s %s %s\r\n' % (request.method, request.uri, self.outgoing_version)) if request.headers is not None: for name, valuelist in request.headers.getAllRawHeaders(): for value in valuelist: l.append("%s: %s\r\n" % (name, value)) if request.stream is not None: if request.stream.length is not None: l.append("%s: %s\r\n" % ('Content-Length', request.stream.length)) else: # Got a stream with no length. Send as chunked and hope, against # the odds, that the server actually supports chunked uploads. l.append("%s: %s\r\n" % ('Transfer-Encoding', 'chunked')) self.chunkedOut = True if self.closeAfter: l.append("%s: %s\r\n" % ('Connection', 'close')) else: l.append("%s: %s\r\n" % ('Connection', 'Keep-Alive')) l.append("\r\n") self.transport.writeSequence(l) d = stream_mod.StreamProducer(request.stream).beginProducing(self) d.addCallback(self._finish).addErrback(self._error) def registerProducer(self, producer, streaming): """Register a producer. 
""" self.transport.registerProducer(producer, streaming) def unregisterProducer(self): self.transport.unregisterProducer() def write(self, data): if not data: return elif self.chunkedOut: self.transport.writeSequence(("%X\r\n" % len(data), data, "\r\n")) else: self.transport.write(data) def _finish(self, x): """We are finished writing data.""" if self.chunkedOut: # write last chunk and closing CRLF self.transport.write("0\r\n\r\n") self.finished = True self.channel.requestWriteFinished(self) del self.transport def _error(self, err): self.abortParse() self.responseDefer.errback(err) def _abortWithError(self, errcode, text): self.abortParse() self.responseDefer.errback(ProtocolError(text)) def connectionLost(self, reason): ### FIXME! pass def gotInitialLine(self, initialLine): parts = initialLine.split(' ', 2) # Parse the initial request line if len(parts) != 3: self._abortWithError(responsecode.BAD_REQUEST, 'Bad response line: %s' % initialLine) return strversion, self.code, message = parts try: protovers = http.parseVersion(strversion) if protovers[0] != 'http': raise ValueError() except ValueError: self._abortWithError(responsecode.BAD_REQUEST, "Unknown protocol: %s" % strversion) return self.version = protovers[1:3] # Ensure HTTP 0 or HTTP 1. if self.version[0] != 1: self._abortWithError(responsecode.HTTP_VERSION_NOT_SUPPORTED, 'Only HTTP 1.x is supported.') return ## FIXME: Actually creates Response, function is badly named! def createRequest(self): self.stream = stream_mod.ProducerStream() self.response = http.Response(self.code, self.inHeaders, self.stream) self.stream.registerProducer(self, True) del self.inHeaders ## FIXME: Actually processes Response, function is badly named! def processRequest(self): self.responseDefer.callback(self.response) def handleContentChunk(self, data): self.stream.write(data) def handleContentComplete(self): self.stream.finish() class EmptyHTTPClientManager(object): """A dummy HTTPClientManager. 
It doesn't do any client management, and is meant to be used only when creating an HTTPClientProtocol directly. """ implements(interfaces.IHTTPClientManager) def clientBusy(self, proto): pass def clientIdle(self, proto): pass def clientPipelining(self, proto): pass def clientGone(self, proto): pass class HTTPClientProtocol(basic.LineReceiver, policies.TimeoutMixin, object): """A HTTP 1.1 Client with request pipelining support.""" chanRequest = None maxHeaderLength = 10240 firstLine = 1 readPersistent = PERSIST_NO_PIPELINE # inputTimeOut should be pending whenever a complete request has # been written but the complete response has not yet been # received, and be reset every time data is received. inputTimeOut = 60 * 4 def __init__(self, manager=None): """ @param manager: The object this client reports it state to. @type manager: L{interfaces.IHTTPClientManager} """ self.outRequest = None self.inRequests = [] if manager is None: manager = EmptyHTTPClientManager() self.manager = manager def lineReceived(self, line): if not self.inRequests: # server sending random unrequested data. self.transport.loseConnection() return # If not currently writing this request, set timeout if self.inRequests[0] is not self.outRequest: self.setTimeout(self.inputTimeOut) if self.firstLine: self.firstLine = 0 self.inRequests[0].gotInitialLine(line) else: self.inRequests[0].lineReceived(line) def rawDataReceived(self, data): if not self.inRequests: print "Extra raw data!" # server sending random unrequested data. self.transport.loseConnection() return # If not currently writing this request, set timeout if self.inRequests[0] is not self.outRequest: self.setTimeout(self.inputTimeOut) self.inRequests[0].rawDataReceived(data) def submitRequest(self, request, closeAfter=True): """ @param request: The request to send to a remote server. 
@type request: L{ClientRequest} @param closeAfter: If True the 'Connection: close' header will be sent, otherwise 'Connection: keep-alive' @type closeAfter: C{bool} @return: L{twisted.internet.defer.Deferred} @callback: L{twisted.web2.http.Response} from the server. """ # Assert we're in a valid state to submit more assert self.outRequest is None assert ((self.readPersistent is PERSIST_NO_PIPELINE and not self.inRequests) or self.readPersistent is PERSIST_PIPELINE) self.manager.clientBusy(self) if closeAfter: self.readPersistent = False self.outRequest = chanRequest = HTTPClientChannelRequest(self, request, closeAfter) self.inRequests.append(chanRequest) chanRequest.submit() return chanRequest.responseDefer def requestWriteFinished(self, request): assert request is self.outRequest self.outRequest = None # Tell the manager if more requests can be submitted. self.setTimeout(self.inputTimeOut) if self.readPersistent is PERSIST_PIPELINE: self.manager.clientPipelining(self) def requestReadFinished(self, request): assert self.inRequests[0] is request del self.inRequests[0] self.firstLine = True if not self.inRequests: if self.readPersistent: self.setTimeout(None) self.manager.clientIdle(self) else: # print "No more requests, closing" self.transport.loseConnection() def setReadPersistent(self, persist): self.readPersistent = persist if not persist: # Tell all requests but first to abort. for request in self.inRequests[1:]: request.connectionLost(None) del self.inRequests[1:] def connectionLost(self, reason): self.readPersistent = False self.setTimeout(None) self.manager.clientGone(self) # Tell all requests to abort. 
for request in self.inRequests: if request is not None: request.connectionLost(reason) #isLastRequest = tracer(isLastRequest) #lineReceived = tracer(lineReceived) #rawDataReceived = tracer(rawDataReceived) #connectionLost = tracer(connectionLost) #requestReadFinished = tracer(requestReadFinished) #requestWriteFinished = tracer(requestWriteFinished) #submitRequest = tracer(submitRequest) def testConn(host): from twisted.internet import reactor d = protocol.ClientCreator(reactor, HTTPClientProtocol).connectTCP(host, 80) def gotResp(resp, num): def print_(n): print "DATA %s: %r" % (num, n) def printdone(n): print "DONE %s" % num print "GOT RESPONSE %s: %s" % (num, resp) stream_mod.readStream(resp.stream, print_).addCallback(printdone) def sendReqs(proto): proto.submitRequest(ClientRequest("GET", "/", {'Host':host}, None)).addCallback(gotResp, 1) proto.submitRequest(ClientRequest("GET", "/foo", {'Host':host}, None)).addCallback(gotResp, 2) d.addCallback(sendReqs) del d reactor.run()
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/client/http.py
http.py
from __future__ import generators import struct import zlib from twisted.web2 import stream # TODO: ungzip (can any browsers actually generate gzipped # upload data?) But it's necessary for client anyways. def gzipStream(input, compressLevel=6): crc, size = zlib.crc32(''), 0 # magic header, compression method, no flags header = '\037\213\010\000' # timestamp header += struct.pack('<L', 0) # uh.. stuff header += '\002\377' yield header compress = zlib.compressobj(compressLevel, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0) _compress = compress.compress _crc32 = zlib.crc32 yield input.wait for buf in input: if len(buf) != 0: crc = _crc32(buf, crc) size += len(buf) yield _compress(buf) yield input.wait yield compress.flush() yield struct.pack('<LL', crc & 0xFFFFFFFFL, size & 0xFFFFFFFFL) gzipStream=stream.generatorToStream(gzipStream) def deflateStream(input, compressLevel=6): # NOTE: this produces RFC-conformant but some-browser-incompatible output. # The RFC says that you're supposed to output zlib-format data, but many # browsers expect raw deflate output. Luckily all those browsers support # gzip, also, so they won't even see deflate output. compress = zlib.compressobj(compressLevel, zlib.DEFLATED, zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0) _compress = compress.compress yield input.wait for buf in input: if len(buf) != 0: yield _compress(buf) yield input.wait yield compress.flush() deflateStream=stream.generatorToStream(deflateStream) def gzipfilter(request, response): if response.stream is None or response.headers.getHeader('content-encoding'): # Empty stream, or already compressed. return response # FIXME: make this a more flexible matching scheme mimetype = response.headers.getHeader('content-type') if not mimetype or mimetype.mediaType != 'text': return response # Make sure to note we're going to return different content depending on # the accept-encoding header. 
vary = response.headers.getHeader('vary', []) if 'accept-encoding' not in vary: response.headers.setHeader('vary', vary+['accept-encoding']) ae = request.headers.getHeader('accept-encoding', {}) compressor = None # Always prefer gzip over deflate no matter what their q-values are. if ae.get('gzip', 0): response.stream = gzipStream(response.stream) response.headers.setHeader('content-encoding', ['gzip']) elif ae.get('deflate', 0): response.stream = deflateStream(response.stream) response.headers.setHeader('content-encoding', ['deflate']) return response __all__ = ['gzipfilter']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/filter/gzip.py
gzip.py
import time, os from twisted.web2 import http, http_headers, responsecode, stream # Some starts at writing a response filter to handle request ranges. class UnsatisfiableRangeRequest(Exception): pass def canonicalizeRange((start, end), size): """Return canonicalized (start, end) or raises UnsatisfiableRangeRequest exception. NOTE: end is the last byte *inclusive*, which is not the usual convention in python! Be very careful! A range of 0,1 should return 2 bytes.""" # handle "-500" ranges if start is None: start = max(0, size-end) end = None if end is None or end >= size: end = size - 1 if start >= size: raise UnsatisfiableRangeRequest return start,end def makeUnsatisfiable(request, oldresponse): if request.headers.hasHeader('if-range'): return oldresponse # Return resource instead of error response = http.Response(responsecode.REQUESTED_RANGE_NOT_SATISFIABLE) response.headers.setHeader("content-range", ('bytes', None, None, oldresponse.stream.length)) return response def makeSegment(inputStream, lastOffset, start, end): offset = start - lastOffset length = end + 1 - start if offset != 0: before, inputStream = inputStream.split(offset) before.close() return inputStream.split(length) def rangefilter(request, oldresponse): if oldresponse.stream is None: return oldresponse size = oldresponse.stream.length if size is None: # Does not deal with indeterminate length outputs return oldresponse oldresponse.headers.setHeader('accept-ranges',('bytes',)) rangespec = request.headers.getHeader('range') # If we've got a range header and the If-Range header check passes, and # the range type is bytes, do a partial response. 
if (rangespec is not None and http.checkIfRange(request, oldresponse) and rangespec[0] == 'bytes'): # If it's a single range, return a simple response if len(rangespec[1]) == 1: try: start,end = canonicalizeRange(rangespec[1][0], size) except UnsatisfiableRangeRequest: return makeUnsatisfiable(request, oldresponse) response = http.Response(responsecode.PARTIAL_CONTENT, oldresponse.headers) response.headers.setHeader('content-range',('bytes',start, end, size)) content, after = makeSegment(oldresponse.stream, 0, start, end) after.close() response.stream = content return response else: # Return a multipart/byteranges response lastOffset = -1 offsetList = [] for arange in rangespec[1]: try: start,end = canonicalizeRange(arange, size) except UnsatisfiableRangeRequest: continue if start <= lastOffset: # Stupid client asking for out-of-order or overlapping ranges, PUNT! return oldresponse offsetList.append((start,end)) lastOffset = end if not offsetList: return makeUnsatisfiable(request, oldresponse) content_type = oldresponse.headers.getRawHeaders('content-type') boundary = "%x%x" % (int(time.time()*1000000), os.getpid()) response = http.Response(responsecode.PARTIAL_CONTENT, oldresponse.headers) response.headers.setHeader('content-type', http_headers.MimeType('multipart', 'byteranges', [('boundary', boundary)])) response.stream = out = stream.CompoundStream() lastOffset = 0 origStream = oldresponse.stream headerString = "\r\n--%s" % boundary if len(content_type) == 1: headerString+='\r\nContent-Type: %s' % content_type[0] headerString+="\r\nContent-Range: %s\r\n\r\n" for start,end in offsetList: out.addStream(headerString % http_headers.generateContentRange(('bytes', start, end, size))) content, origStream = makeSegment(origStream, lastOffset, start, end) lastOffset = end + 1 out.addStream(content) origStream.close() out.addStream("\r\n--%s--\r\n" % boundary) return response else: return oldresponse __all__ = ['rangefilter']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/web2/filter/range.py
range.py
# """Support for working directly with IP packets""" import struct import socket from twisted.internet import protocol from twisted.pair import raw from zope.interface import implements class IPHeader: def __init__(self, data): (ihlversion, self.tos, self.tot_len, self.fragment_id, frag_off, self.ttl, self.protocol, self.check, saddr, daddr) \ = struct.unpack("!BBHHHBBH4s4s", data[:20]) self.saddr = socket.inet_ntoa(saddr) self.daddr = socket.inet_ntoa(daddr) self.version = ihlversion & 0x0F self.ihl = ((ihlversion & 0xF0) >> 4) << 2 self.fragment_offset = frag_off & 0x1FFF self.dont_fragment = (frag_off & 0x4000 != 0) self.more_fragments = (frag_off & 0x2000 != 0) MAX_SIZE = 2L**32 class IPProtocol(protocol.AbstractDatagramProtocol): implements(raw.IRawPacketProtocol) def __init__(self): self.ipProtos = {} def addProto(self, num, proto): proto = raw.IRawDatagramProtocol(proto) if num < 0: raise TypeError, 'Added protocol must be positive or zero' if num >= MAX_SIZE: raise TypeError, 'Added protocol must fit in 32 bits' if num not in self.ipProtos: self.ipProtos[num] = [] self.ipProtos[num].append(proto) def datagramReceived(self, data, partial, dest, source, protocol): header = IPHeader(data) for proto in self.ipProtos.get(header.protocol, ()): proto.datagramReceived(data=data[20:], partial=partial, source=header.saddr, dest=header.daddr, protocol=header.protocol, version=header.version, ihl=header.ihl, tos=header.tos, tot_len=header.tot_len, fragment_id=header.fragment_id, fragment_offset=header.fragment_offset, dont_fragment=header.dont_fragment, more_fragments=header.more_fragments, ttl=header.ttl, )
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/pair/ip.py
ip.py
# import errno, os from twisted.python import log, reflect, components from twisted.internet import base, fdesc, error from twisted.pair import ethernet, ip """ You need Eunuchs for twisted.pair.tuntap to work. Eunuchs is a library containing the missing manly parts of UNIX API for Python. Eunuchs is a library of Python extension that complement the standard libraries in parts where full support for the UNIX API (or the Linux API) is missing. Most of the functions wrapped by Eunuchs are low-level, dirty, but absolutely necessary functions for real systems programming. The aim is to have the functions added to mainstream Python libraries. Current list of functions included: - fchdir(2) - recvmsg(2) and sendmsg(2), including use of cmsg(3) - socketpair(2) - support for TUN/TAP virtual network interfaces Eunuchs doesn't have a proper web home right now, but you can fetch the source from http://ftp.debian.org/debian/pool/main/e/eunuch -- debian users can just use 'apt-get install python-eunuchs'. """ from eunuchs.tuntap import opentuntap, TuntapPacketInfo, makePacketInfo class TuntapPort(base.BasePort): """A Port that reads and writes packets from/to a TUN/TAP-device. TODO: Share general start/stop etc implementation details with twisted.internet.udp.Port. """ maxThroughput = 256 * 1024 # max bytes we read in one eventloop iteration def __init__(self, interface, proto, maxPacketSize=8192, reactor=None): if components.implements(proto, ethernet.IEthernetProtocol): self.ethernet = 1 else: self.ethernet = 0 assert components.implements(proto, ip.IIPProtocol) # XXX: fix me base.BasePort.__init__(self, reactor) self.interface = interface self.protocol = proto self.maxPacketSize = maxPacketSize self.setLogStr() def __repr__(self): return "<%s on %s>" % (self.protocol.__class__, self.interface) def startListening(self): """Create and bind my socket, and begin listening on it. 
This is called on unserialization, and must be called after creating a server to begin listening on the specified port. """ self._bindSocket() self._connectToProtocol() def _bindSocket(self): log.msg("%s starting on %s"%(self.protocol.__class__, self.interface)) try: fd, name = opentuntap(name=self.interface, ethernet=self.ethernet, packetinfo=0) except OSError, e: raise error.CannotListenError, (None, self.interface, e) fdesc.setNonBlocking(fd) self.interface = name self.connected = 1 self.fd = fd def fileno(self): return self.fd def _connectToProtocol(self): self.protocol.makeConnection(self) self.startReading() def doRead(self): """Called when my socket is ready for reading.""" read = 0 while read < self.maxThroughput: try: data = os.read(self.fd, self.maxPacketSize) read += len(data) # pkt = TuntapPacketInfo(data) self.protocol.datagramReceived(data, partial=0 # pkt.isPartial(), ) except OSError, e: if e.errno in (errno.EWOULDBLOCK,): return else: raise except IOError, e: if e.errno in (errno.EAGAIN, errno.EINTR): return else: raise except: log.deferr() def write(self, datagram): """Write a datagram.""" # header = makePacketInfo(0, 0) try: return os.write(self.fd, datagram) except IOError, e: if e.errno == errno.EINTR: return self.write(datagram) elif e.errno == errno.EMSGSIZE: raise error.MessageLengthError, "message too long" elif e.errno == errno.ECONNREFUSED: raise error.ConnectionRefusedError else: raise def writeSequence(self, seq): self.write("".join(seq)) def loseConnection(self): """Stop accepting connections on this port. This will shut down my socket and call self.connectionLost(). """ self.stopReading() if self.connected: from twisted.internet import reactor reactor.callLater(0, self.connectionLost) stopListening = loseConnection def connectionLost(self, reason=None): """Cleans up my socket. 
""" log.msg('(Tuntap %s Closed)' % self.interface) base.BasePort.connectionLost(self, reason) if hasattr(self, "protocol"): # we won't have attribute in ConnectedPort, in cases # where there was an error in connection process self.protocol.doStop() self.connected = 0 os.close(self.fd) del self.fd def setLogStr(self): self.logstr = reflect.qual(self.protocol.__class__) + " (TUNTAP)" def logPrefix(self): """Returns the name of my class, to prefix log entries with. """ return self.logstr def getHost(self): """ Returns a tuple of ('TUNTAP', interface), indicating the servers address """ return ('TUNTAP',)+self.interface
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/pair/tuntap.py
tuntap.py
import itertools from zope.interface import implements, directlyProvides from twisted.internet import error from twisted.internet import interfaces from OpenSSL.SSL import Error as NativeOpenSSLError from twisted.internet._sslverify import OpenSSLVerifyError class TLSNegotiation: def __init__(self, obj, connectState): self.obj = obj self.connectState = connectState self.sent = False self.readyToSend = connectState def __repr__(self): return 'TLSNegotiation(%r)' % (self.obj,) def pretendToVerify(self, other, tpt): # Set the transport problems list here? disconnections? # hmmmmm... need some negative path tests. if not self.obj.iosimVerify(other.obj): tpt.problems.append(OpenSSLVerifyError("fake cert", "fake errno", "fake depth")) tpt.disconnectReason = NativeOpenSSLError() tpt.loseConnection() class FakeTransport: """A wrapper around a file-like object to make it behave as a Transport. This doesn't actually stream the file to the attached protocol, and is thus useful mainly as a utility for debugging protocols. 
""" implements(interfaces.ITransport, interfaces.ITLSTransport) # ha ha not really _nextserial = itertools.count().next closed = 0 disconnecting = 0 disconnected = 0 disconnectReason = error.ConnectionDone("Connection done") producer = None streamingProducer = 0 tls = None def __init__(self): self.stream = [] self.problems = [] self.serial = self._nextserial() def __repr__(self): return 'FakeTransport<%s,%s,%s>' % ( self.isServer and 'S' or 'C', self.serial, self.protocol.__class__.__name__) def write(self, data): if self.tls is not None: self.tlsbuf.append(data) else: self.stream.append(data) def _checkProducer(self): # Cheating; this is called at "idle" times to allow producers to be # found and dealt with if self.producer: self.producer.resumeProducing() def registerProducer(self, producer, streaming): """From abstract.FileDescriptor """ self.producer = producer self.streamingProducer = streaming if not streaming: producer.resumeProducing() def unregisterProducer(self): self.producer = None def stopConsuming(self): self.unregisterProducer() self.loseConnection() def writeSequence(self, iovec): self.write("".join(iovec)) def loseConnection(self): self.disconnecting = True def reportDisconnect(self): if self.tls is not None: # We were in the middle of negotiating! Must have been a TLS problem. err = NativeOpenSSLError() else: err = self.disconnectReason self.protocol.connectionLost(err) def getPeer(self): # XXX: According to ITransport, this should return an IAddress! return 'file', 'file' def getHost(self): # XXX: According to ITransport, this should return an IAddress! 
return 'file' def resumeProducing(self): # Never sends data anyways pass def pauseProducing(self): # Never sends data anyways pass def stopProducing(self): self.loseConnection() def startTLS(self, contextFactory, beNormal=True): # Nothing's using this feature yet, but startTLS has an undocumented # second argument which defaults to true; if set to False, servers will # behave like clients and clients will behave like servers. connectState = self.isServer ^ beNormal self.tls = TLSNegotiation(contextFactory, connectState) self.tlsbuf = [] def getOutBuffer(self): S = self.stream if S: self.stream = [] return ''.join(S) elif self.tls is not None: if self.tls.readyToSend: # Only _send_ the TLS negotiation "packet" if I'm ready to. self.tls.sent = True return self.tls else: return None else: return None def bufferReceived(self, buf): if isinstance(buf, TLSNegotiation): assert self.tls is not None # By the time you're receiving a # negotiation, you have to have called # startTLS already. if self.tls.sent: self.tls.pretendToVerify(buf, self) self.tls = None # we're done with the handshake if we've gotten # this far... although maybe it failed...? # TLS started! Unbuffer... b, self.tlsbuf = self.tlsbuf, None self.writeSequence(b) directlyProvides(self, interfaces.ISSLTransport) else: # We haven't sent our own TLS negotiation: time to do that! self.tls.readyToSend = True else: self.protocol.dataReceived(buf) # this next bit is just to fake out problemsFromTransport, which is an # ultra-shitty API anyway. remove it when we manage to remove that. -glyph def getHandle(self): return self get_context = getHandle get_app_data = getHandle # end of gross problemsFromTransport stuff def makeFakeClient(c): ft = FakeTransport() ft.isServer = False ft.protocol = c return ft def makeFakeServer(s): ft = FakeTransport() ft.isServer = True ft.protocol = s return ft class IOPump: """Utility to pump data between clients and servers for protocol testing. 
Perhaps this is a utility worthy of being in protocol.py? """ def __init__(self, client, server, clientIO, serverIO, debug): self.client = client self.server = server self.clientIO = clientIO self.serverIO = serverIO self.debug = debug def flush(self, debug=False): """Pump until there is no more input or output. Returns whether any data was moved. """ result = False for x in range(1000): if self.pump(debug): result = True else: break else: assert 0, "Too long" return result def pump(self, debug=False): """Move data back and forth. Returns whether any data was moved. """ if self.debug or debug: print '-- GLUG --' sData = self.serverIO.getOutBuffer() cData = self.clientIO.getOutBuffer() self.clientIO._checkProducer() self.serverIO._checkProducer() if self.debug or debug: print '.' # XXX slightly buggy in the face of incremental output if cData: print 'C: '+repr(cData) if sData: print 'S: '+repr(sData) if cData: self.serverIO.bufferReceived(cData) if sData: self.clientIO.bufferReceived(sData) if cData or sData: return True if (self.serverIO.disconnecting and not self.serverIO.disconnected): if self.debug or debug: print '* C' self.serverIO.disconnected = True self.clientIO.disconnecting = True self.clientIO.reportDisconnect() return True if self.clientIO.disconnecting and not self.clientIO.disconnected: if self.debug or debug: print '* S' self.clientIO.disconnected = True self.serverIO.disconnecting = True self.serverIO.reportDisconnect() return True return False def connectedServerAndClient(ServerClass, ClientClass, clientTransportFactory=makeFakeClient, serverTransportFactory=makeFakeServer, debug=False): """Returns a 3-tuple: (client, server, pump) """ c = ClientClass() s = ServerClass() cio = clientTransportFactory(c) sio = serverTransportFactory(s) c.makeConnection(cio) s.makeConnection(sio) pump = IOPump(c, s, cio, sio, debug) # kick off server greeting, etc pump.flush() return c, s, pump
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/test/iosim.py
iosim.py
# Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. """ The point of integration of application and authentication. """ from twisted.internet import defer from twisted.internet.defer import maybeDeferred from twisted.python import failure, reflect from twisted.cred import error from zope.interface import providedBy, Interface class IRealm(Interface): """ The realm connects application-specific objects to the authentication system. """ def requestAvatar(avatarId, mind, *interfaces): """Return avatar implementing one of the given interfaces. @param avatarId: a string that identifies an avatar, as returned by L{ICredentialsChecker.requestAvatarId<twisted.cred.checkers.ICredentialsChecker.requestAvatarId>} (via a Deferred). Alternatively, it may be C{twisted.cred.checkers.ANONYMOUS}. @param mind: usually None. See the description of mind in L{Portal.login}. @param interfaces: the interface(s) the returned avatar should implement, e.g. C{IMailAccount}. See the description of L{Portal.login}. @returns: a deferred which will fire a tuple of (interface, avatarAspect, logout), or the tuple itself. The interface will be one of the interfaces passed in the 'interfaces' argument. The 'avatarAspect' will implement that interface. The 'logout' object is a callable which will detach the mind from the avatar. """ class Portal: """A mediator between clients and a realm. A portal is associated with one Realm and zero or more credentials checkers. When a login is attempted, the portal finds the appropriate credentials checker for the credentials given, invokes it, and if the credentials are valid, retrieves the appropriate avatar from the Realm. This class is not intended to be subclassed. Customization should be done in the realm object and in the credentials checker objects. """ def __init__(self, realm, checkers=()): """Create a Portal to a L{IRealm}. 
""" self.realm = realm self.checkers = {} for checker in checkers: self.registerChecker(checker) def listCredentialsInterfaces(self): """Return list of credentials interfaces that can be used to login.""" return self.checkers.keys() def registerChecker(self, checker, *credentialInterfaces): if not credentialInterfaces: credentialInterfaces = checker.credentialInterfaces for credentialInterface in credentialInterfaces: self.checkers[credentialInterface] = checker def login(self, credentials, mind, *interfaces): """ @param credentials: an implementor of twisted.cred.credentials.ICredentials @param mind: an object which implements a client-side interface for your particular realm. In many cases, this may be None, so if the word 'mind' confuses you, just ignore it. @param interfaces: list of interfaces for the perspective that the mind wishes to attach to. Usually, this will be only one interface, for example IMailAccount. For highly dynamic protocols, however, this may be a list like (IMailAccount, IUserChooser, IServiceInfo). To expand: if we are speaking to the system over IMAP, any information that will be relayed to the user MUST be returned as an IMailAccount implementor; IMAP clients would not be able to understand anything else. Any information about unusual status would have to be relayed as a single mail message in an otherwise-empty mailbox. However, in a web-based mail system, or a PB-based client, the ``mind'' object inside the web server (implemented with a dynamic page-viewing mechanism such as woven) or on the user's client program may be intelligent enough to respond to several ``server''-side interfaces. @return: A deferred which will fire a tuple of (interface, avatarAspect, logout). The interface will be one of the interfaces passed in the 'interfaces' argument. The 'avatarAspect' will implement that interface. The 'logout' object is a callable which will detach the mind from the avatar. 
It must be called when the user has conceptually disconnected from the service. Although in some cases this will not be in connectionLost (such as in a web-based session), it will always be at the end of a user's interactive session. """ ifac = providedBy(credentials) for i in ifac: c = self.checkers.get(i) if c is not None: return maybeDeferred(c.requestAvatarId, credentials ).addCallback(self.realm.requestAvatar, mind, *interfaces ) return defer.fail(failure.Failure(error.UnhandledCredentials( "No checker for %s" % ', '.join(map(reflect.qual, ifac)))))
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/cred/portal.py
portal.py
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.

# Credential objects passed from protocols to checkers.  Each concrete
# class declares (via zope.interface.implements) which ICredentials
# sub-interface(s) it provides, so a Portal can route it to a matching
# checker.

from zope.interface import implements, Interface

import hmac
import time
import random


class ICredentials(Interface):
    """I check credentials.

    Implementors _must_ specify which sub-interfaces of ICredentials
    to which it conforms, using zope.interface.implements().
    """


class IUsernameHashedPassword(ICredentials):
    """I encapsulate a username and a hashed password.

    This credential is used when a hashed password is received from the
    party requesting authentication.  CredentialCheckers which check this
    kind of credential must store the passwords in plaintext (or as
    password-equivalent hashes) form so that they can be hashed in a manner
    appropriate for the particular credentials class.

    @type username: C{str}
    @ivar username: The username associated with these credentials.
    """

    def checkPassword(password):
        """Validate these credentials against the correct password.

        @param password: The correct, plaintext password against which to
            check.

        @return: a deferred which becomes, or a boolean indicating if the
            password matches.
        """


class IUsernamePassword(ICredentials):
    """I encapsulate a username and a plaintext password.

    This encapsulates the case where the password received over the network
    has been hashed with the identity function (That is, not at all).  The
    CredentialsChecker may store the password in whatever format it
    desires, it need only transform the stored password in a similar way
    before performing the comparison.

    @type username: C{str}
    @ivar username: The username associated with these credentials.

    @type password: C{str}
    @ivar password: The password associated with these credentials.
    """

    def checkPassword(password):
        """Validate these credentials against the correct password.

        @param password: The correct, plaintext password against which to
            check.

        @return: a deferred which becomes, or a boolean indicating if the
            password matches.
        """


class IAnonymous(ICredentials):
    """I am an explicitly anonymous request for access.
    """


class CramMD5Credentials:
    """CRAM-MD5 challenge/response credentials (RFC 2195).

    The server issues a challenge; the client replies with
    'username hex(HMAC-MD5(password, challenge))'.
    """

    implements(IUsernameHashedPassword)

    # Class-level defaults; getChallenge/setResponse fill these in.
    challenge = ''
    response = ''

    def __init__(self, host=None):
        # host is embedded in the challenge's msg-id; may be None.
        self.host = host

    def getChallenge(self):
        # The challenge is generated once and then reused for this
        # credential instance.
        if self.challenge:
            return self.challenge
        # The data encoded in the first ready response contains an
        # presumptively arbitrary string of random digits, a timestamp, and
        # the fully-qualified primary host name of the server.  The syntax of
        # the unencoded form must correspond to that of an RFC 822 'msg-id'
        # [RFC822] as described in [POP3].
        #   -- RFC 2195
        # NOTE(review): random.randrange is not a cryptographically strong
        # source; presumably challenge unpredictability matters here --
        # consider a stronger RNG.
        r = random.randrange(0x7fffffff)
        t = time.time()
        # %d truncates the float timestamp to whole seconds.
        self.challenge = '<%d.%d@%s>' % (r, t, self.host)
        return self.challenge

    def setResponse(self, response):
        # Response format is "username digest" -- split on first whitespace.
        self.username, self.response = response.split(None, 1)

    def moreChallenges(self):
        # CRAM-MD5 is a single-round exchange.
        return False

    def checkPassword(self, password):
        # hmac.HMAC's default digest (MD5 on this Python) is what makes
        # this CRAM-*MD5*.
        verify = hmac.HMAC(password, self.challenge).hexdigest()
        # NOTE(review): '==' is not a constant-time comparison; a
        # timing-safe compare would be preferable.
        return verify == self.response


class UsernameHashedPassword:
    """A username plus an already-hashed password.

    checkPassword compares the stored hash directly against the argument,
    so the caller must pass a value hashed the same way.
    """

    implements(IUsernameHashedPassword)

    def __init__(self, username, hashed):
        self.username = username
        self.hashed = hashed

    def checkPassword(self, password):
        return self.hashed == password


class UsernamePassword:
    """A username plus a plaintext password."""

    implements(IUsernamePassword)

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def checkPassword(self, password):
        return self.password == password


class Anonymous:
    """Marker credential for anonymous access; carries no data."""

    implements(IAnonymous)


class ISSHPrivateKey(ICredentials):
    """I encapsulate an SSH public key to be checked against a users private
    key.

    @ivar username: Duh?

    @ivar algName: The algorithm name for the blob.

    @ivar blob: The public key blob as sent by the client.

    @ivar sigData: The data the signature was made from.

    @ivar signature: The signed data.  This is checked to verify that the user
        owns the private key.
    """


class SSHPrivateKey:
    """Plain data-holder implementing ISSHPrivateKey; no checking logic."""

    implements(ISSHPrivateKey)

    def __init__(self, username, algName, blob, sigData, signature):
        self.username = username
        self.algName = algName
        self.blob = blob
        self.sigData = sigData
        self.signature = signature


class IPluggableAuthenticationModules(ICredentials):
    """I encapsulate the authentication of a user via PAM (Pluggable
    Authentication Modules.  I use PyPAM (available from
    http://www.tummy.com/Software/PyPam/index.html).

    @ivar username: The username for the user being logged in.

    @ivar pamConversion: A function that is called with a list of tuples
        (message, messageType).  See the PAM documentation for the meaning
        of messageType.  The function returns a Deferred which will fire
        with a list of (response, 0), one for each message.  The 0 is
        currently unused, but is required by the PAM library.
    """


class PluggableAuthenticationModules:
    """Plain data-holder implementing IPluggableAuthenticationModules."""

    implements(IPluggableAuthenticationModules)

    def __init__(self, username, pamConversion):
        self.username = username
        self.pamConversion = pamConversion
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/cred/credentials.py
credentials.py
from __future__ import generators import os from zope.interface import implements, Interface from twisted.internet import defer from twisted.python import failure, log from twisted.cred import error, credentials try: from twisted.cred import pamauth except ImportError: # PyPAM is missing pamauth = None class ICredentialsChecker(Interface): """I check sub-interfaces of ICredentials. @cvar credentialInterfaces: A list of sub-interfaces of ICredentials which specifies which I may check. """ def requestAvatarId(credentials): """ @param credentials: something which implements one of the interfaces in self.credentialInterfaces. @return: a Deferred which will fire a string which identifies an avatar, an empty tuple to specify an authenticated anonymous user (provided as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin). Alternatively, return the result itself. """ # A note on anonymity - We do not want None as the value for anonymous # because it is too easy to accidentally return it. We do not want the # empty string, because it is too easy to mistype a password file. For # example, an .htpasswd file may contain the lines: ['hello:asdf', # 'world:asdf', 'goodbye', ':world']. This misconfiguration will have an # ill effect in any case, but accidentally granting anonymous access is a # worse failure mode than simply granting access to an untypeable # username. We do not want an instance of 'object', because that would # create potential problems with persistence. ANONYMOUS = () class AllowAnonymousAccess: implements(ICredentialsChecker) credentialInterfaces = credentials.IAnonymous, def requestAvatarId(self, credentials): return defer.succeed(ANONYMOUS) class InMemoryUsernamePasswordDatabaseDontUse: """An extremely simple credentials checker. This is only of use in one-off test programs or examples which don't want to focus too much on how credentials are verified. You really don't want to use this for anything else. It is, at best, a toy. 
If you need a simple credentials checker for a real application, see L{FilePasswordDB}. """ implements(ICredentialsChecker) credentialInterfaces = (credentials.IUsernamePassword, credentials.IUsernameHashedPassword) def __init__(self, **users): self.users = users def addUser(self, username, password): self.users[username] = password def _cbPasswordMatch(self, matched, username): if matched: return username else: return failure.Failure(error.UnauthorizedLogin()) def requestAvatarId(self, credentials): if credentials.username in self.users: return defer.maybeDeferred( credentials.checkPassword, self.users[credentials.username]).addCallback( self._cbPasswordMatch, str(credentials.username)) else: return defer.fail(error.UnauthorizedLogin()) class FilePasswordDB: """A file-based, text-based username/password database. Records in the datafile for this class are delimited by a particular string. The username appears in a fixed field of the columns delimited by this string, as does the password. Both fields are specifiable. If the passwords are not stored plaintext, a hash function must be supplied to convert plaintext passwords to the form stored on disk and this CredentialsChecker will only be able to check IUsernamePassword credentials. If the passwords are stored plaintext, IUsernameHashedPassword credentials will be checkable as well. """ implements(ICredentialsChecker) cache = False _credCache = None _cacheTimestamp = 0 def __init__(self, filename, delim=':', usernameField=0, passwordField=1, caseSensitive=True, hash=None, cache=False): """ @type filename: C{str} @param filename: The name of the file from which to read username and password information. @type delim: C{str} @param delim: The field delimiter used in the file. @type usernameField: C{int} @param usernameField: The index of the username after splitting a line on the delimiter. @type passwordField: C{int} @param passwordField: The index of the password after splitting a line on the delimiter. 
@type caseSensitive: C{bool} @param caseSensitive: If true, consider the case of the username when performing a lookup. Ignore it otherwise. @type hash: Three-argument callable or C{None} @param hash: A function used to transform the plaintext password received over the network to a format suitable for comparison against the version stored on disk. The arguments to the callable are the username, the network-supplied password, and the in-file version of the password. If the return value compares equal to the version stored on disk, the credentials are accepted. @type cache: C{bool} @param cache: If true, maintain an in-memory cache of the contents of the password file. On lookups, the mtime of the file will be checked, and the file will only be re-parsed if the mtime is newer than when the cache was generated. """ self.filename = filename self.delim = delim self.ufield = usernameField self.pfield = passwordField self.caseSensitive = caseSensitive self.hash = hash self.cache = cache if self.hash is None: # The passwords are stored plaintext. We can support both # plaintext and hashed passwords received over the network. self.credentialInterfaces = ( credentials.IUsernamePassword, credentials.IUsernameHashedPassword ) else: # The passwords are hashed on disk. We can support only # plaintext passwords received over the network. 
self.credentialInterfaces = ( credentials.IUsernamePassword, ) def __getstate__(self): d = dict(vars(self)) for k in '_credCache', '_cacheTimestamp': try: del d[k] except KeyError: pass return d def _cbPasswordMatch(self, matched, username): if matched: return username else: return failure.Failure(error.UnauthorizedLogin()) def _loadCredentials(self): try: f = file(self.filename) except: log.err() raise error.UnauthorizedLogin() else: for line in f: line = line.rstrip() parts = line.split(self.delim) if self.ufield >= len(parts) or self.pfield >= len(parts): continue if self.caseSensitive: yield parts[self.ufield], parts[self.pfield] else: yield parts[self.ufield].lower(), parts[self.pfield] def getUser(self, username): if not self.caseSensitive: username = username.lower() if self.cache: if self._credCache is None or os.path.getmtime(self.filename) > self._cacheTimestamp: self._cacheTimestamp = os.path.getmtime(self.filename) self._credCache = dict(self._loadCredentials()) return username, self._credCache[username] else: for u, p in self._loadCredentials(): if u == username: return u, p raise KeyError(username) def requestAvatarId(self, c): try: u, p = self.getUser(c.username) except KeyError: return defer.fail(error.UnauthorizedLogin()) else: up = credentials.IUsernamePassword(c, None) if self.hash: if up is not None: h = self.hash(up.username, up.password, p) if h == p: return defer.succeed(u) return defer.fail(error.UnauthorizedLogin()) else: return defer.maybeDeferred(c.checkPassword, p ).addCallback(self._cbPasswordMatch, u) class PluggableAuthenticationModulesChecker: implements(ICredentialsChecker) credentialInterfaces = credentials.IPluggableAuthenticationModules, service = 'Twisted' def requestAvatarId(self, credentials): if not pamauth: return defer.fail(error.UnauthorizedLogin()) d = pamauth.pamAuthenticate(self.service, credentials.username, credentials.pamConversion) d.addCallback(lambda x: credentials.username) return d # For backwards compatibility 
# Allow access as the old name. OnDiskUsernamePasswordDatabase = FilePasswordDB
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/cred/checkers.py
checkers.py
from zope.interface import Interface, Attribute class VFSError(Exception): """Base class for all VFS errors.""" class PermissionError(VFSError): """The user does not have permission to perform the requested operation.""" class NotFoundError(VFSError): """The file or directory does not exist.""" class AlreadyExistsError(VFSError): """The file or directory already exists.""" class IFileSystemNode(Interface): parent = Attribute( """parent node""" ) def getMetadata(): """ returns a map of arbitrary metadata. As an example, here's what SFTP expects (but doesn't require): - C{'size'}: size of file in bytes - C{'uid'}: owner of the file - C{'gid'}: group owner of the file - C{'permissions'}: file permissions - C{'atime'}: last time the file was accessed - C{'mtime'}: last time the file was modified - C{'nlink'}: number of links to the file Protocols that need metadata should handle the case when a particular value isn't available as gracefully as possible. """ # XXX: There should be a setMetadata, probably taking a map of the same form # returned by getMetadata (although obviously keys like 'nlink' aren't # settable. Something like: # def setMetadata(metadata): # """Sets metadata for a node. # # Unrecognised keys will be ignored (but invalid values for a recognised # key may cause an error to be raised). # # Typical keys are 'permissions', 'uid', 'gid', 'atime' and 'mtime'. # # @param metadata: a dict, like the one getMetadata returns. # """ # osfs.OSNode implements this; other backends should be similarly updated. # -- spiv, 2006-06-02 def remove(): """ Removes this node. An error is raised if the node is a directory and is not empty. """ def rename(newName): """ Renames this node to newName. newName can be in a different directory. If the destination is an existing directory, an error will be raised. """ class IFileSystemLeaf(IFileSystemNode): def open(flags): """ Opens the file with flags. Flags should be a bitmask based on the os.O_* flags. 
""" def close(): """closes this node""" def readChunk(offset, length): """ Leaf should have been previously opened with suitable flags. Reads length bytes or until the end of file from this leaf from the given offset. """ def writeChunk(offset, data): """ Leaf should have been previously opened with suitable flags. Writes data to leaf from the given offset. """ class IFileSystemContainer(IFileSystemNode): def children(): """ returns a list of 2 element tuples [ ( path, nodeObject ) ] """ def child(childName): """ Returns a node object for child childName @raises NotFoundError if no child with that name exists. """ def createDirectory(childName): """ Creates a new folder named childName under this folder. An error is raised if the folder already exists. """ def createFile(childName, exclusive=True): """ Creates a new file named childName under this folder. If exclusive is True (the default), an AlreadyExistsError is raised if the file already exists. """ def exists(childName): """ returns True if container has a child childName, False otherwise """
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/vfs/ivfs.py
ivfs.py
from zope.interface import Interface, Attribute, implements


def getAbsoluteSegments(path, cwd='/'):
    """
    Normalize a path into a list of absolute path segments.

    @param path: either a string or a list of string segments which
        specifies the desired path; may be relative to C{cwd}.
    @param cwd: optional string specifying the current working directory.

    @return: a list of string segments which most succinctly describe how
        to get to C{path} from the root.
    """
    if not isinstance(path, list):
        segments = path.split("/")
    else:
        segments = path

    # A leading empty segment means the path was absolute ("/a/b" splits
    # into ["", "a", "b"]); otherwise the path is relative and is anchored
    # at the current working directory.
    if len(segments) and segments[0] == "":
        segments = segments[1:]
    else:
        segments = cwd.split("/") + segments

    result = []
    for segment in segments:
        if segment == "..":
            # Pop one level; ".." at (or above) the root stays at the root.
            # Simplified from the original "if len(result) > 1" special
            # case: slicing an empty or single-element list already yields
            # [], so the branch was redundant.
            result = result[:-1]
        elif segment not in ("", "."):
            # Empty segments (from "//") and "." are no-ops.
            result.append(segment)
    return result


def fetch(root, path, cwd='/'):
    """
    Return the node described by C{path}, relative to C{cwd}.

    @param root: IFileSystemContainer which represents the root node of the
        filesystem.
    @param path: either a string or a list of string segments which
        specifies the desired path; may be relative to C{cwd}.
    @param cwd: optional string specifying the current working directory.
    """
    currNode = root
    for segment in getAbsoluteSegments(path, cwd):
        currNode = currNode.child(segment)
    return currNode


def basename(path, cwd='/'):
    """Return the final segment of C{path}, or '' for the root."""
    segments = getAbsoluteSegments(path, cwd)
    if segments:
        return segments[-1]
    return ''


def dirname(path, cwd='/'):
    """Return the absolute path of the directory containing C{path}."""
    return "/" + "/".join(getAbsoluteSegments(path, cwd)[:-1])


def getRoot(node):
    """Walk up the parent chain to the root (the node that is its own
    parent)."""
    while node.parent is not node:
        node = node.parent
    return node


def getSegments(node):
    """Return the list of names leading from the root down to C{node}."""
    ret = []
    while node.parent is not node:
        ret.append(node.name)
        node = node.parent
    ret.reverse()
    return ret


class IFileSystem(Interface):

    root = Attribute("root IFileSystemNode of the IFileSystem")
    pathToCWD = Attribute("path to current working directory")

    def absPath(path):
        """
        returns a normalized absolutized version of the pathname path
        """

    def splitPath(path):
        """
        returns a normalized absolutized version of the pathname path
        split on the filesystem's directory seperator
        """

    def joinPath(tail, head):
        """
        joins the two paths, tail and head
        """

    def dirname(path):
        """
        returns the directory name of the container for path
        """

    def basename(path):
        """
        returns the base name of pathname path
        """

    def fetch(path):
        """
        returns a node object representing the file with pathname path
        """

    def _getImplicitChildren(dir):
        """
        returns implicit children for a given dir
        this is placed in the filesystem so that the same
        directory can have different implicit children depending
        on what sort of filesystem it has been placed in

        (This may not be the best idea)

        returns a list of 2 element tuples, C{[ ( path, nodeObject ) ]}, e.g.::

            [ ( ".", dir ), ( "..", dir.parent ) ]
        """


class FileSystem:
    """
    Wraps unix-like VFS backends, in which directory separator is '/',
    root's path is '/', and all directories have '.' and '..'.

    Effectively, this is just a convenience wrapper around the other
    functions in this module which remembers the root node and the
    current working directory.
    """

    implements(IFileSystem)

    def __init__(self, root, pathToCWD="/"):
        self.root = root
        # Give the root node a backreference to this filesystem.
        self.root.filesystem = self
        self.pathToCWD = pathToCWD

    def absPath(self, path):
        return "/" + "/".join(self.splitPath(path))

    def splitPath(self, path):
        return getAbsoluteSegments(path, self.pathToCWD)

    def joinPath(self, tail, head):
        if tail == "/":
            return tail + head
        else:
            return tail + "/" + head

    def dirname(self, path):
        return dirname(path, self.pathToCWD)

    def basename(self, path):
        return basename(path, self.pathToCWD)

    def fetch(self, pathToFile="."):
        return fetch(self.root, pathToFile, self.pathToCWD)

    def _getImplicitChildren(self, dir):
        return [(".", dir), ("..", dir.parent)]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/vfs/pathutils.py
pathutils.py
import os, time from cStringIO import StringIO from twisted.protocols.ftp import IFTPShell, IReadFile, IWriteFile from twisted.internet import defer from twisted.internet.interfaces import IConsumer from twisted.web2.stream import StreamProducer, IByteStream from zope.interface import implements from twisted.python import components from twisted.vfs import ivfs, pathutils # XXX: Import this to make sure the adapter registration has happened. from twisted.vfs.adapters import stream class FileSystemToIFTPShellAdaptor: implements(IFTPShell) def __init__(self, filesystem): self.filesystem = filesystem def _makePath(segments): return '/'.join(segments) _makePath = staticmethod(_makePath) def makeDirectory(self, path): dirname, basename = path[:-1], path[-1] parent = self.filesystem.fetch(self._makePath(dirname)) try: parent.createDirectory(basename) except: return defer.fail() else: return defer.succeed(None) def removeDirectory(self, path): try: node = self.filesystem.fetch(self._makePath(path)) if not ivfs.IFileSystemContainer.providedBy(node): raise IOError("removeDirectory can only remove directories.") node.remove() except: return defer.fail() else: return defer.succeed(None) def removeFile(self, path): try: node = self.filesystem.fetch(self._makePath(path)) if ivfs.IFileSystemContainer.providedBy(node): raise IOError("removeFile cannot remove directories.") node.remove() except: return defer.fail() else: return defer.succeed(None) def list(self, path, keys=()): node = self.filesystem.fetch(self._makePath(path)) result = [] for childName, childNode in node.children(): attrs = self._attrify(childNode) result.append((childName, [attrs[attrName] for attrName in keys])) return defer.succeed(result) # XXX - this should probably go in a helper somewhere def _attrify(self, node): meta = node.getMetadata() permissions = meta.get('permissions', None) directory = ivfs.IFileSystemContainer.providedBy(node) if permissions is None: # WTF if 
ivfs.IFileSystemContainer.providedBy(node): permissions = 16877 else: permissions = 33188 return {'permissions': permissions, 'directory': directory, 'size': meta.get('size', 0), 'owner': str(meta.get('uid', 'user')), 'group': str(meta.get('gid', 'user')), 'modified': meta.get('mtime', time.time()), 'hardlinks': meta.get('nlink', 1) } def access(self, path): # XXX: stubbed out to always succeed. return defer.succeed(None) def openForReading(self, segs): node = self.filesystem.fetch(self._makePath(segs)) frvfs = FTPReadVFS(node) return defer.succeed(frvfs) def openForWriting(self, segs): # XXX: this method is way too ugly dirname, basename = segs[:-1], segs[-1] node = self.filesystem.fetch( self._makePath(dirname)).createFile(basename) fwvfs = FTPWriteVFS(node) return defer.succeed(fwvfs) def stat(self, path, keys=()): node = self.filesystem.fetch(self._makePath(path)) attrs = self._attrify(node) return defer.succeed([attrs[attrName] for attrName in keys]) def rename(self, from_, to): assert len(to) == 1, ( "renaming into other directories isn't supported yet.") try: self.filesystem.fetch(self._makePath(from_)).rename(to[0]) except: return defer.fail() else: return defer.succeed(None) class FTPReadVFS(object): implements(IReadFile) def __init__(self, node): self.node = node def send(self, consumer): return StreamProducer(IByteStream(self.node)).beginProducing(consumer) class FTPWriteVFS(object): implements(IWriteFile) def __init__(self, node): self.node = node def receive(self): return defer.succeed(IConsumer(self.node)) class _FileToConsumerAdapter: implements(IConsumer) def __init__(self, original): self.original = original self.offset = 0 def write(self, bytes): self.original.writeChunk(self.offset, bytes) self.offset += len(bytes) components.registerAdapter(FileSystemToIFTPShellAdaptor, pathutils.IFileSystem, IFTPShell) components.registerAdapter(_FileToConsumerAdapter, ivfs.IFileSystemLeaf, IConsumer)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/vfs/adapters/ftp.py
ftp.py
import os, time import zope.interface from twisted.python import components, log, util from twisted.conch.avatar import ConchUser from twisted.conch.interfaces import ISession, ISFTPFile from twisted.conch.ssh.filetransfer import ISFTPServer, FileTransferServer from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, FXF_CREAT, FXF_TRUNC, FXF_EXCL from twisted.conch.ssh.filetransfer import SFTPError from twisted.conch.ssh.filetransfer import FX_PERMISSION_DENIED, FX_FAILURE from twisted.conch.ssh.filetransfer import FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED from twisted.conch.ssh.filetransfer import FX_NOT_A_DIRECTORY from twisted.conch.ssh.filetransfer import FX_FILE_IS_A_DIRECTORY from twisted.conch.ssh.filetransfer import FX_FILE_ALREADY_EXISTS from twisted.conch.ssh import session from twisted.conch.ls import lsLine from twisted.internet import defer from twisted.vfs import ivfs, pathutils def translateErrors(function): """Decorator that catches VFSErrors and re-raises them as the corresponding SFTPErrors.""" def f(*args, **kwargs): try: result = function(*args, **kwargs) if isinstance(result, defer.Deferred): result.addErrback(_ebtranslateErrors) return result except ivfs.PermissionError, e: raise SFTPError(FX_PERMISSION_DENIED, str(e)) except ivfs.NotFoundError, e: raise SFTPError(FX_NO_SUCH_FILE, e.args[0]) except ivfs.AlreadyExistsError, e: raise SFTPError(FX_FILE_ALREADY_EXISTS, e.args[0]) except ivfs.VFSError, e: raise SFTPError(FX_FAILURE, str(e)) except NotImplementedError, e: raise SFTPError(FX_OP_UNSUPPORTED, str(e)) util.mergeFunctionMetadata(function, f) return f def _ebtranslateErrors(failure): """This just re-raises the failure so that the translateErrors decorator around this errback can intercept it if it wants to.""" failure.raiseException() _ebtranslateErrors = translateErrors(_ebtranslateErrors) class AdaptFileSystemUserToISFTP: zope.interface.implements(ISFTPServer) def __init__(self, avatar): self.avatar = avatar self.openFiles 
= {} self.openDirs = {} self.filesystem = avatar.filesystem def _setAttrs(self, path, attrs): """ NOTE: this function assumes it runs as the logged-in user: i.e. under _runAsUser() """ if attrs.has_key("uid") and attrs.has_key("gid"): os.lchown(path, attrs["uid"], attrs["gid"]) if attrs.has_key("permissions"): os.chmod(path, attrs["permissions"]) if attrs.has_key("atime") and attrs.has_key("mtime"): os.utime(path, (attrs["atime"]. attrs["mtime"])) def gotVersion(self, otherVersion, extData): return {} def openFile(self, filename, flags, attrs): createPlease = False exclusive = False openFlags = 0 if flags & FXF_READ == FXF_READ and flags & FXF_WRITE == 0: openFlags = os.O_RDONLY if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == 0: createPlease = True openFlags = os.O_WRONLY if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == FXF_READ: createPlease = True openFlags = os.O_RDWR if flags & FXF_APPEND == FXF_APPEND: createPlease = True openFlags |= os.O_APPEND if flags & FXF_CREAT == FXF_CREAT: createPlease = True openFlags |= os.O_CREAT if flags & FXF_TRUNC == FXF_TRUNC: openFlags |= os.O_TRUNC if flags & FXF_EXCL == FXF_EXCL: exclusive = True # XXX Once we change readChunk/writeChunk we'll have to wrap # child in something that implements those. 
pathSegments = self.filesystem.splitPath(filename) dirname, basename = pathSegments[:-1], pathSegments[-1] parentNode = self.filesystem.fetch('/'.join(dirname)) if createPlease: child = parentNode.createFile(basename, exclusive) elif parentNode.exists(basename): child = parentNode.child(basename) if not ivfs.IFileSystemLeaf.providedBy(child): raise SFTPError(FX_FILE_IS_A_DIRECTORY, filename) else: raise SFTPError(FX_NO_SUCH_FILE, filename) child.open(openFlags) return AdaptFileSystemLeafToISFTPFile(child) openFile = translateErrors(openFile) def removeFile(self, filename): node = self.filesystem.fetch(filename) if not ivfs.IFileSystemLeaf.providedBy(node): raise SFTPError(FX_FILE_IS_A_DIRECTORY, filename) node.remove() removeFile = translateErrors(removeFile) def renameFile(self, oldpath, newpath): try: targetNode = self.filesystem.fetch(newpath) except (ivfs.NotFoundError, KeyError): # Something with the new name already exists. pass else: if ivfs.IFileSystemContainer(targetNode, None): # The target node is a container. We assume the caller means to # move the source node into the container rather than replace # it, and adjust newpath accordingly. 
newpath = self.filesystem.joinPath( newpath, self.filesystem.basename(oldpath)) old = self.filesystem.fetch(oldpath) old.rename(newpath) renameFile = translateErrors(renameFile) def makeDirectory(self, path, attrs): dirname = self.filesystem.dirname(path) basename = self.filesystem.basename(path) return self.filesystem.fetch(dirname).createDirectory(basename) makeDirectory = translateErrors(makeDirectory) def removeDirectory(self, path): self.filesystem.fetch(path).remove() removeDirectory = translateErrors(removeDirectory) def openDirectory(self, path): directory = self.filesystem.fetch(path) if not ivfs.IFileSystemContainer.providedBy(directory): raise SFTPError(FX_NOT_A_DIRECTORY, path) class DirList: def __init__(self, iter): self.iter = iter def __iter__(self): return self def next(self): (name, attrs) = self.iter.next() class st: pass s = st() s.st_mode = attrs["permissions"] s.st_uid = attrs["uid"] s.st_gid = attrs["gid"] s.st_size = attrs["size"] s.st_mtime = attrs["mtime"] s.st_nlink = attrs["nlink"] return ( name, lsLine(name, s), attrs ) def close(self): return return DirList( iter([(name, _attrify(file)) for (name, file) in self.filesystem.fetch(path).children()])) openDirectory = translateErrors(openDirectory) def getAttrs(self, path, followLinks): node = self.filesystem.fetch(path) return _attrify(node) getAttrs = translateErrors(getAttrs) def setAttrs(self, path, attrs): node = self.filesystem.fetch(path) try: # XXX: setMetadata isn't yet part of the IFileSystemNode interface # (but it should be). So we catch AttributeError, and translate it # to NotImplementedError because it's slightly nicer for clients. 
node.setMetadata(attrs) except AttributeError: raise NotImplementedError("NO SETATTR") setAttrs = translateErrors(setAttrs) def readLink(self, path): raise NotImplementedError("NO LINK") def makeLink(self, linkPath, targetPath): raise NotImplementedError("NO LINK") def realPath(self, path): return self.filesystem.absPath(path) class AdaptFileSystemLeafToISFTPFile: zope.interface.implements(ISFTPFile) def __init__(self, original): self.original = original def close(self): return self.original.close() def readChunk(self, offset, length): return self.original.readChunk(offset, length) def writeChunk(self, offset, data): return self.original.writeChunk(offset, data) def getAttrs(self): return _attrify(self.original) def setAttrs(self, attrs): try: # XXX: setMetadata isn't yet part of the IFileSystemNode interface # (but it should be). So we catch AttributeError, and translate it # to NotImplementedError because it's slightly nicer for clients. self.original.setMetadata(attrs) except AttributeError: raise NotImplementedError("NO SETATTR") class VFSConchSession: zope.interface.implements(ISession) def __init__(self, avatar): self.avatar = avatar def openShell(self, proto): self.avatar.conn.transport.transport.loseConnection() def getPty(self, term, windowSize, modes): pass def closed(self): log.msg('shell closed') class VFSConchUser(ConchUser): def __init__(self, username, root): ConchUser.__init__(self) self.username = username self.filesystem = pathutils.FileSystem(root) self.listeners = {} # dict mapping (interface, port) -> listener self.channelLookup.update( {"session": session.SSHSession}) self.subsystemLookup.update( {"sftp": FileTransferServer}) def logout(self): # XXX - this may be broken log.msg('avatar %s logging out (%i)' % (self.username, len(self.listeners))) def _attrify(node): meta = node.getMetadata() permissions = meta.get('permissions', None) if permissions is None: if ivfs.IFileSystemContainer.providedBy(node): permissions = 16877 else: permissions = 
33188 return {'permissions': permissions, 'size': meta.get('size', 0), 'uid': meta.get('uid', 0), 'gid': meta.get('gid', 0), 'atime': meta.get('atime', time.time()), 'mtime': meta.get('mtime', time.time()), 'nlink': meta.get('nlink', 1) } components.registerAdapter(AdaptFileSystemUserToISFTP, VFSConchUser, ISFTPServer) components.registerAdapter(VFSConchSession, VFSConchUser, ISession)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/vfs/adapters/sftp.py
sftp.py
import os import os.path import errno from twisted.vfs import ivfs from zope.interface import implements __all__ = ['OSDirectory', 'OSFile', 'RunWithPrivSep', 'SetUIDProxy', 'ForceCreateModeProxy'] class OSNode: implements(ivfs.IFileSystemNode) def __init__(self, realPath, name=None, parent=None): self.name = name self.realPath = realPath if not parent: self.parent = self else: self.parent = parent def getMetadata(self): s = os.stat(self.realPath) return { "size" : s.st_size, "uid" : s.st_uid, "gid" : s.st_gid, "permissions" : s.st_mode, "atime" : s.st_atime, "mtime" : s.st_mtime, "nlink" : s.st_nlink } def setMetadata(self, attrs): if 'uid' in attrs and 'gid' in attrs: os.chown(self.realPath, attrs["uid"], attrs["gid"]) if 'permissions' in attrs: os.chmod(self.realPath, attrs["permissions"]) if 'atime' in attrs or 'mtime' in attrs: if None in (attrs.get("atime"), attrs.get("mtime")): st = os.stat(self.realPath) atime = attrs.get("atime", st.st_atime) mtime = attrs.get("mtime", st.st_mtime) else: atime = attrs['atime'] mtime = attrs['mtime'] os.utime(self.realPath, (atime, mtime)) def rename(self, newName): from twisted.vfs import pathutils newParent = pathutils.fetch(pathutils.getRoot(self), pathutils.dirname(newName)) # XXX spiv 2005-12-15 # assumes newParent is also an OSDirectory. Probably should politely # decline (rather than break with an undefined error) if it's not. newPath = os.path.join(newParent.realPath, pathutils.basename(newName)) os.rename(self.realPath, newPath) self.realPath = newPath self.name = newName self.parent = newParent def remove(self): raise NotImplementedError("Override me.") class OSFile(OSNode): implements(ivfs.IFileSystemLeaf) def create(self, exclusive=True): flags = os.O_WRONLY | os.O_CREAT if exclusive: flags |= os.O_EXCL try: fd = os.open(self.realPath, flags) except OSError, e: if e.errno == errno.EEXIST: raise ivfs.AlreadyExistsError(self.name) # Something unexpected happened. Let it propagate. 
raise f = os.fdopen(fd, "w") f.close() def open(self, flags): self.fd = os.open(self.realPath, flags) return self def readChunk(self, offset, length): os.lseek(self.fd, offset, 0) return os.read(self.fd, length) def writeChunk(self, offset, data): os.lseek(self.fd, offset, 0) return os.write(self.fd, data) def close(self): os.close(self.fd) def remove(self): os.remove(self.realPath) class OSDirectory(OSNode): implements(ivfs.IFileSystemContainer) def children(self): """See IFileSystemContainer.""" return ([('.', self), ('..', self.parent)] + [(childName, self.child(childName)) for childName in os.listdir(self.realPath)]) def child(self, childName): """See IFileSystemContainer.""" fullPath = os.path.join(self.realPath, childName) if not os.path.exists(fullPath): raise ivfs.NotFoundError(childName) if os.path.isdir(fullPath): nodeFactory = self.childDirFactory() else: nodeFactory = self.childFileFactory() return nodeFactory(fullPath, childName, self) def childDirFactory(cls): """Returns a callable that will be used to construct instances for subdirectories of this OSDirectory. The callable should accept the same interface as OSDirectory.__init__; i.e. take three args (path, name, parent), and return an IFileSystemContainer. By default, this will be the class of the child's parent. Override this method if you want a different behaviour. """ # If you subclass OSDirectory, this will ensure children of OSDirectory # are also your subclass. return cls childDirFactory = classmethod(childDirFactory) def childFileFactory(self): """Returns a callable that will be used to construct instances for files in this OSDirectory. The callable should accept the same interface as OSFile.__init__; i.e. take three args (path, name, parent), and return an IFileSystemLeaf. By default, this will be OSFile. Override this method if you want a different behaviour. 
""" return OSFile def createDirectory(self, childName): """See IFileSystemContainer.""" child = self.childDirFactory()(os.path.join(self.realPath, childName), childName, self) child.create() return child def createFile(self, childName, exclusive=True): """See IFileSystemContainer.""" child = self.childFileFactory()(os.path.join(self.realPath, childName), childName, self) child.create(exclusive=exclusive) return child def create(self): os.mkdir(self.realPath) def remove(self): os.rmdir(self.realPath) def exists(self, childName): """See IFileSystemContainer.""" return os.path.exists(os.path.join(self.realPath, childName)) class WrapFunc: def __init__(self, func, wrapper): self.func = func self.wrapper = wrapper def __call__(self, *args, **kwargs): return self.wrapper(self.func(*args, **kwargs)) class _OSNodeProxy: def __init__(self, target): self.target = target def __getattr__(self, name): attr = getattr(self.target, name) if name in ['child', 'createDirectory', 'createFile']: attr = WrapFunc(attr, self._wrapChild) return attr def _wrapChild(self, child): return _OSNodeProxy(child) class RunWithPrivSep: def __init__(self, func, euid, egid): self.func = func self.euid = euid self.egid = egid def __call__(self, *args, **kwargs): cureuid = os.geteuid() curegid = os.getegid() os.setegid(0) os.seteuid(0) os.setegid(self.egid) os.seteuid(self.euid) try: result = self.func(*args, **kwargs) finally: os.setegid(0) os.seteuid(0) os.setegid(cureuid) os.seteuid(curegid) return result class SetUIDProxy(_OSNodeProxy): def __init__(self, target, euid, egid): self.target = target self.euid = euid self.egid = egid def __getattr__(self, attrName): attr = _OSNodeProxy.__getattr__(self, attrName) if callable(attr): return RunWithPrivSep(attr, self.euid, self.egid) return attr def _wrapChild(self, child): return SetUIDProxy(child, self.euid, self.egid) def getMode(mode): if type(mode) is str: mode = int(mode, 8) assert type(mode) is int, 'invalid mode: %s' % mode return mode class 
ForceCreateModeProxy(_OSNodeProxy): def __init__(self, target, dirmode=None, filemode=None): self.target = target self.dirmode = None self.filemode = None if dirmode != None: self.dirmode = getMode(dirmode) if filemode != None: self.filemode = getMode(filemode) def createDirectory(self, *args, **kwargs): child = self.target.createDirectory(*args, **kwargs) if self.dirmode != None: os.chmod(child.realPath, self.dirmode) return self._wrapChild(child) def createFile(self, *args, **kwargs): child = self.target.createFile(*args, **kwargs) if self.filemode != None: os.chmod(child.realPath, self.filemode) return self._wrapChild(child) def _wrapChild(self, child): return ForceCreateModeProxy(child, self.dirmode, self.filemode)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/vfs/backends/osfs.py
osfs.py
import cStringIO from zope.interface import implements from twisted.vfs import ivfs, pathutils __all__ = ['FakeDirectory', 'FakeFile'] class _FakeNode: """Base class. Don't instantiate directly.""" def create(self): self.parent._children[self.name] = self def remove(self): del self.parent._children[self.name] def rename(self, newName): newParent = pathutils.fetch(pathutils.getRoot(self), pathutils.dirname(newName)) if newParent.exists(pathutils.basename(newName)): raise ivfs.VFSError( "Cannot rename over the top of an existing directory") del self.parent._children[self.name] self.name = pathutils.basename(pathutils.basename(newName)) newParent._children[self.name] = self self.parent = newParent class FakeDirectory(_FakeNode): """In-memory directory.""" implements(ivfs.IFileSystemContainer) def __init__(self, name=None, parent=None, children=None): self.name = name children = children or {} if not parent: self.parent = self else: self.parent = parent self._children = children def children( self ) : implicit = [('.', self), ('..', self.parent)] others = [(childName, self.child(childName)) for childName in self._children.keys() ] return implicit + others def child(self, childName): try: return self._children[childName] except KeyError: raise ivfs.NotFoundError(childName) def getMetadata(self): return { } def createFile(self, childName, exclusive=False): if exclusive and self.exists(childName): raise ivfs.AlreadyExistsError(childName) child = FakeFile(childName, self) child.create() return child def createDirectory(self, childName): if self.exists(childName): raise ivfs.AlreadyExistsError(childName) child = FakeDirectory(childName, self) child.create() return child def exists(self, childName): return self._children.has_key(childName) class FakeFile(_FakeNode): """In-memory file.""" implements(ivfs.IFileSystemLeaf) def __init__(self, name=None, parent=None, data=''): self.data = cStringIO.StringIO() self.data.write(data) self.parent = parent self.name = name def 
open(self, flags): return self def getMetadata(self): size = len(self.data.getvalue()) self.data.seek(0) return { 'size': size } def readChunk(self, offset, length): self.data.seek(offset) return self.data.read(length) def writeChunk(self, offset, data): self.data.seek(offset) self.data.write(data) def close(self): pass def children(self): print "this might break and if it does we should fix the caller" return []
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/vfs/backends/inmem.py
inmem.py
# # Author: Clark Evans ([email protected]) # """ flow.stage Various stages for manipulating data flows, in particular, those stages which take more than one input stages or alternative input, such as a callback. """ from base import * from wrap import wrap from twisted.python.failure import Failure class Map(Stage): """ flow equivalent to map: Map(function, stage, ... ) Apply a function to every item yielded and yield the results. If additional stages are passed, the function must take that many arguments and is applied to the items of all lists in parallel. If a list is shorter than another, it is assumed to be extended with None items. If the function is None, the identity function is assumed; if there are multiple list arguments, Map stage returns a sequence consisting of tuples containing the corresponding items from all lists. For example:: def fn(val): return val + 10 source = flow.Map(fn,range(4)) printFlow(source) """ def __init__(self, func, stage, *stages): Stage.__init__(self) self.func = func self._stage = [wrap(stage)] for stage in stages: self._stage.append(wrap(stage)) self._index = 0 def _yield(self): if self.results or self.stop or self.failure: return if not self._index: self._curr = [] self._done = True while self._index < len(self._stage): idx = self._index curr = self._stage[idx] instruction = curr._yield() if instruction: return instruction if curr.results: self._curr.append(curr.results.pop(0)) self._index += 1 self._done = False continue if curr.stop: self._curr.append(None) self._index += 1 continue if curr.failure: self.failure = curr.failure return raise AssertionError("flow.Map ; no results, stop or failure?") if self._done: self.stop = 1 return curr = tuple(self._curr) if self.func: try: curr = self.func(*curr) except Failure, fail: self.failure = fail return except: self.failure = Failure() return self.results.append(curr) self._index = 0 class Zip(Map): """ Zips two or more stages into a stream of N tuples For example:: source = 
flow.Zip([1,flow.Cooperate(),2,3],["one","two"]) printFlow(source) """ def __init__(self, *stages): Map.__init__(self, None, stages[0], *stages[1:]) class Concurrent(Stage): """ Executes stages concurrently This stage allows two or more stages (branches) to be executed at the same time. It returns each stage as it becomes available. This can be used if you have N callbacks, and you want to yield and wait for the first available one that produces results. Once a stage is retuned, its next() method should be used to extract the value for the stage. """ class Instruction(CallLater): def __init__(self, inst): self.inst = inst def callLater(self, callable): for inst in self.inst: inst.callLater(callable) def __init__(self, *stages): Stage.__init__(self) self._stages = [] for stage in stages: self._stages.append(wrap(stage)) def _yield(self): if self.results or self.stop or self.failure: return stages = self._stages later = [] exit = None while stages: if stages[0] is exit: if self.results: return break curr = stages.pop(0) instruction = curr._yield() if curr.results: self.results.append(curr) if curr.failure: self.failure = curr.failure return if curr.stop: exit = None if self.results: return continue stages.append(curr) if not exit: exit = curr if instruction: if isinstance(instruction, CallLater): if instruction not in later: later.append(instruction) continue raise Unsupported(instruction) if later: return Concurrent.Instruction(later) self.stop = True class Merge(Stage): """ Merges two or more Stages results into a single stream For example:: source = flow.Zip([1,flow.Cooperate(),2,3],["one","two"]) printFlow(source) """ def __init__(self, *stages): Stage.__init__(self) self.concurrent = Concurrent(*stages) def _yield(self): if self.results or self.stop or self.failure: return instruction = self.concurrent._yield() if instruction: return instruction for stage in self.concurrent.results: self.results.extend(stage.results) stage.results = [] self.concurrent.results = 
[] if self.concurrent.stop: self.stop = True self.failure = self.concurrent.failure class Callback(Stage): """ Converts a single-thread push interface into a pull interface. Once this stage is constructed, its result, errback, and finish member variables may be called by a producer. The results of which can be obtained by yielding the Callback and then calling next(). For example:: source = flow.Callback() reactor.callLater(0, lambda: source.result("one")) reactor.callLater(.5, lambda: source.result("two")) reactor.callLater(1, lambda: source.finish()) printFlow(source) """ # TODO: Potentially rename this 'Consumer' and make it # comply with protocols.IConsumer # TODO: Make the inverse stage, which is an IProducer class Instruction(CallLater): def __init__(self): self.flow = lambda: True def callLater(self, callable): self.flow = callable def __init__(self, *trap): Stage.__init__(self, *trap) self._finished = False self._cooperate = Callback.Instruction() def result(self,result): """ called by the producer to indicate a successful result """ self.results.append(result) self._cooperate.flow() def finish(self): """ called by producer to indicate successful stream completion """ assert not self.failure, "failed streams should not be finished" self._finished = True self._cooperate.flow() def errback(self, fail): """ called by the producer in case of Failure """ self.failure = fail self._cooperate.flow() def _yield(self): if self.results or self.stop or self.failure: return if not self.results: if self._finished: self.stop = True return return self._cooperate __call__ = result
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/flow/stage.py
stage.py
#
# Author: Clark Evans ([email protected])
#
"""flow -- asynchronous data flows using generators

This module provides a mechanism for using async data flows through the
use of generators.  The basic idea of flow is that when ever you require
data from a producer, you yield the producer.  If the producer is ready,
then you can call producer.next() to fetch the incoming data.  Otherwise,
the underlying controller will suspend the operation to try again later.

For example, here is a simple 'printer' which consumes items from its
source by printing them.  Note that to get a new item, it first yields
the data source and then calls source.next()::

    from __future__ import generators
    from twisted.flow import flow
    from twisted.internet import reactor, defer

    def printer(source):
        source = flow.wrap(source)
        while True:
            yield source
            print source.next()

    someFlowSource = ["one", flow.Cooperate(1), "two"]

    d = flow.Deferred(printer(someFlowSource))
    d.addCallback(lambda _: reactor.stop())
    reactor.run()

In the example above, there are three objects imported from the flow
module::

   - flow.wrap converts many data sources such as lists, generators, and
     deferreds, into a special instruction object, a Stage.  In this
     case, a simple list is wrapped.

   - flow.Deferred is a flow Controller which executes the stage passed
     to it, aggregating all results into a list which is passed to the
     deferred's callback.  In this case, the result list is empty, but
     the callback is used to stop the reactor after the printing has
     finished.

   - flow.Cooperate is a special instruction object which is used by the
     flow Controller.  In this case, the flow pauses for one second
     between "one" and "two".

Most classes in the flow module are an Instruction, either a CallLater
or a Stage.  A Stage instruction is used to wrap various sorts of
producers, anything from a simple string to Callback functions.  Some
stages can be constructed directly, such as Zip, Concurrent, Merge,
Callback, or Threaded.  But in most cases, in particular _String, _List,
_Iterable, and _Deferred, stage construction is handled through the wrap
function.  Stages can yield other stages to build a processing chain,
results which are returned to the previous stage, or a CallLater
instruction which causes the whole operation to be suspended.

Typically, the CallLater instructions are passed up the call stack till
the top level, or Controller.  The controller then typically returns
control, but registers itself to be called later.  Once called again,
the controller sets up the call stack and resumes the top level
generator.  There is a special CallLater, Cooperate, which simply
resumes the chain of stages at a later time.  Some stages, Callback,
_Deferred, and Threaded have their own special CallLater which handles
the process of resuming flow for their specific case.

The inheritance hierarchy defined here looks like this::

    Instruction
        CallLater
            Cooperate
        Stage
            # private stages (use flow.wrap)
            _String
            _List
            _Iterable
            _Deferred
            # public stages
            Map
                Zip
            Concurrent
                Merge
            Block
            Callback*
            Threaded*
    Controller
        Deferred
        Block
        Protocol
"""

from twisted.flow.base import *
from twisted.flow.stage import *
from twisted.flow.pipe import *
from twisted.flow.wrap import wrap
from twisted.flow.controller import Deferred, Block
from twisted.flow.protocol import makeProtocol, Protocol
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/flow/flow.py
flow.py
# # Author: Clark Evans ([email protected]) # """ flow.wrap This module provides the wrap() function in the flow module and the private classes used for its implementation. """ from base import * from twisted.python.failure import Failure from twisted.internet.defer import Deferred class _String(Stage): """ Wrapper for a string object; don't create directly use flow.wrap This is probably the simplest stage of all. It is a constant list of one item. See wrap for an example. """ def __init__(self, str): Stage.__init__(self) self.results.append(str) self.stop = True def _yield(self): pass class _List(Stage): """ Wrapper for lists and tuple objects; don't create directly A simple stage, which admits the usage of instructions, such as Cooperate() within the list. This would be much simpler without logic to handle instructions. """ def __init__(self, seq): Stage.__init__(self) self._seq = list(seq) def _yield(self): seq = self._seq while seq: result = seq.pop(0) if isinstance(result, Instruction): return result self.results.append(result) self.stop = True class _DeferredInstruction(CallLater): def __init__(self, deferred): self.deferred = deferred def callLater(self, callable): self.deferred.addBoth(callable) class _Iterable(Stage): """ Wrapper for iterable objects, pass in a next() function This wraps functions (or bound methods). Execution starts with the initial function. If the return value is a Stage, then control passes on to that stage for the next round of execution. If the return value is Cooperate, then the chain of Stages is put on hold, and this return value travels all the way up the call stack so that the underlying mechanism can sleep, or perform other tasks, etc. All other non-Instruction return values, Failure objects included, are passed back to the previous stage via self.result All exceptions signal the end of the Stage. 
StopIteration means to stop without providing a result, while all other exceptions provide a Failure self.result followed by stoppage. """ def __init__(self, iterable, *trap): Stage.__init__(self, *trap) self._iterable = iter(iterable) self._next = None def _yield(self): """ executed during a yield statement """ if self.results or self.stop or self.failure: return while True: next = self._next if next: instruction = next._yield() if instruction: return instruction self._next = None try: result = self._iterable.next() if isinstance(result, Instruction): if isinstance(result, Stage): self._next = result continue return result if isinstance(result, Deferred): if result.called: continue return _DeferredInstruction(result) self.results.append(result) except StopIteration: self.stop = True except Failure, fail: self.failure = fail except: self.failure = Failure() return class _Deferred(Stage): """ Wraps a Deferred object into a stage; create with flow.wrap This stage provides a callback 'catch' for errback and callbacks. If not called, then this returns an Instruction which will let the reactor execute other operations, such as the producer for this deferred. """ def __init__(self, deferred, *trap): Stage.__init__(self, *trap) self._called = False deferred.addCallbacks(self._callback, self._errback) self._cooperate = _DeferredInstruction(deferred) def _callback(self, res): self._called = True self.results = [res] def _errback(self, fail): self._called = True self.failure = fail def _yield(self): if self.results or self.stop or self.failure: return if not self._called: return self._cooperate if self._called: self.stop = True return def wrap(obj, *trap): """ Wraps various objects for use within a flow The following example illustrates many different ways in which regular objects can be wrapped by the flow module to behave in a cooperative manner. 
For example:: # required imports from __future__ import generators from twisted.flow import flow from twisted.internet import reactor, defer # save this function, it is used everwhere def printFlow(source): def printer(source): source = flow.wrap(source) while True: yield source print source.next() d = flow.Deferred(printer(source)) d.addCallback(lambda _: reactor.stop()) reactor.run() source = "string" printFlow(source) source = ["one",flow.Cooperate(1),"two"] printFlow(source) def source(): yield "aeye" yield flow.Cooperate() yield "capin" printFlow(source) source = Deferred() reactor.callLater(1, lambda: source.callback("howdy")) printFlow(source) """ if isinstance(obj, Stage): if trap: # merge trap list trap = list(trap) for ex in obj._trap: if ex not in trap: trap.append(ex) obj._trap = tuple(trap) return obj if callable(obj): obj = obj() typ = type(obj) if typ is type([]) or typ is type(tuple()): return _List(obj) if typ is type(''): return _String(obj) if isinstance(obj, Deferred): return _Deferred(obj, *trap) try: return _Iterable(obj, *trap) except TypeError: pass raise ValueError, "A wrapper is not available for %r" % (obj,)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/flow/wrap.py
wrap.py
#
# Author: Clark Evans ([email protected])
#
"""flow.base

This module contains the core exceptions and base classes in the flow
module.  See flow.flow for more detailed information.
"""

import twisted.python.compat
from twisted.internet import reactor
import time

#
# Exceptions used within flow
#

class Unsupported(NotImplementedError):
    """ Indicates that the given stage does not know what to do with the
        flow instruction that was returned.
    """
    def __init__(self, inst):
        msg = "Unsupported flow instruction: %s " % repr(inst)
        # Initialize our actual base class; the previous code mistakenly
        # called TypeError.__init__ here even though this class derives
        # from NotImplementedError.
        NotImplementedError.__init__(self, msg)

class NotReadyError(RuntimeError):
    """ Raised when a stage has not been subject to a yield """
    pass

#
# Abstract/Base Classes
#

class Instruction:
    """ Has special meaning when yielded in a flow """
    pass

class Controller:
    """ Flow controller

        At the base of every flow, is a controller class which
        interprets the instructions, especially the CallLater
        instructions.  This is primarily just a marker class to denote
        which classes consume Instruction events.  If a controller
        cannot handle a particular instruction, it raises the
        Unsupported exception.
    """
    pass

class CallLater(Instruction):
    """ Instruction to support callbacks

        This is the instruction which is returned during the yield of
        the _Deferred and Callback stage.  The underlying flow driver
        should call the 'callLater' function with the callable to be
        executed after each callback.
    """
    def callLater(self, callable):
        pass

class Cooperate(CallLater):
    """ Requests that processing be paused so other tasks can resume

        Yield this object when the current chain would block or
        periodically during an intensive processing task.  The flow
        mechanism uses these objects to signal that the current
        processing chain should be paused and resumed later.  This
        allows other delayed operations to be processed, etc.  Usage is
        quite simple::

            # within some generator wrapped by a Controller
            yield Cooperate(1)  # yield for a second or more
    """
    def __init__(self, timeout=0):
        self.timeout = timeout

    def callLater(self, callable):
        reactor.callLater(self.timeout, callable)

class Stage(Instruction):
    """ Abstract base defining protocol for iterator/generators in a flow

        This is the primary component in the flow system, it is an
        iterable object which must be passed to a yield statement before
        each call to next().  Usage::

            iterable = DerivedStage( ... , SpamError, EggsError))
            yield iterable
            for result in iterable:
                # handle good result, or SpamError or EggsError
                yield iterable

        Alternatively, when inside a generator, the next() method can be
        used directly.  In this case, if no results are available,
        StopIteration is raised, and if left uncaught, will nicely end
        the generator.  Of course, unexpected failures are raised.  This
        technique is especially useful when pulling from more than one
        stage at a time.  For example::

            def someGenerator():
                iterable = SomeStage( ... , SpamError, EggsError)
                while True:
                    yield iterable
                    result = iterable.next()
                    # handle good result or SpamError or EggsError

        For many generators, the results become available in chunks of
        rows.  While the default value is to get one row at a time,
        there is a 'chunked' property which allows them to be returned
        via the next() method as many rows rather than row by row.  For
        example::

            iterable = DerivedStage(...)
            iterable.chunked = True
            for results in iterable:
                for result in results:
                    # handle good result
                yield iterable

        For those wishing more control at the cost of a painful
        experience, the following member variables can be used to great
        effect::

            - results: This is a list of results produced by the
              generator, they can be fetched one by one using next() or
              in a group together.  If no results were produced, then
              this is an empty list.  These results should be removed
              from the list after they are read; or, after reading all
              of the results set to an empty list

            - stop: This is true if the underlying generator has
              finished execution (raised a StopIteration or returned).
              Note that several results may exist, and stop may be true.

            - failure: If the generator produced an exception, then it
              is wrapped as a Failure object and put here.  Note that
              several results may have been produced before the failure.
              To ensure that the failure isn't accidentally reported
              twice, it is advisable to set stop to True.

        The order in which these member variables is used is *critical*
        for proper adherence to the flow protocol.  First, all
        successful results should be handled.  Second, the iterable
        should be checked to see if it is finished.  Third, a failure
        should be checked; while handling a failure, either the loop
        should be exited, or the iterable's stop member should be set.
        For example::

            iterable = SomeStage(...)
            while True:
                yield iterable
                if iterable.results:
                    for result in iterable.results:
                        # handle good result
                    iterable.results = []
                if iterable.stop:
                    break
                if iterable.failure:
                    iterable.stop = True
                    # handle iterable.failure
                    break
    """
    def __init__(self, *trap):
        # trap is a tuple of exception types which next() returns rather
        # than raises (see Failure.check below).
        self._trap = trap
        self.stop = False
        self.failure = None
        self.results = []
        self.chunked = False

    def __iter__(self):
        return self

    def next(self):
        """ return current result

            This is the primary function to be called to retrieve the
            current result.  It complies with the iterator protocol by
            raising StopIteration when the stage is complete.  It also
            raises an exception if it is called before the stage is
            yielded.
        """
        if self.results:
            if self.chunked:
                ret = self.results
                self.results = []
                return ret
            else:
                return self.results.pop(0)
        if self.stop:
            raise StopIteration()
        if self.failure:
            self.stop = True
            # Trapped exception types are returned rather than raised.
            cr = self.failure.check(*self._trap)
            if cr:
                return cr
            self.failure.raiseException()
        raise NotReadyError("Must 'yield' this object before calling next()")

    def _yield(self):
        """ executed during a yield statement by previous stage

            This method is private within the scope of the flow module,
            it is used by one stage in the flow to ask a subsequent
            stage to produce its value.  The result of the yield is then
            stored in self.result and is an instance of Failure if a
            problem occurred.
        """
        raise NotImplementedError
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/flow/base.py
base.py
#
# Author: Clark Evans ([email protected])
#
# Stability: The API is stable, but the implementation may still
#            have one or more bugs; threads are tough.
#
"""flow.thread

Support for threads within a flow.
"""

from __future__ import nested_scopes

from base import *
from twisted.python.failure import Failure
from twisted.internet import reactor
from time import sleep


class Threaded(Stage):
    """ A stage which runs a blocking iterable in a separate thread

        This stage tunnels output from an iterable executed in a separate
        thread to the main thread.  This process is carried out by a
        result buffer, and returning Cooperate if the buffer is empty.
        The wrapped iterable's __iter__ and next() methods will only be
        invoked in the spawned thread.

        This can be used in one of two ways, first, it can be extended
        via inheritance; with the functionality of the inherited code
        implementing next(), and using init() for initialization code to
        be run in the thread.

        If the iterable happens to have a chunked attribute, and that
        attribute is true, then this wrapper will assume that data
        arrives in chunks via a sequence instead of by values.

        For example::

            from __future__ import generators
            from twisted.internet import reactor, defer
            from twisted.flow import flow
            from twisted.flow.threads import Threaded

            def countSleep(index):
                from time import sleep
                for index in range(index):
                    sleep(.3)
                    print "sleep", index
                    yield index

            def countCooperate(index):
                for index in range(index):
                    yield flow.Cooperate(.1)
                    print "cooperate", index
                    yield "coop %s" % index

            d = flow.Deferred( flow.Merge(
                    Threaded(countSleep(5)),
                    countCooperate(5)))

            def prn(x):
                print x
                reactor.stop()
            d.addCallback(prn)
            reactor.run()
    """

    class Instruction(CallLater):
        # CallLater whose wakeup is queued while the worker thread is
        # running, and scheduled immediately once it has finished.
        def __init__(self):
            self.callable = None
            self.immediate = False

        def callLater(self, callable):
            if self.immediate:
                reactor.callLater(0, callable)
            else:
                self.callable = callable

        def __call__(self):
            callable = self.callable
            if callable:
                self.callable = None
                callable()

    def __init__(self, iterable, *trap):
        # The previous code passed the trap tuple as a single positional
        # argument (Stage.__init__(self, trap)), which made _trap a
        # one-element tuple containing a tuple and broke Failure.check
        # in Stage.next().
        Stage.__init__(self, *trap)
        self._iterable = iterable
        self._cooperate = Threaded.Instruction()
        self.srcchunked = getattr(iterable, 'chunked', False)
        reactor.callInThread(self._process)

    def _process_result(self, val):
        # Runs in the reactor thread (via callFromThread).
        if self.srcchunked:
            self.results.extend(val)
        else:
            self.results.append(val)
        self._cooperate()

    def _stopping(self):
        # Runs in the reactor thread (via callFromThread).
        self.stop = True
        self._cooperate()

    def _process(self):
        # Runs in the worker thread; results and stop are marshalled back
        # via callFromThread.
        # NOTE(review): self.failure is assigned directly from this
        # thread; this relies on the reactor thread not examining it
        # until the final callFromThread below has run.
        try:
            self._iterable = iter(self._iterable)
        except:
            self.failure = Failure()
        else:
            try:
                while True:
                    val = self._iterable.next()
                    reactor.callFromThread(self._process_result, val)
            except StopIteration:
                reactor.callFromThread(self._stopping)
            except:
                self.failure = Failure()
        reactor.callFromThread(self._cooperate)
        self._cooperate.immediate = True

    def _yield(self):
        if self.results or self.stop or self.failure:
            return
        return self._cooperate


class QueryIterator:
    """ Converts a database query into a result iterator

        Example usage::

            from __future__ import generators
            from twisted.enterprise import adbapi
            from twisted.internet import reactor
            from twisted.flow import flow
            from twisted.flow.threads import QueryIterator, Threaded

            dbpool = adbapi.ConnectionPool("SomeDriver",host='localhost',
                         db='Database',user='User',passwd='Password')

            # I test with...
            # from pyPgSQL import PgSQL
            # dbpool = PgSQL

            sql = '''
                  (SELECT 'one')
                  UNION ALL
                  (SELECT 'two')
                  UNION ALL
                  (SELECT 'three')
                  '''
            def consumer():
                print "executing"
                query = Threaded(QueryIterator(dbpool, sql))
                print "yielding"
                yield query
                print "done yielding"
                for row in query:
                    print "Processed result : ", row
                    yield query

            from twisted.internet import reactor
            def finish(result):
                print "Deferred Complete : ", result
                reactor.stop()
            f = flow.Deferred(consumer())
            f.addBoth(finish)
            reactor.run()
    """

    def __init__(self, pool, sql, fetchmany=False, fetchall=False):
        self.curs = None
        self.sql = sql
        self.pool = pool
        # Optionally fetch rows in batches; both batch modes mark the
        # iterator as chunked for the benefit of Threaded.
        if fetchmany:
            self.next = self.next_fetchmany
            self.chunked = True
        if fetchall:
            self.next = self.next_fetchall
            self.chunked = True

    def __iter__(self):
        self.conn = self.pool.connect()
        self.curs = self.conn.cursor()
        self.curs.execute(self.sql)
        return self

    def next_fetchall(self):
        # Single-shot: the whole result set in one chunk.
        if self.curs:
            ret = self.curs.fetchall()
            self.curs = None
            self.conn = None
            return ret
        raise StopIteration

    def next_fetchmany(self):
        ret = self.curs.fetchmany()
        if not ret:
            self.curs = None
            self.conn = None
            raise StopIteration
        return ret

    def next(self):
        ret = self.curs.fetchone()
        if not ret:
            self.curs = None
            self.conn = None
            raise StopIteration
        return ret
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/flow/threads.py
threads.py
# # Author: Clark Evans ([email protected]) # """ flow.controller This implements the various flow controllers, that is, those things which run the flow stack. """ from base import * from wrap import wrap from twisted.internet import defer class Block(Controller,Stage): """ A controller which blocks on Cooperate events This converts a Stage into an iterable which can be used directly in python for loops and other iteratable constructs. It does this by eating any Cooperate values and sleeping. This is largely helpful for testing or within a threaded environment. It converts other stages into one which does not emit cooperate events, ie:: [1,2, Cooperate(), 3] => [1,2,3] """ def __init__(self, stage, *trap): Stage.__init__(self) self._stage = wrap(stage,*trap) self.block = time.sleep def next(self): """ fetch the next value from the Stage flow """ stage = self._stage while True: result = stage._yield() if result: if isinstance(result, Cooperate): if result.__class__ == Cooperate: self.block(result.timeout) continue raise Unsupported(result) return stage.next() class Deferred(Controller, defer.Deferred): """ wraps up a Stage with a Deferred interface In this version, the results of the Stage are used to construct a list of results and then sent to deferred. Further, in this version Cooperate is implemented via reactor's callLater. 
For example:: from twisted.internet import reactor from twisted.flow import flow def res(x): print x d = flow.Deferred([1,2,3]) d.addCallback(res) reactor.iterate() """ def __init__(self, stage, *trap): defer.Deferred.__init__(self) self._results = [] self._stage = wrap(stage, *trap) self._execute() def results(self, results): self._results.extend(results) def _execute(self, dummy = None): cmd = self._stage while True: result = cmd._yield() if cmd.results: self.results(cmd.results) cmd.results = [] if cmd.stop: if not self.called: self.callback(self._results) return if cmd.failure: cmd.stop = True if cmd._trap: error = cmd.failure.check(*cmd._trap) if error: self._results.append(error) continue self.errback(cmd.failure) return if result: if isinstance(result, CallLater): result.callLater(self._execute) return raise Unsupported(result)
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/flow/controller.py
controller.py
# # Author: Clark Evans ([email protected]) """ flow.pipe This contains various filter stages which have exactly one input stage. These stages take a single input and modify its results, ie a rewrite stage. """ from base import * from wrap import wrap from twisted.python.failure import Failure class Pipe(Stage): """ abstract stage which takes a single input stage """ def __init__(self, source, *trap): Stage.__init__(self, *trap) self._source = wrap(source) def _yield(self): while not self.results \ and not self.stop \ and not self.failure: source = self._source instruction = source._yield() if instruction: return instruction if source.failure: self.failure = source.failure return results = source.results stop = source.stop if stop: self.stop = True source.results = [] self.process(results, stop) def process(self, results): """ process implemented by the pipe Take a set of possibly empty results and sets the member variables: results, stop, or failure appropriately """ raise NotImplementedError class Filter(Pipe): """ flow equivalent to filter: Filter(function, source, ... ) Yield those elements from a source stage for which a function returns true. If the function is None, the identity function is assumed, that is, all items yielded that are false (zero or empty) are discarded. 
For example:: def odd(val): if val % 2: return True def range(): yield 1 yield 2 yield 3 yield 4 source = flow.Filter(odd,range) printFlow(source) """ def __init__(self, func, source, *trap): Pipe.__init__(self, source, *trap) self._func = func def process(self, results, stop): self.results.extend(filter(self._func,results)) class LineBreak(Pipe): """ pipe stage which breaks its input into lines """ def __init__(self, source, *trap, **kwargs): Pipe.__init__(self, source, *trap) self._delimiter = kwargs.get('delimiter','\r\n') self._maxlen = int(kwargs.get('maxlength', 16384))+1 self._trailer = int(kwargs.get('trailer',False)) self._buffer = [] self._currlen = 0 def process(self, results, stop): for block in results: lines = str(block).split(self._delimiter) if len(lines) < 2: tail = lines[0] else: tail = lines.pop() if self._buffer: self._buffer.append(lines.pop(0)) self.results.append("".join(self._buffer)) self._buffer = [] self.results.extend(lines) self._currlen = 0 if tail: self._currlen += len(tail) self._buffer.append(tail) if stop and self._buffer: tail = "".join(self._buffer) if self._trailer: self.results.append(tail) else: raise RuntimeError, "trailing data remains: '%s'" % tail[:10]
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/flow/pipe.py
pipe.py
# # Author: Clark Evans ([email protected]) # """ flow.protocol This allows one to use flow module to create protocols, a protocol is actually a controller, but it is specialized enough to deserve its own module. """ import types from base import * from wrap import wrap from stage import Callback from twisted.internet import protocol from twisted.internet.error import ConnectionLost, ConnectionDone def makeProtocol(controller, baseClass = protocol.Protocol, *callbacks, **kwargs): """ Construct a flow based protocol This takes a base protocol class, and a set of callbacks and creates a connection flow based on the two. For example, the following would build a simple 'echo' protocol:: from __future__ import generators from twisted.internet import reactor, protocol from twisted.flow import flow PORT = 8392 def echoServer(conn): yield conn for data in conn: conn.write(data) yield conn def echoClient(conn): conn.write("hello, world!") yield conn print "server said: ", conn.next() reactor.callLater(0,reactor.stop) server = protocol.ServerFactory() server.protocol = flow.makeProtocol(echoServer) reactor.listenTCP(PORT,server) client = protocol.ClientFactory() client.protocol = flow.makeProtocol(echoClient) reactor.connectTCP("localhost", PORT, client) reactor.run() Of course, the best part about flow is that you can nest stages. Therefore it is quite easy to make a lineBreaker generator which takes an input connection and produces and output connection. 
Anyway, the code is almost identical as far as the client/server is concerned:: # this is a filter generator, it consumes from the # incoming connection, and yields results to # the next stage, the echoServer below def lineBreaker(conn, lineEnding = "\\n"): lst = [] yield conn for chunk in conn: pos = chunk.find(lineEnding) if pos > -1: lst.append(chunk[:pos]) yield "".join(lst) lst = [chunk[pos+1:]] else: lst.append(chunk) yield conn yield "".join(lst) # note that this class is only slightly modified, # simply comment out the line breaker line to see # how the server behaves without the filter... def echoServer(conn): lines = flow.wrap(lineBreaker(conn)) yield lines for data in lines: conn.write(data) yield lines # and the only thing that is changed is that we # are sending data in strange chunks, and even # putting the last chunk on hold for 2 seconds. def echoClient(conn): conn.write("Good Morning!\\nPlease ") yield conn print "server said: ", conn.next() conn.write("do not disregard ") reactor.callLater(2, conn.write, "this.\\n") yield conn print "server said: ", conn.next() reactor.callLater(0,reactor.stop) """ if not callbacks: callbacks = ('dataReceived',) trap = kwargs.get("trap", tuple()) class _Protocol(Controller, Callback, baseClass): def __init__(self): Callback.__init__(self, *trap) setattr(self, callbacks[0], self) # TODO: support more than one callback via Concurrent def _execute(self, dummy = None): cmd = self._controller self.write = self.transport.write while True: instruction = cmd._yield() if instruction: if isinstance(instruction, CallLater): instruction.callLater(self._execute) return raise Unsupported(instruction) if cmd.stop: self.transport.loseConnection() return if cmd.failure: self.transport.loseConnection() cmd.failure.trap() return if cmd.results: self.transport.writeSequence(cmd.results) cmd.results = [] def connectionMade(self): if types.ClassType == type(self.controller): self._controller = wrap(self.controller(self)) else: 
self._controller = wrap(self.controller()) self._execute() def connectionLost(self, reason=protocol.connectionDone): if isinstance(reason.value, ConnectionDone) or \ (isinstance(reason.value, ConnectionLost) and \ self.finishOnConnectionLost): self.finish() else: self.errback(reason) self._execute() _Protocol.finishOnConnectionLost = kwargs.get("finishOnConnectionLost",True) _Protocol.controller = controller return _Protocol def _NotImplController(protocol): raise NotImplementedError Protocol = makeProtocol(_NotImplController) Protocol.__doc__ = """ A concrete flow.Protocol for inheritance """
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/flow/protocol.py
protocol.py
from __future__ import generators import traceback, gc, sys from twisted.python import log, threadpool from twisted.internet import interfaces, utils # Methods in this list will be omitted from a failed test's traceback if # they are the final frame. _failureConditionals = [ 'fail', 'failIf', 'failUnless', 'failUnlessRaises', 'failUnlessEqual', 'failUnlessIdentical', 'failIfEqual', 'assertApproximates'] # --------------------------------- DEFAULT_TIMEOUT = object() DEFAULT_TIMEOUT_DURATION = 120.0 class FailureError(Exception): """Wraps around a Failure so it can get re-raised as an Exception""" def __init__(self, failure): Exception.__init__(self) self.original = failure class DirtyReactorError(Exception): """emitted when the reactor has been left in an unclean state""" class DirtyReactorWarning(Warning): """emitted when the reactor has been left in an unclean state""" class PendingTimedCallsError(Exception): """raised when timed calls are left in the reactor""" DIRTY_REACTOR_MSG = "THIS WILL BECOME AN ERROR SOON! reactor left in unclean state, the following Selectables were left over: " PENDING_TIMED_CALLS_MSG = "pendingTimedCalls still pending (consider setting twisted.internet.base.DelayedCall.debug = True):" class _Janitor(object): logErrCheck = True cleanPending = cleanThreads = cleanReactor = True def postCaseCleanup(self): return self._dispatch('cleanPending') def postClassCleanup(self): return self._dispatch('cleanReactor', 'cleanPending', 'cleanThreads') def _dispatch(self, *attrs): for attr in attrs: getattr(self, "do_%s" % attr)() def do_cleanPending(cls): # don't import reactor when module is loaded from twisted.internet import reactor # flush short-range timers reactor.iterate(0) reactor.iterate(0) pending = reactor.getDelayedCalls() if pending: s = PENDING_TIMED_CALLS_MSG for p in pending: s += " %s\n" % (p,) if p.active(): p.cancel() # delete the rest else: print "WEIRNESS! pending timed call not active+!" 
raise PendingTimedCallsError(s) do_cleanPending = utils.suppressWarnings( do_cleanPending, (('ignore',), {'category': DeprecationWarning, 'message': r'reactor\.iterate cannot be used.*'})) do_cleanPending = classmethod(do_cleanPending) def do_cleanThreads(cls): from twisted.internet import reactor if interfaces.IReactorThreads.providedBy(reactor): reactor.suggestThreadPoolSize(0) if hasattr(reactor, 'threadpool') and reactor.threadpool: reactor.threadpool.stop() reactor.threadpool = None # *Put it back* and *start it up again*. The # reactor's threadpool is *private*: we cannot just # rape it and walk away. reactor.threadpool = threadpool.ThreadPool(0, 10) reactor.threadpool.start() do_cleanThreads = classmethod(do_cleanThreads) def do_cleanReactor(cls): s = [] from twisted.internet import reactor removedSelectables = reactor.removeAll() if removedSelectables: s.append(DIRTY_REACTOR_MSG) for sel in removedSelectables: if interfaces.IProcessTransport.providedBy(sel): sel.signalProcess('KILL') s.append(repr(sel)) if s: raise DirtyReactorError(' '.join(s)) do_cleanReactor = classmethod(do_cleanReactor) def doGcCollect(cls): gc.collect() def suppress(action='ignore', **kwarg): """sets up the .suppress tuple properly, pass options to this method as you would the stdlib warnings.filterwarnings() so to use this with a .suppress magic attribute you would do the following: >>> from twisted.trial import unittest, util >>> import warnings >>> >>> class TestFoo(unittest.TestCase): ... def testFooBar(self): ... warnings.warn("i am deprecated", DeprecationWarning) ... testFooBar.suppress = [util.suppress(message='i am deprecated')] ... >>> note that as with the todo and timeout attributes: the module level attribute acts as a default for the class attribute which acts as a default for the method attribute. 
The suppress attribute can be overridden at any level by specifying .suppress = [] """ return ((action,), kwarg) def profiled(f, outputFile): def _(*args, **kwargs): if sys.version_info[0:2] != (2, 4): import profile prof = profile.Profile() try: result = prof.runcall(f, *args, **kwargs) prof.dump_stats(outputFile) except SystemExit: pass prof.print_stats() return result else: # use hotshot, profile is broken in 2.4 import hotshot.stats prof = hotshot.Profile(outputFile) try: return prof.runcall(f, *args, **kwargs) finally: stats = hotshot.stats.load(outputFile) stats.strip_dirs() stats.sort_stats('cum') # 'time' stats.print_stats(100) return _ def getPythonContainers(meth): """Walk up the Python tree from method 'meth', finding its class, its module and all containing packages.""" containers = [] containers.append(meth.im_class) moduleName = meth.im_class.__module__ while moduleName is not None: module = sys.modules.get(moduleName, None) if module is None: module = __import__(moduleName) containers.append(module) moduleName = getattr(module, '__module__', None) return containers _DEFAULT = object() def acquireAttribute(objects, attr, default=_DEFAULT): """Go through the list 'objects' sequentially until we find one which has attribute 'attr', then return the value of that attribute. If not found, return 'default' if set, otherwise, raise AttributeError. """ for obj in objects: if hasattr(obj, attr): return getattr(obj, attr) if default is not _DEFAULT: return default raise AttributeError('attribute %r not found in %r' % (attr, objects)) def findObject(name): """Get a fully-named package, module, module-global object or attribute. Forked from twisted.python.reflect.namedAny. Returns a tuple of (bool, obj). If bool is True, the named object exists and is returned as obj. If bool is False, the named object does not exist and the value of obj is unspecified. 
""" names = name.split('.') topLevelPackage = None moduleNames = names[:] while not topLevelPackage: trialname = '.'.join(moduleNames) if len(trialname) == 0: return (False, None) try: topLevelPackage = __import__(trialname) except ImportError: # if the ImportError happened in the module being imported, # this is a failure that should be handed to our caller. # count stack frames to tell the difference. exc_info = sys.exc_info() if len(traceback.extract_tb(exc_info[2])) > 1: try: # Clean up garbage left in sys.modules. del sys.modules[trialname] except KeyError: # Python 2.4 has fixed this. Yay! pass raise exc_info[0], exc_info[1], exc_info[2] moduleNames.pop() obj = topLevelPackage for n in names[1:]: try: obj = getattr(obj, n) except AttributeError: return (False, obj) return (True, obj) __all__ = ['FailureError', 'DirtyReactorWarning', 'DirtyReactorError', 'PendingTimedCallsError']
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/trial/util.py
util.py
import sys, os import time import warnings from twisted.python import reflect, failure, log from twisted.python.util import untilConcludes from twisted.trial import itrial import zope.interface as zi pyunit = __import__('unittest') class BrokenTestCaseWarning(Warning): """emitted as a warning when an exception occurs in one of setUp, tearDown, setUpClass, or tearDownClass""" class SafeStream(object): """ Wraps a stream object so that all C{write} calls are wrapped in L{untilConcludes}. """ def __init__(self, original): self.original = original def __getattr__(self, name): return getattr(self.original, name) def write(self, *a, **kw): return untilConcludes(self.original.write, *a, **kw) class TestResult(pyunit.TestResult, object): """Accumulates the results of several L{twisted.trial.unittest.TestCase}s. """ def __init__(self): super(TestResult, self).__init__() self.skips = [] self.expectedFailures = [] self.unexpectedSuccesses = [] self.successes = [] self._timings = [] def __repr__(self): return ('<%s run=%d errors=%d failures=%d todos=%d dones=%d skips=%d>' % (reflect.qual(self.__class__), self.testsRun, len(self.errors), len(self.failures), len(self.expectedFailures), len(self.skips), len(self.unexpectedSuccesses))) def _getTime(self): return time.time() def startTest(self, test): """This must be called before the given test is commenced. @type test: L{pyunit.TestCase} """ super(TestResult, self).startTest(test) self._testStarted = self._getTime() def stopTest(self, test): """This must be called after the given test is completed. @type test: L{pyunit.TestCase} """ super(TestResult, self).stopTest(test) self._lastTime = self._getTime() - self._testStarted def addFailure(self, test, fail): """Report a failed assertion for the given test. 
@type test: L{pyunit.TestCase} @type fail: L{failure.Failure} or L{tuple} """ if isinstance(fail, tuple): fail = failure.Failure(fail[1], fail[0], fail[2]) self.failures.append((test, fail)) def addError(self, test, error): """Report an error that occurred while running the given test. @type test: L{pyunit.TestCase} @type fail: L{failure.Failure} or L{tuple} """ if isinstance(error, tuple): error = failure.Failure(error[1], error[0], error[2]) self.errors.append((test, error)) def addSkip(self, test, reason): """ Report that the given test was skipped. In Trial, tests can be 'skipped'. Tests are skipped mostly because there is some platform or configuration issue that prevents them from being run correctly. @type test: L{pyunit.TestCase} @type reason: L{str} """ self.skips.append((test, reason)) def addUnexpectedSuccess(self, test, todo): """Report that the given test succeeded against expectations. In Trial, tests can be marked 'todo'. That is, they are expected to fail. When a test that is expected to fail instead succeeds, it should call this method to report the unexpected success. @type test: L{pyunit.TestCase} @type todo: L{unittest.Todo} """ # XXX - 'todo' should just be a string self.unexpectedSuccesses.append((test, todo)) def addExpectedFailure(self, test, error, todo): """Report that the given test succeeded against expectations. In Trial, tests can be marked 'todo'. That is, they are expected to fail. @type test: L{pyunit.TestCase} @type error: L{failure.Failure} @type todo: L{unittest.Todo} """ # XXX - 'todo' should just be a string self.expectedFailures.append((test, error, todo)) def addSuccess(self, test): """Report that the given test succeeded. @type test: L{pyunit.TestCase} """ self.successes.append((test,)) def upDownError(self, method, error, warn, printStatus): pass def cleanupErrors(self, errs): """Report an error that occurred during the cleanup between tests. 
""" # XXX - deprecate this method, we don't need it any more def startSuite(self, name): # XXX - these should be removed, but not in this branch pass def endSuite(self, name): # XXX - these should be removed, but not in this branch pass class Reporter(TestResult): zi.implements(itrial.IReporter) separator = '-' * 79 doubleSeparator = '=' * 79 def __init__(self, stream=sys.stdout, tbformat='default', realtime=False): super(Reporter, self).__init__() self.stream = SafeStream(stream) self.tbformat = tbformat self.realtime = realtime def startTest(self, test): super(Reporter, self).startTest(test) def addFailure(self, test, fail): super(Reporter, self).addFailure(test, fail) if self.realtime: fail = self.failures[-1][1] # guarantee it's a Failure self.write(self._formatFailureTraceback(fail)) def addError(self, test, error): super(Reporter, self).addError(test, error) if self.realtime: error = self.errors[-1][1] # guarantee it's a Failure self.write(self._formatFailureTraceback(error)) def write(self, format, *args): s = str(format) assert isinstance(s, type('')) if args: self.stream.write(s % args) else: self.stream.write(s) untilConcludes(self.stream.flush) def writeln(self, format, *args): self.write(format, *args) self.write('\n') def upDownError(self, method, error, warn, printStatus): super(Reporter, self).upDownError(method, error, warn, printStatus) if warn: tbStr = self._formatFailureTraceback(error) log.msg(tbStr) msg = ("caught exception in %s, your TestCase is broken\n\n%s" % (method, tbStr)) warnings.warn(msg, BrokenTestCaseWarning, stacklevel=2) def cleanupErrors(self, errs): super(Reporter, self).cleanupErrors(errs) warnings.warn("%s\n%s" % ("REACTOR UNCLEAN! 
traceback(s) follow: ", self._formatFailureTraceback(errs)), BrokenTestCaseWarning) def _trimFrames(self, frames): # when a method fails synchronously, the stack looks like this: # [0]: defer.maybeDeferred() # [1]: utils.runWithWarningsSuppressed() # [2:-2]: code in the test method which failed # [-1]: unittest.fail # when a method fails inside a Deferred (i.e., when the test method # returns a Deferred, and that Deferred's errback fires), the stack # captured inside the resulting Failure looks like this: # [0]: defer.Deferred._runCallbacks # [1:-2]: code in the testmethod which failed # [-1]: unittest.fail # as a result, we want to trim either [maybeDeferred,runWWS] or # [Deferred._runCallbacks] from the front, and trim the # [unittest.fail] from the end. newFrames = list(frames) if len(frames) < 2: return newFrames first = newFrames[0] second = newFrames[1] if (first[0] == "maybeDeferred" and os.path.splitext(os.path.basename(first[1]))[0] == 'defer' and second[0] == "runWithWarningsSuppressed" and os.path.splitext(os.path.basename(second[1]))[0] == 'utils'): newFrames = newFrames[2:] elif (first[0] == "_runCallbacks" and os.path.splitext(os.path.basename(first[1]))[0] == 'defer'): newFrames = newFrames[1:] last = newFrames[-1] if (last[0].startswith('fail') and os.path.splitext(os.path.basename(last[1]))[0] == 'unittest'): newFrames = newFrames[:-1] return newFrames def _formatFailureTraceback(self, fail): if isinstance(fail, str): return fail.rstrip() + '\n' fail.frames, frames = self._trimFrames(fail.frames), fail.frames result = fail.getTraceback(detail=self.tbformat, elideFrameworkCode=True) fail.frames = frames return result def _printResults(self, flavour, errors, formatter): for content in errors: self.writeln(self.doubleSeparator) self.writeln('%s: %s' % (flavour, content[0].id())) self.writeln('') self.write(formatter(*(content[1:]))) def _printExpectedFailure(self, error, todo): return 'Reason: %r\n%s' % (todo.reason, 
self._formatFailureTraceback(error)) def _printUnexpectedSuccess(self, todo): ret = 'Reason: %r\n' % (todo.reason,) if todo.errors: ret += 'Expected errors: %s\n' % (', '.join(todo.errors),) return ret def printErrors(self): """Print all of the non-success results in full to the stream. """ self.write('\n') self._printResults('[SKIPPED]', self.skips, lambda x : '%s\n' % x) self._printResults('[TODO]', self.expectedFailures, self._printExpectedFailure) self._printResults('[FAIL]', self.failures, self._formatFailureTraceback) self._printResults('[ERROR]', self.errors, self._formatFailureTraceback) self._printResults('[SUCCESS!?!]', self.unexpectedSuccesses, self._printUnexpectedSuccess) def printSummary(self): """Print a line summarising the test results to the stream. """ summaries = [] for stat in ("skips", "expectedFailures", "failures", "errors", "unexpectedSuccesses", "successes"): num = len(getattr(self, stat)) if num: summaries.append('%s=%d' % (stat, num)) summary = (summaries and ' ('+', '.join(summaries)+')') or '' if not self.wasSuccessful(): status = "FAILED" else: status = "PASSED" self.write("%s%s\n", status, summary) class MinimalReporter(Reporter): """A minimalist reporter that prints only a summary of the test result, in the form of (timeTaken, #tests, #tests, #errors, #failures, #skips). """ _runStarted = None def startTest(self, test): super(MinimalReporter, self).startTest(test) if self._runStarted is None: self._runStarted = self._getTime() def printErrors(self): pass def printSummary(self): numTests = self.testsRun t = (self._runStarted - self._getTime(), numTests, numTests, len(self.errors), len(self.failures), len(self.skips)) self.writeln(' '.join(map(str, t))) class TextReporter(Reporter): """ Simple reporter that prints a single character for each test as it runs, along with the standard Trial summary text. 
""" def addSuccess(self, test): super(TextReporter, self).addSuccess(test) self.write('.') def addError(self, *args): super(TextReporter, self).addError(*args) self.write('E') def addFailure(self, *args): super(TextReporter, self).addFailure(*args) self.write('F') def addSkip(self, *args): super(TextReporter, self).addSkip(*args) self.write('S') def addExpectedFailure(self, *args): super(TextReporter, self).addExpectedFailure(*args) self.write('T') def addUnexpectedSuccess(self, *args): super(TextReporter, self).addUnexpectedSuccess(*args) self.write('!') class VerboseTextReporter(Reporter): """ A verbose reporter that prints the name of each test as it is running. Each line is printed with the name of the test, followed by the result of that test. """ # This is actually the bwverbose option def startTest(self, tm): self.write('%s ... ', tm.id()) super(VerboseTextReporter, self).startTest(tm) def addSuccess(self, test): super(VerboseTextReporter, self).addSuccess(test) self.write('[OK]') def addError(self, *args): super(VerboseTextReporter, self).addError(*args) self.write('[ERROR]') def addFailure(self, *args): super(VerboseTextReporter, self).addFailure(*args) self.write('[FAILURE]') def addSkip(self, *args): super(VerboseTextReporter, self).addSkip(*args) self.write('[SKIPPED]') def addExpectedFailure(self, *args): super(VerboseTextReporter, self).addExpectedFailure(*args) self.write('[TODO]') def addUnexpectedSuccess(self, *args): super(VerboseTextReporter, self).addUnexpectedSuccess(*args) self.write('[SUCCESS!?!]') def stopTest(self, test): super(VerboseTextReporter, self).stopTest(test) self.write('\n') class TimingTextReporter(VerboseTextReporter): """Prints out each test as it is running, followed by the time taken for each test to run. 
""" def stopTest(self, method): super(TimingTextReporter, self).stopTest(method) self.write("(%.03f secs)\n" % self._lastTime) class _AnsiColorizer(object): """ A colorizer is an object that loosely wraps around a stream, allowing callers to write text to the stream in a particular color. Colorizer classes must implement C{supported()} and C{write(text, color)}. """ _colors = dict(black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37) def __init__(self, stream): self.stream = stream def supported(self): """ A class method that returns True if the current platform supports coloring terminal output using this method. Returns False otherwise. """ # assuming stderr # isatty() returns False when SSHd into Win32 machine if 'CYGWIN' in os.environ: return True if not sys.stderr.isatty(): return False # auto color only on TTYs try: import curses curses.setupterm() return curses.tigetnum("colors") > 2 except: # guess false in case of error return False supported = classmethod(supported) def write(self, text, color): """ Write the given text to the stream in the given color. @param text: Text to be written to the stream. @param color: A string label for a color. e.g. 'red', 'white'. """ color = self._colors[color] self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) class _Win32Colorizer(object): """ See _AnsiColorizer docstring. 
""" def __init__(self, stream): from win32console import GetStdHandle, STD_OUTPUT_HANDLE, \ FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \ FOREGROUND_INTENSITY red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN, FOREGROUND_BLUE, FOREGROUND_INTENSITY) self.stream = stream self.screenBuffer = GetStdHandle(STD_OUTPUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, 'green': green | bold, 'blue': blue | bold, 'yellow': red | green | bold, 'magenta': red | blue | bold, 'cyan': green | blue | bold, 'white': red | green | blue | bold } def supported(self): try: import win32console screenBuffer = win32console.GetStdHandle( win32console.STD_OUTPUT_HANDLE) except ImportError: return False import pywintypes try: screenBuffer.SetConsoleTextAttribute( win32console.FOREGROUND_RED | win32console.FOREGROUND_GREEN | win32console.FOREGROUND_BLUE) except pywintypes.error: return False else: return True supported = classmethod(supported) def write(self, text, color): color = self._colors[color] self.screenBuffer.SetConsoleTextAttribute(color) self.stream.write(text) self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) class _NullColorizer(object): """ See _AnsiColorizer docstring. """ def __init__(self, stream): self.stream = stream def supported(self): return True supported = classmethod(supported) def write(self, text, color): self.stream.write(text) class TreeReporter(Reporter): """Print out the tests in the form a tree. Tests are indented according to which class and module they belong. Results are printed in ANSI color. 
""" currentLine = '' indent = ' ' columns = 79 FAILURE = 'red' ERROR = 'red' TODO = 'blue' SKIP = 'blue' TODONE = 'red' SUCCESS = 'green' def __init__(self, stream=sys.stdout, tbformat='default', realtime=False): super(TreeReporter, self).__init__(stream, tbformat, realtime) self._lastTest = [] for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: if colorizer.supported(): self._colorizer = colorizer(stream) break def getDescription(self, test): """ Return the name of the method which 'test' represents. This is what gets displayed in the leaves of the tree. e.g. getDescription(TestCase('test_foo')) ==> test_foo """ return test.id().split('.')[-1] def addSuccess(self, test): super(TreeReporter, self).addSuccess(test) self.endLine('[OK]', self.SUCCESS) def addError(self, *args): super(TreeReporter, self).addError(*args) self.endLine('[ERROR]', self.ERROR) def addFailure(self, *args): super(TreeReporter, self).addFailure(*args) self.endLine('[FAIL]', self.FAILURE) def addSkip(self, *args): super(TreeReporter, self).addSkip(*args) self.endLine('[SKIPPED]', self.SKIP) def addExpectedFailure(self, *args): super(TreeReporter, self).addExpectedFailure(*args) self.endLine('[TODO]', self.TODO) def addUnexpectedSuccess(self, *args): super(TreeReporter, self).addUnexpectedSuccess(*args) self.endLine('[SUCCESS!?!]', self.TODONE) def write(self, format, *args): if args: format = format % args self.currentLine = format super(TreeReporter, self).write(self.currentLine) def _testPrelude(self, test): segments = [test.__class__.__module__, test.__class__.__name__] indentLevel = 0 for seg in segments: if indentLevel < len(self._lastTest): if seg != self._lastTest[indentLevel]: self.write('%s%s\n' % (self.indent * indentLevel, seg)) else: self.write('%s%s\n' % (self.indent * indentLevel, seg)) indentLevel += 1 self._lastTest = segments def cleanupErrors(self, errs): self._colorizer.write(' cleanup errors', self.ERROR) self.endLine('[ERROR]', self.ERROR) 
super(TreeReporter, self).cleanupErrors(errs) def upDownError(self, method, error, warn, printStatus): self.write(self.color(" %s" % method, self.ERROR)) if printStatus: self.endLine('[ERROR]', self.ERROR) super(TreeReporter, self).upDownError(method, error, warn, printStatus) def startTest(self, method): self._testPrelude(method) self.write('%s%s ... ' % (self.indent * (len(self._lastTest)), self.getDescription(method))) super(TreeReporter, self).startTest(method) def endLine(self, message, color): spaces = ' ' * (self.columns - len(self.currentLine) - len(message)) super(TreeReporter, self).write(spaces) self._colorizer.write(message, color) super(TreeReporter, self).write("\n")
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/trial/reporter.py
reporter.py
import zope.interface as zi


class ITestCase(zi.Interface):
    """DEPRECATED in Twisted 2.5.

    This interface will be removed in the next release. Implementing it
    has no impact.
    """

    def setUp():
        """I am run before each method is run"""

    def tearDown():
        """I am run after each method is run"""


class IReporter(zi.Interface):
    """I report results from a run of a test suite.

    In all lists below, 'Results' are either a
    twisted.python.failure.Failure object, or a string.
    """

    stream = zi.Attribute(
        "@ivar stream: the io-stream that this reporter will write to")
    tbformat = zi.Attribute(
        "@ivar tbformat: either 'default', 'brief', or 'verbose'")
    args = zi.Attribute(
        "@ivar args: additional string argument passed from the command line")
    shouldStop = zi.Attribute(
        "@ivar shouldStop: a boolean indicating that"
        " this reporter would like the test run to stop.")

    def startTest(method):
        """report the beginning of a run of a single test method

        @param method: an object that is adaptable to ITestMethod
        """

    def stopTest(method):
        """report the status of a single test method

        @param method: an object that is adaptable to ITestMethod
        """

    def startSuite(name):
        """suites which wish to appear in reporter output should call
        this before running their tests"""

    def endSuite(name):
        """called at the end of a suite, if and only if that suite has
        called 'startSuite'
        """

    def cleanupErrors(errs):
        """called when the reactor has been left in a 'dirty' state

        @param errs: a list of L{twisted.python.failure.Failure}s
        """

    # NOTE(review): the concrete TreeReporter implements this as
    # upDownError(method, error, warn, printStatus) -- an `error`
    # parameter is missing from this declaration; verify which signature
    # is authoritative.
    def upDownError(userMeth, warn=True, printStatus=True):
        """called when an error occurs in a setUp* or tearDown* method

        @param warn: indicates whether or not the reporter should emit a
                     warning about the error
        @type warn: Boolean

        @param printStatus: indicates whether or not the reporter should
                            print the name of the method and the status
                            message appropriate for the type of error
        @type printStatus: Boolean
        """

    def addSuccess(test):
        """Record that test passed."""
zope.app.twisted
/zope.app.twisted-3.5.0.tar.gz/zope.app.twisted-3.5.0/src/twisted/trial/itrial.py
itrial.py