id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1,600 | _cpmodpy.py | midgetspy_Sick-Beard/cherrypy/_cpmodpy.py | """Native adapter for serving CherryPy via mod_python
Basic usage:
##########################################
# Application in a module called myapp.py
##########################################
import cherrypy
class Root:
@cherrypy.expose
def index(self):
return 'Hi there, Ho there, Hey there'
# We will use this method from the mod_python configuration
# as the entry point to our application
def setup_server():
cherrypy.tree.mount(Root())
cherrypy.config.update({'environment': 'production',
'log.screen': False,
'show_tracebacks': False})
##########################################
# mod_python settings for apache2
# This should reside in your httpd.conf
# or a file that will be loaded at
# apache startup
##########################################
# Start
DocumentRoot "/"
Listen 8080
LoadModule python_module /usr/lib/apache2/modules/mod_python.so
<Location "/">
PythonPath "sys.path+['/path/to/my/application']"
SetHandler python-program
PythonHandler cherrypy._cpmodpy::handler
PythonOption cherrypy.setup myapp::setup_server
PythonDebug On
</Location>
# End
The actual path to your mod_python.so is dependent on your
environment. In this case we suppose a global mod_python
installation on a Linux distribution such as Ubuntu.
We do set the PythonPath configuration setting so that
your application can be found by from the user running
the apache2 instance. Of course if your application
resides in the global site-package this won't be needed.
Then restart apache2 and access http://127.0.0.1:8080
"""
import logging
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
# ------------------------------ Request-handling
def setup(req):
from mod_python import apache
# Run any setup functions defined by a "PythonOption cherrypy.setup" directive.
options = req.get_options()
if 'cherrypy.setup' in options:
for function in options['cherrypy.setup'].split():
atoms = function.split('::', 1)
if len(atoms) == 1:
mod = __import__(atoms[0], globals(), locals())
else:
modname, fname = atoms
mod = __import__(modname, globals(), locals(), [fname])
func = getattr(mod, fname)
func()
cherrypy.config.update({'log.screen': False,
"tools.ignore_headers.on": True,
"tools.ignore_headers.headers": ['Range'],
})
engine = cherrypy.engine
if hasattr(engine, "signal_handler"):
engine.signal_handler.unsubscribe()
if hasattr(engine, "console_control_handler"):
engine.console_control_handler.unsubscribe()
engine.autoreload.unsubscribe()
cherrypy.server.unsubscribe()
def _log(msg, level):
newlevel = apache.APLOG_ERR
if logging.DEBUG >= level:
newlevel = apache.APLOG_DEBUG
elif logging.INFO >= level:
newlevel = apache.APLOG_INFO
elif logging.WARNING >= level:
newlevel = apache.APLOG_WARNING
# On Windows, req.server is required or the msg will vanish. See
# http://www.modpython.org/pipermail/mod_python/2003-October/014291.html.
# Also, "When server is not specified...LogLevel does not apply..."
apache.log_error(msg, newlevel, req.server)
engine.subscribe('log', _log)
engine.start()
def cherrypy_cleanup(data):
engine.exit()
try:
# apache.register_cleanup wasn't available until 3.1.4.
apache.register_cleanup(cherrypy_cleanup)
except AttributeError:
req.server.register_cleanup(req, cherrypy_cleanup)
class _ReadOnlyRequest:
expose = ('read', 'readline', 'readlines')
def __init__(self, req):
for method in self.expose:
self.__dict__[method] = getattr(req, method)
recursive = False
_isSetUp = False
def handler(req):
from mod_python import apache
try:
global _isSetUp
if not _isSetUp:
setup(req)
_isSetUp = True
# Obtain a Request object from CherryPy
local = req.connection.local_addr
local = httputil.Host(local[0], local[1], req.connection.local_host or "")
remote = req.connection.remote_addr
remote = httputil.Host(remote[0], remote[1], req.connection.remote_host or "")
scheme = req.parsed_uri[0] or 'http'
req.get_basic_auth_pw()
try:
# apache.mpm_query only became available in mod_python 3.1
q = apache.mpm_query
threaded = q(apache.AP_MPMQ_IS_THREADED)
forked = q(apache.AP_MPMQ_IS_FORKED)
except AttributeError:
bad_value = ("You must provide a PythonOption '%s', "
"either 'on' or 'off', when running a version "
"of mod_python < 3.1")
threaded = options.get('multithread', '').lower()
if threaded == 'on':
threaded = True
elif threaded == 'off':
threaded = False
else:
raise ValueError(bad_value % "multithread")
forked = options.get('multiprocess', '').lower()
if forked == 'on':
forked = True
elif forked == 'off':
forked = False
else:
raise ValueError(bad_value % "multiprocess")
sn = cherrypy.tree.script_name(req.uri or "/")
if sn is None:
send_response(req, '404 Not Found', [], '')
else:
app = cherrypy.tree.apps[sn]
method = req.method
path = req.uri
qs = req.args or ""
reqproto = req.protocol
headers = req.headers_in.items()
rfile = _ReadOnlyRequest(req)
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(local, remote, scheme,
"HTTP/1.1")
request.login = req.user
request.multithread = bool(threaded)
request.multiprocess = bool(forked)
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the response
try:
request.run(method, path, qs, reqproto, headers, rfile)
break
except cherrypy.InternalRedirect, ir:
app.release_serving()
prev = request
if not recursive:
if ir.path in redirections:
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % ir.path)
else:
# Add the *previous* path_info + qs to redirections.
if qs:
qs = "?" + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = "GET"
path = ir.path
qs = ir.query_string
rfile = StringIO()
send_response(req, response.status, response.header_list,
response.body, response.stream)
finally:
app.release_serving()
except:
tb = format_exc()
cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR)
s, h, b = bare_error()
send_response(req, s, h, b)
return apache.OK
def send_response(req, status, headers, body, stream=False):
# Set response status
req.status = int(status[:3])
# Set response headers
req.content_type = "text/plain"
for header, value in headers:
if header.lower() == 'content-type':
req.content_type = value
continue
req.headers_out.add(header, value)
if stream:
# Flush now so the status and headers are sent immediately.
req.flush()
# Set response body
if isinstance(body, basestring):
req.write(body)
else:
for seg in body:
req.write(seg)
# --------------- Startup tools for CherryPy + mod_python --------------- #
import os
import re
def read_process(cmd, args=""):
pipein, pipeout = os.popen4("%s %s" % (cmd, args))
try:
firstline = pipeout.readline()
if (re.search(r"(not recognized|No such file|not found)", firstline,
re.IGNORECASE)):
raise IOError('%s must be on your system path.' % cmd)
output = firstline + pipeout.read()
finally:
pipeout.close()
return output
class ModPythonServer(object):
template = """
# Apache2 server configuration file for running CherryPy with mod_python.
DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so
<Location %(loc)s>
SetHandler python-program
PythonHandler %(handler)s
PythonDebug On
%(opts)s
</Location>
"""
def __init__(self, loc="/", port=80, opts=None, apache_path="apache",
handler="cherrypy._cpmodpy::handler"):
self.loc = loc
self.port = port
self.opts = opts
self.apache_path = apache_path
self.handler = handler
def start(self):
opts = "".join([" PythonOption %s %s\n" % (k, v)
for k, v in self.opts])
conf_data = self.template % {"port": self.port,
"loc": self.loc,
"opts": opts,
"handler": self.handler,
}
mpconf = os.path.join(os.path.dirname(__file__), "cpmodpy.conf")
f = open(mpconf, 'wb')
try:
f.write(conf_data)
finally:
f.close()
response = read_process(self.apache_path, "-k start -f %s" % mpconf)
self.ready = True
return response
def stop(self):
os.popen("apache -k stop")
self.ready = False
| 11,133 | Python | .py | 269 | 28.527881 | 87 | 0.541746 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,601 | _cpchecker.py | midgetspy_Sick-Beard/cherrypy/_cpchecker.py | import os
import warnings
import cherrypy
class Checker(object):
"""A checker for CherryPy sites and their mounted applications.
on: set this to False to turn off the checker completely.
When this object is called at engine startup, it executes each
of its own methods whose names start with "check_". If you wish
to disable selected checks, simply add a line in your global
config which sets the appropriate method to False:
[global]
checker.check_skipped_app_config = False
You may also dynamically add or replace check_* methods in this way.
"""
on = True
def __init__(self):
self._populate_known_types()
def __call__(self):
"""Run all check_* methods."""
if self.on:
oldformatwarning = warnings.formatwarning
warnings.formatwarning = self.formatwarning
try:
for name in dir(self):
if name.startswith("check_"):
method = getattr(self, name)
if method and callable(method):
method()
finally:
warnings.formatwarning = oldformatwarning
def formatwarning(self, message, category, filename, lineno, line=None):
"""Function to format a warning."""
return "CherryPy Checker:\n%s\n\n" % message
# This value should be set inside _cpconfig.
global_config_contained_paths = False
def check_app_config_entries_dont_start_with_script_name(self):
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
if sn == '':
continue
sn_atoms = sn.strip("/").split("/")
for key in app.config.keys():
key_atoms = key.strip("/").split("/")
if key_atoms[:len(sn_atoms)] == sn_atoms:
warnings.warn(
"The application mounted at %r has config " \
"entries that start with its script name: %r" % (sn, key))
def check_site_config_entries_in_app_config(self):
for sn, app in cherrypy.tree.apps.iteritems():
if not isinstance(app, cherrypy.Application):
continue
msg = []
for section, entries in app.config.iteritems():
if section.startswith('/'):
for key, value in entries.iteritems():
for n in ("engine.", "server.", "tree.", "checker."):
if key.startswith(n):
msg.append("[%s] %s = %s" % (section, key, value))
if msg:
msg.insert(0,
"The application mounted at %r contains the following "
"config entries, which are only allowed in site-wide "
"config. Move them to a [global] section and pass them "
"to cherrypy.config.update() instead of tree.mount()." % sn)
warnings.warn(os.linesep.join(msg))
def check_skipped_app_config(self):
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
msg = "The Application mounted at %r has an empty config." % sn
if self.global_config_contained_paths:
msg += (" It looks like the config you passed to "
"cherrypy.config.update() contains application-"
"specific sections. You must explicitly pass "
"application config via "
"cherrypy.tree.mount(..., config=app_config)")
warnings.warn(msg)
return
def check_app_config_brackets(self):
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
for key in app.config.keys():
if key.startswith("[") or key.endswith("]"):
warnings.warn(
"The application mounted at %r has config " \
"section names with extraneous brackets: %r. "
"Config *files* need brackets; config *dicts* "
"(e.g. passed to tree.mount) do not." % (sn, key))
def check_static_paths(self):
# Use the dummy Request object in the main thread.
request = cherrypy.request
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
request.app = app
for section in app.config:
# get_resource will populate request.config
request.get_resource(section + "/dummy.html")
conf = request.config.get
if conf("tools.staticdir.on", False):
msg = ""
root = conf("tools.staticdir.root")
dir = conf("tools.staticdir.dir")
if dir is None:
msg = "tools.staticdir.dir is not set."
else:
fulldir = ""
if os.path.isabs(dir):
fulldir = dir
if root:
msg = ("dir is an absolute path, even "
"though a root is provided.")
testdir = os.path.join(root, dir[1:])
if os.path.exists(testdir):
msg += ("\nIf you meant to serve the "
"filesystem folder at %r, remove "
"the leading slash from dir." % testdir)
else:
if not root:
msg = "dir is a relative path and no root provided."
else:
fulldir = os.path.join(root, dir)
if not os.path.isabs(fulldir):
msg = "%r is not an absolute path." % fulldir
if fulldir and not os.path.exists(fulldir):
if msg:
msg += "\n"
msg += ("%r (root + dir) is not an existing "
"filesystem path." % fulldir)
if msg:
warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r"
% (msg, section, root, dir))
# -------------------------- Compatibility -------------------------- #
obsolete = {
'server.default_content_type': 'tools.response_headers.headers',
'log_access_file': 'log.access_file',
'log_config_options': None,
'log_file': 'log.error_file',
'log_file_not_found': None,
'log_request_headers': 'tools.log_headers.on',
'log_to_screen': 'log.screen',
'show_tracebacks': 'request.show_tracebacks',
'throw_errors': 'request.throw_errors',
'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
'cherrypy.Application(Root())))'),
}
deprecated = {}
def _compat(self, config):
"""Process config and warn on each obsolete or deprecated entry."""
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if k in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead.\n"
"section: [%s]" %
(k, self.obsolete[k], section))
elif k in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead.\n"
"section: [%s]" %
(k, self.deprecated[k], section))
else:
if section in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead."
% (section, self.obsolete[section]))
elif section in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead."
% (section, self.deprecated[section]))
def check_compatibility(self):
"""Process config and warn on each obsolete or deprecated entry."""
self._compat(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._compat(app.config)
# ------------------------ Known Namespaces ------------------------ #
extra_config_namespaces = []
def _known_ns(self, app):
ns = ["wsgi"]
ns.extend(app.toolboxes.keys())
ns.extend(app.namespaces.keys())
ns.extend(app.request_class.namespaces.keys())
ns.extend(cherrypy.config.namespaces.keys())
ns += self.extra_config_namespaces
for section, conf in app.config.items():
is_path_section = section.startswith("/")
if is_path_section and isinstance(conf, dict):
for k, v in conf.items():
atoms = k.split(".")
if len(atoms) > 1:
if atoms[0] not in ns:
# Spit out a special warning if a known
# namespace is preceded by "cherrypy."
if (atoms[0] == "cherrypy" and atoms[1] in ns):
msg = ("The config entry %r is invalid; "
"try %r instead.\nsection: [%s]"
% (k, ".".join(atoms[1:]), section))
else:
msg = ("The config entry %r is invalid, because "
"the %r config namespace is unknown.\n"
"section: [%s]" % (k, atoms[0], section))
warnings.warn(msg)
elif atoms[0] == "tools":
if atoms[1] not in dir(cherrypy.tools):
msg = ("The config entry %r may be invalid, "
"because the %r tool was not found.\n"
"section: [%s]" % (k, atoms[1], section))
warnings.warn(msg)
def check_config_namespaces(self):
"""Process config and warn on each unknown config namespace."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_ns(app)
# -------------------------- Config Types -------------------------- #
known_config_types = {}
def _populate_known_types(self):
import __builtin__ as builtins
b = [x for x in vars(builtins).values()
if type(x) is type(str)]
def traverse(obj, namespace):
for name in dir(obj):
# Hack for 3.2's warning about body_params
if name == 'body_params':
continue
vtype = type(getattr(obj, name, None))
if vtype in b:
self.known_config_types[namespace + "." + name] = vtype
traverse(cherrypy.request, "request")
traverse(cherrypy.response, "response")
traverse(cherrypy.server, "server")
traverse(cherrypy.engine, "engine")
traverse(cherrypy.log, "log")
def _known_types(self, config):
msg = ("The config entry %r in section %r is of type %r, "
"which does not match the expected type %r.")
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
else:
k, v = section, conf
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
def check_config_types(self):
"""Assert that config values are of the same type as default values."""
self._known_types(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_types(app.config)
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
"""Warn if any socket_host is 'localhost'. See #711."""
for k, v in cherrypy.config.items():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
"cause problems on newer systems, since 'localhost' can "
"map to either an IPv4 or an IPv6 address. You should "
"use '127.0.0.1' or '[::1]' instead.")
| 14,612 | Python | .py | 275 | 33.370909 | 85 | 0.473684 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,602 | _cpthreadinglocal.py | midgetspy_Sick-Beard/cherrypy/_cpthreadinglocal.py | # This is a backport of Python-2.4's threading.local() implementation
"""Thread-local objects
(Note that this module provides a Python version of thread
threading.local class. Depending on the version of Python you're
using, there may be a faster one available. You should always import
the local class from threading.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
# Threading import is at end
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if args or kw and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = currentThread().__dict__.get(key)
if d is None:
d = {}
currentThread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call out __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__():
threading_enumerate = enumerate
__getattribute__ = object.__getattribute__
def __del__(self):
key = __getattribute__(self, '_local__key')
try:
threads = list(threading_enumerate())
except:
# if enumerate fails, as it seems to do during
# shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
return __del__
__del__ = __del__()
from threading import currentThread, enumerate, RLock
| 6,855 | Python | .py | 184 | 29.586957 | 74 | 0.584057 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,603 | _cprequest.py | midgetspy_Sick-Beard/cherrypy/_cprequest.py |
from Cookie import SimpleCookie, CookieError
import os
import sys
import time
import types
import warnings
import cherrypy
from cherrypy import _cpreqbody, _cpconfig
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil, file_generator
class Hook(object):
"""A callback and its metadata: failsafe, priority, and kwargs."""
__metaclass__ = cherrypy._AttributeDocstrings
callback = None
callback__doc = """
The bare callable that this Hook object is wrapping, which will
be called when the Hook is called."""
failsafe = False
failsafe__doc = """
If True, the callback is guaranteed to run even if other callbacks
from the same call point raise exceptions."""
priority = 50
priority__doc = """
Defines the order of execution for a list of Hooks. Priority numbers
should be limited to the closed interval [0, 100], but values outside
this range are acceptable, as are fractional values."""
kwargs = {}
kwargs__doc = """
A set of keyword arguments that will be passed to the
callable on each call."""
def __init__(self, callback, failsafe=None, priority=None, **kwargs):
self.callback = callback
if failsafe is None:
failsafe = getattr(callback, "failsafe", False)
self.failsafe = failsafe
if priority is None:
priority = getattr(callback, "priority", 50)
self.priority = priority
self.kwargs = kwargs
def __cmp__(self, other):
return cmp(self.priority, other.priority)
def __call__(self):
"""Run self.callback(**self.kwargs)."""
return self.callback(**self.kwargs)
def __repr__(self):
cls = self.__class__
return ("%s.%s(callback=%r, failsafe=%r, priority=%r, %s)"
% (cls.__module__, cls.__name__, self.callback,
self.failsafe, self.priority,
", ".join(['%s=%r' % (k, v)
for k, v in self.kwargs.items()])))
class HookMap(dict):
"""A map of call points to lists of callbacks (Hook objects)."""
def __new__(cls, points=None):
d = dict.__new__(cls)
for p in points or []:
d[p] = []
return d
def __init__(self, *a, **kw):
pass
def attach(self, point, callback, failsafe=None, priority=None, **kwargs):
"""Append a new Hook made from the supplied arguments."""
self[point].append(Hook(callback, failsafe, priority, **kwargs))
def run(self, point):
"""Execute all registered Hooks (callbacks) for the given point."""
exc = None
hooks = self[point]
hooks.sort()
for hook in hooks:
# Some hooks are guaranteed to run even if others at
# the same hookpoint fail. We will still log the failure,
# but proceed on to the next hook. The only way
# to stop all processing from one of these hooks is
# to raise SystemExit and stop the whole server.
if exc is None or hook.failsafe:
try:
hook()
except (KeyboardInterrupt, SystemExit):
raise
except (cherrypy.HTTPError, cherrypy.HTTPRedirect,
cherrypy.InternalRedirect):
exc = sys.exc_info()[1]
except:
exc = sys.exc_info()[1]
cherrypy.log(traceback=True, severity=40)
if exc:
raise
def __copy__(self):
newmap = self.__class__()
# We can't just use 'update' because we want copies of the
# mutable values (each is a list) as well.
for k, v in self.items():
newmap[k] = v[:]
return newmap
copy = __copy__
def __repr__(self):
cls = self.__class__
return "%s.%s(points=%r)" % (cls.__module__, cls.__name__, self.keys())
# Config namespace handlers
def hooks_namespace(k, v):
"""Attach bare hooks declared in config."""
# Use split again to allow multiple hooks for a single
# hookpoint per path (e.g. "hooks.before_handler.1").
# Little-known fact you only get from reading source ;)
hookpoint = k.split(".", 1)[0]
if isinstance(v, basestring):
v = cherrypy.lib.attributes(v)
if not isinstance(v, Hook):
v = Hook(v)
cherrypy.serving.request.hooks[hookpoint].append(v)
def request_namespace(k, v):
"""Attach request attributes declared in config."""
# Provides config entries to set request.body attrs (like attempt_charsets).
if k[:5] == 'body.':
setattr(cherrypy.serving.request.body, k[5:], v)
else:
setattr(cherrypy.serving.request, k, v)
def response_namespace(k, v):
"""Attach response attributes declared in config."""
# Provides config entries to set default response headers
# http://cherrypy.org/ticket/889
if k[:8] == 'headers.':
cherrypy.serving.response.headers[k.split('.', 1)[1]] = v
else:
setattr(cherrypy.serving.response, k, v)
def error_page_namespace(k, v):
"""Attach error pages declared in config."""
if k != 'default':
k = int(k)
cherrypy.serving.request.error_page[k] = v
hookpoints = ['on_start_resource', 'before_request_body',
'before_handler', 'before_finalize',
'on_end_resource', 'on_end_request',
'before_error_response', 'after_error_response']
class Request(object):
"""An HTTP request.
This object represents the metadata of an HTTP request message;
that is, it contains attributes which describe the environment
in which the request URL, headers, and body were sent (if you
want tools to interpret the headers and body, those are elsewhere,
mostly in Tools). This 'metadata' consists of socket data,
transport characteristics, and the Request-Line. This object
also contains data regarding the configuration in effect for
the given URL, and the execution plan for generating a response.
"""
__metaclass__ = cherrypy._AttributeDocstrings
prev = None
prev__doc = """
The previous Request object (if any). This should be None
unless we are processing an InternalRedirect."""
# Conversation/connection attributes
local = httputil.Host("127.0.0.1", 80)
local__doc = \
"An httputil.Host(ip, port, hostname) object for the server socket."
remote = httputil.Host("127.0.0.1", 1111)
remote__doc = \
"An httputil.Host(ip, port, hostname) object for the client socket."
scheme = "http"
scheme__doc = """
The protocol used between client and server. In most cases,
this will be either 'http' or 'https'."""
server_protocol = "HTTP/1.1"
server_protocol__doc = """
The HTTP version for which the HTTP server is at least
conditionally compliant."""
base = ""
base__doc = """The (scheme://host) portion of the requested URL.
In some cases (e.g. when proxying via mod_rewrite), this may contain
path segments which cherrypy.url uses when constructing url's, but
which otherwise are ignored by CherryPy. Regardless, this value
MUST NOT end in a slash."""
# Request-Line attributes
request_line = ""
request_line__doc = """
The complete Request-Line received from the client. This is a
single string consisting of the request method, URI, and protocol
version (joined by spaces). Any final CRLF is removed."""
method = "GET"
method__doc = """
Indicates the HTTP method to be performed on the resource identified
by the Request-URI. Common methods include GET, HEAD, POST, PUT, and
DELETE. CherryPy allows any extension method; however, various HTTP
servers and gateways may restrict the set of allowable methods.
CherryPy applications SHOULD restrict the set (on a per-URI basis)."""
query_string = ""
query_string__doc = """
The query component of the Request-URI, a string of information to be
interpreted by the resource. The query portion of a URI follows the
path component, and is separated by a '?'. For example, the URI
'http://www.cherrypy.org/wiki?a=3&b=4' has the query component,
'a=3&b=4'."""
query_string_encoding = 'utf8'
query_string_encoding__doc = """
The encoding expected for query string arguments after % HEX HEX decoding).
If a query string is provided that cannot be decoded with this encoding,
404 is raised (since technically it's a different URI). If you want
arbitrary encodings to not error, set this to 'Latin-1'; you can then
encode back to bytes and re-decode to whatever encoding you like later.
"""
protocol = (1, 1)
protocol__doc = """The HTTP protocol version corresponding to the set
of features which should be allowed in the response. If BOTH
the client's request message AND the server's level of HTTP
compliance is HTTP/1.1, this attribute will be the tuple (1, 1).
If either is 1.0, this attribute will be the tuple (1, 0).
Lower HTTP protocol versions are not explicitly supported."""
params = {}
params__doc = """
A dict which combines query string (GET) and request entity (POST)
variables. This is populated in two stages: GET params are added
before the 'on_start_resource' hook, and POST params are added
between the 'before_request_body' and 'before_handler' hooks."""
# Message attributes
header_list = []
header_list__doc = """
A list of the HTTP request headers as (name, value) tuples.
In general, you should use request.headers (a dict) instead."""
headers = httputil.HeaderMap()
headers__doc = """
A dict-like object containing the request headers. Keys are header
names (in Title-Case format); however, you may get and set them in
a case-insensitive manner. That is, headers['Content-Type'] and
headers['content-type'] refer to the same value. Values are header
values (decoded according to RFC 2047 if necessary). See also:
httputil.HeaderMap, httputil.HeaderElement."""
cookie = SimpleCookie()
cookie__doc = """See help(Cookie)."""
body = None
body__doc = """See help(cherrypy.request.body)"""
rfile = None
rfile__doc = """
If the request included an entity (body), it will be available
as a stream in this attribute. However, the rfile will normally
be read for you between the 'before_request_body' hook and the
'before_handler' hook, and the resulting string is placed into
either request.params or the request.body attribute.
You may disable the automatic consumption of the rfile by setting
request.process_request_body to False, either in config for the desired
path, or in an 'on_start_resource' or 'before_request_body' hook.
WARNING: In almost every case, you should not attempt to read from the
rfile stream after CherryPy's automatic mechanism has read it. If you
turn off the automatic parsing of rfile, you should read exactly the
number of bytes specified in request.headers['Content-Length'].
Ignoring either of these warnings may result in a hung request thread
or in corruption of the next (pipelined) request.
"""
process_request_body = True
process_request_body__doc = """
If True, the rfile (if any) is automatically read and parsed,
and the result placed into request.params or request.body."""
methods_with_bodies = ("POST", "PUT")
methods_with_bodies__doc = """
A sequence of HTTP methods for which CherryPy will automatically
attempt to read a body from the rfile."""
body = None
body__doc = """
If the request Content-Type is 'application/x-www-form-urlencoded'
or multipart, this will be None. Otherwise, this will contain the
request entity body as an open file object (which you can .read());
this value is set between the 'before_request_body' and 'before_handler'
hooks (assuming that process_request_body is True)."""
body_params = None
body_params__doc = """
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True)."""
# Dispatch attributes
dispatch = cherrypy.dispatch.Dispatcher()
dispatch__doc = """
The object which looks up the 'page handler' callable and collects
config for the current request based on the path_info, other
request attributes, and the application architecture. The core
calls the dispatcher as early as possible, passing it a 'path_info'
argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
See help(cherrypy.dispatch) for more information."""
script_name = ""
script_name__doc = """
The 'mount point' of the application which is handling this request.
This attribute MUST NOT end in a slash. If the script_name refers to
the root of the URI, it MUST be an empty string (not "/").
"""
path_info = "/"
path_info__doc = """
The 'relative path' portion of the Request-URI. This is relative
to the script_name ('mount point') of the application which is
handling this request."""
login = None
login__doc = """
When authentication is used during the request processing this is
set to 'False' if it failed and to the 'username' value if it succeeded.
The default 'None' implies that no authentication happened."""
# Note that cherrypy.url uses "if request.app:" to determine whether
# the call is during a real HTTP request or not. So leave this None.
app = None
app__doc = \
"""The cherrypy.Application object which is handling this request."""
handler = None
handler__doc = """
The function, method, or other callable which CherryPy will call to
produce the response. The discovery of the handler and the arguments
it will receive are determined by the request.dispatch object.
By default, the handler is discovered by walking a tree of objects
starting at request.app.root, and is then passed all HTTP params
(from the query string and POST body) as keyword arguments."""
toolmaps = {}
toolmaps__doc = """
A nested dict of all Toolboxes and Tools in effect for this request,
of the form: {Toolbox.namespace: {Tool.name: config dict}}."""
config = None
config__doc = """
A flat dict of all configuration entries which apply to the
current request. These entries are collected from global config,
application config (based on request.path_info), and from handler
config (exactly how is governed by the request.dispatch object in
effect for this request; by default, handler config can be attached
anywhere in the tree between request.app.root and the final handler,
and inherits downward)."""
is_index = None
is_index__doc = """
This will be True if the current request is mapped to an 'index'
resource handler (also, a 'default' handler if path_info ends with
a slash). The value may be used to automatically redirect the
user-agent to a 'more canonical' URL which either adds or removes
the trailing slash. See cherrypy.tools.trailing_slash."""
hooks = HookMap(hookpoints)
hooks__doc = """
A HookMap (dict-like object) of the form: {hookpoint: [hook, ...]}.
Each key is a str naming the hook point, and each value is a list
of hooks which will be called at that hook point during this request.
The list of hooks is generally populated as early as possible (mostly
from Tools specified in config), but may be extended at any time.
See also: _cprequest.Hook, _cprequest.HookMap, and cherrypy.tools."""
error_response = cherrypy.HTTPError(500).set_response
error_response__doc = """
The no-arg callable which will handle unexpected, untrapped errors
during request processing. This is not used for expected exceptions
(like NotFound, HTTPError, or HTTPRedirect) which are raised in
response to expected conditions (those should be customized either
via request.error_page or by overriding HTTPError.set_response).
By default, error_response uses HTTPError(500) to return a generic
error response to the user-agent."""
error_page = {}
error_page__doc = """
A dict of {error code: response filename or callable} pairs.
The error code must be an int representing a given HTTP error code,
or the string 'default', which will be used if no matching entry
is found for a given numeric code.
If a filename is provided, the file should contain a Python string-
formatting template, and can expect by default to receive format
values with the mapping keys %(status)s, %(message)s, %(traceback)s,
and %(version)s. The set of format mappings can be extended by
overriding HTTPError.set_response.
If a callable is provided, it will be called by default with keyword
arguments 'status', 'message', 'traceback', and 'version', as for a
string-formatting template. The callable must return a string or iterable of
strings which will be set to response.body. It may also override headers or
perform any other processing.
If no entry is given for an error code, and no 'default' entry exists,
a default template will be used.
"""
show_tracebacks = True
show_tracebacks__doc = """
If True, unexpected errors encountered during request processing will
include a traceback in the response body."""
show_mismatched_params = True
show_mismatched_params__doc = """
If True, mismatched parameters encountered during PageHandler invocation
processing will be included in the response body."""
throws = (KeyboardInterrupt, SystemExit, cherrypy.InternalRedirect)
throws__doc = \
"""The sequence of exceptions which Request.run does not trap."""
throw_errors = False
throw_errors__doc = """
If True, Request.run will not trap any errors (except HTTPRedirect and
HTTPError, which are more properly called 'exceptions', not errors)."""
closed = False
closed__doc = """
True once the close method has been called, False otherwise."""
stage = None
stage__doc = """
A string containing the stage reached in the request-handling process.
This is useful when debugging a live server with hung requests."""
namespaces = _cpconfig.NamespaceSet(
**{"hooks": hooks_namespace,
"request": request_namespace,
"response": response_namespace,
"error_page": error_page_namespace,
"tools": cherrypy.tools,
})
def __init__(self, local_host, remote_host, scheme="http",
server_protocol="HTTP/1.1"):
"""Populate a new Request object.
local_host should be an httputil.Host object with the server info.
remote_host should be an httputil.Host object with the client info.
scheme should be a string, either "http" or "https".
"""
self.local = local_host
self.remote = remote_host
self.scheme = scheme
self.server_protocol = server_protocol
self.closed = False
# Put a *copy* of the class error_page into self.
self.error_page = self.error_page.copy()
# Put a *copy* of the class namespaces into self.
self.namespaces = self.namespaces.copy()
self.stage = None
def close(self):
"""Run cleanup code. (Core)"""
if not self.closed:
self.closed = True
self.stage = 'on_end_request'
self.hooks.run('on_end_request')
self.stage = 'close'
def run(self, method, path, query_string, req_protocol, headers, rfile):
"""Process the Request. (Core)
method, path, query_string, and req_protocol should be pulled directly
from the Request-Line (e.g. "GET /path?key=val HTTP/1.0").
path should be %XX-unquoted, but query_string should not be.
They both MUST be byte strings, not unicode strings.
headers should be a list of (name, value) tuples.
rfile should be a file-like object containing the HTTP request entity.
When run() is done, the returned object should have 3 attributes:
status, e.g. "200 OK"
header_list, a list of (name, value) tuples
body, an iterable yielding strings
Consumer code (HTTP servers) should then access these response
attributes to build the outbound stream.
"""
response = cherrypy.serving.response
self.stage = 'run'
try:
self.error_response = cherrypy.HTTPError(500).set_response
self.method = method
path = path or "/"
self.query_string = query_string or ''
self.params = {}
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
rp = int(req_protocol[5]), int(req_protocol[7])
sp = int(self.server_protocol[5]), int(self.server_protocol[7])
self.protocol = min(rp, sp)
response.headers.protocol = self.protocol
# Rebuild first line of the request (e.g. "GET /path HTTP/1.0").
url = path
if query_string:
url += '?' + query_string
self.request_line = '%s %s %s' % (method, url, req_protocol)
self.header_list = list(headers)
self.headers = httputil.HeaderMap()
self.rfile = rfile
self.body = None
self.cookie = SimpleCookie()
self.handler = None
# path_info should be the path from the
# app root (script_name) to the handler.
self.script_name = self.app.script_name
self.path_info = pi = path[len(self.script_name):]
self.stage = 'respond'
self.respond(pi)
except self.throws:
raise
except:
if self.throw_errors:
raise
else:
# Failure in setup, error handler or finalize. Bypass them.
# Can't use handle_error because we may not have hooks yet.
cherrypy.log(traceback=True, severity=40)
if self.show_tracebacks:
body = format_exc()
else:
body = ""
r = bare_error(body)
response.output_status, response.header_list, response.body = r
if self.method == "HEAD":
# HEAD requests MUST NOT return a message-body in the response.
response.body = []
try:
cherrypy.log.access()
except:
cherrypy.log.error(traceback=True)
if response.timed_out:
raise cherrypy.TimeoutError()
return response
# Uncomment for stage debugging
# stage = property(lambda self: self._stage, lambda self, v: print(v))
def respond(self, path_info):
"""Generate a response for the resource at self.path_info. (Core)"""
response = cherrypy.serving.response
try:
try:
try:
if self.app is None:
raise cherrypy.NotFound()
# Get the 'Host' header, so we can HTTPRedirect properly.
self.stage = 'process_headers'
self.process_headers()
# Make a copy of the class hooks
self.hooks = self.__class__.hooks.copy()
self.toolmaps = {}
self.stage = 'get_resource'
self.get_resource(path_info)
self.body = _cpreqbody.RequestBody(
self.rfile, self.headers, request_params=self.params)
self.namespaces(self.config)
self.stage = 'on_start_resource'
self.hooks.run('on_start_resource')
# Parse the querystring
self.stage = 'process_query_string'
self.process_query_string()
# Process the body
if self.process_request_body:
if self.method not in self.methods_with_bodies:
self.process_request_body = False
self.stage = 'before_request_body'
self.hooks.run('before_request_body')
if self.process_request_body:
self.body.process()
# Run the handler
self.stage = 'before_handler'
self.hooks.run('before_handler')
if self.handler:
self.stage = 'handler'
response.body = self.handler()
# Finalize
self.stage = 'before_finalize'
self.hooks.run('before_finalize')
response.finalize()
except (cherrypy.HTTPRedirect, cherrypy.HTTPError), inst:
inst.set_response()
self.stage = 'before_finalize (HTTPError)'
self.hooks.run('before_finalize')
response.finalize()
finally:
self.stage = 'on_end_resource'
self.hooks.run('on_end_resource')
except self.throws:
raise
except:
if self.throw_errors:
raise
self.handle_error()
def process_query_string(self):
"""Parse the query string into Python structures. (Core)"""
try:
p = httputil.parse_query_string(
self.query_string, encoding=self.query_string_encoding)
except UnicodeDecodeError:
raise cherrypy.HTTPError(
404, "The given query string could not be processed. Query "
"strings for this resource must be encoded with %r." %
self.query_string_encoding)
# Python 2 only: keyword arguments must be byte strings (type 'str').
for key, value in p.items():
if isinstance(key, unicode):
del p[key]
p[key.encode(self.query_string_encoding)] = value
self.params.update(p)
def process_headers(self):
"""Parse HTTP header data into Python structures. (Core)"""
# Process the headers into self.headers
headers = self.headers
for name, value in self.header_list:
# Call title() now (and use dict.__method__(headers))
# so title doesn't have to be called twice.
name = name.title()
value = value.strip()
# Warning: if there is more than one header entry for cookies (AFAIK,
# only Konqueror does that), only the last one will remain in headers
# (but they will be correctly stored in request.cookie).
if "=?" in value:
dict.__setitem__(headers, name, httputil.decode_TEXT(value))
else:
dict.__setitem__(headers, name, value)
# Handle cookies differently because on Konqueror, multiple
# cookies come on different lines with the same key
if name == 'Cookie':
try:
self.cookie.load(value)
except CookieError:
msg = "Illegal cookie name %s" % value.split('=')[0]
raise cherrypy.HTTPError(400, msg)
if not dict.__contains__(headers, 'Host'):
# All Internet-based HTTP/1.1 servers MUST respond with a 400
# (Bad Request) status code to any HTTP/1.1 request message
# which lacks a Host header field.
if self.protocol >= (1, 1):
msg = "HTTP/1.1 requires a 'Host' request header."
raise cherrypy.HTTPError(400, msg)
host = dict.get(headers, 'Host')
if not host:
host = self.local.name or self.local.ip
self.base = "%s://%s" % (self.scheme, host)
def get_resource(self, path):
"""Call a dispatcher (which sets self.handler and .config). (Core)"""
# First, see if there is a custom dispatch at this URI. Custom
# dispatchers can only be specified in app.config, not in _cp_config
# (since custom dispatchers may not even have an app.root).
dispatch = self.app.find_config(path, "request.dispatch", self.dispatch)
# dispatch() should set self.handler and self.config
dispatch(path)
def handle_error(self):
"""Handle the last unanticipated exception. (Core)"""
try:
self.hooks.run("before_error_response")
if self.error_response:
self.error_response()
self.hooks.run("after_error_response")
cherrypy.serving.response.finalize()
except cherrypy.HTTPRedirect, inst:
inst.set_response()
cherrypy.serving.response.finalize()
# ------------------------- Properties ------------------------- #
def _get_body_params(self):
warnings.warn(
"body_params is deprecated in CherryPy 3.2, will be removed in "
"CherryPy 3.3.",
DeprecationWarning
)
return self.body.params
body_params = property(_get_body_params,
doc="""
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True).
Deprecated in 3.2, will be removed for 3.3""")
class ResponseBody(object):
"""The body of the HTTP response (the response entity)."""
def __get__(self, obj, objclass=None):
if obj is None:
# When calling on the class instead of an instance...
return self
else:
return obj._body
def __set__(self, obj, value):
# Convert the given value to an iterable object.
if isinstance(value, basestring):
# strings get wrapped in a list because iterating over a single
# item list is much faster than iterating over every character
# in a long string.
if value:
value = [value]
else:
# [''] doesn't evaluate to False, so replace it with [].
value = []
elif isinstance(value, types.FileType):
value = file_generator(value)
elif value is None:
value = []
obj._body = value
class Response(object):
"""An HTTP Response, including status, headers, and body.
Application developers should use Response.headers (a dict) to
set or modify HTTP response headers. When the response is finalized,
Response.headers is transformed into Response.header_list as
(key, value) tuples.
"""
__metaclass__ = cherrypy._AttributeDocstrings
# Class attributes for dev-time introspection.
status = ""
status__doc = """The HTTP Status-Code and Reason-Phrase."""
header_list = []
header_list__doc = """
A list of the HTTP response headers as (name, value) tuples.
In general, you should use response.headers (a dict) instead."""
headers = httputil.HeaderMap()
headers__doc = """
A dict-like object containing the response headers. Keys are header
names (in Title-Case format); however, you may get and set them in
a case-insensitive manner. That is, headers['Content-Type'] and
headers['content-type'] refer to the same value. Values are header
values (decoded according to RFC 2047 if necessary). See also:
httputil.HeaderMap, httputil.HeaderElement."""
cookie = SimpleCookie()
cookie__doc = """See help(Cookie)."""
body = ResponseBody()
body__doc = """The body (entity) of the HTTP response."""
time = None
time__doc = """The value of time.time() when created. Use in HTTP dates."""
timeout = 300
timeout__doc = """Seconds after which the response will be aborted."""
timed_out = False
timed_out__doc = """
Flag to indicate the response should be aborted, because it has
exceeded its timeout."""
stream = False
stream__doc = """If False, buffer the response body."""
def __init__(self):
self.status = None
self.header_list = None
self._body = []
self.time = time.time()
self.headers = httputil.HeaderMap()
# Since we know all our keys are titled strings, we can
# bypass HeaderMap.update and get a big speed boost.
dict.update(self.headers, {
"Content-Type": 'text/html',
"Server": "CherryPy/" + cherrypy.__version__,
"Date": httputil.HTTPDate(self.time),
})
self.cookie = SimpleCookie()
def collapse_body(self):
"""Collapse self.body to a single string; replace it and return it."""
if isinstance(self.body, basestring):
return self.body
newbody = ''.join([chunk for chunk in self.body])
self.body = newbody
return newbody
def finalize(self):
"""Transform headers (and cookies) into self.header_list. (Core)"""
try:
code, reason, _ = httputil.valid_status(self.status)
except ValueError, x:
raise cherrypy.HTTPError(500, x.args[0])
headers = self.headers
self.output_status = str(code) + " " + headers.encode(reason)
if self.stream:
# The upshot: wsgiserver will chunk the response if
# you pop Content-Length (or set it explicitly to None).
# Note that lib.static sets C-L to the file's st_size.
if dict.get(headers, 'Content-Length') is None:
dict.pop(headers, 'Content-Length', None)
elif code < 200 or code in (204, 205, 304):
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body."
dict.pop(headers, 'Content-Length', None)
self.body = ""
else:
# Responses which are not streamed should have a Content-Length,
# but allow user code to set Content-Length if desired.
if dict.get(headers, 'Content-Length') is None:
content = self.collapse_body()
dict.__setitem__(headers, 'Content-Length', len(content))
# Transform our header dict into a list of tuples.
self.header_list = h = headers.output()
cookie = self.cookie.output()
if cookie:
for line in cookie.split("\n"):
if line.endswith("\r"):
# Python 2.4 emits cookies joined by LF but 2.5+ by CRLF.
line = line[:-1]
name, value = line.split(": ", 1)
if isinstance(name, unicode):
name = name.encode("ISO-8859-1")
if isinstance(value, unicode):
value = headers.encode(value)
h.append((name, value))
def check_timeout(self):
"""If now > self.time + self.timeout, set self.timed_out.
This purposefully sets a flag, rather than raising an error,
so that a monitor thread can interrupt the Response thread.
"""
if time.time() > self.time + self.timeout:
self.timed_out = True
| 39,031 | Python | .py | 775 | 38.687742 | 82 | 0.614566 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,604 | _cplogging.py | midgetspy_Sick-Beard/cherrypy/_cplogging.py | """CherryPy logging."""
import datetime
import logging
# Silence the no-handlers "warning" (stderr write!) in stdlib logging
logging.Logger.manager.emittedNoHandlerWarning = 1
logfmt = logging.Formatter("%(message)s")
import os
import sys
import cherrypy
from cherrypy import _cperror
class LogManager(object):
appid = None
error_log = None
access_log = None
access_log_format = \
'%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
def __init__(self, appid=None, logger_root="cherrypy"):
self.logger_root = logger_root
self.appid = appid
if appid is None:
self.error_log = logging.getLogger("%s.error" % logger_root)
self.access_log = logging.getLogger("%s.access" % logger_root)
else:
self.error_log = logging.getLogger("%s.error.%s" % (logger_root, appid))
self.access_log = logging.getLogger("%s.access.%s" % (logger_root, appid))
self.error_log.setLevel(logging.INFO)
self.access_log.setLevel(logging.INFO)
cherrypy.engine.subscribe('graceful', self.reopen_files)
def reopen_files(self):
"""Close and reopen all file handlers."""
for log in (self.error_log, self.access_log):
for h in log.handlers:
if isinstance(h, logging.FileHandler):
h.acquire()
h.stream.close()
h.stream = open(h.baseFilename, h.mode)
h.release()
def error(self, msg='', context='', severity=logging.INFO, traceback=False):
"""Write to the error log.
This is not just for errors! Applications may call this at any time
to log application-specific information.
"""
if traceback:
msg += _cperror.format_exc()
self.error_log.log(severity, ' '.join((self.time(), context, msg)))
def __call__(self, *args, **kwargs):
"""Write to the error log.
This is not just for errors! Applications may call this at any time
to log application-specific information.
"""
return self.error(*args, **kwargs)
def access(self):
"""Write to the access log (in Apache/NCSA Combined Log format).
Like Apache started doing in 2.0.46, non-printable and other special
characters in %r (and we expand that to all parts) are escaped using
\\xhh sequences, where hh stands for the hexadecimal representation
of the raw byte. Exceptions from this rule are " and \\, which are
escaped by prepending a backslash, and all whitespace characters,
which are written in their C-style notation (\\n, \\t, etc).
"""
request = cherrypy.serving.request
remote = request.remote
response = cherrypy.serving.response
outheaders = response.headers
inheaders = request.headers
if response.output_status is None:
status = "-"
else:
status = response.output_status.split(" ", 1)[0]
atoms = {'h': remote.name or remote.ip,
'l': '-',
'u': getattr(request, "login", None) or "-",
't': self.time(),
'r': request.request_line,
's': status,
'b': dict.get(outheaders, 'Content-Length', '') or "-",
'f': dict.get(inheaders, 'Referer', ''),
'a': dict.get(inheaders, 'User-Agent', ''),
}
for k, v in atoms.items():
if isinstance(v, unicode):
v = v.encode('utf8')
elif not isinstance(v, str):
v = str(v)
# Fortunately, repr(str) escapes unprintable chars, \n, \t, etc
# and backslash for us. All we have to do is strip the quotes.
v = repr(v)[1:-1]
# Escape double-quote.
atoms[k] = v.replace('"', '\\"')
try:
self.access_log.log(logging.INFO, self.access_log_format % atoms)
except:
self(traceback=True)
def time(self):
"""Return now() in Apache Common Log Format (no timezone)."""
now = datetime.datetime.now()
monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
month = monthnames[now.month - 1].capitalize()
return ('[%02d/%s/%04d:%02d:%02d:%02d]' %
(now.day, month, now.year, now.hour, now.minute, now.second))
def _get_builtin_handler(self, log, key):
for h in log.handlers:
if getattr(h, "_cpbuiltin", None) == key:
return h
# ------------------------- Screen handlers ------------------------- #
def _set_screen_handler(self, log, enable, stream=None):
h = self._get_builtin_handler(log, "screen")
if enable:
if not h:
if stream is None:
stream = sys.stderr
h = logging.StreamHandler(stream)
h.setFormatter(logfmt)
h._cpbuiltin = "screen"
log.addHandler(h)
elif h:
log.handlers.remove(h)
def _get_screen(self):
h = self._get_builtin_handler
has_h = h(self.error_log, "screen") or h(self.access_log, "screen")
return bool(has_h)
def _set_screen(self, newvalue):
self._set_screen_handler(self.error_log, newvalue, stream=sys.stderr)
self._set_screen_handler(self.access_log, newvalue, stream=sys.stdout)
screen = property(_get_screen, _set_screen,
doc="If True, error and access will print to stderr.")
# -------------------------- File handlers -------------------------- #
def _add_builtin_file_handler(self, log, fname):
h = logging.FileHandler(fname)
h.setFormatter(logfmt)
h._cpbuiltin = "file"
log.addHandler(h)
def _set_file_handler(self, log, filename):
h = self._get_builtin_handler(log, "file")
if filename:
if h:
if h.baseFilename != os.path.abspath(filename):
h.close()
log.handlers.remove(h)
self._add_builtin_file_handler(log, filename)
else:
self._add_builtin_file_handler(log, filename)
else:
if h:
h.close()
log.handlers.remove(h)
def _get_error_file(self):
h = self._get_builtin_handler(self.error_log, "file")
if h:
return h.baseFilename
return ''
def _set_error_file(self, newvalue):
self._set_file_handler(self.error_log, newvalue)
error_file = property(_get_error_file, _set_error_file,
doc="The filename for self.error_log.")
def _get_access_file(self):
h = self._get_builtin_handler(self.access_log, "file")
if h:
return h.baseFilename
return ''
def _set_access_file(self, newvalue):
self._set_file_handler(self.access_log, newvalue)
access_file = property(_get_access_file, _set_access_file,
doc="The filename for self.access_log.")
# ------------------------- WSGI handlers ------------------------- #
def _set_wsgi_handler(self, log, enable):
h = self._get_builtin_handler(log, "wsgi")
if enable:
if not h:
h = WSGIErrorHandler()
h.setFormatter(logfmt)
h._cpbuiltin = "wsgi"
log.addHandler(h)
elif h:
log.handlers.remove(h)
def _get_wsgi(self):
return bool(self._get_builtin_handler(self.error_log, "wsgi"))
def _set_wsgi(self, newvalue):
self._set_wsgi_handler(self.error_log, newvalue)
wsgi = property(_get_wsgi, _set_wsgi,
doc="If True, error messages will be sent to wsgi.errors.")
class WSGIErrorHandler(logging.Handler):
"A handler class which writes logging records to environ['wsgi.errors']."
def flush(self):
"""Flushes the stream."""
try:
stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors')
except (AttributeError, KeyError):
pass
else:
stream.flush()
def emit(self, record):
"""Emit a record."""
try:
stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors')
except (AttributeError, KeyError):
pass
else:
try:
msg = self.format(record)
fs = "%s\n"
import types
if not hasattr(types, "UnicodeType"): #if no unicode support...
stream.write(fs % msg)
else:
try:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
self.flush()
except:
self.handleError(record)
| 9,493 | Python | .py | 213 | 31.511737 | 87 | 0.535202 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,605 | _cpwsgi_server.py | midgetspy_Sick-Beard/cherrypy/_cpwsgi_server.py | """WSGI server interface (see PEP 333). This adds some CP-specific bits to
the framework-agnostic wsgiserver package.
"""
import sys
import cherrypy
from cherrypy import wsgiserver
class CPHTTPRequest(wsgiserver.HTTPRequest):
pass
class CPHTTPConnection(wsgiserver.HTTPConnection):
pass
class CPWSGIServer(wsgiserver.CherryPyWSGIServer):
"""Wrapper for wsgiserver.CherryPyWSGIServer.
wsgiserver has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications. Therefore,
we wrap it here, so we can set our own mount points from cherrypy.tree
and apply some attributes from config -> cherrypy.server -> wsgiserver.
"""
def __init__(self, server_adapter=cherrypy.server):
self.server_adapter = server_adapter
self.max_request_header_size = self.server_adapter.max_request_header_size or 0
self.max_request_body_size = self.server_adapter.max_request_body_size or 0
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
self.wsgi_version = self.server_adapter.wsgi_version
s = wsgiserver.CherryPyWSGIServer
s.__init__(self, server_adapter.bind_addr, cherrypy.tree,
self.server_adapter.thread_pool,
server_name,
max=self.server_adapter.thread_pool_max,
request_queue_size=self.server_adapter.socket_queue_size,
timeout=self.server_adapter.socket_timeout,
shutdown_timeout=self.server_adapter.shutdown_timeout,
)
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
| 2,686 | Python | .py | 50 | 41.26 | 88 | 0.660465 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,606 | _cpnative_server.py | midgetspy_Sick-Beard/cherrypy/_cpnative_server.py | """Native adapter for serving CherryPy via its builtin server."""
import logging
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
from cherrypy import wsgiserver
class NativeGateway(wsgiserver.Gateway):
recursive = False
def respond(self):
req = self.req
try:
# Obtain a Request object from CherryPy
local = req.server.bind_addr
local = httputil.Host(local[0], local[1], "")
remote = req.conn.remote_addr, req.conn.remote_port
remote = httputil.Host(remote[0], remote[1], "")
scheme = req.scheme
sn = cherrypy.tree.script_name(req.uri or "/")
if sn is None:
self.send_response('404 Not Found', [], [''])
else:
app = cherrypy.tree.apps[sn]
method = req.method
path = req.path
qs = req.qs or ""
headers = req.inheaders.items()
rfile = req.rfile
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(
local, remote, scheme, "HTTP/1.1")
request.multithread = True
request.multiprocess = False
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the response
try:
request.run(method, path, qs, req.request_protocol, headers, rfile)
break
except cherrypy.InternalRedirect, ir:
app.release_serving()
prev = request
if not self.recursive:
if ir.path in redirections:
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % ir.path)
else:
# Add the *previous* path_info + qs to redirections.
if qs:
qs = "?" + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = "GET"
path = ir.path
qs = ir.query_string
rfile = StringIO()
self.send_response(
response.output_status, response.header_list,
response.body)
finally:
app.release_serving()
except:
tb = format_exc()
#print tb
cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
s, h, b = bare_error()
self.send_response(s, h, b)
def send_response(self, status, headers, body):
req = self.req
# Set response status
req.status = str(status or "500 Server Error")
# Set response headers
for header, value in headers:
req.outheaders.append((header, value))
if (req.ready and not req.sent_headers):
req.sent_headers = True
req.send_headers()
# Set response body
for seg in body:
req.write(seg)
class CPHTTPServer(wsgiserver.HTTPServer):
"""Wrapper for wsgiserver.HTTPServer.
wsgiserver has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications.
Therefore, we wrap it here, so we can apply some attributes
from config -> cherrypy.server -> HTTPServer.
"""
def __init__(self, server_adapter=cherrypy.server):
self.server_adapter = server_adapter
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
wsgiserver.HTTPServer.__init__(
self, server_adapter.bind_addr, NativeGateway,
minthreads=server_adapter.thread_pool,
maxthreads=server_adapter.thread_pool_max,
server_name=server_name)
self.max_request_header_size = self.server_adapter.max_request_header_size or 0
self.max_request_body_size = self.server_adapter.max_request_body_size or 0
self.request_queue_size = self.server_adapter.socket_queue_size
self.timeout = self.server_adapter.socket_timeout
self.shutdown_timeout = self.server_adapter.shutdown_timeout
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
| 6,042 | Python | .py | 124 | 31.379032 | 95 | 0.543425 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,607 | _cpwsgi.py | midgetspy_Sick-Beard/cherrypy/_cpwsgi.py | """WSGI interface (see PEP 333)."""
import sys as _sys
import cherrypy as _cherrypy
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from cherrypy import _cperror
from cherrypy.lib import httputil
def downgrade_wsgi_ux_to_1x(environ):
"""Return a new environ dict for WSGI 1.x from the given WSGI u.x environ."""
env1x = {}
url_encoding = environ[u'wsgi.url_encoding']
for k, v in environ.items():
if k in [u'PATH_INFO', u'SCRIPT_NAME', u'QUERY_STRING']:
v = v.encode(url_encoding)
elif isinstance(v, unicode):
v = v.encode('ISO-8859-1')
env1x[k.encode('ISO-8859-1')] = v
return env1x
class VirtualHost(object):
"""Select a different WSGI application based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different applications. For example:
root = Root()
RootApp = cherrypy.Application(root)
Domain2App = cherrypy.Application(root)
SecureApp = cherrypy.Application(Secure())
vhost = cherrypy._cpwsgi.VirtualHost(RootApp,
domains={'www.domain2.example': Domain2App,
'www.domain2.example:443': SecureApp,
})
cherrypy.tree.graft(vhost)
default: required. The default WSGI application.
use_x_forwarded_host: if True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying.
domains: a dict of {host header value: application} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding WSGI application
will be called instead of the default. Note that you often need
separate entries for "example.com" and "www.example.com".
In addition, "Host" headers may contain the port number.
"""
def __init__(self, default, domains=None, use_x_forwarded_host=True):
self.default = default
self.domains = domains or {}
self.use_x_forwarded_host = use_x_forwarded_host
def __call__(self, environ, start_response):
domain = environ.get('HTTP_HOST', '')
if self.use_x_forwarded_host:
domain = environ.get("HTTP_X_FORWARDED_HOST", domain)
nextapp = self.domains.get(domain)
if nextapp is None:
nextapp = self.default
return nextapp(environ, start_response)
class InternalRedirector(object):
"""WSGI middleware that handles raised cherrypy.InternalRedirect."""
def __init__(self, nextapp, recursive=False):
self.nextapp = nextapp
self.recursive = recursive
def __call__(self, environ, start_response):
redirections = []
while True:
environ = environ.copy()
try:
return self.nextapp(environ, start_response)
except _cherrypy.InternalRedirect, ir:
sn = environ.get('SCRIPT_NAME', '')
path = environ.get('PATH_INFO', '')
qs = environ.get('QUERY_STRING', '')
# Add the *previous* path_info + qs to redirections.
old_uri = sn + path
if qs:
old_uri += "?" + qs
redirections.append(old_uri)
if not self.recursive:
# Check to see if the new URI has been redirected to already
new_uri = sn + ir.path
if ir.query_string:
new_uri += "?" + ir.query_string
if new_uri in redirections:
ir.request.close()
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % new_uri)
# Munge the environment and try again.
environ['REQUEST_METHOD'] = "GET"
environ['PATH_INFO'] = ir.path
environ['QUERY_STRING'] = ir.query_string
environ['wsgi.input'] = StringIO()
environ['CONTENT_LENGTH'] = "0"
environ['cherrypy.previous_request'] = ir.request
class ExceptionTrapper(object):
def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):
self.nextapp = nextapp
self.throws = throws
def __call__(self, environ, start_response):
return _TrappedResponse(self.nextapp, environ, start_response, self.throws)
class _TrappedResponse(object):
response = iter([])
def __init__(self, nextapp, environ, start_response, throws):
self.nextapp = nextapp
self.environ = environ
self.start_response = start_response
self.throws = throws
self.started_response = False
self.response = self.trap(self.nextapp, self.environ, self.start_response)
self.iter_response = iter(self.response)
def __iter__(self):
self.started_response = True
return self
def next(self):
return self.trap(self.iter_response.next)
def close(self):
if hasattr(self.response, 'close'):
self.response.close()
def trap(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except self.throws:
raise
except StopIteration:
raise
except:
tb = _cperror.format_exc()
#print('trapped (started %s):' % self.started_response, tb)
_cherrypy.log(tb, severity=40)
if not _cherrypy.request.show_tracebacks:
tb = ""
s, h, b = _cperror.bare_error(tb)
if self.started_response:
# Empty our iterable (so future calls raise StopIteration)
self.iter_response = iter([])
else:
self.iter_response = iter(b)
try:
self.start_response(s, h, _sys.exc_info())
except:
# "The application must not trap any exceptions raised by
# start_response, if it called start_response with exc_info.
# Instead, it should allow such exceptions to propagate
# back to the server or gateway."
# But we still log and call close() to clean up ourselves.
_cherrypy.log(traceback=True, severity=40)
raise
if self.started_response:
return "".join(b)
else:
return b
# WSGI-to-CP Adapter #
class AppResponse(object):
"""WSGI response iterable for CherryPy applications."""
def __init__(self, environ, start_response, cpapp):
if environ.get(u'wsgi.version') == (u'u', 0):
environ = downgrade_wsgi_ux_to_1x(environ)
self.environ = environ
self.cpapp = cpapp
try:
self.run()
except:
self.close()
raise
r = _cherrypy.serving.response
self.iter_response = iter(r.body)
self.write = start_response(r.output_status, r.header_list)
def __iter__(self):
return self
def next(self):
return self.iter_response.next()
def close(self):
"""Close and de-reference the current request and response. (Core)"""
self.cpapp.release_serving()
def run(self):
"""Create a Request object using environ."""
env = self.environ.get
local = httputil.Host('', int(env('SERVER_PORT', 80)),
env('SERVER_NAME', ''))
remote = httputil.Host(env('REMOTE_ADDR', ''),
int(env('REMOTE_PORT', -1)),
env('REMOTE_HOST', ''))
scheme = env('wsgi.url_scheme')
sproto = env('ACTUAL_SERVER_PROTOCOL', "HTTP/1.1")
request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
# LOGON_USER is served by IIS, and is the name of the
# user after having been mapped to a local account.
# Both IIS and Apache set REMOTE_USER, when possible.
request.login = env('LOGON_USER') or env('REMOTE_USER') or None
request.multithread = self.environ['wsgi.multithread']
request.multiprocess = self.environ['wsgi.multiprocess']
request.wsgi_environ = self.environ
request.prev = env('cherrypy.previous_request', None)
meth = self.environ['REQUEST_METHOD']
path = httputil.urljoin(self.environ.get('SCRIPT_NAME', ''),
self.environ.get('PATH_INFO', ''))
qs = self.environ.get('QUERY_STRING', '')
rproto = self.environ.get('SERVER_PROTOCOL')
headers = self.translate_headers(self.environ)
rfile = self.environ['wsgi.input']
request.run(meth, path, qs, rproto, headers, rfile)
headerNames = {'HTTP_CGI_AUTHORIZATION': 'Authorization',
'CONTENT_LENGTH': 'Content-Length',
'CONTENT_TYPE': 'Content-Type',
'REMOTE_HOST': 'Remote-Host',
'REMOTE_ADDR': 'Remote-Addr',
}
def translate_headers(self, environ):
"""Translate CGI-environ header names to HTTP header names."""
for cgiName in environ:
# We assume all incoming header keys are uppercase already.
if cgiName in self.headerNames:
yield self.headerNames[cgiName], environ[cgiName]
elif cgiName[:5] == "HTTP_":
# Hackish attempt at recovering original header names.
translatedHeader = cgiName[5:].replace("_", "-")
yield translatedHeader, environ[cgiName]
class CPWSGIApp(object):
"""A WSGI application object for a CherryPy Application.
pipeline: a list of (name, wsgiapp) pairs. Each 'wsgiapp' MUST be a
constructor that takes an initial, positional 'nextapp' argument,
plus optional keyword arguments, and returns a WSGI application
(that takes environ and start_response arguments). The 'name' can
be any you choose, and will correspond to keys in self.config.
head: rather than nest all apps in the pipeline on each call, it's only
done the first time, and the result is memoized into self.head. Set
this to None again if you change self.pipeline after calling self.
config: a dict whose keys match names listed in the pipeline. Each
value is a further dict which will be passed to the corresponding
named WSGI callable (from the pipeline) as keyword arguments.
"""
pipeline = [('ExceptionTrapper', ExceptionTrapper),
('InternalRedirector', InternalRedirector),
]
head = None
config = {}
response_class = AppResponse
def __init__(self, cpapp, pipeline=None):
self.cpapp = cpapp
self.pipeline = self.pipeline[:]
if pipeline:
self.pipeline.extend(pipeline)
self.config = self.config.copy()
def tail(self, environ, start_response):
"""WSGI application callable for the actual CherryPy application.
You probably shouldn't call this; call self.__call__ instead,
so that any WSGI middleware in self.pipeline can run first.
"""
return self.response_class(environ, start_response, self.cpapp)
def __call__(self, environ, start_response):
head = self.head
if head is None:
# Create and nest the WSGI apps in our pipeline (in reverse order).
# Then memoize the result in self.head.
head = self.tail
for name, callable in self.pipeline[::-1]:
conf = self.config.get(name, {})
head = callable(head, **conf)
self.head = head
return head(environ, start_response)
def namespace_handler(self, k, v):
"""Config handler for the 'wsgi' namespace."""
if k == "pipeline":
# Note this allows multiple 'wsgi.pipeline' config entries
# (but each entry will be processed in a 'random' order).
# It should also allow developers to set default middleware
# in code (passed to self.__init__) that deployers can add to
# (but not remove) via config.
self.pipeline.extend(v)
elif k == "response_class":
self.response_class = v
else:
name, arg = k.split(".", 1)
bucket = self.config.setdefault(name, {})
bucket[arg] = v
| 13,366 | Python | .py | 273 | 35.666667 | 84 | 0.583379 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,608 | _cperror.py | midgetspy_Sick-Beard/cherrypy/_cperror.py | """Error classes for CherryPy."""
from cgi import escape as _escape
from sys import exc_info as _exc_info
from traceback import format_exception as _format_exception
from urlparse import urljoin as _urljoin
from cherrypy.lib import httputil as _httputil
class CherryPyException(Exception):
pass
class TimeoutError(CherryPyException):
"""Exception raised when Response.timed_out is detected."""
pass
class InternalRedirect(CherryPyException):
"""Exception raised to switch to the handler for a different URL.
Any request.params must be supplied in a query string.
"""
def __init__(self, path, query_string=""):
import cherrypy
self.request = cherrypy.serving.request
self.query_string = query_string
if "?" in path:
# Separate any params included in the path
path, self.query_string = path.split("?", 1)
# Note that urljoin will "do the right thing" whether url is:
# 1. a URL relative to root (e.g. "/dummy")
# 2. a URL relative to the current path
# Note that any query string will be discarded.
path = _urljoin(self.request.path_info, path)
# Set a 'path' member attribute so that code which traps this
# error can have access to it.
self.path = path
CherryPyException.__init__(self, path, self.query_string)
class HTTPRedirect(CherryPyException):
"""Exception raised when the request should be redirected.
The new URL must be passed as the first argument to the Exception,
e.g., HTTPRedirect(newUrl). Multiple URLs are allowed. If a URL is
absolute, it will be used as-is. If it is relative, it is assumed
to be relative to the current cherrypy.request.path_info.
"""
def __init__(self, urls, status=None):
import cherrypy
request = cherrypy.serving.request
if isinstance(urls, basestring):
urls = [urls]
abs_urls = []
for url in urls:
# Note that urljoin will "do the right thing" whether url is:
# 1. a complete URL with host (e.g. "http://www.example.com/test")
# 2. a URL relative to root (e.g. "/dummy")
# 3. a URL relative to the current path
# Note that any query string in cherrypy.request is discarded.
url = _urljoin(cherrypy.url(), url)
abs_urls.append(url)
self.urls = abs_urls
# RFC 2616 indicates a 301 response code fits our goal; however,
# browser support for 301 is quite messy. Do 302/303 instead. See
# http://www.alanflavell.org.uk/www/post-redirect.html
if status is None:
if request.protocol >= (1, 1):
status = 303
else:
status = 302
else:
status = int(status)
if status < 300 or status > 399:
raise ValueError("status must be between 300 and 399.")
self.status = status
CherryPyException.__init__(self, abs_urls, status)
def set_response(self):
"""Modify cherrypy.response status, headers, and body to represent self.
CherryPy uses this internally, but you can also use it to create an
HTTPRedirect object and set its output without *raising* the exception.
"""
import cherrypy
response = cherrypy.serving.response
response.status = status = self.status
if status in (300, 301, 302, 303, 307):
response.headers['Content-Type'] = "text/html;charset=utf-8"
# "The ... URI SHOULD be given by the Location field
# in the response."
response.headers['Location'] = self.urls[0]
# "Unless the request method was HEAD, the entity of the response
# SHOULD contain a short hypertext note with a hyperlink to the
# new URI(s)."
msg = {300: "This resource can be found at <a href='%s'>%s</a>.",
301: "This resource has permanently moved to <a href='%s'>%s</a>.",
302: "This resource resides temporarily at <a href='%s'>%s</a>.",
303: "This resource can be found at <a href='%s'>%s</a>.",
307: "This resource has moved temporarily to <a href='%s'>%s</a>.",
}[status]
msgs = [msg % (u, u) for u in self.urls]
response.body = "<br />\n".join(msgs)
# Previous code may have set C-L, so we have to reset it
# (allow finalize to set it).
response.headers.pop('Content-Length', None)
elif status == 304:
# Not Modified.
# "The response MUST include the following header fields:
# Date, unless its omission is required by section 14.18.1"
# The "Date" header should have been set in Response.__init__
# "...the response SHOULD NOT include other entity-headers."
for key in ('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Location', 'Content-MD5',
'Content-Range', 'Content-Type', 'Expires',
'Last-Modified'):
if key in response.headers:
del response.headers[key]
# "The 304 response MUST NOT contain a message-body."
response.body = None
# Previous code may have set C-L, so we have to reset it.
response.headers.pop('Content-Length', None)
elif status == 305:
# Use Proxy.
# self.urls[0] should be the URI of the proxy.
response.headers['Location'] = self.urls[0]
response.body = None
# Previous code may have set C-L, so we have to reset it.
response.headers.pop('Content-Length', None)
else:
raise ValueError("The %s status code is unknown." % status)
def __call__(self):
"""Use this exception as a request.handler (raise self)."""
raise self
def clean_headers(status):
"""Remove any headers which should not apply to an error response."""
import cherrypy
response = cherrypy.serving.response
# Remove headers which applied to the original content,
# but do not apply to the error page.
respheaders = response.headers
for key in ["Accept-Ranges", "Age", "ETag", "Location", "Retry-After",
"Vary", "Content-Encoding", "Content-Length", "Expires",
"Content-Location", "Content-MD5", "Last-Modified"]:
if key in respheaders:
del respheaders[key]
if status != 416:
# A server sending a response with status code 416 (Requested
# range not satisfiable) SHOULD include a Content-Range field
# with a byte-range-resp-spec of "*". The instance-length
# specifies the current length of the selected resource.
# A response with status code 206 (Partial Content) MUST NOT
# include a Content-Range field with a byte-range- resp-spec of "*".
if "Content-Range" in respheaders:
del respheaders["Content-Range"]
class HTTPError(CherryPyException):
""" Exception used to return an HTTP error code (4xx-5xx) to the client.
This exception will automatically set the response status and body.
A custom message (a long description to display in the browser)
can be provided in place of the default.
"""
def __init__(self, status=500, message=None):
self.status = status
try:
self.code, self.reason, defaultmsg = _httputil.valid_status(status)
except ValueError, x:
raise self.__class__(500, x.args[0])
if self.code < 400 or self.code > 599:
raise ValueError("status must be between 400 and 599.")
# See http://www.python.org/dev/peps/pep-0352/
# self.message = message
self._message = message or defaultmsg
CherryPyException.__init__(self, status, message)
def set_response(self):
"""Modify cherrypy.response status, headers, and body to represent self.
CherryPy uses this internally, but you can also use it to create an
HTTPError object and set its output without *raising* the exception.
"""
import cherrypy
response = cherrypy.serving.response
clean_headers(self.code)
# In all cases, finalize will be called after this method,
# so don't bother cleaning up response values here.
response.status = self.status
tb = None
if cherrypy.serving.request.show_tracebacks:
tb = format_exc()
response.headers['Content-Type'] = "text/html;charset=utf-8"
response.headers.pop('Content-Length', None)
content = self.get_error_page(self.status, traceback=tb,
message=self._message)
response.body = content
_be_ie_unfriendly(self.code)
def get_error_page(self, *args, **kwargs):
return get_error_page(*args, **kwargs)
def __call__(self):
"""Use this exception as a request.handler (raise self)."""
raise self
class NotFound(HTTPError):
"""Exception raised when a URL could not be mapped to any handler (404)."""
def __init__(self, path=None):
if path is None:
import cherrypy
request = cherrypy.serving.request
path = request.script_name + request.path_info
self.args = (path,)
HTTPError.__init__(self, 404, "The path '%s' was not found." % path)
_HTTPErrorTemplate = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"></meta>
<title>%(status)s</title>
<style type="text/css">
#powered_by {
margin-top: 20px;
border-top: 2px solid black;
font-style: italic;
}
#traceback {
color: red;
}
</style>
</head>
<body>
<h2>%(status)s</h2>
<p>%(message)s</p>
<pre id="traceback">%(traceback)s</pre>
<div id="powered_by">
<span>Powered by <a href="http://www.cherrypy.org">CherryPy %(version)s</a></span>
</div>
</body>
</html>
'''
def get_error_page(status, **kwargs):
"""Return an HTML page, containing a pretty error response.
status should be an int or a str.
kwargs will be interpolated into the page template.
"""
import cherrypy
try:
code, reason, message = _httputil.valid_status(status)
except ValueError, x:
raise cherrypy.HTTPError(500, x.args[0])
# We can't use setdefault here, because some
# callers send None for kwarg values.
if kwargs.get('status') is None:
kwargs['status'] = "%s %s" % (code, reason)
if kwargs.get('message') is None:
kwargs['message'] = message
if kwargs.get('traceback') is None:
kwargs['traceback'] = ''
if kwargs.get('version') is None:
kwargs['version'] = cherrypy.__version__
for k, v in kwargs.iteritems():
if v is None:
kwargs[k] = ""
else:
kwargs[k] = _escape(kwargs[k])
# Use a custom template or callable for the error page?
pages = cherrypy.serving.request.error_page
error_page = pages.get(code) or pages.get('default')
if error_page:
try:
if callable(error_page):
return error_page(**kwargs)
else:
return open(error_page, 'rb').read() % kwargs
except:
e = _format_exception(*_exc_info())[-1]
m = kwargs['message']
if m:
m += "<br />"
m += "In addition, the custom error page failed:\n<br />%s" % e
kwargs['message'] = m
return _HTTPErrorTemplate % kwargs
_ie_friendly_error_sizes = {
400: 512, 403: 256, 404: 512, 405: 256,
406: 512, 408: 512, 409: 512, 410: 256,
500: 512, 501: 512, 505: 512,
}
def _be_ie_unfriendly(status):
import cherrypy
response = cherrypy.serving.response
# For some statuses, Internet Explorer 5+ shows "friendly error
# messages" instead of our response.body if the body is smaller
# than a given size. Fix this by returning a body over that size
# (by adding whitespace).
# See http://support.microsoft.com/kb/q218155/
s = _ie_friendly_error_sizes.get(status, 0)
if s:
s += 1
# Since we are issuing an HTTP error status, we assume that
# the entity is short, and we should just collapse it.
content = response.collapse_body()
l = len(content)
if l and l < s:
# IN ADDITION: the response must be written to IE
# in one chunk or it will still get replaced! Bah.
content = content + (" " * (s - l))
response.body = content
response.headers[u'Content-Length'] = str(len(content))
def format_exc(exc=None):
"""Return exc (or sys.exc_info if None), formatted."""
if exc is None:
exc = _exc_info()
if exc == (None, None, None):
return ""
import traceback
return "".join(traceback.format_exception(*exc))
def bare_error(extrabody=None):
"""Produce status, headers, body for a critical error.
Returns a triple without calling any other questionable functions,
so it should be as error-free as possible. Call it from an HTTP server
if you get errors outside of the request.
If extrabody is None, a friendly but rather unhelpful error message
is set in the body. If extrabody is a string, it will be appended
as-is to the body.
"""
# The whole point of this function is to be a last line-of-defense
# in handling errors. That is, it must not raise any errors itself;
# it cannot be allowed to fail. Therefore, don't add to it!
# In particular, don't call any other CP functions.
body = "Unrecoverable error in the server."
if extrabody is not None:
if not isinstance(extrabody, str):
extrabody = extrabody.encode('utf-8')
body += "\n" + extrabody
return ("500 Internal Server Error",
[('Content-Type', 'text/plain'),
('Content-Length', str(len(body)))],
[body])
| 15,119 | Python | .py | 320 | 36.15 | 87 | 0.603065 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,609 | __init__.py | midgetspy_Sick-Beard/cherrypy/__init__.py | """CherryPy is a pythonic, object-oriented HTTP framework.
CherryPy consists of not one, but four separate API layers.
The APPLICATION LAYER is the simplest. CherryPy applications are written as
a tree of classes and methods, where each branch in the tree corresponds to
a branch in the URL path. Each method is a 'page handler', which receives
GET and POST params as keyword arguments, and returns or yields the (HTML)
body of the response. The special method name 'index' is used for paths
that end in a slash, and the special method name 'default' is used to
handle multiple paths via a single handler. This layer also includes:
* the 'exposed' attribute (and cherrypy.expose)
* cherrypy.quickstart()
* _cp_config attributes
* cherrypy.tools (including cherrypy.session)
* cherrypy.url()
The ENVIRONMENT LAYER is used by developers at all levels. It provides
information about the current request and response, plus the application
and server environment, via a (default) set of top-level objects:
* cherrypy.request
* cherrypy.response
* cherrypy.engine
* cherrypy.server
* cherrypy.tree
* cherrypy.config
* cherrypy.thread_data
* cherrypy.log
* cherrypy.HTTPError, NotFound, and HTTPRedirect
* cherrypy.lib
The EXTENSION LAYER allows advanced users to construct and share their own
plugins. It consists of:
* Hook API
* Tool API
* Toolbox API
* Dispatch API
* Config Namespace API
Finally, there is the CORE LAYER, which uses the core API's to construct
the default components which are available at higher layers. You can think
of the default components as the 'reference implementation' for CherryPy.
Megaframeworks (and advanced users) may replace the default components
with customized or extended components. The core API's are:
* Application API
* Engine API
* Request API
* Server API
* WSGI API
These API's are described in the CherryPy specification:
http://www.cherrypy.org/wiki/CherryPySpec
"""
__version__ = "3.2.0rc1"
from urlparse import urljoin as _urljoin
from urllib import urlencode as _urlencode
class _AttributeDocstrings(type):
"""Metaclass for declaring docstrings for class attributes."""
# The full docstring for this type is down in the __init__ method so
# that it doesn't show up in help() for every consumer class.
def __init__(cls, name, bases, dct):
'''Metaclass for declaring docstrings for class attributes.
Base Python doesn't provide any syntax for setting docstrings on
'data attributes' (non-callables). This metaclass allows class
definitions to follow the declaration of a data attribute with
a docstring for that attribute; the attribute docstring will be
popped from the class dict and folded into the class docstring.
The naming convention for attribute docstrings is:
<attrname> + "__doc".
For example:
class Thing(object):
"""A thing and its properties."""
__metaclass__ = cherrypy._AttributeDocstrings
height = 50
height__doc = """The height of the Thing in inches."""
In which case, help(Thing) starts like this:
>>> help(mod.Thing)
Help on class Thing in module pkg.mod:
class Thing(__builtin__.object)
| A thing and its properties.
|
| height [= 50]:
| The height of the Thing in inches.
|
The benefits of this approach over hand-edited class docstrings:
1. Places the docstring nearer to the attribute declaration.
2. Makes attribute docs more uniform ("name (default): doc").
3. Reduces mismatches of attribute _names_ between
the declaration and the documentation.
4. Reduces mismatches of attribute default _values_ between
the declaration and the documentation.
The benefits of a metaclass approach over other approaches:
1. Simpler ("less magic") than interface-based solutions.
2. __metaclass__ can be specified at the module global level
for classic classes.
For various formatting reasons, you should write multiline docs
with a leading newline and not a trailing one:
response__doc = """
The response object for the current thread. In the main thread,
and any threads which are not HTTP requests, this is None."""
The type of the attribute is intentionally not included, because
that's not How Python Works. Quack.
'''
newdoc = [cls.__doc__ or ""]
dctkeys = dct.keys()
dctkeys.sort()
for name in dctkeys:
if name.endswith("__doc"):
# Remove the magic doc attribute.
if hasattr(cls, name):
delattr(cls, name)
# Make a uniformly-indented docstring from it.
val = '\n'.join([' ' + line.strip()
for line in dct[name].split('\n')])
# Get the default value.
attrname = name[:-5]
try:
attrval = getattr(cls, attrname)
except AttributeError:
attrval = "missing"
# Add the complete attribute docstring to our list.
newdoc.append("%s [= %r]:\n%s" % (attrname, attrval, val))
# Add our list of new docstrings to the class docstring.
cls.__doc__ = "\n\n".join(newdoc)
from cherrypy._cperror import HTTPError, HTTPRedirect, InternalRedirect
from cherrypy._cperror import NotFound, CherryPyException, TimeoutError
from cherrypy import _cpdispatch as dispatch
from cherrypy import _cptools
tools = _cptools.default_toolbox
Tool = _cptools.Tool
from cherrypy import _cprequest
from cherrypy.lib import httputil as _httputil
from cherrypy import _cptree
tree = _cptree.Tree()
from cherrypy._cptree import Application
from cherrypy import _cpwsgi as wsgi
from cherrypy import process
try:
from cherrypy.process import win32
engine = win32.Win32Bus()
engine.console_control_handler = win32.ConsoleCtrlHandler(engine)
del win32
except ImportError:
engine = process.bus
# Timeout monitor
class _TimeoutMonitor(process.plugins.Monitor):
def __init__(self, bus):
self.servings = []
process.plugins.Monitor.__init__(self, bus, self.run)
def acquire(self):
self.servings.append((serving.request, serving.response))
def release(self):
try:
self.servings.remove((serving.request, serving.response))
except ValueError:
pass
def run(self):
"""Check timeout on all responses. (Internal)"""
for req, resp in self.servings:
resp.check_timeout()
engine.timeout_monitor = _TimeoutMonitor(engine)
engine.timeout_monitor.subscribe()
engine.autoreload = process.plugins.Autoreloader(engine)
engine.autoreload.subscribe()
engine.thread_manager = process.plugins.ThreadManager(engine)
engine.thread_manager.subscribe()
engine.signal_handler = process.plugins.SignalHandler(engine)
from cherrypy import _cpserver
server = _cpserver.Server()
server.subscribe()
def quickstart(root=None, script_name="", config=None):
"""Mount the given root, start the builtin server (and engine), then block.
root: an instance of a "controller class" (a collection of page handler
methods) which represents the root of the application.
script_name: a string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the URL
at which to mount the given root. For example, if root.index() will
handle requests to "http://www.example.com:8080/dept/app1/", then
the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the root
of the URI, it MUST be an empty string (not "/").
config: a file or dict containing application config. If this contains
a [global] section, those entries will be used in the global
(site-wide) config.
"""
if config:
_global_conf_alias.update(config)
tree.mount(root, script_name, config)
if hasattr(engine, "signal_handler"):
engine.signal_handler.subscribe()
if hasattr(engine, "console_control_handler"):
engine.console_control_handler.subscribe()
engine.start()
engine.block()
try:
from threading import local as _local
except ImportError:
from cherrypy._cpthreadinglocal import local as _local
class _Serving(_local):
"""An interface for registering request and response objects.
Rather than have a separate "thread local" object for the request and
the response, this class works as a single threadlocal container for
both objects (and any others which developers wish to define). In this
way, we can easily dump those objects when we stop/start a new HTTP
conversation, yet still refer to them as module-level globals in a
thread-safe way.
"""
__metaclass__ = _AttributeDocstrings
request = _cprequest.Request(_httputil.Host("127.0.0.1", 80),
_httputil.Host("127.0.0.1", 1111))
request__doc = """
The request object for the current thread. In the main thread,
and any threads which are not receiving HTTP requests, this is None."""
response = _cprequest.Response()
response__doc = """
The response object for the current thread. In the main thread,
and any threads which are not receiving HTTP requests, this is None."""
def load(self, request, response):
self.request = request
self.response = response
def clear(self):
"""Remove all attributes of self."""
self.__dict__.clear()
serving = _Serving()
class _ThreadLocalProxy(object):
__slots__ = ['__attrname__', '__dict__']
def __init__(self, attrname):
self.__attrname__ = attrname
def __getattr__(self, name):
child = getattr(serving, self.__attrname__)
return getattr(child, name)
def __setattr__(self, name, value):
if name in ("__attrname__",):
object.__setattr__(self, name, value)
else:
child = getattr(serving, self.__attrname__)
setattr(child, name, value)
def __delattr__(self, name):
child = getattr(serving, self.__attrname__)
delattr(child, name)
def _get_dict(self):
child = getattr(serving, self.__attrname__)
d = child.__class__.__dict__.copy()
d.update(child.__dict__)
return d
__dict__ = property(_get_dict)
def __getitem__(self, key):
child = getattr(serving, self.__attrname__)
return child[key]
def __setitem__(self, key, value):
child = getattr(serving, self.__attrname__)
child[key] = value
def __delitem__(self, key):
child = getattr(serving, self.__attrname__)
del child[key]
def __contains__(self, key):
child = getattr(serving, self.__attrname__)
return key in child
def __len__(self):
child = getattr(serving, self.__attrname__)
return len(child)
def __nonzero__(self):
child = getattr(serving, self.__attrname__)
return bool(child)
# Create request and response object (the same objects will be used
# throughout the entire life of the webserver, but will redirect
# to the "serving" object)
request = _ThreadLocalProxy('request')
response = _ThreadLocalProxy('response')
# Create thread_data object as a thread-specific all-purpose storage
class _ThreadData(_local):
"""A container for thread-specific data."""
thread_data = _ThreadData()
# Monkeypatch pydoc to allow help() to go through the threadlocal proxy.
# Jan 2007: no Googleable examples of anyone else replacing pydoc.resolve.
# The only other way would be to change what is returned from type(request)
# and that's not possible in pure Python (you'd have to fake ob_type).
def _cherrypy_pydoc_resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, _ThreadLocalProxy):
thing = getattr(serving, thing.__attrname__)
return _pydoc._builtin_resolve(thing, forceload)
try:
import pydoc as _pydoc
_pydoc._builtin_resolve = _pydoc.resolve
_pydoc.resolve = _cherrypy_pydoc_resolve
except ImportError:
pass
from cherrypy import _cplogging
class _GlobalLogManager(_cplogging.LogManager):
def __call__(self, *args, **kwargs):
# Do NOT use try/except here. See http://www.cherrypy.org/ticket/945
if hasattr(request, 'app') and hasattr(request.app, 'log'):
log = request.app.log
else:
log = self
return log.error(*args, **kwargs)
def access(self):
try:
return request.app.log.access()
except AttributeError:
return _cplogging.LogManager.access(self)
log = _GlobalLogManager()
# Set a default screen handler on the global log.
log.screen = True
log.error_file = ''
# Using an access file makes CP about 10% slower. Leave off by default.
log.access_file = ''
def _buslog(msg, level):
log.error(msg, 'ENGINE', severity=level)
engine.subscribe('log', _buslog)
# Helper functions for CP apps #
def expose(func=None, alias=None):
"""Expose the function, optionally providing an alias or set of aliases."""
def expose_(func):
func.exposed = True
if alias is not None:
if isinstance(alias, basestring):
parents[alias.replace(".", "_")] = func
else:
for a in alias:
parents[a.replace(".", "_")] = func
return func
import sys, types
if isinstance(func, (types.FunctionType, types.MethodType)):
if alias is None:
# @expose
func.exposed = True
return func
else:
# func = expose(func, alias)
parents = sys._getframe(1).f_locals
return expose_(func)
elif func is None:
if alias is None:
# @expose()
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose(alias="alias") or
# @expose(alias=["alias1", "alias2"])
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose("alias") or
# @expose(["alias1", "alias2"])
parents = sys._getframe(1).f_locals
alias = func
return expose_
def url(path="", qs="", script_name=None, base=None, relative=None):
"""Create an absolute URL for the given path.
If 'path' starts with a slash ('/'), this will return
(base + script_name + path + qs).
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
If script_name is None, cherrypy.request will be used
to find a script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
Finally, note that this function can be used to obtain an absolute URL
for the current request path (minus the querystring) by passing no args.
If you call url(qs=cherrypy.request.query_string), you should get the
original browser URL (assuming no internal redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
absolute URL (including the scheme, host, vhost, and script_name).
If True, the output will instead be a URL that is relative to the
current request path, perhaps including '..' atoms. If relative is
the string 'server', the output will instead be a URL that is
relative to the server root; i.e., it will start with a slash.
"""
if isinstance(qs, (tuple, list, dict)):
qs = _urlencode(qs)
if qs:
qs = '?' + qs
if request.app:
if not path.startswith("/"):
# Append/remove trailing slash from path_info as needed
# (this is to support mistyped URL's without redirecting;
# if you want to redirect, use tools.trailing_slash).
pi = request.path_info
if request.is_index is True:
if not pi.endswith('/'):
pi = pi + '/'
elif request.is_index is False:
if pi.endswith('/') and pi != '/':
pi = pi[:-1]
if path == "":
path = pi
else:
path = _urljoin(pi, path)
if script_name is None:
script_name = request.script_name
if base is None:
base = request.base
newurl = base + script_name + path + qs
else:
# No request.app (we're being called outside a request).
# We'll have to guess the base from server.* attributes.
# This will produce very different results from the above
# if you're using vhosts or tools.proxy.
if base is None:
base = server.base()
path = (script_name or "") + path
newurl = base + path + qs
if './' in newurl:
# Normalize the URL by removing ./ and ../
atoms = []
for atom in newurl.split('/'):
if atom == '.':
pass
elif atom == '..':
atoms.pop()
else:
atoms.append(atom)
newurl = '/'.join(atoms)
# At this point, we should have a fully-qualified absolute URL.
if relative is None:
relative = getattr(request.app, "relative_urls", False)
# See http://www.ietf.org/rfc/rfc2396.txt
if relative == 'server':
# "A relative reference beginning with a single slash character is
# termed an absolute-path reference, as defined by <abs_path>..."
# This is also sometimes called "server-relative".
newurl = '/' + '/'.join(newurl.split('/', 3)[3:])
elif relative:
# "A relative reference that does not begin with a scheme name
# or a slash character is termed a relative-path reference."
old = url().split('/')[:-1]
new = newurl.split('/')
while old and new:
a, b = old[0], new[0]
if a != b:
break
old.pop(0)
new.pop(0)
new = (['..'] * len(old)) + new
newurl = '/'.join(new)
return newurl
# import _cpconfig last so it can reference other top-level objects
from cherrypy import _cpconfig
# Use _global_conf_alias so quickstart can use 'config' as an arg
# without shadowing cherrypy.config.
config = _global_conf_alias = _cpconfig.Config()
config.defaults = {
'tools.log_tracebacks.on': True,
'tools.log_headers.on': True,
'tools.trailing_slash.on': True,
'tools.encode.on': True
}
config.namespaces["log"] = lambda k, v: setattr(log, k, v)
config.namespaces["checker"] = lambda k, v: setattr(checker, k, v)
# Must reset to get our defaults applied.
config.reset()
from cherrypy import _cpchecker
checker = _cpchecker.Checker()
engine.subscribe('start', checker)
| 20,462 | Python | .py | 451 | 35.742794 | 80 | 0.63433 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,610 | _cpserver.py | midgetspy_Sick-Beard/cherrypy/_cpserver.py | """Manage HTTP servers with CherryPy."""
import warnings
import cherrypy
from cherrypy.lib import attributes
# We import * because we want to export check_port
# et al as attributes of this module.
from cherrypy.process.servers import *
class Server(ServerAdapter):
"""An adapter for an HTTP server.
You can set attributes (like socket_host and socket_port)
on *this* object (which is probably cherrypy.server), and call
quickstart. For example:
cherrypy.server.socket_port = 80
cherrypy.quickstart()
"""
socket_port = 8080
_socket_host = '127.0.0.1'
def _get_socket_host(self):
return self._socket_host
def _set_socket_host(self, value):
if value == '':
raise ValueError("The empty string ('') is not an allowed value. "
"Use '0.0.0.0' instead to listen on all active "
"interfaces (INADDR_ANY).")
self._socket_host = value
socket_host = property(_get_socket_host, _set_socket_host,
doc="""The hostname or IP address on which to listen for connections.
Host values may be any IPv4 or IPv6 address, or any valid hostname.
The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if
your hosts file prefers IPv6). The string '0.0.0.0' is a special
IPv4 entry meaning "any active interface" (INADDR_ANY), and '::'
is the similar IN6ADDR_ANY for IPv6. The empty string or None are
not allowed.""")
socket_file = None
socket_queue_size = 5
socket_timeout = 10
shutdown_timeout = 5
protocol_version = 'HTTP/1.1'
reverse_dns = False
thread_pool = 10
thread_pool_max = -1
max_request_header_size = 500 * 1024
max_request_body_size = 100 * 1024 * 1024
instance = None
ssl_context = None
ssl_certificate = None
ssl_certificate_chain = None
ssl_private_key = None
ssl_module = 'pyopenssl'
nodelay = True
wsgi_version = (1, 1)
def __init__(self):
self.bus = cherrypy.engine
self.httpserver = None
self.interrupt = None
self.running = False
def httpserver_from_self(self, httpserver=None):
"""Return a (httpserver, bind_addr) pair based on self attributes."""
if httpserver is None:
httpserver = self.instance
if httpserver is None:
from cherrypy import _cpwsgi_server
httpserver = _cpwsgi_server.CPWSGIServer(self)
if isinstance(httpserver, basestring):
# Is anyone using this? Can I add an arg?
httpserver = attributes(httpserver)(self)
return httpserver, self.bind_addr
def start(self):
"""Start the HTTP server."""
if not self.httpserver:
self.httpserver, self.bind_addr = self.httpserver_from_self()
ServerAdapter.start(self)
start.priority = 75
def _get_bind_addr(self):
if self.socket_file:
return self.socket_file
if self.socket_host is None and self.socket_port is None:
return None
return (self.socket_host, self.socket_port)
def _set_bind_addr(self, value):
if value is None:
self.socket_file = None
self.socket_host = None
self.socket_port = None
elif isinstance(value, basestring):
self.socket_file = value
self.socket_host = None
self.socket_port = None
else:
try:
self.socket_host, self.socket_port = value
self.socket_file = None
except ValueError:
raise ValueError("bind_addr must be a (host, port) tuple "
"(for TCP sockets) or a string (for Unix "
"domain sockets), not %r" % value)
bind_addr = property(_get_bind_addr, _set_bind_addr)
def base(self):
"""Return the base (scheme://host[:port] or sock file) for this server."""
if self.socket_file:
return self.socket_file
host = self.socket_host
if host in ('0.0.0.0', '::'):
# 0.0.0.0 is INADDR_ANY and :: is IN6ADDR_ANY.
# Look up the host name, which should be the
# safest thing to spit out in a URL.
import socket
host = socket.gethostname()
port = self.socket_port
if self.ssl_certificate:
scheme = "https"
if port != 443:
host += ":%s" % port
else:
scheme = "http"
if port != 80:
host += ":%s" % port
return "%s://%s" % (scheme, host)
| 4,921 | Python | .py | 118 | 30.398305 | 83 | 0.578722 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,611 | _cptree.py | midgetspy_Sick-Beard/cherrypy/_cptree.py | """CherryPy Application and Tree objects."""
import os
import cherrypy
from cherrypy import _cpconfig, _cplogging, _cprequest, _cpwsgi, tools
from cherrypy.lib import httputil
class Application(object):
"""A CherryPy Application.
Servers and gateways should not instantiate Request objects directly.
Instead, they should ask an Application object for a request object.
An instance of this class may also be used as a WSGI callable
(WSGI application object) for itself.
"""
__metaclass__ = cherrypy._AttributeDocstrings
root = None
root__doc = """
The top-most container of page handlers for this app. Handlers should
be arranged in a hierarchy of attributes, matching the expected URI
hierarchy; the default dispatcher then searches this hierarchy for a
matching handler. When using a dispatcher other than the default,
this value may be None."""
config = {}
config__doc = """
A dict of {path: pathconf} pairs, where 'pathconf' is itself a dict
of {key: value} pairs."""
namespaces = _cpconfig.NamespaceSet()
toolboxes = {'tools': cherrypy.tools}
log = None
log__doc = """A LogManager instance. See _cplogging."""
wsgiapp = None
wsgiapp__doc = """A CPWSGIApp instance. See _cpwsgi."""
request_class = _cprequest.Request
response_class = _cprequest.Response
relative_urls = False
def __init__(self, root, script_name="", config=None):
self.log = _cplogging.LogManager(id(self), cherrypy.log.logger_root)
self.root = root
self.script_name = script_name
self.wsgiapp = _cpwsgi.CPWSGIApp(self)
self.namespaces = self.namespaces.copy()
self.namespaces["log"] = lambda k, v: setattr(self.log, k, v)
self.namespaces["wsgi"] = self.wsgiapp.namespace_handler
self.config = self.__class__.config.copy()
if config:
self.merge(config)
def __repr__(self):
return "%s.%s(%r, %r)" % (self.__module__, self.__class__.__name__,
self.root, self.script_name)
script_name__doc = """
The URI "mount point" for this app. A mount point is that portion of
the URI which is constant for all URIs that are serviced by this
application; it does not include scheme, host, or proxy ("virtual host")
portions of the URI.
For example, if script_name is "/my/cool/app", then the URL
"http://www.example.com/my/cool/app/page1" might be handled by a
"page1" method on the root object.
The value of script_name MUST NOT end in a slash. If the script_name
refers to the root of the URI, it MUST be an empty string (not "/").
If script_name is explicitly set to None, then the script_name will be
provided for each call from request.wsgi_environ['SCRIPT_NAME'].
"""
def _get_script_name(self):
if self._script_name is None:
# None signals that the script name should be pulled from WSGI environ.
return cherrypy.serving.request.wsgi_environ['SCRIPT_NAME'].rstrip("/")
return self._script_name
def _set_script_name(self, value):
if value:
value = value.rstrip("/")
self._script_name = value
script_name = property(fget=_get_script_name, fset=_set_script_name,
doc=script_name__doc)
def merge(self, config):
"""Merge the given config into self.config."""
_cpconfig.merge(self.config, config)
# Handle namespaces specified in config.
self.namespaces(self.config.get("/", {}))
def find_config(self, path, key, default=None):
"""Return the most-specific value for key along path, or default."""
trail = path or "/"
while trail:
nodeconf = self.config.get(trail, {})
if key in nodeconf:
return nodeconf[key]
lastslash = trail.rfind("/")
if lastslash == -1:
break
elif lastslash == 0 and trail != "/":
trail = "/"
else:
trail = trail[:lastslash]
return default
def get_serving(self, local, remote, scheme, sproto):
"""Create and return a Request and Response object."""
req = self.request_class(local, remote, scheme, sproto)
req.app = self
for name, toolbox in self.toolboxes.items():
req.namespaces[name] = toolbox
resp = self.response_class()
cherrypy.serving.load(req, resp)
cherrypy.engine.timeout_monitor.acquire()
cherrypy.engine.publish('acquire_thread')
return req, resp
def release_serving(self):
"""Release the current serving (request and response)."""
req = cherrypy.serving.request
cherrypy.engine.timeout_monitor.release()
try:
req.close()
except:
cherrypy.log(traceback=True, severity=40)
cherrypy.serving.clear()
def __call__(self, environ, start_response):
return self.wsgiapp(environ, start_response)
class Tree(object):
"""A registry of CherryPy applications, mounted at diverse points.
An instance of this class may also be used as a WSGI callable
(WSGI application object), in which case it dispatches to all
mounted apps.
"""
apps = {}
apps__doc = """
A dict of the form {script name: application}, where "script name"
is a string declaring the URI mount point (no trailing slash), and
"application" is an instance of cherrypy.Application (or an arbitrary
WSGI callable if you happen to be using a WSGI server)."""
def __init__(self):
self.apps = {}
def mount(self, root, script_name="", config=None):
"""Mount a new app from a root object, script_name, and config.
root: an instance of a "controller class" (a collection of page
handler methods) which represents the root of the application.
This may also be an Application instance, or None if using
a dispatcher other than the default.
script_name: a string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the
URL at which to mount the given root. For example, if root.index()
will handle requests to "http://www.example.com:8080/dept/app1/",
then the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the
root of the URI, it MUST be an empty string (not "/").
config: a file or dict containing application config.
"""
if script_name is None:
raise TypeError(
"The 'script_name' argument may not be None. Application "
"objects may, however, possess a script_name of None (in "
"order to inpect the WSGI environ for SCRIPT_NAME upon each "
"request). You cannot mount such Applications on this Tree; "
"you must pass them to a WSGI server interface directly.")
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip("/")
if isinstance(root, Application):
app = root
if script_name != "" and script_name != app.script_name:
raise ValueError("Cannot specify a different script name and "
"pass an Application instance to cherrypy.mount")
script_name = app.script_name
else:
app = Application(root, script_name)
# If mounted at "", add favicon.ico
if (script_name == "" and root is not None
and not hasattr(root, "favicon_ico")):
favicon = os.path.join(os.getcwd(), os.path.dirname(__file__),
"favicon.ico")
root.favicon_ico = tools.staticfile.handler(favicon)
if config:
app.merge(config)
self.apps[script_name] = app
return app
def graft(self, wsgi_callable, script_name=""):
"""Mount a wsgi callable at the given script_name."""
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip("/")
self.apps[script_name] = wsgi_callable
def script_name(self, path=None):
"""The script_name of the app at the given path, or None.
If path is None, cherrypy.request is used.
"""
if path is None:
try:
request = cherrypy.serving.request
path = httputil.urljoin(request.script_name,
request.path_info)
except AttributeError:
return None
while True:
if path in self.apps:
return path
if path == "":
return None
# Move one node up the tree and try again.
path = path[:path.rfind("/")]
def __call__(self, environ, start_response):
# If you're calling this, then you're probably setting SCRIPT_NAME
# to '' (some WSGI servers always set SCRIPT_NAME to '').
# Try to look up the app using the full path.
env1x = environ
if environ.get(u'wsgi.version') == (u'u', 0):
env1x = _cpwsgi.downgrade_wsgi_ux_to_1x(environ)
path = httputil.urljoin(env1x.get('SCRIPT_NAME', ''),
env1x.get('PATH_INFO', ''))
sn = self.script_name(path or "/")
if sn is None:
start_response('404 Not Found', [])
return []
app = self.apps[sn]
# Correct the SCRIPT_NAME and PATH_INFO environ entries.
environ = environ.copy()
if environ.get(u'wsgi.version') == (u'u', 0):
# Python 2/WSGI u.0: all strings MUST be of type unicode
enc = environ[u'wsgi.url_encoding']
environ[u'SCRIPT_NAME'] = sn.decode(enc)
environ[u'PATH_INFO'] = path[len(sn.rstrip("/")):].decode(enc)
else:
# Python 2/WSGI 1.x: all strings MUST be of type str
environ['SCRIPT_NAME'] = sn
environ['PATH_INFO'] = path[len(sn.rstrip("/")):]
return app(environ, start_response)
| 11,016 | Python | .py | 218 | 37.62844 | 84 | 0.597349 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,612 | _cpreqbody.py | midgetspy_Sick-Beard/cherrypy/_cpreqbody.py | """Request body processing for CherryPy.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the 'request.body.processors' dict. If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the 'image/jpeg' type, then we look for a processor for the 'image'
types altogether. If neither the full type nor the major type has a matching
processor, then a default processor is used (self.default_proc). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the 'charset' attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill .name and .filename attributes on the request.body or the Part.
"""
import re
import tempfile
from urllib import unquote_plus
import cherrypy
from cherrypy.lib import httputil
# -------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
"""Read application/x-www-form-urlencoded data into entity.params."""
qs = entity.fp.read()
for charset in entity.attempt_charsets:
try:
params = {}
for aparam in qs.split('&'):
for pair in aparam.split(';'):
if not pair:
continue
atoms = pair.split('=', 1)
if len(atoms) == 1:
atoms.append('')
key = unquote_plus(atoms[0]).decode(charset)
value = unquote_plus(atoms[1]).decode(charset)
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
except UnicodeDecodeError:
pass
else:
entity.charset = charset
break
else:
raise cherrypy.HTTPError(
400, "The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(entity.attempt_charsets))
# Now that all values have been successfully parsed and decoded,
# apply them to the entity.params dict.
for key, value in params.items():
if key in entity.params:
if not isinstance(entity.params[key], list):
entity.params[key] = [entity.params[key]]
entity.params[key].append(value)
else:
entity.params[key] = value
def process_multipart(entity):
"""Read all multipart parts into entity.parts."""
ib = u""
if u'boundary' in entity.content_type.params:
# http://tools.ietf.org/html/rfc2046#section-5.1.1
# "The grammar for parameters on the Content-type field is such that it
# is often necessary to enclose the boundary parameter values in quotes
# on the Content-type line"
ib = entity.content_type.params['boundary'].strip(u'"')
if not re.match(u"^[ -~]{0,200}[!-~]$", ib):
raise ValueError(u'Invalid boundary in multipart form: %r' % (ib,))
ib = (u'--' + ib).encode('ascii')
# Find the first marker
while True:
b = entity.readline()
if not b:
return
b = b.strip()
if b == ib:
break
# Read all parts
while True:
part = entity.part_class.from_fp(entity.fp, ib)
entity.parts.append(part)
part.process()
if part.fp.done:
break
def process_multipart_form_data(entity):
"""Read all multipart/form-data parts into entity.parts or entity.params."""
process_multipart(entity)
kept_parts = []
for part in entity.parts:
if part.name is None:
kept_parts.append(part)
else:
if part.filename is None:
# It's a regular field
entity.params[part.name] = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
entity.params[part.name] = part
entity.parts = kept_parts
def _old_process_multipart(entity):
"""The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
process_multipart(entity)
params = entity.params
for part in entity.parts:
if part.name is None:
key = u'parts'
else:
key = part.name
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
# --------------------------------- Entities --------------------------------- #
class Entity(object):
"""An HTTP request body, or MIME multipart body."""
__metaclass__ = cherrypy._AttributeDocstrings
params = None
params__doc = u"""
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True)."""
default_content_type = u'application/x-www-form-urlencoded'
# http://tools.ietf.org/html/rfc2046#section-4.1.2:
# "The default character set, which must be assumed in the
# absence of a charset parameter, is US-ASCII."
# However, many browsers send data in utf-8 with no charset.
attempt_charsets = [u'utf-8']
processors = {u'application/x-www-form-urlencoded': process_urlencoded,
u'multipart/form-data': process_multipart_form_data,
u'multipart': process_multipart,
}
def __init__(self, fp, headers, params=None, parts=None):
# Make an instance-specific copy of the class processors
# so Tools, etc. can replace them per-request.
self.processors = self.processors.copy()
self.fp = fp
self.headers = headers
if params is None:
params = {}
self.params = params
if parts is None:
parts = []
self.parts = parts
# Content-Type
self.content_type = headers.elements(u'Content-Type')
if self.content_type:
self.content_type = self.content_type[0]
else:
self.content_type = httputil.HeaderElement.from_str(
self.default_content_type)
# Copy the class 'attempt_charsets', prepending any Content-Type charset
dec = self.content_type.params.get(u"charset", None)
if dec:
dec = dec.decode('ISO-8859-1')
self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
if c != dec]
else:
self.attempt_charsets = self.attempt_charsets[:]
# Length
self.length = None
clen = headers.get(u'Content-Length', None)
# If Transfer-Encoding is 'chunked', ignore any Content-Length.
if clen is not None and 'chunked' not in headers.get(u'Transfer-Encoding', ''):
try:
self.length = int(clen)
except ValueError:
pass
# Content-Disposition
self.name = None
self.filename = None
disp = headers.elements(u'Content-Disposition')
if disp:
disp = disp[0]
if 'name' in disp.params:
self.name = disp.params['name']
if self.name.startswith(u'"') and self.name.endswith(u'"'):
self.name = self.name[1:-1]
if 'filename' in disp.params:
self.filename = disp.params['filename']
if self.filename.startswith(u'"') and self.filename.endswith(u'"'):
self.filename = self.filename[1:-1]
# The 'type' attribute is deprecated in 3.2; remove it in 3.3.
type = property(lambda self: self.content_type)
def read(self, size=None, fp_out=None):
return self.fp.read(size, fp_out)
def readline(self, size=None):
return self.fp.readline(size)
def readlines(self, sizehint=None):
return self.fp.readlines(sizehint)
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None). Return fp_out."""
if fp_out is None:
fp_out = self.make_file()
self.read(fp_out=fp_out)
return fp_out
def make_file(self):
"""Return a file into which the request body will be read.
By default, this will return a TemporaryFile. Override as needed."""
return tempfile.TemporaryFile()
def fullvalue(self):
"""Return this entity as a string, whether stored in a file or not."""
if self.file:
# It was stored in a tempfile. Read it.
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
else:
value = self.value
return value
def process(self):
"""Execute the best-match processor for the given media type."""
proc = None
ct = self.content_type.value
try:
proc = self.processors[ct]
except KeyError:
toptype = ct.split(u'/', 1)[0]
try:
proc = self.processors[toptype]
except KeyError:
pass
if proc is None:
self.default_proc()
else:
proc(self)
def default_proc(self):
# Leave the fp alone for someone else to read. This works fine
# for request.body, but the Part subclasses need to override this
# so they can move on to the next part.
pass
class Part(Entity):
"""A MIME part entity, part of a multipart entity."""
default_content_type = u'text/plain'
# "The default character set, which must be assumed in the absence of a
# charset parameter, is US-ASCII."
attempt_charsets = [u'us-ascii', u'utf-8']
# This is the default in stdlib cgi. We may want to increase it.
maxrambytes = 1000
def __init__(self, fp, headers, boundary):
Entity.__init__(self, fp, headers)
self.boundary = boundary
self.file = None
self.value = None
def from_fp(cls, fp, boundary):
headers = cls.read_headers(fp)
return cls(fp, headers, boundary)
from_fp = classmethod(from_fp)
def read_headers(cls, fp):
headers = httputil.HeaderMap()
while True:
line = fp.readline()
if not line:
# No more data--illegal end of headers
raise EOFError(u"Illegal end of headers.")
if line == '\r\n':
# Normal end of headers
break
if not line.endswith('\r\n'):
raise ValueError(u"MIME requires CRLF terminators: %r" % line)
if line[0] in ' \t':
# It's a continuation line.
v = line.strip().decode(u'ISO-8859-1')
else:
k, v = line.split(":", 1)
k = k.strip().decode(u'ISO-8859-1')
v = v.strip().decode(u'ISO-8859-1')
existing = headers.get(k)
if existing:
v = u", ".join((existing, v))
headers[k] = v
return headers
read_headers = classmethod(read_headers)
def read_lines_to_boundary(self, fp_out=None):
"""Read bytes from self.fp and return or write them to a file.
If the 'fp_out' argument is None (the default), all bytes read are
returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like object that
supports the 'write' method; all bytes read will be written to the fp,
and that fp is returned.
"""
endmarker = self.boundary + "--"
delim = ""
prev_lf = True
lines = []
seen = 0
while True:
line = self.fp.readline(1 << 16)
if not line:
raise EOFError(u"Illegal end of multipart body.")
if line.startswith("--") and prev_lf:
strippedline = line.strip()
if strippedline == self.boundary:
break
if strippedline == endmarker:
self.fp.finish()
break
line = delim + line
if line.endswith("\r\n"):
delim = "\r\n"
line = line[:-2]
prev_lf = True
elif line.endswith("\n"):
delim = "\n"
line = line[:-1]
prev_lf = True
else:
delim = ""
prev_lf = False
if fp_out is None:
lines.append(line)
seen += len(line)
if seen > self.maxrambytes:
fp_out = self.make_file()
for line in lines:
fp_out.write(line)
else:
fp_out.write(line)
if fp_out is None:
result = ''.join(lines)
for charset in self.attempt_charsets:
try:
result = result.decode(charset)
except UnicodeDecodeError:
pass
else:
self.charset = charset
return result
else:
raise cherrypy.HTTPError(
400, "The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(self.attempt_charsets))
else:
fp_out.seek(0)
return fp_out
def default_proc(self):
if self.filename:
# Always read into a file if a .filename was given.
self.file = self.read_into_file()
else:
result = self.read_lines_to_boundary()
if isinstance(result, basestring):
self.value = result
else:
self.file = result
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None). Return fp_out."""
if fp_out is None:
fp_out = self.make_file()
self.read_lines_to_boundary(fp_out=fp_out)
return fp_out
Entity.part_class = Part
class Infinity(object):
def __cmp__(self, other):
return 1
def __sub__(self, other):
return self
inf = Infinity()
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control', 'Connection',
'Content-Encoding', 'Content-Language', 'Expect', 'If-Match',
'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'Te', 'Trailer',
'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning', 'Www-Authenticate']
class SizedReader:
def __init__(self, fp, length, maxbytes, bufsize=8192, has_trailers=False):
# Wrap our fp in a buffer so peek() works
self.fp = fp
self.length = length
self.maxbytes = maxbytes
self.buffer = ''
self.bufsize = bufsize
self.bytes_read = 0
self.done = False
self.has_trailers = has_trailers
def read(self, size=None, fp_out=None):
"""Read bytes from the request body and return or write them to a file.
A number of bytes less than or equal to the 'size' argument are read
off the socket. The actual number of bytes read are tracked in
self.bytes_read. The number may be smaller than 'size' when 1) the
client sends fewer bytes, 2) the 'Content-Length' request header
specifies fewer bytes than requested, or 3) the number of bytes read
exceeds self.maxbytes (in which case, 413 is raised).
If the 'fp_out' argument is None (the default), all bytes read are
returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like object that
supports the 'write' method; all bytes read will be written to the fp,
and None is returned.
"""
if self.length is None:
if size is None:
remaining = inf
else:
remaining = size
else:
remaining = self.length - self.bytes_read
if size and size < remaining:
remaining = size
if remaining == 0:
self.finish()
if fp_out is None:
return ''
else:
return None
chunks = []
# Read bytes from the buffer.
if self.buffer:
if remaining is inf:
data = self.buffer
self.buffer = ''
else:
data = self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
# Read bytes from the socket.
while remaining > 0:
chunksize = min(remaining, self.bufsize)
try:
data = self.fp.read(chunksize)
except Exception, e:
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, "Maximum request length: %r" % e.args[1])
else:
raise
if not data:
self.finish()
break
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
if fp_out is None:
return ''.join(chunks)
def readline(self, size=None):
"""Read a line from the request body and return it."""
chunks = []
while size is None or size > 0:
chunksize = self.bufsize
if size is not None and size < self.bufsize:
chunksize = size
data = self.read(chunksize)
if not data:
break
pos = data.find('\n') + 1
if pos:
chunks.append(data[:pos])
remainder = data[pos:]
self.buffer += remainder
self.bytes_read -= len(remainder)
break
else:
chunks.append(data)
return ''.join(chunks)
def readlines(self, sizehint=None):
"""Read lines from the request body and return them."""
if self.length is not None:
if sizehint is None:
sizehint = self.length - self.bytes_read
else:
sizehint = min(sizehint, self.length - self.bytes_read)
lines = []
seen = 0
while True:
line = self.readline()
if not line:
break
lines.append(line)
seen += len(line)
if seen >= sizehint:
break
return lines
def finish(self):
self.done = True
if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
self.trailers = {}
try:
for line in self.fp.read_trailer_lines():
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(":", 1)
except ValueError:
raise ValueError("Illegal header line.")
k = k.strip().title()
v = v.strip()
if k in comma_separated_headers:
existing = self.trailers.get(envname)
if existing:
v = ", ".join((existing, v))
self.trailers[k] = v
except Exception, e:
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, "Maximum request length: %r" % e.args[1])
else:
raise
class RequestBody(Entity):
# Don't parse the request body at all if the client didn't provide
# a Content-Type header. See http://www.cherrypy.org/ticket/790
default_content_type = u''
bufsize = 8 * 1024
maxbytes = None
def __init__(self, fp, headers, params=None, request_params=None):
Entity.__init__(self, fp, headers, params)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
# When no explicit charset parameter is provided by the
# sender, media subtypes of the "text" type are defined
# to have a default charset value of "ISO-8859-1" when
# received via HTTP.
if self.content_type.value.startswith('text/'):
for c in (u'ISO-8859-1', u'iso-8859-1', u'Latin-1', u'latin-1'):
if c in self.attempt_charsets:
break
else:
self.attempt_charsets.append(u'ISO-8859-1')
# Temporary fix while deprecating passing .parts as .params.
self.processors[u'multipart'] = _old_process_multipart
if request_params is None:
request_params = {}
self.request_params = request_params
def process(self):
"""Include body params in request params."""
# "The presence of a message-body in a request is signaled by the
# inclusion of a Content-Length or Transfer-Encoding header field in
# the request's message-headers."
# It is possible to send a POST request with no body, for example;
# however, app developers are responsible in that case to set
# cherrypy.request.process_body to False so this method isn't called.
h = cherrypy.serving.request.headers
if u'Content-Length' not in h and u'Transfer-Encoding' not in h:
raise cherrypy.HTTPError(411)
self.fp = SizedReader(self.fp, self.length,
self.maxbytes, bufsize=self.bufsize,
has_trailers='Trailer' in h)
super(RequestBody, self).process()
# Body params should also be a part of the request_params
# add them in here.
request_params = self.request_params
for key, value in self.params.items():
# Python 2 only: keyword arguments must be byte strings (type 'str').
if isinstance(key, unicode):
key = key.encode('ISO-8859-1')
if key in request_params:
if not isinstance(request_params[key], list):
request_params[key] = [request_params[key]]
request_params[key].append(value)
else:
request_params[key] = value
| 26,950 | Python | .py | 606 | 30.808581 | 89 | 0.552295 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,613 | _cptools.py | midgetspy_Sick-Beard/cherrypy/_cptools.py | """CherryPy tools. A "tool" is any helper, adapted to CP.
Tools are usually designed to be used in a variety of ways (although some
may only offer one if they choose):
Library calls:
All tools are callables that can be used wherever needed.
The arguments are straightforward and should be detailed within the
docstring.
Function decorators:
All tools, when called, may be used as decorators which configure
individual CherryPy page handlers (methods on the CherryPy tree).
That is, "@tools.anytool()" should "turn on" the tool via the
decorated function's _cp_config attribute.
CherryPy config:
If a tool exposes a "_setup" callable, it will be called
once per Request (if the feature is "turned on" via config).
Tools may be implemented as any object with a namespace. The builtins
are generally either modules or instances of the tools.Tool class.
"""
import cherrypy
import warnings
def _getargs(func):
"""Return the names of all static arguments to the given function."""
# Use this instead of importing inspect for less mem overhead.
import types
if isinstance(func, types.MethodType):
func = func.im_func
co = func.func_code
return co.co_varnames[:co.co_argcount]
_attr_error = ("CherryPy Tools cannot be turned on directly. Instead, turn them "
"on via config, or use them as decorators on your page handlers.")
class Tool(object):
"""A registered function for use with CherryPy request-processing hooks.
help(tool.callable) should give you more information about this Tool.
"""
namespace = "tools"
def __init__(self, point, callable, name=None, priority=50):
self._point = point
self.callable = callable
self._name = name
self._priority = priority
self.__doc__ = self.callable.__doc__
self._setargs()
def _get_on(self):
raise AttributeError(_attr_error)
def _set_on(self, value):
raise AttributeError(_attr_error)
on = property(_get_on, _set_on)
def _setargs(self):
"""Copy func parameter names to obj attributes."""
try:
for arg in _getargs(self.callable):
setattr(self, arg, None)
except (TypeError, AttributeError):
if hasattr(self.callable, "__call__"):
for arg in _getargs(self.callable.__call__):
setattr(self, arg, None)
# IronPython 1.0 raises NotImplementedError because
# inspect.getargspec tries to access Python bytecode
# in co_code attribute.
except NotImplementedError:
pass
# IronPython 1B1 may raise IndexError in some cases,
# but if we trap it here it doesn't prevent CP from working.
except IndexError:
pass
def _merged_args(self, d=None):
"""Return a dict of configuration entries for this Tool."""
if d:
conf = d.copy()
else:
conf = {}
tm = cherrypy.serving.request.toolmaps[self.namespace]
if self._name in tm:
conf.update(tm[self._name])
if "on" in conf:
del conf["on"]
return conf
def __call__(self, *args, **kwargs):
"""Compile-time decorator (turn on the tool in config).
For example:
@tools.proxy()
def whats_my_base(self):
return cherrypy.request.base
whats_my_base.exposed = True
"""
if args:
raise TypeError("The %r Tool does not accept positional "
"arguments; you must use keyword arguments."
% self._name)
def tool_decorator(f):
if not hasattr(f, "_cp_config"):
f._cp_config = {}
subspace = self.namespace + "." + self._name + "."
f._cp_config[subspace + "on"] = True
for k, v in kwargs.items():
f._cp_config[subspace + k] = v
return f
return tool_decorator
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
cherrypy.serving.request.hooks.attach(self._point, self.callable,
priority=p, **conf)
class HandlerTool(Tool):
"""Tool which is called 'before main', that may skip normal handlers.
If the tool successfully handles the request (by setting response.body),
if should return True. This will cause CherryPy to skip any 'normal' page
handler. If the tool did not handle the request, it should return False
to tell CherryPy to continue on and call the normal page handler. If the
tool is declared AS a page handler (see the 'handler' method), returning
False will raise NotFound.
"""
def __init__(self, callable, name=None):
Tool.__init__(self, 'before_handler', callable, name)
def handler(self, *args, **kwargs):
"""Use this tool as a CherryPy page handler.
For example:
class Root:
nav = tools.staticdir.handler(section="/nav", dir="nav",
root=absDir)
"""
def handle_func(*a, **kw):
handled = self.callable(*args, **self._merged_args(kwargs))
if not handled:
raise cherrypy.NotFound()
return cherrypy.serving.response.body
handle_func.exposed = True
return handle_func
def _wrapper(self, **kwargs):
if self.callable(**kwargs):
cherrypy.serving.request.handler = None
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
cherrypy.serving.request.hooks.attach(self._point, self._wrapper,
priority=p, **conf)
class HandlerWrapperTool(Tool):
"""Tool which wraps request.handler in a provided wrapper function.
The 'newhandler' arg must be a handler wrapper function that takes a
'next_handler' argument, plus *args and **kwargs. Like all page handler
functions, it must return an iterable for use as cherrypy.response.body.
For example, to allow your 'inner' page handlers to return dicts
which then get interpolated into a template:
def interpolator(next_handler, *args, **kwargs):
filename = cherrypy.request.config.get('template')
cherrypy.response.template = env.get_template(filename)
response_dict = next_handler(*args, **kwargs)
return cherrypy.response.template.render(**response_dict)
cherrypy.tools.jinja = HandlerWrapperTool(interpolator)
"""
def __init__(self, newhandler, point='before_handler', name=None, priority=50):
self.newhandler = newhandler
self._point = point
self._name = name
self._priority = priority
def callable(self, debug=False):
innerfunc = cherrypy.serving.request.handler
def wrap(*args, **kwargs):
return self.newhandler(innerfunc, *args, **kwargs)
cherrypy.serving.request.handler = wrap
class ErrorTool(Tool):
"""Tool which is used to replace the default request.error_response."""
def __init__(self, callable, name=None):
Tool.__init__(self, None, callable, name)
def _wrapper(self):
self.callable(**self._merged_args())
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
cherrypy.serving.request.error_response = self._wrapper
# Builtin tools #
from cherrypy.lib import cptools, encoding, auth, static, jsontools
from cherrypy.lib import sessions as _sessions, xmlrpc as _xmlrpc
from cherrypy.lib import caching as _caching
from cherrypy.lib import auth_basic, auth_digest
class SessionTool(Tool):
"""Session Tool for CherryPy.
sessions.locking:
When 'implicit' (the default), the session will be locked for you,
just before running the page handler.
When 'early', the session will be locked before reading the request
body. This is off by default for safety reasons; for example,
a large upload would block the session, denying an AJAX
progress meter (see http://www.cherrypy.org/ticket/630).
When 'explicit' (or any other value), you need to call
cherrypy.session.acquire_lock() yourself before using
session data.
"""
def __init__(self):
# _sessions.init must be bound after headers are read
Tool.__init__(self, 'before_request_body', _sessions.init)
def _lock_session(self):
cherrypy.serving.session.acquire_lock()
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
hooks = cherrypy.serving.request.hooks
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
hooks.attach(self._point, self.callable, priority=p, **conf)
locking = conf.pop('locking', 'implicit')
if locking == 'implicit':
hooks.attach('before_handler', self._lock_session)
elif locking == 'early':
# Lock before the request body (but after _sessions.init runs!)
hooks.attach('before_request_body', self._lock_session,
priority=60)
else:
# Don't lock
pass
hooks.attach('before_finalize', _sessions.save)
hooks.attach('on_end_request', _sessions.close)
def regenerate(self):
"""Drop the current session and make a new one (with a new id)."""
sess = cherrypy.serving.session
sess.regenerate()
# Grab cookie-relevant tool args
conf = dict([(k, v) for k, v in self._merged_args().items()
if k in ('path', 'path_header', 'name', 'timeout',
'domain', 'secure')])
_sessions.set_response_cookie(**conf)
class XMLRPCController(object):
"""A Controller (page handler collection) for XML-RPC.
To use it, have your controllers subclass this base class (it will
turn on the tool for you).
You can also supply the following optional config entries:
tools.xmlrpc.encoding: 'utf-8'
tools.xmlrpc.allow_none: 0
XML-RPC is a rather discontinuous layer over HTTP; dispatching to the
appropriate handler must first be performed according to the URL, and
then a second dispatch step must take place according to the RPC method
specified in the request body. It also allows a superfluous "/RPC2"
prefix in the URL, supplies its own handler args in the body, and
requires a 200 OK "Fault" response instead of 404 when the desired
method is not found.
Therefore, XML-RPC cannot be implemented for CherryPy via a Tool alone.
This Controller acts as the dispatch target for the first half (based
on the URL); it then reads the RPC method from the request body and
does its own second dispatch step based on that method. It also reads
body params, and returns a Fault on error.
The XMLRPCDispatcher strips any /RPC2 prefix; if you aren't using /RPC2
in your URL's, you can safely skip turning on the XMLRPCDispatcher.
Otherwise, you need to use declare it in config:
request.dispatch: cherrypy.dispatch.XMLRPCDispatcher()
"""
# Note we're hard-coding this into the 'tools' namespace. We could do
# a huge amount of work to make it relocatable, but the only reason why
# would be if someone actually disabled the default_toolbox. Meh.
_cp_config = {'tools.xmlrpc.on': True}
def default(self, *vpath, **params):
rpcparams, rpcmethod = _xmlrpc.process_body()
subhandler = self
for attr in str(rpcmethod).split('.'):
subhandler = getattr(subhandler, attr, None)
if subhandler and getattr(subhandler, "exposed", False):
body = subhandler(*(vpath + rpcparams), **params)
else:
# http://www.cherrypy.org/ticket/533
# if a method is not found, an xmlrpclib.Fault should be returned
# raising an exception here will do that; see
# cherrypy.lib.xmlrpc.on_error
raise Exception('method "%s" is not supported' % attr)
conf = cherrypy.serving.request.toolmaps['tools'].get("xmlrpc", {})
_xmlrpc.respond(body,
conf.get('encoding', 'utf-8'),
conf.get('allow_none', 0))
return cherrypy.serving.response.body
default.exposed = True
class SessionAuthTool(HandlerTool):
def _setargs(self):
for name in dir(cptools.SessionAuth):
if not name.startswith("__"):
setattr(self, name, None)
class CachingTool(Tool):
"""Caching Tool for CherryPy."""
def _wrapper(self, **kwargs):
request = cherrypy.serving.request
if _caching.get(**kwargs):
request.handler = None
else:
if request.cacheable:
# Note the devious technique here of adding hooks on the fly
request.hooks.attach('before_finalize', _caching.tee_output,
priority=90)
_wrapper.priority = 20
def _setup(self):
"""Hook caching into cherrypy.request."""
conf = self._merged_args()
p = conf.pop("priority", None)
cherrypy.serving.request.hooks.attach('before_handler', self._wrapper,
priority=p, **conf)
class Toolbox(object):
"""A collection of Tools.
This object also functions as a config namespace handler for itself.
Custom toolboxes should be added to each Application's toolboxes dict.
"""
def __init__(self, namespace):
self.namespace = namespace
def __setattr__(self, name, value):
# If the Tool._name is None, supply it from the attribute name.
if isinstance(value, Tool):
if value._name is None:
value._name = name
value.namespace = self.namespace
object.__setattr__(self, name, value)
def __enter__(self):
"""Populate request.toolmaps from tools specified in config."""
cherrypy.serving.request.toolmaps[self.namespace] = map = {}
def populate(k, v):
toolname, arg = k.split(".", 1)
bucket = map.setdefault(toolname, {})
bucket[arg] = v
return populate
def __exit__(self, exc_type, exc_val, exc_tb):
"""Run tool._setup() for each tool in our toolmap."""
map = cherrypy.serving.request.toolmaps.get(self.namespace)
if map:
for name, settings in map.items():
if settings.get("on", False):
tool = getattr(self, name)
tool._setup()
class DeprecatedTool(Tool):
_name = None
warnmsg = "This Tool is deprecated."
def __init__(self, point, warnmsg=None):
self.point = point
if warnmsg is not None:
self.warnmsg = warnmsg
def __call__(self, *args, **kwargs):
warnings.warn(self.warnmsg)
def tool_decorator(f):
return f
return tool_decorator
def _setup(self):
warnings.warn(self.warnmsg)
default_toolbox = _d = Toolbox("tools")
_d.session_auth = SessionAuthTool(cptools.session_auth)
_d.proxy = Tool('before_request_body', cptools.proxy, priority=30)
_d.response_headers = Tool('on_start_resource', cptools.response_headers)
_d.log_tracebacks = Tool('before_error_response', cptools.log_traceback)
_d.log_headers = Tool('before_error_response', cptools.log_request_headers)
_d.log_hooks = Tool('on_end_request', cptools.log_hooks, priority=100)
_d.err_redirect = ErrorTool(cptools.redirect)
_d.etags = Tool('before_finalize', cptools.validate_etags, priority=75)
_d.decode = Tool('before_request_body', encoding.decode)
# the order of encoding, gzip, caching is important
_d.encode = Tool('before_handler', encoding.ResponseEncoder, priority=70)
_d.gzip = Tool('before_finalize', encoding.gzip, priority=80)
_d.staticdir = HandlerTool(static.staticdir)
_d.staticfile = HandlerTool(static.staticfile)
_d.sessions = SessionTool()
_d.xmlrpc = ErrorTool(_xmlrpc.on_error)
_d.caching = CachingTool('before_handler', _caching.get, 'caching')
_d.expires = Tool('before_finalize', _caching.expires)
_d.tidy = DeprecatedTool('before_finalize',
"The tidy tool has been removed from the standard distribution of CherryPy. "
"The most recent version can be found at http://tools.cherrypy.org/browser.")
_d.nsgmls = DeprecatedTool('before_finalize',
"The nsgmls tool has been removed from the standard distribution of CherryPy. "
"The most recent version can be found at http://tools.cherrypy.org/browser.")
_d.ignore_headers = Tool('before_request_body', cptools.ignore_headers)
_d.referer = Tool('before_request_body', cptools.referer)
_d.basic_auth = Tool('on_start_resource', auth.basic_auth)
_d.digest_auth = Tool('on_start_resource', auth.digest_auth)
_d.trailing_slash = Tool('before_handler', cptools.trailing_slash, priority=60)
_d.flatten = Tool('before_finalize', cptools.flatten)
_d.accept = Tool('on_start_resource', cptools.accept)
_d.redirect = Tool('on_start_resource', cptools.redirect)
_d.autovary = Tool('on_start_resource', cptools.autovary, priority=0)
_d.json_in = Tool('before_request_body', jsontools.json_in, priority=30)
_d.json_out = Tool('before_handler', jsontools.json_out, priority=30)
_d.auth_basic = Tool('before_handler', auth_basic.basic_auth, priority=1)
_d.auth_digest = Tool('before_handler', auth_digest.digest_auth, priority=1)
del _d, cptools, encoding, auth, static
| 19,648 | Python | .py | 392 | 39.130102 | 84 | 0.629534 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,614 | _cpconfig.py | midgetspy_Sick-Beard/cherrypy/_cpconfig.py | """Configuration system for CherryPy.
Configuration in CherryPy is implemented via dictionaries. Keys are strings
which name the mapped value, which may be of any type.
Architecture
------------
CherryPy Requests are part of an Application, which runs in a global context,
and configuration data may apply to any of those three scopes:
Global: configuration entries which apply everywhere are stored in
cherrypy.config.
Application: entries which apply to each mounted application are stored
on the Application object itself, as 'app.config'. This is a two-level
dict where each key is a path, or "relative URL" (for example, "/" or
"/path/to/my/page"), and each value is a config dict. Usually, this
data is provided in the call to tree.mount(root(), config=conf),
although you may also use app.merge(conf).
Request: each Request object possesses a single 'Request.config' dict.
Early in the request process, this dict is populated by merging global
config entries, Application entries (whose path equals or is a parent
of Request.path_info), and any config acquired while looking up the
page handler (see next).
Declaration
-----------
Configuration data may be supplied as a Python dictionary, as a filename,
or as an open file object. When you supply a filename or file, CherryPy
uses Python's builtin ConfigParser; you declare Application config by
writing each path as a section header:
[/path/to/my/page]
request.stream = True
To declare global configuration entries, place them in a [global] section.
You may also declare config entries directly on the classes and methods
(page handlers) that make up your CherryPy application via the '_cp_config'
attribute. For example:
class Demo:
_cp_config = {'tools.gzip.on': True}
def index(self):
return "Hello world"
index.exposed = True
index._cp_config = {'request.show_tracebacks': False}
Note, however, that this behavior is only guaranteed for the default
dispatcher. Other dispatchers may have different restrictions on where
you can attach _cp_config attributes.
Namespaces
----------
Configuration keys are separated into namespaces by the first "." in the key.
Current namespaces:
engine: Controls the 'application engine', including autoreload.
These can only be declared in the global config.
tree: Grafts cherrypy.Application objects onto cherrypy.tree.
These can only be declared in the global config.
hooks: Declares additional request-processing functions.
log: Configures the logging for each application.
These can only be declared in the global or / config.
request: Adds attributes to each Request.
response: Adds attributes to each Response.
server: Controls the default HTTP server via cherrypy.server.
These can only be declared in the global config.
tools: Runs and configures additional request-processing packages.
wsgi: Adds WSGI middleware to an Application's "pipeline".
These can only be declared in the app's root config ("/").
checker: Controls the 'checker', which looks for common errors in
app state (including config) when the engine starts.
Global config only.
The only key that does not exist in a namespace is the "environment" entry.
This special entry 'imports' other config entries from a template stored in
cherrypy._cpconfig.environments[environment]. It only applies to the global
config, and only when you use cherrypy.config.update.
You can define your own namespaces to be called at the Global, Application,
or Request level, by adding a named handler to cherrypy.config.namespaces,
app.namespaces, or app.request_class.namespaces. The name can
be any string, and the handler must be either a callable or a (Python 2.5
style) context manager.
"""
try:
set
except NameError:
from sets import Set as set
import cherrypy
from cherrypy.lib import reprconf
# Deprecated in CherryPy 3.2--remove in 3.3
NamespaceSet = reprconf.NamespaceSet
def merge(base, other):
"""Merge one app config (from a dict, file, or filename) into another.
If the given config is a filename, it will be appended to
the list of files to monitor for "autoreload" changes.
"""
if isinstance(other, basestring):
cherrypy.engine.autoreload.files.add(other)
# Load other into base
for section, value_map in reprconf.as_dict(other).items():
if not isinstance(value_map, dict):
raise ValueError(
"Application config must include section headers, but the "
"config you tried to merge doesn't have any sections. "
"Wrap your config in another dict with paths as section "
"headers, for example: {'/': config}.")
base.setdefault(section, {}).update(value_map)
class Config(reprconf.Config):
"""The 'global' configuration data for the entire CherryPy process."""
def update(self, config):
"""Update self from a dict, file or filename."""
if isinstance(config, basestring):
# Filename
cherrypy.engine.autoreload.files.add(config)
reprconf.Config.update(self, config)
def _apply(self, config):
"""Update self from a dict."""
if isinstance(config.get("global", None), dict):
if len(config) > 1:
cherrypy.checker.global_config_contained_paths = True
config = config["global"]
if 'tools.staticdir.dir' in config:
config['tools.staticdir.section'] = "global"
reprconf.Config._apply(self, config)
def __call__(self, *args, **kwargs):
"""Decorator for page handlers to set _cp_config."""
if args:
raise TypeError(
"The cherrypy.config decorator does not accept positional "
"arguments; you must use keyword arguments.")
def tool_decorator(f):
if not hasattr(f, "_cp_config"):
f._cp_config = {}
for k, v in kwargs.items():
f._cp_config[k] = v
return f
return tool_decorator
Config.environments = environments = {
"staging": {
'engine.autoreload_on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
},
"production": {
'engine.autoreload_on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
'log.screen': False,
},
"embedded": {
# For use with CherryPy embedded in another deployment stack.
'engine.autoreload_on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
'log.screen': False,
'engine.SIGHUP': None,
'engine.SIGTERM': None,
},
"test_suite": {
'engine.autoreload_on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': True,
'request.show_mismatched_params': True,
'log.screen': False,
},
}
def _server_namespace_handler(k, v):
"""Config handler for the "server" namespace."""
atoms = k.split(".", 1)
if len(atoms) > 1:
# Special-case config keys of the form 'server.servername.socket_port'
# to configure additional HTTP servers.
if not hasattr(cherrypy, "servers"):
cherrypy.servers = {}
servername, k = atoms
if servername not in cherrypy.servers:
from cherrypy import _cpserver
cherrypy.servers[servername] = _cpserver.Server()
# On by default, but 'on = False' can unsubscribe it (see below).
cherrypy.servers[servername].subscribe()
if k == 'on':
if v:
cherrypy.servers[servername].subscribe()
else:
cherrypy.servers[servername].unsubscribe()
else:
setattr(cherrypy.servers[servername], k, v)
else:
setattr(cherrypy.server, k, v)
Config.namespaces["server"] = _server_namespace_handler
def _engine_namespace_handler(k, v):
"""Backward compatibility handler for the "engine" namespace."""
engine = cherrypy.engine
if k == 'autoreload_on':
if v:
engine.autoreload.subscribe()
else:
engine.autoreload.unsubscribe()
elif k == 'autoreload_frequency':
engine.autoreload.frequency = v
elif k == 'autoreload_match':
engine.autoreload.match = v
elif k == 'reload_files':
engine.autoreload.files = set(v)
elif k == 'deadlock_poll_freq':
engine.timeout_monitor.frequency = v
elif k == 'SIGHUP':
engine.listeners['SIGHUP'] = set([v])
elif k == 'SIGTERM':
engine.listeners['SIGTERM'] = set([v])
elif "." in k:
plugin, attrname = k.split(".", 1)
plugin = getattr(engine, plugin)
if attrname == 'on':
if v and hasattr(getattr(plugin, 'subscribe', None), '__call__'):
plugin.subscribe()
return
elif (not v) and hasattr(getattr(plugin, 'unsubscribe', None), '__call__'):
plugin.unsubscribe()
return
setattr(plugin, attrname, v)
else:
setattr(engine, k, v)
Config.namespaces["engine"] = _engine_namespace_handler
def _tree_namespace_handler(k, v):
"""Namespace handler for the 'tree' config namespace."""
cherrypy.tree.graft(v, v.script_name)
cherrypy.engine.log("Mounted: %s on %s" % (v, v.script_name or "/"))
Config.namespaces["tree"] = _tree_namespace_handler
| 10,328 | Python | .py | 225 | 36.968889 | 88 | 0.642864 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,615 | _cpdispatch.py | midgetspy_Sick-Beard/cherrypy/_cpdispatch.py | """CherryPy dispatchers.
A 'dispatcher' is the object which looks up the 'page handler' callable
and collects config for the current request based on the path_info, other
request attributes, and the application architecture. The core calls the
dispatcher as early as possible, passing it a 'path_info' argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
"""
import cherrypy
class PageHandler(object):
"""Callable which sets response.body."""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
def __call__(self):
try:
return self.callable(*self.args, **self.kwargs)
except TypeError, x:
try:
test_callable_spec(self.callable, self.args, self.kwargs)
except cherrypy.HTTPError, error:
raise error
except:
raise x
raise
def test_callable_spec(callable, callable_args, callable_kwargs):
"""
Inspect callable and test to see if the given args are suitable for it.
When an error occurs during the handler's invoking stage there are 2
erroneous cases:
1. Too many parameters passed to a function which doesn't define
one of *args or **kwargs.
2. Too little parameters are passed to the function.
There are 3 sources of parameters to a cherrypy handler.
1. query string parameters are passed as keyword parameters to the handler.
2. body parameters are also passed as keyword parameters.
3. when partial matching occurs, the final path atoms are passed as
positional args.
Both the query string and path atoms are part of the URI. If they are
incorrect, then a 404 Not Found should be raised. Conversely the body
parameters are part of the request; if they are invalid a 400 Bad Request.
"""
show_mismatched_params = getattr(
cherrypy.serving.request, 'show_mismatched_params', False)
try:
(args, varargs, varkw, defaults) = inspect.getargspec(callable)
except TypeError:
if isinstance(callable, object) and hasattr(callable, '__call__'):
(args, varargs, varkw, defaults) = inspect.getargspec(callable.__call__)
else:
# If it wasn't one of our own types, re-raise
# the original error
raise
if args and args[0] == 'self':
args = args[1:]
arg_usage = dict([(arg, 0,) for arg in args])
vararg_usage = 0
varkw_usage = 0
extra_kwargs = set()
for i, value in enumerate(callable_args):
try:
arg_usage[args[i]] += 1
except IndexError:
vararg_usage += 1
for key in callable_kwargs.keys():
try:
arg_usage[key] += 1
except KeyError:
varkw_usage += 1
extra_kwargs.add(key)
# figure out which args have defaults.
args_with_defaults = args[-len(defaults or []):]
for i, val in enumerate(defaults or []):
# Defaults take effect only when the arg hasn't been used yet.
if arg_usage[args_with_defaults[i]] == 0:
arg_usage[args_with_defaults[i]] += 1
missing_args = []
multiple_args = []
for key, usage in arg_usage.items():
if usage == 0:
missing_args.append(key)
elif usage > 1:
multiple_args.append(key)
if missing_args:
# In the case where the method allows body arguments
# there are 3 potential errors:
# 1. not enough query string parameters -> 404
# 2. not enough body parameters -> 400
# 3. not enough path parts (partial matches) -> 404
#
# We can't actually tell which case it is,
# so I'm raising a 404 because that covers 2/3 of the
# possibilities
#
# In the case where the method does not allow body
# arguments it's definitely a 404.
message = None
if show_mismatched_params:
message = "Missing parameters: %s" % ",".join(missing_args)
raise cherrypy.HTTPError(404, message=message)
# the extra positional arguments come from the path - 404 Not Found
if not varargs and vararg_usage > 0:
raise cherrypy.HTTPError(404)
body_params = cherrypy.serving.request.body.params or {}
body_params = set(body_params.keys())
qs_params = set(callable_kwargs.keys()) - body_params
if multiple_args:
if qs_params.intersection(set(multiple_args)):
# If any of the multiple parameters came from the query string then
# it's a 404 Not Found
error = 404
else:
# Otherwise it's a 400 Bad Request
error = 400
message = None
if show_mismatched_params:
message = "Multiple values for parameters: "\
"%s" % ",".join(multiple_args)
raise cherrypy.HTTPError(error, message=message)
if not varkw and varkw_usage > 0:
# If there were extra query string parameters, it's a 404 Not Found
extra_qs_params = set(qs_params).intersection(extra_kwargs)
if extra_qs_params:
message = None
if show_mismatched_params:
message = "Unexpected query string "\
"parameters: %s" % ", ".join(extra_qs_params)
raise cherrypy.HTTPError(404, message=message)
# If there were any extra body parameters, it's a 400 Not Found
extra_body_params = set(body_params).intersection(extra_kwargs)
if extra_body_params:
message = None
if show_mismatched_params:
message = "Unexpected body parameters: "\
"%s" % ", ".join(extra_body_params)
raise cherrypy.HTTPError(400, message=message)
try:
import inspect
except ImportError:
test_callable_spec = lambda callable, args, kwargs: None
class LateParamPageHandler(PageHandler):
"""When passing cherrypy.request.params to the page handler, we do not
want to capture that dict too early; we want to give tools like the
decoding tool a chance to modify the params dict in-between the lookup
of the handler and the actual calling of the handler. This subclass
takes that into account, and allows request.params to be 'bound late'
(it's more complicated than that, but that's the effect).
"""
def _get_kwargs(self):
kwargs = cherrypy.serving.request.params.copy()
if self._kwargs:
kwargs.update(self._kwargs)
return kwargs
def _set_kwargs(self, kwargs):
self._kwargs = kwargs
kwargs = property(_get_kwargs, _set_kwargs,
doc='page handler kwargs (with '
'cherrypy.request.params copied in)')
class Dispatcher(object):
"""CherryPy Dispatcher which walks a tree of objects to find a handler.
The tree is rooted at cherrypy.request.app.root, and each hierarchical
component in the path_info argument is matched to a corresponding nested
attribute of the root object. Matching handlers must have an 'exposed'
attribute which evaluates to True. The special method name "index"
matches a URI which ends in a slash ("/"). The special method name
"default" may match a portion of the path_info (but only when no longer
substring of the path_info matches some other object).
This is the default, built-in dispatcher for CherryPy.
"""
__metaclass__ = cherrypy._AttributeDocstrings
dispatch_method_name = '_cp_dispatch'
dispatch_method_name__doc = """
The name of the dispatch method that nodes may optionally implement
to provide their own dynamic dispatch algorithm.
"""
def __init__(self, dispatch_method_name=None):
if dispatch_method_name:
self.dispatch_method_name = dispatch_method_name
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
func, vpath = self.find_handler(path_info)
if func:
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace("%2F", "/") for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.NotFound()
def find_handler(self, path):
"""Return the appropriate page handler, plus any virtual path.
This will return two objects. The first will be a callable,
which can be used to generate page output. Any parameters from
the query string or request body will be sent to that callable
as keyword arguments.
The callable is found by traversing the application's tree,
starting from cherrypy.request.app.root, and matching path
components to successive objects in the tree. For example, the
URL "/path/to/handler" might return root.path.to.handler.
The second object returned will be a list of names which are
'virtual path' components: parts of the URL which are dynamic,
and were not used when looking up the handler.
These virtual path components are passed to the handler as
positional arguments.
"""
request = cherrypy.serving.request
app = request.app
root = app.root
dispatch_name = self.dispatch_method_name
# Get config for the root object/path.
curpath = ""
nodeconf = {}
if hasattr(root, "_cp_config"):
nodeconf.update(root._cp_config)
if "/" in app.config:
nodeconf.update(app.config["/"])
object_trail = [['root', root, nodeconf, curpath]]
node = root
names = [x for x in path.strip('/').split('/') if x] + ['index']
iternames = names[:]
while iternames:
name = iternames[0]
# map to legal Python identifiers (replace '.' with '_')
objname = name.replace('.', '_')
nodeconf = {}
subnode = getattr(node, objname, None)
if subnode is None:
dispatch = getattr(node, dispatch_name, None)
if dispatch and callable(dispatch) and not \
getattr(dispatch, 'exposed', False):
subnode = dispatch(vpath=iternames)
name = iternames.pop(0)
node = subnode
if node is not None:
# Get _cp_config attached to this node.
if hasattr(node, "_cp_config"):
nodeconf.update(node._cp_config)
# Mix in values from app.config for this path.
curpath = "/".join((curpath, name))
if curpath in app.config:
nodeconf.update(app.config[curpath])
object_trail.append([name, node, nodeconf, curpath])
def set_conf():
"""Collapse all object_trail config into cherrypy.request.config."""
base = cherrypy.config.copy()
# Note that we merge the config from each node
# even if that node was None.
for name, obj, conf, curpath in object_trail:
base.update(conf)
if 'tools.staticdir.dir' in conf:
base['tools.staticdir.section'] = curpath
return base
# Try successive objects (reverse order)
num_candidates = len(object_trail) - 1
for i in range(num_candidates, -1, -1):
name, candidate, nodeconf, curpath = object_trail[i]
if candidate is None:
continue
# Try a "default" method on the current leaf.
if hasattr(candidate, "default"):
defhandler = candidate.default
if getattr(defhandler, 'exposed', False):
# Insert any extra _cp_config from the default handler.
conf = getattr(defhandler, "_cp_config", {})
object_trail.insert(i + 1, ["default", defhandler, conf, curpath])
request.config = set_conf()
# See http://www.cherrypy.org/ticket/613
request.is_index = path.endswith("/")
return defhandler, names[i:-1]
# Uncomment the next line to restrict positional params to "default".
# if i < num_candidates - 2: continue
# Try the current leaf.
if getattr(candidate, 'exposed', False):
request.config = set_conf()
if i == num_candidates:
# We found the extra ".index". Mark request so tools
# can redirect if path_info has no trailing slash.
request.is_index = True
else:
# We're not at an 'index' handler. Mark request so tools
# can redirect if path_info has NO trailing slash.
# Note that this also includes handlers which take
# positional parameters (virtual paths).
request.is_index = False
return candidate, names[i:-1]
# We didn't find anything
request.config = set_conf()
return None, []
class MethodDispatcher(Dispatcher):
"""Additional dispatch based on cherrypy.request.method.upper().
Methods named GET, POST, etc will be called on an exposed class.
The method names must be all caps; the appropriate Allow header
will be output showing all capitalized method names as allowable
HTTP verbs.
Note that the containing class must be exposed, not the methods.
"""
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
resource, vpath = self.find_handler(path_info)
if resource:
# Set Allow header
avail = [m for m in dir(resource) if m.isupper()]
if "GET" in avail and "HEAD" not in avail:
avail.append("HEAD")
avail.sort()
cherrypy.serving.response.headers['Allow'] = ", ".join(avail)
# Find the subhandler
meth = request.method.upper()
func = getattr(resource, meth, None)
if func is None and meth == "HEAD":
func = getattr(resource, "GET", None)
if func:
# Grab any _cp_config on the subhandler.
if hasattr(func, "_cp_config"):
request.config.update(func._cp_config)
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace("%2F", "/") for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.HTTPError(405)
else:
request.handler = cherrypy.NotFound()
class RoutesDispatcher(object):
"""A Routes based dispatcher for CherryPy."""
def __init__(self, full_result=False):
"""
Routes dispatcher
Set full_result to True if you wish the controller
and the action to be passed on to the page handler
parameters. By default they won't be.
"""
import routes
self.full_result = full_result
self.controllers = {}
self.mapper = routes.Mapper()
self.mapper.controller_scan = self.controllers.keys
def connect(self, name, route, controller, **kwargs):
self.controllers[name] = controller
self.mapper.connect(name, route, controller=name, **kwargs)
def redirect(self, url):
raise cherrypy.HTTPRedirect(url)
def __call__(self, path_info):
"""Set handler and config for the current request."""
func = self.find_handler(path_info)
if func:
cherrypy.serving.request.handler = LateParamPageHandler(func)
else:
cherrypy.serving.request.handler = cherrypy.NotFound()
def find_handler(self, path_info):
"""Find the right page handler, and set request.config."""
import routes
request = cherrypy.serving.request
config = routes.request_config()
config.mapper = self.mapper
if hasattr(request, 'wsgi_environ'):
config.environ = request.wsgi_environ
config.host = request.headers.get('Host', None)
config.protocol = request.scheme
config.redirect = self.redirect
result = self.mapper.match(path_info)
config.mapper_dict = result
params = {}
if result:
params = result.copy()
if not self.full_result:
params.pop('controller', None)
params.pop('action', None)
request.params.update(params)
# Get config for the root object/path.
request.config = base = cherrypy.config.copy()
curpath = ""
def merge(nodeconf):
if 'tools.staticdir.dir' in nodeconf:
nodeconf['tools.staticdir.section'] = curpath or "/"
base.update(nodeconf)
app = request.app
root = app.root
if hasattr(root, "_cp_config"):
merge(root._cp_config)
if "/" in app.config:
merge(app.config["/"])
# Mix in values from app.config.
atoms = [x for x in path_info.split("/") if x]
if atoms:
last = atoms.pop()
else:
last = None
for atom in atoms:
curpath = "/".join((curpath, atom))
if curpath in app.config:
merge(app.config[curpath])
handler = None
if result:
controller = result.get('controller', None)
controller = self.controllers.get(controller)
if controller:
# Get config from the controller.
if hasattr(controller, "_cp_config"):
merge(controller._cp_config)
action = result.get('action', None)
if action is not None:
handler = getattr(controller, action, None)
# Get config from the handler
if hasattr(handler, "_cp_config"):
merge(handler._cp_config)
# Do the last path atom here so it can
# override the controller's _cp_config.
if last:
curpath = "/".join((curpath, last))
if curpath in app.config:
merge(app.config[curpath])
return handler
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
from cherrypy.lib import xmlrpc
def xmlrpc_dispatch(path_info):
path_info = xmlrpc.patched_path(path_info)
return next_dispatcher(path_info)
return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True, **domains):
"""Select a different handler based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different parts of a single
website structure. For example:
http://www.domain.example -> root
http://www.domain2.example -> root/domain2/
http://www.domain2.example:443 -> root/secure
can be accomplished via the following config:
[/]
request.dispatch = cherrypy.dispatch.VirtualHost(
**{'www.domain2.example': '/domain2',
'www.domain2.example:443': '/secure',
})
next_dispatcher: the next dispatcher object in the dispatch chain.
The VirtualHost dispatcher adds a prefix to the URL and calls
another dispatcher. Defaults to cherrypy.dispatch.Dispatcher().
use_x_forwarded_host: if True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying.
**domains: a dict of {host header value: virtual prefix} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding "virtual prefix"
value will be prepended to the URL path before calling the
next dispatcher. Note that you often need separate entries
for "example.com" and "www.example.com". In addition, "Host"
headers may contain the port number.
"""
from cherrypy.lib import httputil
def vhost_dispatch(path_info):
request = cherrypy.serving.request
header = request.headers.get
domain = header('Host', '')
if use_x_forwarded_host:
domain = header("X-Forwarded-Host", domain)
prefix = domains.get(domain, "")
if prefix:
path_info = httputil.urljoin(prefix, path_info)
result = next_dispatcher(path_info)
# Touch up staticdir config. See http://www.cherrypy.org/ticket/614.
section = request.config.get('tools.staticdir.section')
if section:
section = section[len(prefix):]
request.config['tools.staticdir.section'] = section
return result
return vhost_dispatch
| 22,348 | Python | .py | 465 | 35.6 | 87 | 0.6026 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,616 | reprconf.py | midgetspy_Sick-Beard/cherrypy/lib/reprconf.py | """Generic configuration system using unrepr.
Configuration data may be supplied as a Python dictionary, as a filename,
or as an open file object. When you supply a filename or file, Python's
builtin ConfigParser is used (with some extensions).
Namespaces
----------
Configuration keys are separated into namespaces by the first "." in the key.
The only key that cannot exist in a namespace is the "environment" entry.
This special entry 'imports' other config entries from a template stored in
the Config.environments dict.
You can define your own namespaces to be called when new config is merged
by adding a named handler to Config.namespaces. The name can be any string,
and the handler must be either a callable or a context manager.
"""
from ConfigParser import ConfigParser
try:
set
except NameError:
from sets import Set as set
import sys
def as_dict(config):
"""Return a dict from 'config' whether it is a dict, file, or filename."""
if isinstance(config, basestring):
config = Parser().dict_from_file(config)
elif hasattr(config, 'read'):
config = Parser().dict_from_file(config)
return config
class NamespaceSet(dict):
"""A dict of config namespace names and handlers.
Each config entry should begin with a namespace name; the corresponding
namespace handler will be called once for each config entry in that
namespace, and will be passed two arguments: the config key (with the
namespace removed) and the config value.
Namespace handlers may be any Python callable; they may also be
Python 2.5-style 'context managers', in which case their __enter__
method should return a callable to be used as the handler.
See cherrypy.tools (the Toolbox class) for an example.
"""
def __call__(self, config):
"""Iterate through config and pass it to each namespace handler.
'config' should be a flat dict, where keys use dots to separate
namespaces, and values are arbitrary.
The first name in each config key is used to look up the corresponding
namespace handler. For example, a config entry of {'tools.gzip.on': v}
will call the 'tools' namespace handler with the args: ('gzip.on', v)
"""
# Separate the given config into namespaces
ns_confs = {}
for k in config:
if "." in k:
ns, name = k.split(".", 1)
bucket = ns_confs.setdefault(ns, {})
bucket[name] = config[k]
# I chose __enter__ and __exit__ so someday this could be
# rewritten using Python 2.5's 'with' statement:
# for ns, handler in self.iteritems():
# with handler as callable:
# for k, v in ns_confs.get(ns, {}).iteritems():
# callable(k, v)
for ns, handler in self.items():
exit = getattr(handler, "__exit__", None)
if exit:
callable = handler.__enter__()
no_exc = True
try:
try:
for k, v in ns_confs.get(ns, {}).items():
callable(k, v)
except:
# The exceptional case is handled here
no_exc = False
if exit is None:
raise
if not exit(*sys.exc_info()):
raise
# The exception is swallowed if exit() returns true
finally:
# The normal and non-local-goto cases are handled here
if no_exc and exit:
exit(None, None, None)
else:
for k, v in ns_confs.get(ns, {}).items():
handler(k, v)
def __repr__(self):
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,
dict.__repr__(self))
def __copy__(self):
newobj = self.__class__()
newobj.update(self)
return newobj
copy = __copy__
class Config(dict):
"""A dict-like set of configuration data, with defaults and namespaces.
May take a file, filename, or dict.
"""
defaults = {}
environments = {}
namespaces = NamespaceSet()
def __init__(self, file=None, **kwargs):
self.reset()
if file is not None:
self.update(file)
if kwargs:
self.update(kwargs)
def reset(self):
"""Reset self to default values."""
self.clear()
dict.update(self, self.defaults)
def update(self, config):
"""Update self from a dict, file or filename."""
if isinstance(config, basestring):
# Filename
config = Parser().dict_from_file(config)
elif hasattr(config, 'read'):
# Open file object
config = Parser().dict_from_file(config)
else:
config = config.copy()
self._apply(config)
def _apply(self, config):
"""Update self from a dict."""
which_env = config.get('environment')
if which_env:
env = self.environments[which_env]
for k in env:
if k not in config:
config[k] = env[k]
dict.update(self, config)
self.namespaces(config)
def __setitem__(self, k, v):
dict.__setitem__(self, k, v)
self.namespaces({k: v})
class Parser(ConfigParser):
"""Sub-class of ConfigParser that keeps the case of options and that raises
an exception if the file cannot be read.
"""
def optionxform(self, optionstr):
return optionstr
def read(self, filenames):
if isinstance(filenames, basestring):
filenames = [filenames]
for filename in filenames:
# try:
# fp = open(filename)
# except IOError:
# continue
fp = open(filename)
try:
self._read(fp, filename)
finally:
fp.close()
def as_dict(self, raw=False, vars=None):
"""Convert an INI file to a dictionary"""
# Load INI file into a dict
result = {}
for section in self.sections():
if section not in result:
result[section] = {}
for option in self.options(section):
value = self.get(section, option, raw, vars)
try:
value = unrepr(value)
except Exception, x:
msg = ("Config error in section: %r, option: %r, "
"value: %r. Config values must be valid Python." %
(section, option, value))
raise ValueError(msg, x.__class__.__name__, x.args)
result[section][option] = value
return result
def dict_from_file(self, file):
if hasattr(file, 'read'):
self.readfp(file)
else:
self.read(file)
return self.as_dict()
# public domain "unrepr" implementation, found on the web and then improved.
class _Builder:
def build(self, o):
m = getattr(self, 'build_' + o.__class__.__name__, None)
if m is None:
raise TypeError("unrepr does not recognize %s" %
repr(o.__class__.__name__))
return m(o)
def build_Subscript(self, o):
expr, flags, subs = o.getChildren()
expr = self.build(expr)
subs = self.build(subs)
return expr[subs]
def build_CallFunc(self, o):
children = map(self.build, o.getChildren())
callee = children.pop(0)
kwargs = children.pop() or {}
starargs = children.pop() or ()
args = tuple(children) + tuple(starargs)
return callee(*args, **kwargs)
def build_List(self, o):
return map(self.build, o.getChildren())
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = i.next()
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
name = o.name
if name == 'None':
return None
if name == 'True':
return True
if name == 'False':
return False
# See if the Name is a package or module. If it is, import it.
try:
return modules(name)
except ImportError:
pass
# See if the Name is in builtins.
try:
import __builtin__
return getattr(__builtin__, name)
except AttributeError:
pass
raise TypeError("unrepr could not resolve the name %s" % repr(name))
def build_Add(self, o):
left, right = map(self.build, o.getChildren())
return left + right
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_NoneType(self, o):
return None
def build_UnarySub(self, o):
return - self.build(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build(o.getChildren()[0])
def _astnode(s):
"""Return a Python ast Node compiled from a string."""
try:
import compiler
except ImportError:
# Fallback to eval when compiler package is not available,
# e.g. IronPython 1.0.
return eval(s)
p = compiler.parse("__tempvalue__ = " + s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
def unrepr(s):
"""Return a Python object compiled from a string."""
if not s:
return s
obj = _astnode(s)
return _Builder().build(obj)
def modules(modulePath):
"""Load a module and retrieve a reference to that module."""
try:
mod = sys.modules[modulePath]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(modulePath, globals(), locals(), [''])
return mod
def attributes(full_attribute_name):
"""Load a module and retrieve an attribute of that module."""
# Parse out the path, module, and attribute
last_dot = full_attribute_name.rfind(".")
attr_name = full_attribute_name[last_dot + 1:]
mod_path = full_attribute_name[:last_dot]
mod = modules(mod_path)
# Let an AttributeError propagate outward.
try:
attr = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
# Return a reference to the attribute.
return attr
| 11,033 | Python | .py | 280 | 29.017857 | 79 | 0.574505 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,617 | sessions.py | midgetspy_Sick-Beard/cherrypy/lib/sessions.py | """Session implementation for CherryPy.
We use cherrypy.request to store some convenient variables as
well as data about the session for the current request. Instead of
polluting cherrypy.request we use a Session object bound to
cherrypy.session to store these variables.
"""
import datetime
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import random
try:
# Python 2.5+
from hashlib import sha1 as sha
except ImportError:
from sha import new as sha
import time
import threading
import types
from warnings import warn
import cherrypy
from cherrypy.lib import httputil
missing = object()
class Session(object):
"""A CherryPy dict-like Session object (one per request)."""
__metaclass__ = cherrypy._AttributeDocstrings
_id = None
id_observers = None
id_observers__doc = "A list of callbacks to which to pass new id's."
id__doc = "The current session ID."
def _get_id(self):
return self._id
def _set_id(self, value):
self._id = value
for o in self.id_observers:
o(value)
id = property(_get_id, _set_id, doc=id__doc)
timeout = 60
timeout__doc = "Number of minutes after which to delete session data."
locked = False
locked__doc = """
If True, this session instance has exclusive read/write access
to session data."""
loaded = False
loaded__doc = """
If True, data has been retrieved from storage. This should happen
automatically on the first attempt to access session data."""
clean_thread = None
clean_thread__doc = "Class-level Monitor which calls self.clean_up."
clean_freq = 5
clean_freq__doc = "The poll rate for expired session cleanup in minutes."
originalid = None
originalid__doc = "The session id passed by the client. May be missing or unsafe."
missing = False
missing__doc = "True if the session requested by the client did not exist."
regenerated = False
regenerated__doc = """
True if the application called session.regenerate(). This is not set by
internal calls to regenerate the session id."""
debug = False
def __init__(self, id=None, **kwargs):
self.id_observers = []
self._data = {}
for k, v in kwargs.items():
setattr(self, k, v)
self.originalid = id
self.missing = False
if id is None:
if self.debug:
cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
self._regenerate()
else:
self.id = id
if not self._exists():
if self.debug:
cherrypy.log('Expired or malicious session %r; '
'making a new one' % id, 'TOOLS.SESSIONS')
# Expired or malicious session. Make a new one.
# See http://www.cherrypy.org/ticket/709.
self.id = None
self.missing = True
self._regenerate()
def regenerate(self):
"""Replace the current session (with a new id)."""
self.regenerated = True
self._regenerate()
def _regenerate(self):
if self.id is not None:
self.delete()
old_session_was_locked = self.locked
if old_session_was_locked:
self.release_lock()
self.id = None
while self.id is None:
self.id = self.generate_id()
# Assert that the generated id is not already stored.
if self._exists():
self.id = None
if old_session_was_locked:
self.acquire_lock()
def clean_up(self):
"""Clean up expired sessions."""
pass
try:
os.urandom(20)
except (AttributeError, NotImplementedError):
# os.urandom not available until Python 2.4. Fall back to random.random.
def generate_id(self):
"""Return a new session id."""
return sha('%s' % random.random()).hexdigest()
else:
def generate_id(self):
"""Return a new session id."""
return os.urandom(20).encode('hex')
def save(self):
"""Save session data."""
try:
# If session data has never been loaded then it's never been
# accessed: no need to save it
if self.loaded:
t = datetime.timedelta(seconds=self.timeout * 60)
expiration_time = datetime.datetime.now() + t
if self.debug:
cherrypy.log('Saving with expiry %s' % expiration_time,
'TOOLS.SESSIONS')
self._save(expiration_time)
finally:
if self.locked:
# Always release the lock if the user didn't release it
self.release_lock()
def load(self):
"""Copy stored session data into this session instance."""
data = self._load()
# data is either None or a tuple (session_data, expiration_time)
if data is None or data[1] < datetime.datetime.now():
if self.debug:
cherrypy.log('Expired session, flushing data', 'TOOLS.SESSIONS')
self._data = {}
else:
self._data = data[0]
self.loaded = True
# Stick the clean_thread in the class, not the instance.
# The instances are created and destroyed per-request.
cls = self.__class__
if self.clean_freq and not cls.clean_thread:
# clean_up is in instancemethod and not a classmethod,
# so that tool config can be accessed inside the method.
t = cherrypy.process.plugins.Monitor(
cherrypy.engine, self.clean_up, self.clean_freq * 60,
name='Session cleanup')
t.subscribe()
cls.clean_thread = t
t.start()
def delete(self):
"""Delete stored session data."""
self._delete()
def __getitem__(self, key):
if not self.loaded: self.load()
return self._data[key]
def __setitem__(self, key, value):
if not self.loaded: self.load()
self._data[key] = value
def __delitem__(self, key):
if not self.loaded: self.load()
del self._data[key]
def pop(self, key, default=missing):
"""Remove the specified key and return the corresponding value.
If key is not found, default is returned if given,
otherwise KeyError is raised.
"""
if not self.loaded: self.load()
if default is missing:
return self._data.pop(key)
else:
return self._data.pop(key, default)
def __contains__(self, key):
if not self.loaded: self.load()
return key in self._data
def has_key(self, key):
"""D.has_key(k) -> True if D has a key k, else False."""
if not self.loaded: self.load()
return key in self._data
def get(self, key, default=None):
"""D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
if not self.loaded: self.load()
return self._data.get(key, default)
def update(self, d):
"""D.update(E) -> None. Update D from E: for k in E: D[k] = E[k]."""
if not self.loaded: self.load()
self._data.update(d)
def setdefault(self, key, default=None):
"""D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D."""
if not self.loaded: self.load()
return self._data.setdefault(key, default)
def clear(self):
"""D.clear() -> None. Remove all items from D."""
if not self.loaded: self.load()
self._data.clear()
def keys(self):
"""D.keys() -> list of D's keys."""
if not self.loaded: self.load()
return self._data.keys()
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples."""
if not self.loaded: self.load()
return self._data.items()
def values(self):
"""D.values() -> list of D's values."""
if not self.loaded: self.load()
return self._data.values()
class RamSession(Session):
# Class-level objects. Don't rebind these!
cache = {}
locks = {}
def clean_up(self):
"""Clean up expired sessions."""
now = datetime.datetime.now()
for id, (data, expiration_time) in self.cache.items():
if expiration_time <= now:
try:
del self.cache[id]
except KeyError:
pass
try:
del self.locks[id]
except KeyError:
pass
def _exists(self):
return self.id in self.cache
def _load(self):
return self.cache.get(self.id)
def _save(self, expiration_time):
self.cache[self.id] = (self._data, expiration_time)
def _delete(self):
self.cache.pop(self.id, None)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
return len(self.cache)
class FileSession(Session):
"""Implementation of the File backend for sessions
storage_path: the folder where session data will be saved. Each session
will be saved as pickle.dump(data, expiration_time) in its own file;
the filename will be self.SESSION_PREFIX + self.id.
"""
SESSION_PREFIX = 'session-'
LOCK_SUFFIX = '.lock'
pickle_protocol = pickle.HIGHEST_PROTOCOL
def __init__(self, id=None, **kwargs):
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
Session.__init__(self, id=id, **kwargs)
def setup(cls, **kwargs):
"""Set up the storage system for file-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
for k, v in kwargs.items():
setattr(cls, k, v)
# Warn if any lock files exist at startup.
lockfiles = [fname for fname in os.listdir(cls.storage_path)
if (fname.startswith(cls.SESSION_PREFIX)
and fname.endswith(cls.LOCK_SUFFIX))]
if lockfiles:
plural = ('', 's')[len(lockfiles) > 1]
warn("%s session lockfile%s found at startup. If you are "
"only running one process, then you may need to "
"manually delete the lockfiles found at %r."
% (len(lockfiles), plural, cls.storage_path))
setup = classmethod(setup)
def _get_file_path(self):
f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id)
if not os.path.abspath(f).startswith(self.storage_path):
raise cherrypy.HTTPError(400, "Invalid session id in cookie.")
return f
def _exists(self):
path = self._get_file_path()
return os.path.exists(path)
def _load(self, path=None):
if path is None:
path = self._get_file_path()
try:
f = open(path, "rb")
try:
return pickle.load(f)
finally:
f.close()
except (IOError, EOFError):
return None
def _save(self, expiration_time):
f = open(self._get_file_path(), "wb")
try:
pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
finally:
f.close()
def _delete(self):
try:
os.unlink(self._get_file_path())
except OSError:
pass
def acquire_lock(self, path=None):
"""Acquire an exclusive lock on the currently-loaded session data."""
if path is None:
path = self._get_file_path()
path += self.LOCK_SUFFIX
while True:
try:
lockfd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
except OSError:
time.sleep(0.1)
else:
os.close(lockfd)
break
self.locked = True
def release_lock(self, path=None):
"""Release the lock on the currently-loaded session data."""
if path is None:
path = self._get_file_path()
os.unlink(path + self.LOCK_SUFFIX)
self.locked = False
def clean_up(self):
"""Clean up expired sessions."""
now = datetime.datetime.now()
# Iterate over all session files in self.storage_path
for fname in os.listdir(self.storage_path):
if (fname.startswith(self.SESSION_PREFIX)
and not fname.endswith(self.LOCK_SUFFIX)):
# We have a session file: lock and load it and check
# if it's expired. If it fails, nevermind.
path = os.path.join(self.storage_path, fname)
self.acquire_lock(path)
try:
contents = self._load(path)
# _load returns None on IOError
if contents is not None:
data, expiration_time = contents
if expiration_time < now:
# Session expired: deleting it
os.unlink(path)
finally:
self.release_lock(path)
def __len__(self):
"""Return the number of active sessions."""
return len([fname for fname in os.listdir(self.storage_path)
if (fname.startswith(self.SESSION_PREFIX)
and not fname.endswith(self.LOCK_SUFFIX))])
class PostgresqlSession(Session):
""" Implementation of the PostgreSQL backend for sessions. It assumes
a table like this:
create table session (
id varchar(40),
data text,
expiration_time timestamp
)
You must provide your own get_db function.
"""
pickle_protocol = pickle.HIGHEST_PROTOCOL
def __init__(self, id=None, **kwargs):
Session.__init__(self, id, **kwargs)
self.cursor = self.db.cursor()
def setup(cls, **kwargs):
"""Set up the storage system for Postgres-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
for k, v in kwargs.items():
setattr(cls, k, v)
self.db = self.get_db()
setup = classmethod(setup)
def __del__(self):
if self.cursor:
self.cursor.close()
self.db.commit()
def _exists(self):
# Select session data from table
self.cursor.execute('select data, expiration_time from session '
'where id=%s', (self.id,))
rows = self.cursor.fetchall()
return bool(rows)
def _load(self):
# Select session data from table
self.cursor.execute('select data, expiration_time from session '
'where id=%s', (self.id,))
rows = self.cursor.fetchall()
if not rows:
return None
pickled_data, expiration_time = rows[0]
data = pickle.loads(pickled_data)
return data, expiration_time
def _save(self, expiration_time):
pickled_data = pickle.dumps(self._data, self.pickle_protocol)
self.cursor.execute('update session set data = %s, '
'expiration_time = %s where id = %s',
(pickled_data, expiration_time, self.id))
def _delete(self):
self.cursor.execute('delete from session where id=%s', (self.id,))
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
# We use the "for update" clause to lock the row
self.locked = True
self.cursor.execute('select id from session where id=%s for update',
(self.id,))
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
# We just close the cursor and that will remove the lock
# introduced by the "for update" clause
self.cursor.close()
self.locked = False
def clean_up(self):
"""Clean up expired sessions."""
self.cursor.execute('delete from session where expiration_time < %s',
(datetime.datetime.now(),))
class MemcachedSession(Session):
# The most popular memcached client for Python isn't thread-safe.
# Wrap all .get and .set operations in a single lock.
mc_lock = threading.RLock()
# This is a seperate set of locks per session id.
locks = {}
servers = ['127.0.0.1:11211']
def setup(cls, **kwargs):
"""Set up the storage system for memcached-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
for k, v in kwargs.items():
setattr(cls, k, v)
import memcache
cls.cache = memcache.Client(cls.servers)
setup = classmethod(setup)
def _exists(self):
self.mc_lock.acquire()
try:
return bool(self.cache.get(self.id))
finally:
self.mc_lock.release()
def _load(self):
self.mc_lock.acquire()
try:
return self.cache.get(self.id)
finally:
self.mc_lock.release()
def _save(self, expiration_time):
# Send the expiration time as "Unix time" (seconds since 1/1/1970)
td = int(time.mktime(expiration_time.timetuple()))
self.mc_lock.acquire()
try:
if not self.cache.set(self.id, (self._data, expiration_time), td):
raise AssertionError("Session data for id %r not set." % self.id)
finally:
self.mc_lock.release()
def _delete(self):
self.cache.delete(self.id)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
raise NotImplementedError
# Hook functions (for CherryPy tools)
def save():
"""Save any changed session data."""
if not hasattr(cherrypy.serving, "session"):
return
request = cherrypy.serving.request
response = cherrypy.serving.response
# Guard against running twice
if hasattr(request, "_sessionsaved"):
return
request._sessionsaved = True
if response.stream:
# If the body is being streamed, we have to save the data
# *after* the response has been written out
request.hooks.attach('on_end_request', cherrypy.session.save)
else:
# If the body is not being streamed, we save the data now
# (so we can release the lock).
if isinstance(response.body, types.GeneratorType):
response.collapse_body()
cherrypy.session.save()
save.failsafe = True
def close():
"""Close the session object for this request."""
sess = getattr(cherrypy.serving, "session", None)
if getattr(sess, "locked", False):
# If the session is still locked we release the lock
sess.release_lock()
close.failsafe = True
close.priority = 90
def init(storage_type='ram', path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False, clean_freq=5,
persistent=True, debug=False, **kwargs):
"""Initialize session object (using cookies).
storage_type: one of 'ram', 'file', 'postgresql'. This will be used
to look up the corresponding class in cherrypy.lib.sessions
globals. For example, 'file' will use the FileSession class.
path: the 'path' value to stick in the response cookie metadata.
path_header: if 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name: the name of the cookie.
timeout: the expiration timeout (in minutes) for the stored session data.
If 'persistent' is True (the default), this is also the timeout
for the cookie.
domain: the cookie domain.
secure: if False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
clean_freq (minutes): the poll rate for expired session cleanup.
persistent: if True (the default), the 'timeout' argument will be used
to expire the cookie. If False, the cookie will not have an expiry,
and the cookie will be a "session cookie" which expires when the
browser is closed.
Any additional kwargs will be bound to the new Session instance,
and may be specific to the storage type. See the subclass of Session
you're using for more information.
"""
request = cherrypy.serving.request
# Guard against running twice
if hasattr(request, "_session_init_flag"):
return
request._session_init_flag = True
# Check if request came with a session ID
id = None
if name in request.cookie:
id = request.cookie[name].value
if debug:
cherrypy.log('ID obtained from request.cookie: %r' % id,
'TOOLS.SESSIONS')
# Find the storage class and call setup (first time only).
storage_class = storage_type.title() + 'Session'
storage_class = globals()[storage_class]
if not hasattr(cherrypy, "session"):
if hasattr(storage_class, "setup"):
storage_class.setup(**kwargs)
# Create and attach a new Session instance to cherrypy.serving.
# It will possess a reference to (and lock, and lazily load)
# the requested session data.
kwargs['timeout'] = timeout
kwargs['clean_freq'] = clean_freq
cherrypy.serving.session = sess = storage_class(id, **kwargs)
sess.debug = debug
def update_cookie(id):
"""Update the cookie every time the session id changes."""
cherrypy.serving.response.cookie[name] = id
sess.id_observers.append(update_cookie)
# Create cherrypy.session which will proxy to cherrypy.serving.session
if not hasattr(cherrypy, "session"):
cherrypy.session = cherrypy._ThreadLocalProxy('session')
if persistent:
cookie_timeout = timeout
else:
# See http://support.microsoft.com/kb/223799/EN-US/
# and http://support.mozilla.com/en-US/kb/Cookies
cookie_timeout = None
set_response_cookie(path=path, path_header=path_header, name=name,
timeout=cookie_timeout, domain=domain, secure=secure)
def set_response_cookie(path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False):
"""Set a response cookie for the client.
path: the 'path' value to stick in the response cookie metadata.
path_header: if 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name: the name of the cookie.
timeout: the expiration timeout for the cookie. If 0 or other boolean
False, no 'expires' param will be set, and the cookie will be a
"session cookie" which expires when the browser is closed.
domain: the cookie domain.
secure: if False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
"""
# Set response cookie
cookie = cherrypy.serving.response.cookie
cookie[name] = cherrypy.serving.session.id
cookie[name]['path'] = (path or cherrypy.serving.request.headers.get(path_header)
or '/')
# We'd like to use the "max-age" param as indicated in
# http://www.faqs.org/rfcs/rfc2109.html but IE doesn't
# save it to disk and the session is lost if people close
# the browser. So we have to use the old "expires" ... sigh ...
## cookie[name]['max-age'] = timeout * 60
if timeout:
e = time.time() + (timeout * 60)
cookie[name]['expires'] = httputil.HTTPDate(e)
if domain is not None:
cookie[name]['domain'] = domain
if secure:
cookie[name]['secure'] = 1
def expire():
"""Expire the current session cookie."""
name = cherrypy.serving.request.config.get('tools.sessions.name', 'session_id')
one_year = 60 * 60 * 24 * 365
e = time.time() - one_year
cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)
| 26,677 | Python | .py | 607 | 32.647446 | 87 | 0.591877 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,618 | profiler.py | midgetspy_Sick-Beard/cherrypy/lib/profiler.py | """Profiler tools for CherryPy.
CherryPy users
==============
You can profile any of your pages as follows:
from cherrypy.lib import profiler
class Root:
p = profile.Profiler("/path/to/profile/dir")
def index(self):
self.p.run(self._index)
index.exposed = True
def _index(self):
return "Hello, world!"
cherrypy.tree.mount(Root())
You can also turn on profiling for all requests
using the make_app function as WSGI middleware.
CherryPy developers
===================
This module can be used whenever you make changes to CherryPy,
to get a quick sanity-check on overall CP performance. Use the
"--profile" flag when running the test suite. Then, use the serve()
function to browse the results in a web browser. If you run this
module from the command line, it will call serve() for you.
"""
# Make profiler output more readable by adding __init__ modules' parents.
def new_func_strip_path(func_name):
filename, line, name = func_name
if filename.endswith("__init__.py"):
return os.path.basename(filename[:-12]) + filename[-12:], line, name
return os.path.basename(filename), line, name
try:
import profile
import pstats
pstats.func_strip_path = new_func_strip_path
except ImportError:
profile = None
pstats = None
import os, os.path
import sys
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
_count = 0
class Profiler(object):
def __init__(self, path=None):
if not path:
path = os.path.join(os.path.dirname(__file__), "profile")
self.path = path
if not os.path.exists(path):
os.makedirs(path)
def run(self, func, *args, **params):
"""Dump profile data into self.path."""
global _count
c = _count = _count + 1
path = os.path.join(self.path, "cp_%04d.prof" % c)
prof = profile.Profile()
result = prof.runcall(func, *args, **params)
prof.dump_stats(path)
return result
def statfiles(self):
"""statfiles() -> list of available profiles."""
return [f for f in os.listdir(self.path)
if f.startswith("cp_") and f.endswith(".prof")]
def stats(self, filename, sortby='cumulative'):
"""stats(index) -> output of print_stats() for the given profile."""
sio = StringIO()
if sys.version_info >= (2, 5):
s = pstats.Stats(os.path.join(self.path, filename), stream=sio)
s.strip_dirs()
s.sort_stats(sortby)
s.print_stats()
else:
# pstats.Stats before Python 2.5 didn't take a 'stream' arg,
# but just printed to stdout. So re-route stdout.
s = pstats.Stats(os.path.join(self.path, filename))
s.strip_dirs()
s.sort_stats(sortby)
oldout = sys.stdout
try:
sys.stdout = sio
s.print_stats()
finally:
sys.stdout = oldout
response = sio.getvalue()
sio.close()
return response
def index(self):
return """<html>
<head><title>CherryPy profile data</title></head>
<frameset cols='200, 1*'>
<frame src='menu' />
<frame name='main' src='' />
</frameset>
</html>
"""
index.exposed = True
def menu(self):
yield "<h2>Profiling runs</h2>"
yield "<p>Click on one of the runs below to see profiling data.</p>"
runs = self.statfiles()
runs.sort()
for i in runs:
yield "<a href='report?filename=%s' target='main'>%s</a><br />" % (i, i)
menu.exposed = True
def report(self, filename):
import cherrypy
cherrypy.response.headers['Content-Type'] = 'text/plain'
return self.stats(filename)
report.exposed = True
class ProfileAggregator(Profiler):
def __init__(self, path=None):
Profiler.__init__(self, path)
global _count
self.count = _count = _count + 1
self.profiler = profile.Profile()
def run(self, func, *args):
path = os.path.join(self.path, "cp_%04d.prof" % self.count)
result = self.profiler.runcall(func, *args)
self.profiler.dump_stats(path)
return result
class make_app:
def __init__(self, nextapp, path=None, aggregate=False):
"""Make a WSGI middleware app which wraps 'nextapp' with profiling.
nextapp: the WSGI application to wrap, usually an instance of
cherrypy.Application.
path: where to dump the profiling output.
aggregate: if True, profile data for all HTTP requests will go in
a single file. If False (the default), each HTTP request will
dump its profile data into a separate file.
"""
if profile is None or pstats is None:
msg = ("Your installation of Python does not have a profile module. "
"If you're on Debian, try `sudo apt-get install python-profiler`. "
"See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.")
warnings.warn(msg)
self.nextapp = nextapp
self.aggregate = aggregate
if aggregate:
self.profiler = ProfileAggregator(path)
else:
self.profiler = Profiler(path)
def __call__(self, environ, start_response):
def gather():
result = []
for line in self.nextapp(environ, start_response):
result.append(line)
return result
return self.profiler.run(gather)
def serve(path=None, port=8080):
if profile is None or pstats is None:
msg = ("Your installation of Python does not have a profile module. "
"If you're on Debian, try `sudo apt-get install python-profiler`. "
"See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.")
warnings.warn(msg)
import cherrypy
cherrypy.config.update({'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': "production",
})
cherrypy.quickstart(Profiler(path))
if __name__ == "__main__":
serve(*tuple(sys.argv[1:]))
| 6,654 | Python | .py | 163 | 30.453988 | 87 | 0.589119 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,619 | httpauth.py | midgetspy_Sick-Beard/cherrypy/lib/httpauth.py | """
httpauth modules defines functions to implement HTTP Digest Authentication (RFC 2617).
This has full compliance with 'Digest' and 'Basic' authentication methods. In
'Digest' it supports both MD5 and MD5-sess algorithms.
Usage:
First use 'doAuth' to request the client authentication for a
certain resource. You should send an httplib.UNAUTHORIZED response to the
client so he knows he has to authenticate itself.
Then use 'parseAuthorization' to retrieve the 'auth_map' used in
'checkResponse'.
To use 'checkResponse' you must have already verified the password associated
with the 'username' key in 'auth_map' dict. Then you use the 'checkResponse'
function to verify if the password matches the one sent by the client.
SUPPORTED_ALGORITHM - list of supported 'Digest' algorithms
SUPPORTED_QOP - list of supported 'Digest' 'qop'.
"""
__version__ = 1, 0, 1
__author__ = "Tiago Cogumbreiro <[email protected]>"
__credits__ = """
Peter van Kampen for its recipe which implement most of Digest authentication:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302378
"""
__license__ = """
Copyright (c) 2005, Tiago Cogumbreiro <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Sylvain Hellegouarch nor the names of his contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__all__ = ("digestAuth", "basicAuth", "doAuth", "checkResponse",
"parseAuthorization", "SUPPORTED_ALGORITHM", "md5SessionKey",
"calculateNonce", "SUPPORTED_QOP")
################################################################################
try:
# Python 2.5+
from hashlib import md5
except ImportError:
from md5 import new as md5
import time
import base64
from urllib2 import parse_http_list, parse_keqv_list
MD5 = "MD5"
MD5_SESS = "MD5-sess"
AUTH = "auth"
AUTH_INT = "auth-int"
SUPPORTED_ALGORITHM = (MD5, MD5_SESS)
SUPPORTED_QOP = (AUTH, AUTH_INT)
################################################################################
# doAuth
#
DIGEST_AUTH_ENCODERS = {
MD5: lambda val: md5(val).hexdigest(),
MD5_SESS: lambda val: md5(val).hexdigest(),
# SHA: lambda val: sha.new (val).hexdigest (),
}
def calculateNonce (realm, algorithm=MD5):
"""This is an auxaliary function that calculates 'nonce' value. It is used
to handle sessions."""
global SUPPORTED_ALGORITHM, DIGEST_AUTH_ENCODERS
assert algorithm in SUPPORTED_ALGORITHM
try:
encoder = DIGEST_AUTH_ENCODERS[algorithm]
except KeyError:
raise NotImplementedError ("The chosen algorithm (%s) does not have "\
"an implementation yet" % algorithm)
return encoder ("%d:%s" % (time.time(), realm))
def digestAuth (realm, algorithm=MD5, nonce=None, qop=AUTH):
"""Challenges the client for a Digest authentication."""
global SUPPORTED_ALGORITHM, DIGEST_AUTH_ENCODERS, SUPPORTED_QOP
assert algorithm in SUPPORTED_ALGORITHM
assert qop in SUPPORTED_QOP
if nonce is None:
nonce = calculateNonce (realm, algorithm)
return 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
realm, nonce, algorithm, qop
)
def basicAuth (realm):
"""Challengenes the client for a Basic authentication."""
assert '"' not in realm, "Realms cannot contain the \" (quote) character."
return 'Basic realm="%s"' % realm
def doAuth (realm):
"""'doAuth' function returns the challenge string b giving priority over
Digest and fallback to Basic authentication when the browser doesn't
support the first one.
This should be set in the HTTP header under the key 'WWW-Authenticate'."""
return digestAuth (realm) + " " + basicAuth (realm)
################################################################################
# Parse authorization parameters
#
def _parseDigestAuthorization (auth_params):
# Convert the auth params to a dict
items = parse_http_list(auth_params)
params = parse_keqv_list(items)
# Now validate the params
# Check for required parameters
required = ["username", "realm", "nonce", "uri", "response"]
for k in required:
if k not in params:
return None
# If qop is sent then cnonce and nc MUST be present
if "qop" in params and not ("cnonce" in params \
and "nc" in params):
return None
# If qop is not sent, neither cnonce nor nc can be present
if ("cnonce" in params or "nc" in params) and \
"qop" not in params:
return None
return params
def _parseBasicAuthorization (auth_params):
username, password = base64.decodestring (auth_params).split (":", 1)
return {"username": username, "password": password}
AUTH_SCHEMES = {
"basic": _parseBasicAuthorization,
"digest": _parseDigestAuthorization,
}
def parseAuthorization (credentials):
"""parseAuthorization will convert the value of the 'Authorization' key in
the HTTP header to a map itself. If the parsing fails 'None' is returned.
"""
global AUTH_SCHEMES
auth_scheme, auth_params = credentials.split(" ", 1)
auth_scheme = auth_scheme.lower ()
parser = AUTH_SCHEMES[auth_scheme]
params = parser (auth_params)
if params is None:
return
assert "auth_scheme" not in params
params["auth_scheme"] = auth_scheme
return params
################################################################################
# Check provided response for a valid password
#
def md5SessionKey (params, password):
"""
If the "algorithm" directive's value is "MD5-sess", then A1
[the session key] is calculated only once - on the first request by the
client following receipt of a WWW-Authenticate challenge from the server.
This creates a 'session key' for the authentication of subsequent
requests and responses which is different for each "authentication
session", thus limiting the amount of material hashed with any one
key.
Because the server need only use the hash of the user
credentials in order to create the A1 value, this construction could
be used in conjunction with a third party authentication service so
that the web server would not need the actual password value. The
specification of such a protocol is beyond the scope of this
specification.
"""
keys = ("username", "realm", "nonce", "cnonce")
params_copy = {}
for key in keys:
params_copy[key] = params[key]
params_copy["algorithm"] = MD5_SESS
return _A1 (params_copy, password)
def _A1(params, password):
algorithm = params.get ("algorithm", MD5)
H = DIGEST_AUTH_ENCODERS[algorithm]
if algorithm == MD5:
# If the "algorithm" directive's value is "MD5" or is
# unspecified, then A1 is:
# A1 = unq(username-value) ":" unq(realm-value) ":" passwd
return "%s:%s:%s" % (params["username"], params["realm"], password)
elif algorithm == MD5_SESS:
# This is A1 if qop is set
# A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
# ":" unq(nonce-value) ":" unq(cnonce-value)
h_a1 = H ("%s:%s:%s" % (params["username"], params["realm"], password))
return "%s:%s:%s" % (h_a1, params["nonce"], params["cnonce"])
def _A2(params, method, kwargs):
# If the "qop" directive's value is "auth" or is unspecified, then A2 is:
# A2 = Method ":" digest-uri-value
qop = params.get ("qop", "auth")
if qop == "auth":
return method + ":" + params["uri"]
elif qop == "auth-int":
# If the "qop" value is "auth-int", then A2 is:
# A2 = Method ":" digest-uri-value ":" H(entity-body)
entity_body = kwargs.get ("entity_body", "")
H = kwargs["H"]
return "%s:%s:%s" % (
method,
params["uri"],
H(entity_body)
)
else:
raise NotImplementedError ("The 'qop' method is unknown: %s" % qop)
def _computeDigestResponse(auth_map, password, method="GET", A1=None, **kwargs):
"""
Generates a response respecting the algorithm defined in RFC 2617
"""
params = auth_map
algorithm = params.get ("algorithm", MD5)
H = DIGEST_AUTH_ENCODERS[algorithm]
KD = lambda secret, data: H(secret + ":" + data)
qop = params.get ("qop", None)
H_A2 = H(_A2(params, method, kwargs))
if algorithm == MD5_SESS and A1 is not None:
H_A1 = H(A1)
else:
H_A1 = H(_A1(params, password))
if qop in ("auth", "auth-int"):
# If the "qop" value is "auth" or "auth-int":
# request-digest = <"> < KD ( H(A1), unq(nonce-value)
# ":" nc-value
# ":" unq(cnonce-value)
# ":" unq(qop-value)
# ":" H(A2)
# ) <">
request = "%s:%s:%s:%s:%s" % (
params["nonce"],
params["nc"],
params["cnonce"],
params["qop"],
H_A2,
)
elif qop is None:
# If the "qop" directive is not present (this construction is
# for compatibility with RFC 2069):
# request-digest =
# <"> < KD ( H(A1), unq(nonce-value) ":" H(A2) ) > <">
request = "%s:%s" % (params["nonce"], H_A2)
return KD(H_A1, request)
def _checkDigestResponse(auth_map, password, method="GET", A1=None, **kwargs):
"""This function is used to verify the response given by the client when
he tries to authenticate.
Optional arguments:
entity_body - when 'qop' is set to 'auth-int' you MUST provide the
raw data you are going to send to the client (usually the
HTML page.
request_uri - the uri from the request line compared with the 'uri'
directive of the authorization map. They must represent
the same resource (unused at this time).
"""
if auth_map['realm'] != kwargs.get('realm', None):
return False
response = _computeDigestResponse(auth_map, password, method, A1, **kwargs)
return response == auth_map["response"]
def _checkBasicResponse (auth_map, password, method='GET', encrypt=None, **kwargs):
# Note that the Basic response doesn't provide the realm value so we cannot
# test it
try:
return encrypt(auth_map["password"], auth_map["username"]) == password
except TypeError:
return encrypt(auth_map["password"]) == password
AUTH_RESPONSES = {
"basic": _checkBasicResponse,
"digest": _checkDigestResponse,
}
def checkResponse (auth_map, password, method="GET", encrypt=None, **kwargs):
"""'checkResponse' compares the auth_map with the password and optionally
other arguments that each implementation might need.
If the response is of type 'Basic' then the function has the following
signature:
checkBasicResponse (auth_map, password) -> bool
If the response is of type 'Digest' then the function has the following
signature:
checkDigestResponse (auth_map, password, method = 'GET', A1 = None) -> bool
The 'A1' argument is only used in MD5_SESS algorithm based responses.
Check md5SessionKey() for more info.
"""
global AUTH_RESPONSES
checker = AUTH_RESPONSES[auth_map["auth_scheme"]]
return checker (auth_map, password, method=method, encrypt=encrypt, **kwargs)
| 13,282 | Python | .py | 280 | 40.275 | 87 | 0.638202 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,620 | httputil.py | midgetspy_Sick-Beard/cherrypy/lib/httputil.py | """HTTP library functions."""
# This module contains functions for building an HTTP application
# framework: any one, not just one whose name starts with "Ch". ;) If you
# reference any modules from some popular framework inside *this* module,
# FuManChu will personally hang you up by your thumbs and submit you
# to a public caning.
from binascii import b2a_base64
from BaseHTTPServer import BaseHTTPRequestHandler
response_codes = BaseHTTPRequestHandler.responses.copy()
# From http://www.cherrypy.org/ticket/361
response_codes[500] = ('Internal Server Error',
'The server encountered an unexpected condition '
'which prevented it from fulfilling the request.')
response_codes[503] = ('Service Unavailable',
'The server is currently unable to handle the '
'request due to a temporary overloading or '
'maintenance of the server.')
import re
import urllib
from rfc822 import formatdate as HTTPDate
def urljoin(*atoms):
"""Return the given path *atoms, joined into a single URL.
This will correctly join a SCRIPT_NAME and PATH_INFO into the
original URL, even if either atom is blank.
"""
url = "/".join([x for x in atoms if x])
while "//" in url:
url = url.replace("//", "/")
# Special-case the final url of "", and return "/" instead.
return url or "/"
def protocol_from_http(protocol_str):
"""Return a protocol tuple from the given 'HTTP/x.y' string."""
return int(protocol_str[5]), int(protocol_str[7])
def get_ranges(headervalue, content_length):
"""Return a list of (start, stop) indices from a Range header, or None.
Each (start, stop) tuple will be composed of two ints, which are suitable
for use in a slicing operation. That is, the header "Range: bytes=3-6",
if applied against a Python string, is requesting resource[3:7]. This
function will return the list [(3, 7)].
If this function returns an empty list, you should return HTTP 416.
"""
if not headervalue:
return None
result = []
bytesunit, byteranges = headervalue.split("=", 1)
for brange in byteranges.split(","):
start, stop = [x.strip() for x in brange.split("-", 1)]
if start:
if not stop:
stop = content_length - 1
start, stop = int(start), int(stop)
if start >= content_length:
# From rfc 2616 sec 14.16:
# "If the server receives a request (other than one
# including an If-Range request-header field) with an
# unsatisfiable Range request-header field (that is,
# all of whose byte-range-spec values have a first-byte-pos
# value greater than the current length of the selected
# resource), it SHOULD return a response code of 416
# (Requested range not satisfiable)."
continue
if stop < start:
# From rfc 2616 sec 14.16:
# "If the server ignores a byte-range-spec because it
# is syntactically invalid, the server SHOULD treat
# the request as if the invalid Range header field
# did not exist. (Normally, this means return a 200
# response containing the full entity)."
return None
result.append((start, stop + 1))
else:
if not stop:
# See rfc quote above.
return None
# Negative subscript (last N bytes)
result.append((content_length - int(stop), content_length))
return result
class HeaderElement(object):
"""An element (with parameters) from an HTTP header's element list."""
def __init__(self, value, params=None):
self.value = value
if params is None:
params = {}
self.params = params
def __cmp__(self, other):
return cmp(self.value, other.value)
def __unicode__(self):
p = [";%s=%s" % (k, v) for k, v in self.params.iteritems()]
return u"%s%s" % (self.value, "".join(p))
def __str__(self):
return str(self.__unicode__())
def parse(elementstr):
"""Transform 'token;key=val' to ('token', {'key': 'val'})."""
# Split the element into a value and parameters. The 'value' may
# be of the form, "token=token", but we don't split that here.
atoms = [x.strip() for x in elementstr.split(";") if x.strip()]
if not atoms:
initial_value = ''
else:
initial_value = atoms.pop(0).strip()
params = {}
for atom in atoms:
atom = [x.strip() for x in atom.split("=", 1) if x.strip()]
key = atom.pop(0)
if atom:
val = atom[0]
else:
val = ""
params[key] = val
return initial_value, params
parse = staticmethod(parse)
def from_str(cls, elementstr):
"""Construct an instance from a string of the form 'token;key=val'."""
ival, params = cls.parse(elementstr)
return cls(ival, params)
from_str = classmethod(from_str)
q_separator = re.compile(r'; *q *=')
class AcceptElement(HeaderElement):
"""An element (with parameters) from an Accept* header's element list.
AcceptElement objects are comparable; the more-preferred object will be
"less than" the less-preferred object. They are also therefore sortable;
if you sort a list of AcceptElement objects, they will be listed in
priority order; the most preferred value will be first. Yes, it should
have been the other way around, but it's too late to fix now.
"""
def from_str(cls, elementstr):
qvalue = None
# The first "q" parameter (if any) separates the initial
# media-range parameter(s) (if any) from the accept-params.
atoms = q_separator.split(elementstr, 1)
media_range = atoms.pop(0).strip()
if atoms:
# The qvalue for an Accept header can have extensions. The other
# headers cannot, but it's easier to parse them as if they did.
qvalue = HeaderElement.from_str(atoms[0].strip())
media_type, params = cls.parse(media_range)
if qvalue is not None:
params["q"] = qvalue
return cls(media_type, params)
from_str = classmethod(from_str)
def qvalue(self):
val = self.params.get("q", "1")
if isinstance(val, HeaderElement):
val = val.value
return float(val)
qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")
def __cmp__(self, other):
diff = cmp(self.qvalue, other.qvalue)
if diff == 0:
diff = cmp(str(self), str(other))
return diff
def header_elements(fieldname, fieldvalue):
"""Return a sorted HeaderElement list from a comma-separated header str."""
if not fieldvalue:
return []
result = []
for element in fieldvalue.split(","):
if fieldname.startswith("Accept") or fieldname == 'TE':
hv = AcceptElement.from_str(element)
else:
hv = HeaderElement.from_str(element)
result.append(hv)
result.sort()
result.reverse()
return result
def decode_TEXT(value):
"""Decode RFC-2047 TEXT (e.g. "=?utf-8?q?f=C3=BCr?=" -> u"f\xfcr")."""
from email.Header import decode_header
atoms = decode_header(value)
decodedvalue = ""
for atom, charset in atoms:
if charset is not None:
atom = atom.decode(charset)
decodedvalue += atom
return decodedvalue
def valid_status(status):
"""Return legal HTTP status Code, Reason-phrase and Message.
The status arg must be an int, or a str that begins with an int.
If status is an int, or a str and no reason-phrase is supplied,
a default reason-phrase will be provided.
"""
if not status:
status = 200
status = str(status)
parts = status.split(" ", 1)
if len(parts) == 1:
# No reason supplied.
code, = parts
reason = None
else:
code, reason = parts
reason = reason.strip()
try:
code = int(code)
except ValueError:
raise ValueError("Illegal response status from server "
"(%s is non-numeric)." % repr(code))
if code < 100 or code > 599:
raise ValueError("Illegal response status from server "
"(%s is out of range)." % repr(code))
if code not in response_codes:
# code is unknown but not illegal
default_reason, message = "", ""
else:
default_reason, message = response_codes[code]
if reason is None:
reason = default_reason
return code, reason, message
def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
"""Parse a query given as a string argument.
Arguments:
qs: URL-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
Returns a dict, as G-d intended.
"""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
d = {}
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = urllib.unquote(nv[0].replace('+', ' '))
name = name.decode(encoding, 'strict')
value = urllib.unquote(nv[1].replace('+', ' '))
value = value.decode(encoding, 'strict')
if name in d:
if not isinstance(d[name], list):
d[name] = [d[name]]
d[name].append(value)
else:
d[name] = value
return d
image_map_pattern = re.compile(r"[0-9]+,[0-9]+")
def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
"""Build a params dictionary from a query_string.
Duplicate key/value pairs in the provided query_string will be
returned as {'key': [val1, val2, ...]}. Single key/values will
be returned as strings: {'key': 'value'}.
"""
if image_map_pattern.match(query_string):
# Server-side image map. Map the coords to 'x' and 'y'
# (like CGI::Request does).
pm = query_string.split(",")
pm = {'x': int(pm[0]), 'y': int(pm[1])}
else:
pm = _parse_qs(query_string, keep_blank_values, encoding=encoding)
return pm
class CaseInsensitiveDict(dict):
"""A case-insensitive dict subclass.
Each key is changed on entry to str(key).title().
"""
def __getitem__(self, key):
return dict.__getitem__(self, str(key).title())
def __setitem__(self, key, value):
dict.__setitem__(self, str(key).title(), value)
def __delitem__(self, key):
dict.__delitem__(self, str(key).title())
def __contains__(self, key):
return dict.__contains__(self, str(key).title())
def get(self, key, default=None):
return dict.get(self, str(key).title(), default)
def has_key(self, key):
return dict.has_key(self, str(key).title())
def update(self, E):
for k in E.keys():
self[str(k).title()] = E[k]
def fromkeys(cls, seq, value=None):
newdict = cls()
for k in seq:
newdict[str(k).title()] = value
return newdict
fromkeys = classmethod(fromkeys)
def setdefault(self, key, x=None):
key = str(key).title()
try:
return self[key]
except KeyError:
self[key] = x
return x
def pop(self, key, default):
return dict.pop(self, str(key).title(), default)
class HeaderMap(CaseInsensitiveDict):
"""A dict subclass for HTTP request and response headers.
Each key is changed on entry to str(key).title(). This allows headers
to be case-insensitive and avoid duplicates.
Values are header values (decoded according to RFC 2047 if necessary).
"""
protocol = (1, 1)
def elements(self, key):
"""Return a sorted list of HeaderElements for the given header."""
key = str(key).title()
value = self.get(key)
return header_elements(key, value)
def values(self, key):
"""Return a sorted list of HeaderElement.value for the given header."""
return [e.value for e in self.elements(key)]
def output(self):
"""Transform self into a list of (name, value) tuples."""
header_list = []
for k, v in self.items():
if isinstance(k, unicode):
k = k.encode("ISO-8859-1")
if not isinstance(v, basestring):
v = str(v)
if isinstance(v, unicode):
v = self.encode(v)
header_list.append((k, v))
return header_list
def encode(self, v):
"""Return the given header value, encoded for HTTP output."""
# HTTP/1.0 says, "Words of *TEXT may contain octets
# from character sets other than US-ASCII." and
# "Recipients of header field TEXT containing octets
# outside the US-ASCII character set may assume that
# they represent ISO-8859-1 characters."
try:
v = v.encode("ISO-8859-1")
except UnicodeEncodeError:
if self.protocol == (1, 1):
# Encode RFC-2047 TEXT
# (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
# We do our own here instead of using the email module
# because we never want to fold lines--folding has
# been deprecated by the HTTP working group.
v = b2a_base64(v.encode('utf-8'))
v = ('=?utf-8?b?' + v.strip('\n') + '?=')
else:
raise
return v
class Host(object):
"""An internet address.
name should be the client's host name. If not available (because no DNS
lookup is performed), the IP address should be used instead.
"""
ip = "0.0.0.0"
port = 80
name = "unknown.tld"
def __init__(self, ip, port, name=None):
self.ip = ip
self.port = port
if name is None:
name = ip
self.name = name
def __repr__(self):
return "httputil.Host(%r, %r, %r)" % (self.ip, self.port, self.name)
| 15,926 | Python | .py | 361 | 33.32687 | 80 | 0.586273 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,621 | auth_digest.py | midgetspy_Sick-Beard/cherrypy/lib/auth_digest.py | # This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
__doc__ = """An implementation of the server-side of HTTP Digest Access
Authentication, which is described in RFC 2617.
Example usage, using the built-in get_ha1_dict_plain function which uses a dict
of plaintext passwords as the credentials store:
userpassdict = {'alice' : '4x5istwelve'}
get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(userpassdict)
digest_auth = {'tools.auth_digest.on': True,
'tools.auth_digest.realm': 'wonderland',
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb',
}
app_config = { '/' : digest_auth }
"""
__author__ = 'visteya'
__date__ = 'April 2009'
try:
from hashlib import md5
except ImportError:
# Python 2.4 and earlier
from md5 import new as md5
md5_hex = lambda s: md5(s).hexdigest()
import time
import base64
from urllib2 import parse_http_list, parse_keqv_list
import cherrypy
qop_auth = 'auth'
qop_auth_int = 'auth-int'
valid_qops = (qop_auth, qop_auth_int)
valid_algorithms = ('MD5', 'MD5-sess')
def TRACE(msg):
cherrypy.log(msg, context='TOOLS.AUTH_DIGEST')
# Three helper functions for users of the tool, providing three variants
# of get_ha1() functions for three different kinds of credential stores.
def get_ha1_dict_plain(user_password_dict):
"""Returns a get_ha1 function which obtains a plaintext password from a
dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, with plaintext
passwords, use get_ha1_dict_plain(my_userpass_dict) as the value for the
get_ha1 argument to digest_auth().
"""
def get_ha1(realm, username):
password = user_password_dict.get(username)
if password:
return md5_hex('%s:%s:%s' % (username, realm, password))
return None
return get_ha1
def get_ha1_dict(user_ha1_dict):
"""Returns a get_ha1 function which obtains a HA1 password hash from a
dictionary of the form: {username : HA1}.
If you want a dictionary-based authentication scheme, but with
pre-computed HA1 hashes instead of plain-text passwords, use
get_ha1_dict(my_userha1_dict) as the value for the get_ha1
argument to digest_auth().
"""
def get_ha1(realm, username):
return user_ha1_dict.get(user)
return get_ha1
def get_ha1_file_htdigest(filename):
"""Returns a get_ha1 function which obtains a HA1 password hash from a
flat file with lines of the same format as that produced by the Apache
htdigest utility. For example, for realm 'wonderland', username 'alice',
and password '4x5istwelve', the htdigest line would be:
alice:wonderland:3238cdfe91a8b2ed8e39646921a02d4c
If you want to use an Apache htdigest file as the credentials store,
then use get_ha1_file_htdigest(my_htdigest_file) as the value for the
get_ha1 argument to digest_auth(). It is recommended that the filename
argument be an absolute path, to avoid problems.
"""
def get_ha1(realm, username):
result = None
f = open(filename, 'r')
for line in f:
u, r, ha1 = line.rstrip().split(':')
if u == username and r == realm:
result = ha1
break
f.close()
return result
return get_ha1
def synthesize_nonce(s, key, timestamp=None):
"""Synthesize a nonce value which resists spoofing and can be checked for staleness.
Returns a string suitable as the value for 'nonce' in the www-authenticate header.
Args:
s: a string related to the resource, such as the hostname of the server.
key: a secret string known only to the server.
timestamp: an integer seconds-since-the-epoch timestamp
"""
if timestamp is None:
timestamp = int(time.time())
h = md5_hex('%s:%s:%s' % (timestamp, s, key))
nonce = '%s:%s' % (timestamp, h)
return nonce
def H(s):
"""The hash function H"""
return md5_hex(s)
class HttpDigestAuthorization (object):
"""Class to parse a Digest Authorization header and perform re-calculation
of the digest.
"""
def errmsg(self, s):
return 'Digest Authorization header: %s' % s
def __init__(self, auth_header, http_method, debug=False):
self.http_method = http_method
self.debug = debug
scheme, params = auth_header.split(" ", 1)
self.scheme = scheme.lower()
if self.scheme != 'digest':
raise ValueError('Authorization scheme is not "Digest"')
self.auth_header = auth_header
# make a dict of the params
items = parse_http_list(params)
paramsd = parse_keqv_list(items)
self.realm = paramsd.get('realm')
self.username = paramsd.get('username')
self.nonce = paramsd.get('nonce')
self.uri = paramsd.get('uri')
self.method = paramsd.get('method')
self.response = paramsd.get('response') # the response digest
self.algorithm = paramsd.get('algorithm', 'MD5')
self.cnonce = paramsd.get('cnonce')
self.opaque = paramsd.get('opaque')
self.qop = paramsd.get('qop') # qop
self.nc = paramsd.get('nc') # nonce count
# perform some correctness checks
if self.algorithm not in valid_algorithms:
raise ValueError(self.errmsg("Unsupported value for algorithm: '%s'" % self.algorithm))
has_reqd = self.username and \
self.realm and \
self.nonce and \
self.uri and \
self.response
if not has_reqd:
raise ValueError(self.errmsg("Not all required parameters are present."))
if self.qop:
if self.qop not in valid_qops:
raise ValueError(self.errmsg("Unsupported value for qop: '%s'" % self.qop))
if not (self.cnonce and self.nc):
raise ValueError(self.errmsg("If qop is sent then cnonce and nc MUST be present"))
else:
if self.cnonce or self.nc:
raise ValueError(self.errmsg("If qop is not sent, neither cnonce nor nc can be present"))
def __str__(self):
return 'authorization : %s' % self.auth_header
def validate_nonce(self, s, key):
"""Validate the nonce.
Returns True if nonce was generated by synthesize_nonce() and the timestamp
is not spoofed, else returns False.
Args:
s: a string related to the resource, such as the hostname of the server.
key: a secret string known only to the server.
Both s and key must be the same values which were used to synthesize the nonce
we are trying to validate.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
s_timestamp, s_hashpart = synthesize_nonce(s, key, timestamp).split(':', 1)
is_valid = s_hashpart == hashpart
if self.debug:
TRACE('validate_nonce: %s' % is_valid)
return is_valid
except ValueError: # split() error
pass
return False
def is_nonce_stale(self, max_age_seconds=600):
"""Returns True if a validated nonce is stale. The nonce contains a
timestamp in plaintext and also a secure hash of the timestamp. You should
first validate the nonce to ensure the plaintext timestamp is not spoofed.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
if int(timestamp) + max_age_seconds > int(time.time()):
return False
except ValueError: # int() error
pass
if self.debug:
TRACE("nonce is stale")
return True
def HA2(self, entity_body=''):
"""Returns the H(A2) string. See RFC 2617 3.2.2.3."""
# RFC 2617 3.2.2.3
# If the "qop" directive's value is "auth" or is unspecified, then A2 is:
# A2 = method ":" digest-uri-value
#
# If the "qop" value is "auth-int", then A2 is:
# A2 = method ":" digest-uri-value ":" H(entity-body)
if self.qop is None or self.qop == "auth":
a2 = '%s:%s' % (self.http_method, self.uri)
elif self.qop == "auth-int":
a2 = "%s:%s:%s" % (self.http_method, self.uri, H(entity_body))
else:
# in theory, this should never happen, since I validate qop in __init__()
raise ValueError(self.errmsg("Unrecognized value for qop!"))
return H(a2)
def request_digest(self, ha1, entity_body=''):
"""Calculates the Request-Digest. See RFC 2617 3.2.2.1.
Arguments:
ha1 : the HA1 string obtained from the credentials store.
entity_body : if 'qop' is set to 'auth-int', then A2 includes a hash
of the "entity body". The entity body is the part of the
message which follows the HTTP headers. See RFC 2617 section
4.3. This refers to the entity the user agent sent in the request which
has the Authorization header. Typically GET requests don't have an entity,
and POST requests do.
"""
ha2 = self.HA2(entity_body)
# Request-Digest -- RFC 2617 3.2.2.1
if self.qop:
req = "%s:%s:%s:%s:%s" % (self.nonce, self.nc, self.cnonce, self.qop, ha2)
else:
req = "%s:%s" % (self.nonce, ha2)
# RFC 2617 3.2.2.2
#
# If the "algorithm" directive's value is "MD5" or is unspecified, then A1 is:
# A1 = unq(username-value) ":" unq(realm-value) ":" passwd
#
# If the "algorithm" directive's value is "MD5-sess", then A1 is
# calculated only once - on the first request by the client following
# receipt of a WWW-Authenticate challenge from the server.
# A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
# ":" unq(nonce-value) ":" unq(cnonce-value)
if self.algorithm == 'MD5-sess':
ha1 = H('%s:%s:%s' % (ha1, self.nonce, self.cnonce))
digest = H('%s:%s' % (ha1, req))
return digest
def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth, stale=False):
"""Constructs a WWW-Authenticate header for Digest authentication."""
if qop not in valid_qops:
raise ValueError("Unsupported value for qop: '%s'" % qop)
if algorithm not in valid_algorithms:
raise ValueError("Unsupported value for algorithm: '%s'" % algorithm)
if nonce is None:
nonce = synthesize_nonce(realm, key)
s = 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
realm, nonce, algorithm, qop)
if stale:
s += ', stale="true"'
return s
def digest_auth(realm, get_ha1, key, debug=False):
"""digest_auth is a CherryPy tool which hooks at before_handler to perform
HTTP Digest Access Authentication, as specified in RFC 2617.
If the request has an 'authorization' header with a 'Digest' scheme, this
tool authenticates the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not "Digest", or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Digest header.
Arguments:
realm: a string containing the authentication realm.
get_ha1: a callable which looks up a username in a credentials store
and returns the HA1 string, which is defined in the RFC to be
MD5(username : realm : password). The function's signature is:
get_ha1(realm, username)
where username is obtained from the request's 'authorization' header.
If username is not found in the credentials store, get_ha1() returns
None.
key: a secret string known only to the server, used in the synthesis of nonces.
"""
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
nonce_is_stale = False
if auth_header is not None:
try:
auth = HttpDigestAuthorization(auth_header, request.method, debug=debug)
except ValueError, e:
raise cherrypy.HTTPError(400, 'Bad Request: %s' % e)
if debug:
TRACE(str(auth))
if auth.validate_nonce(realm, key):
ha1 = get_ha1(realm, auth.username)
if ha1 is not None:
# note that for request.body to be available we need to hook in at
# before_handler, not on_start_resource like 3.1.x digest_auth does.
digest = auth.request_digest(ha1, entity_body=request.body)
if digest == auth.response: # authenticated
if debug:
TRACE("digest matches auth.response")
# Now check if nonce is stale.
# The choice of ten minutes' lifetime for nonce is somewhat arbitrary
nonce_is_stale = auth.is_nonce_stale(max_age_seconds=600)
if not nonce_is_stale:
request.login = auth.username
if debug:
TRACE("authentication of %s successful" % auth.username)
return
# Respond with 401 status and a WWW-Authenticate header
header = www_authenticate(realm, key, stale=nonce_is_stale)
if debug:
TRACE(header)
cherrypy.serving.response.headers['WWW-Authenticate'] = header
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
| 14,117 | Python | .py | 292 | 38.369863 | 106 | 0.619205 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,622 | encoding.py | midgetspy_Sick-Beard/cherrypy/lib/encoding.py | try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
set
except NameError:
from sets import Set as set
import struct
import time
import types
import cherrypy
from cherrypy.lib import file_generator
from cherrypy.lib import set_vary_header
def decode(encoding=None, default_encoding='utf-8'):
"""Replace or extend the list of charsets used to decode a request entity.
Either argument may be a single string or a list of strings.
encoding: If not None, restricts the set of charsets attempted while decoding
a request entity to the given set (even if a different charset is given in
the Content-Type request header).
default_encoding: Only in effect if the 'encoding' argument is not given.
If given, the set of charsets attempted while decoding a request entity is
*extended* with the given value(s).
"""
body = cherrypy.request.body
if encoding is not None:
if not isinstance(encoding, list):
encoding = [encoding]
body.attempt_charsets = encoding
elif default_encoding:
if not isinstance(default_encoding, list):
default_encoding = [default_encoding]
body.attempt_charsets = body.attempt_charsets + default_encoding
class ResponseEncoder:
default_encoding = 'utf-8'
failmsg = "Response body could not be encoded with %r."
encoding = None
errors = 'strict'
text_only = True
add_charset = True
debug = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.attempted_charsets = set()
request = cherrypy.serving.request
if request.handler is not None:
# Replace request.handler with self
if self.debug:
cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE')
self.oldhandler = request.handler
request.handler = self
def encode_stream(self, encoding):
"""Encode a streaming response body.
Use a generator wrapper, and just pray it works as the stream is
being written out.
"""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
def encoder(body):
for chunk in body:
if isinstance(chunk, unicode):
chunk = chunk.encode(encoding, self.errors)
yield chunk
self.body = encoder(self.body)
return True
def encode_string(self, encoding):
"""Encode a buffered response body."""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
try:
body = []
for chunk in self.body:
if isinstance(chunk, unicode):
chunk = chunk.encode(encoding, self.errors)
body.append(chunk)
self.body = body
except (LookupError, UnicodeError):
return False
else:
return True
def find_acceptable_charset(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
if self.debug:
cherrypy.log('response.stream %r' % response.stream, 'TOOLS.ENCODE')
if response.stream:
encoder = self.encode_stream
else:
encoder = self.encode_string
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
# Encoded strings may be of different lengths from their
# unicode equivalents, and even from each other. For example:
# >>> t = u"\u7007\u3040"
# >>> len(t)
# 2
# >>> len(t.encode("UTF-8"))
# 6
# >>> len(t.encode("utf7"))
# 8
del response.headers["Content-Length"]
# Parse the Accept-Charset request header, and try to provide one
# of the requested charsets (in order of user preference).
encs = request.headers.elements('Accept-Charset')
charsets = [enc.value.lower() for enc in encs]
if self.debug:
cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE')
if self.encoding is not None:
# If specified, force this encoding to be used, or fail.
encoding = self.encoding.lower()
if self.debug:
cherrypy.log('Specified encoding %r' % encoding, 'TOOLS.ENCODE')
if (not charsets) or "*" in charsets or encoding in charsets:
if self.debug:
cherrypy.log('Attempting encoding %r' % encoding, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
else:
if not encs:
if self.debug:
cherrypy.log('Attempting default encoding %r' %
self.default_encoding, 'TOOLS.ENCODE')
# Any character-set is acceptable.
if encoder(self.default_encoding):
return self.default_encoding
else:
raise cherrypy.HTTPError(500, self.failmsg % self.default_encoding)
else:
if "*" not in charsets:
# If no "*" is present in an Accept-Charset field, then all
# character sets not explicitly mentioned get a quality
# value of 0, except for ISO-8859-1, which gets a quality
# value of 1 if not explicitly mentioned.
iso = 'iso-8859-1'
if iso not in charsets:
if self.debug:
cherrypy.log('Attempting ISO-8859-1 encoding',
'TOOLS.ENCODE')
if encoder(iso):
return iso
for element in encs:
if element.qvalue > 0:
if element.value == "*":
# Matches any charset. Try our default.
if self.debug:
cherrypy.log('Attempting default encoding due '
'to %r' % element, 'TOOLS.ENCODE')
if encoder(self.default_encoding):
return self.default_encoding
else:
encoding = element.value
if self.debug:
cherrypy.log('Attempting encoding %r (qvalue >'
'0)' % element, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
# No suitable encoding found.
ac = request.headers.get('Accept-Charset')
if ac is None:
msg = "Your client did not send an Accept-Charset header."
else:
msg = "Your client sent this Accept-Charset header: %s." % ac
msg += " We tried these charsets: %s." % ", ".join(self.attempted_charsets)
raise cherrypy.HTTPError(406, msg)
def __call__(self, *args, **kwargs):
response = cherrypy.serving.response
self.body = self.oldhandler(*args, **kwargs)
if isinstance(self.body, basestring):
# strings get wrapped in a list because iterating over a single
# item list is much faster than iterating over every character
# in a long string.
if self.body:
self.body = [self.body]
else:
# [''] doesn't evaluate to False, so replace it with [].
self.body = []
elif isinstance(self.body, types.FileType):
self.body = file_generator(self.body)
elif self.body is None:
self.body = []
ct = response.headers.elements("Content-Type")
if self.debug:
cherrypy.log('Content-Type: %r' % ct, 'TOOLS.ENCODE')
if ct:
if self.text_only:
ct = ct[0]
if ct.value.lower().startswith("text/"):
if self.debug:
cherrypy.log('Content-Type %r starts with "text/"' % ct,
'TOOLS.ENCODE')
do_find = True
else:
if self.debug:
cherrypy.log('Not finding because Content-Type %r does '
'not start with "text/"' % ct,
'TOOLS.ENCODE')
do_find = False
else:
if self.debug:
cherrypy.log('Finding because not text_only', 'TOOLS.ENCODE')
do_find = True
if do_find:
# Set "charset=..." param on response Content-Type header
ct.params['charset'] = self.find_acceptable_charset()
if self.add_charset:
if self.debug:
cherrypy.log('Setting Content-Type %r' % ct,
'TOOLS.ENCODE')
response.headers["Content-Type"] = str(ct)
return self.body
# GZIP
def compress(body, compress_level):
"""Compress 'body' at the given compress_level."""
import zlib
# See http://www.gzip.org/zlib/rfc-gzip.html
yield '\x1f\x8b' # ID1 and ID2: gzip marker
yield '\x08' # CM: compression method
yield '\x00' # FLG: none set
# MTIME: 4 bytes
yield struct.pack("<L", int(time.time()) & 0xFFFFFFFFL)
yield '\x02' # XFL: max compression, slowest algo
yield '\xff' # OS: unknown
crc = zlib.crc32("")
size = 0
zobj = zlib.compressobj(compress_level,
zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
for line in body:
size += len(line)
crc = zlib.crc32(line, crc)
yield zobj.compress(line)
yield zobj.flush()
# CRC32: 4 bytes
yield struct.pack("<L", crc & 0xFFFFFFFFL)
# ISIZE: 4 bytes
yield struct.pack("<L", size & 0xFFFFFFFFL)
def decompress(body):
import gzip
zbuf = StringIO()
zbuf.write(body)
zbuf.seek(0)
zfile = gzip.GzipFile(mode='rb', fileobj=zbuf)
data = zfile.read()
zfile.close()
return data
def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False):
"""Try to gzip the response body if Content-Type in mime_types.
cherrypy.response.headers['Content-Type'] must be set to one of the
values in the mime_types arg before calling this function.
No compression is performed if any of the following hold:
* The client sends no Accept-Encoding request header
* No 'gzip' or 'x-gzip' is present in the Accept-Encoding header
* No 'gzip' or 'x-gzip' with a qvalue > 0 is present
* The 'identity' value is given with a qvalue > 0.
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
set_vary_header(response, "Accept-Encoding")
if not response.body:
# Response body is empty (might be a 304 for instance)
if debug:
cherrypy.log('No response body', context='TOOLS.GZIP')
return
# If returning cached content (which should already have been gzipped),
# don't re-zip.
if getattr(request, "cached", False):
if debug:
cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP')
return
acceptable = request.headers.elements('Accept-Encoding')
if not acceptable:
# If no Accept-Encoding field is present in a request,
# the server MAY assume that the client will accept any
# content coding. In this case, if "identity" is one of
# the available content-codings, then the server SHOULD use
# the "identity" content-coding, unless it has additional
# information that a different content-coding is meaningful
# to the client.
if debug:
cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP')
return
ct = response.headers.get('Content-Type', '').split(';')[0]
for coding in acceptable:
if coding.value == 'identity' and coding.qvalue != 0:
if debug:
cherrypy.log('Non-zero identity qvalue: %r' % coding,
context='TOOLS.GZIP')
return
if coding.value in ('gzip', 'x-gzip'):
if coding.qvalue == 0:
if debug:
cherrypy.log('Zero gzip qvalue: %r' % coding,
context='TOOLS.GZIP')
return
if ct not in mime_types:
if debug:
cherrypy.log('Content-Type %r not in mime_types %r' %
(ct, mime_types), context='TOOLS.GZIP')
return
if debug:
cherrypy.log('Gzipping', context='TOOLS.GZIP')
# Return a generator that compresses the page
response.headers['Content-Encoding'] = 'gzip'
response.body = compress(response.body, compress_level)
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
return
if debug:
cherrypy.log('No acceptable encoding found.', context='GZIP')
cherrypy.HTTPError(406, "identity, gzip").set_response()
| 14,444 | Python | .py | 314 | 31.296178 | 88 | 0.545151 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,623 | jsontools.py | midgetspy_Sick-Beard/cherrypy/lib/jsontools.py | import sys
import cherrypy
if sys.version_info >= (2, 6):
# Python 2.6: simplejson is part of the standard library
import json
else:
try:
import simplejson as json
except ImportError:
json = None
if json is None:
def json_decode(s):
raise ValueError('No JSON library is available')
def json_encode(s):
raise ValueError('No JSON library is available')
else:
json_decode = json.JSONDecoder().decode
json_encode = json.JSONEncoder().iterencode
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
"""Read application/json data into request.json."""
if not entity.headers.get(u"Content-Length", u""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors[u'application/json'] = json_processor
def json_out(debug=False):
request = cherrypy.serving.request
response = cherrypy.serving.response
real_handler = request.handler
def json_handler(*args, **kwargs):
response.headers['Content-Type'] = 'application/json'
value = real_handler(*args, **kwargs)
return json_encode(value)
request.handler = json_handler
| 1,574 | Python | .py | 43 | 29.860465 | 66 | 0.675926 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,624 | auth.py | midgetspy_Sick-Beard/cherrypy/lib/auth.py | import cherrypy
from cherrypy.lib import httpauth
def check_auth(users, encrypt=None, realm=None):
"""If an authorization header contains credentials, return True, else False."""
request = cherrypy.serving.request
if 'authorization' in request.headers:
# make sure the provided credentials are correctly set
ah = httpauth.parseAuthorization(request.headers['authorization'])
if ah is None:
raise cherrypy.HTTPError(400, 'Bad Request')
if not encrypt:
encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5]
if hasattr(users, '__call__'):
try:
# backward compatibility
users = users() # expect it to return a dictionary
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
except TypeError:
# returns a password (encrypted or clear text)
password = users(ah["username"])
else:
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
# validate the authorization by re-computing it here
# and compare it with what the user-agent provided
if httpauth.checkResponse(ah, password, method=request.method,
encrypt=encrypt, realm=realm):
request.login = ah["username"]
return True
request.login = False
return False
def basic_auth(realm, users, encrypt=None, debug=False):
"""If auth fails, raise 401 with a basic authentication header.
realm: a string containing the authentication realm.
users: a dict of the form: {username: password} or a callable returning a dict.
encrypt: callable used to encrypt the password returned from the user-agent.
if None it defaults to a md5 encryption.
"""
if check_auth(users, encrypt):
if debug:
cherrypy.log('Auth successful', 'TOOLS.BASIC_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.basicAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
def digest_auth(realm, users, debug=False):
"""If auth fails, raise 401 with a digest authentication header.
realm: a string containing the authentication realm.
users: a dict of the form: {username: password} or a callable returning a dict.
"""
if check_auth(users, realm=realm):
if debug:
cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.digestAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
| 3,288 | Python | .py | 62 | 40.435484 | 87 | 0.64586 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,625 | __init__.py | midgetspy_Sick-Beard/cherrypy/lib/__init__.py | """CherryPy Library"""
# Deprecated in CherryPy 3.2 -- remove in CherryPy 3.3
from cherrypy.lib.reprconf import _Builder, unrepr, modules, attributes
class file_generator(object):
"""Yield the given input (a file object) in chunks (default 64k). (Core)"""
def __init__(self, input, chunkSize=65536):
self.input = input
self.chunkSize = chunkSize
def __iter__(self):
return self
def __next__(self):
chunk = self.input.read(self.chunkSize)
if chunk:
return chunk
else:
self.input.close()
raise StopIteration()
next = __next__
def file_generator_limited(fileobj, count, chunk_size=65536):
"""Yield the given file object in chunks, stopping after `count`
bytes has been emitted. Default chunk size is 64kB. (Core)
"""
remaining = count
while remaining > 0:
chunk = fileobj.read(min(chunk_size, remaining))
chunklen = len(chunk)
if chunklen == 0:
return
remaining -= chunklen
yield chunk
def set_vary_header(response, header_name):
"Add a Vary header to a response"
varies = response.headers.get("Vary", "")
varies = [x.strip() for x in varies.split(",") if x.strip()]
if header_name not in varies:
varies.append(header_name)
response.headers['Vary'] = ", ".join(varies)
| 1,434 | Python | .py | 37 | 30.540541 | 80 | 0.617797 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,626 | static.py | midgetspy_Sick-Beard/cherrypy/lib/static.py | import logging
import mimetypes
mimetypes.init()
mimetypes.types_map['.dwg'] = 'image/x-dwg'
mimetypes.types_map['.ico'] = 'image/x-icon'
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
mimetypes.types_map['.gz'] = 'application/x-gzip'
import os
import re
import stat
import time
from urllib import unquote
import cherrypy
from cherrypy.lib import cptools, httputil, file_generator_limited
def serve_file(path, content_type=None, disposition=None, name=None, debug=False):
"""Set status, headers, and body in order to serve the given path.
The Content-Type header will be set to the content_type arg, if provided.
If not provided, the Content-Type will be guessed by the file extension
of the 'path' argument.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, it will be set
to the basename of path. If disposition is None, no Content-Disposition
header will be written.
"""
response = cherrypy.serving.response
# If path is relative, users should fix it by making path absolute.
# That is, CherryPy should not guess where the application root is.
# It certainly should *not* use cwd (since CP may be invoked from a
# variety of paths). If using tools.staticdir, you can make your relative
# paths become absolute by supplying a value for "tools.staticdir.root".
if not os.path.isabs(path):
msg = "'%s' is not an absolute path." % path
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
try:
st = os.stat(path)
except OSError:
if debug:
cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Check if path is a directory.
if stat.S_ISDIR(st.st_mode):
# Let the caller deal with it as they like.
if debug:
cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
if content_type is None:
# Set content-type based on filename extension
ext = ""
i = path.rfind('.')
if i != -1:
ext = path[i:].lower()
content_type = mimetypes.types_map.get(ext, None)
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
name = os.path.basename(path)
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
content_length = st.st_size
fileobj = open(path, 'rb')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
debug=False):
"""Set status, headers, and body in order to serve the given file object.
The Content-Type header will be set to the content_type arg, if provided.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, 'filename' will
not be set. If disposition is None, no Content-Disposition header will
be written.
CAUTION: If the request contains a 'Range' header, one or more seek()s will
be performed on the file object. This may cause undesired behavior if
the file object is not seekable. It could also produce undesired results
if the caller set the read position of the file object prior to calling
serve_fileobj(), expecting that the data would be served starting from that
position.
"""
response = cherrypy.serving.response
try:
st = os.fstat(fileobj.fileno())
except AttributeError:
if debug:
cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
content_length = None
else:
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
content_length = st.st_size
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
cd = disposition
else:
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug=False):
"""Internal. Set response.body to the given file object, perhaps ranged."""
response = cherrypy.serving.response
# HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
request = cherrypy.serving.request
if request.protocol >= (1, 1):
response.headers["Accept-Ranges"] = "bytes"
r = httputil.get_ranges(request.headers.get('Range'), content_length)
if r == []:
response.headers['Content-Range'] = "bytes */%s" % content_length
message = "Invalid Range (first-byte-pos greater than Content-Length)"
if debug:
cherrypy.log(message, 'TOOLS.STATIC')
raise cherrypy.HTTPError(416, message)
if r:
if len(r) == 1:
# Return a single-part response.
start, stop = r[0]
if stop > content_length:
stop = content_length
r_len = stop - start
if debug:
cherrypy.log('Single part; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
response.status = "206 Partial Content"
response.headers['Content-Range'] = (
"bytes %s-%s/%s" % (start, stop - 1, content_length))
response.headers['Content-Length'] = r_len
fileobj.seek(start)
response.body = file_generator_limited(fileobj, r_len)
else:
# Return a multipart/byteranges response.
response.status = "206 Partial Content"
import mimetools
boundary = mimetools.choose_boundary()
ct = "multipart/byteranges; boundary=%s" % boundary
response.headers['Content-Type'] = ct
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
def file_ranges():
# Apache compatibility:
yield "\r\n"
for start, stop in r:
if debug:
cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
yield "--" + boundary
yield "\r\nContent-type: %s" % content_type
yield ("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
% (start, stop - 1, content_length))
fileobj.seek(start)
for chunk in file_generator_limited(fileobj, stop - start):
yield chunk
yield "\r\n"
# Final boundary
yield "--" + boundary + "--"
# Apache compatibility:
yield "\r\n"
response.body = file_ranges()
return response.body
else:
if debug:
cherrypy.log('No byteranges requested', 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
response.headers['Content-Length'] = content_length
response.body = fileobj
return response.body
def serve_download(path, name=None):
"""Serve 'path' as an application/x-download attachment."""
# This is such a common idiom I felt it deserved its own wrapper.
return serve_file(path, "application/x-download", "attachment", name)
def _attempt(filename, content_types, debug=False):
if debug:
cherrypy.log('Attempting %r (content_types %r)' %
(filename, content_types), 'TOOLS.STATICDIR')
try:
# you can set the content types for a
# complete directory per extension
content_type = None
if content_types:
r, ext = os.path.splitext(filename)
content_type = content_types.get(ext[1:], None)
serve_file(filename, content_type=content_type, debug=debug)
return True
except cherrypy.NotFound:
# If we didn't find the static file, continue handling the
# request. We might find a dynamic handler instead.
if debug:
cherrypy.log('NotFound', 'TOOLS.STATICFILE')
return False
def staticdir(section, dir, root="", match="", content_types=None, index="",
debug=False):
"""Serve a static resource from the given (root +) dir.
If 'match' is given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
If content_types is given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
If 'index' is provided, it should be the (relative) name of a file to
serve for directory requests. For example, if the dir argument is
'/home/me', the Request-URI is 'myapp', and the index arg is
'index.html', the file '/home/me/myapp/index.html' will be sought.
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICDIR')
return False
# Allow the use of '~' to refer to a user's home directory.
dir = os.path.expanduser(dir)
# If dir is relative, make absolute using "root".
if not os.path.isabs(dir):
if not root:
msg = "Static dir requires an absolute dir (or root)."
if debug:
cherrypy.log(msg, 'TOOLS.STATICDIR')
raise ValueError(msg)
dir = os.path.join(root, dir)
# Determine where we are in the object tree relative to 'section'
# (where the static tool was defined).
if section == 'global':
section = "/"
section = section.rstrip(r"\/")
branch = request.path_info[len(section) + 1:]
branch = unquote(branch.lstrip(r"\/"))
# If branch is "", filename will end in a slash
filename = os.path.join(dir, branch)
if debug:
cherrypy.log('Checking file %r to fulfill %r' %
(filename, request.path_info), 'TOOLS.STATICDIR')
# There's a chance that the branch pulled from the URL might
# have ".." or similar uplevel attacks in it. Check that the final
# filename is a child of dir.
if not os.path.normpath(filename).startswith(os.path.normpath(dir)):
raise cherrypy.HTTPError(403) # Forbidden
handled = _attempt(filename, content_types)
if not handled:
# Check for an index file if a folder was requested.
if index:
handled = _attempt(os.path.join(filename, index), content_types)
if handled:
request.is_index = filename[-1] in (r"\/")
return handled
def staticfile(filename, root=None, match="", content_types=None, debug=False):
"""Serve a static resource from the given (root +) filename.
If 'match' is given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
If content_types is given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICFILE')
return False
# If filename is relative, make absolute using "root".
if not os.path.isabs(filename):
if not root:
msg = "Static tool requires an absolute filename (got '%s')." % filename
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
filename = os.path.join(root, filename)
return _attempt(filename, content_types, debug=debug)
| 14,524 | Python | .py | 296 | 37.685811 | 91 | 0.60946 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,627 | cptools.py | midgetspy_Sick-Beard/cherrypy/lib/cptools.py | """Functions for builtin CherryPy tools."""
import logging
try:
# Python 2.5+
from hashlib import md5
except ImportError:
from md5 import new as md5
import re
try:
set
except NameError:
from sets import Set as set
import cherrypy
from cherrypy.lib import httputil as _httputil
# Conditional HTTP request support #
def validate_etags(autotags=False, debug=False):
"""Validate the current ETag against If-Match, If-None-Match headers.
If autotags is True, an ETag response-header value will be provided
from an MD5 hash of the response body (unless some other code has
already provided an ETag header). If False (the default), the ETag
will not be automatic.
WARNING: the autotags feature is not designed for URL's which allow
methods other than GET. For example, if a POST to the same URL returns
no content, the automatic ETag will be incorrect, breaking a fundamental
use for entity tags in a possibly destructive fashion. Likewise, if you
raise 304 Not Modified, the response body will be empty, the ETag hash
will be incorrect, and your application will break.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
"""
response = cherrypy.serving.response
# Guard against being run twice.
if hasattr(response, "ETag"):
return
status, reason, msg = _httputil.valid_status(response.status)
etag = response.headers.get('ETag')
# Automatic ETag generation. See warning in docstring.
if etag:
if debug:
cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS')
elif not autotags:
if debug:
cherrypy.log('Autotags off', 'TOOLS.ETAGS')
elif status != 200:
if debug:
cherrypy.log('Status not 200', 'TOOLS.ETAGS')
else:
etag = response.collapse_body()
etag = '"%s"' % md5(etag).hexdigest()
if debug:
cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS')
response.headers['ETag'] = etag
response.ETag = etag
# "If the request would, without the If-Match header field, result in
# anything other than a 2xx or 412 status, then the If-Match header
# MUST be ignored."
if debug:
cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS')
if status >= 200 and status <= 299:
request = cherrypy.serving.request
conditions = request.headers.elements('If-Match') or []
conditions = [str(x) for x in conditions]
if debug:
cherrypy.log('If-Match conditions: %s' % repr(conditions),
'TOOLS.ETAGS')
if conditions and not (conditions == ["*"] or etag in conditions):
raise cherrypy.HTTPError(412, "If-Match failed: ETag %r did "
"not match %r" % (etag, conditions))
conditions = request.headers.elements('If-None-Match') or []
conditions = [str(x) for x in conditions]
if debug:
cherrypy.log('If-None-Match conditions: %s' % repr(conditions),
'TOOLS.ETAGS')
if conditions == ["*"] or etag in conditions:
if debug:
cherrypy.log('request.method: %s' % request.method, 'TOOLS.ETAGS')
if request.method in ("GET", "HEAD"):
raise cherrypy.HTTPRedirect([], 304)
else:
raise cherrypy.HTTPError(412, "If-None-Match failed: ETag %r "
"matched %r" % (etag, conditions))
def validate_since():
"""Validate the current Last-Modified against If-Modified-Since headers.
If no code has set the Last-Modified response header, then no validation
will be performed.
"""
response = cherrypy.serving.response
lastmod = response.headers.get('Last-Modified')
if lastmod:
status, reason, msg = _httputil.valid_status(response.status)
request = cherrypy.serving.request
since = request.headers.get('If-Unmodified-Since')
if since and since != lastmod:
if (status >= 200 and status <= 299) or status == 412:
raise cherrypy.HTTPError(412)
since = request.headers.get('If-Modified-Since')
if since and since == lastmod:
if (status >= 200 and status <= 299) or status == 304:
if request.method in ("GET", "HEAD"):
raise cherrypy.HTTPRedirect([], 304)
else:
raise cherrypy.HTTPError(412)
# Tool code #
def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
scheme='X-Forwarded-Proto', debug=False):
"""Change the base URL (scheme://host[:port][/path]).
For running a CP server behind Apache, lighttpd, or other HTTP server.
If you want the new request.base to include path info (not just the host),
you must explicitly set base to the full base path, and ALSO set 'local'
to '', so that the X-Forwarded-Host request header (which never includes
path info) does not override it. Regardless, the value for 'base' MUST
NOT end in a slash.
cherrypy.request.remote.ip (the IP address of the client) will be
rewritten if the header specified by the 'remote' arg is valid.
By default, 'remote' is set to 'X-Forwarded-For'. If you do not
want to rewrite remote.ip, set the 'remote' arg to an empty string.
"""
request = cherrypy.serving.request
if scheme:
s = request.headers.get(scheme, None)
if debug:
cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')
if s == 'on' and 'ssl' in scheme.lower():
# This handles e.g. webfaction's 'X-Forwarded-Ssl: on' header
scheme = 'https'
else:
# This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https'
scheme = s
if not scheme:
scheme = request.base[:request.base.find("://")]
if local:
lbase = request.headers.get(local, None)
if debug:
cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')
if lbase is not None:
base = lbase.split(',')[0]
if not base:
port = request.local.port
if port == 80:
base = '127.0.0.1'
else:
base = '127.0.0.1:%s' % port
if base.find("://") == -1:
# add http:// or https:// if needed
base = scheme + "://" + base
request.base = base
if remote:
xff = request.headers.get(remote)
if debug:
cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')
if xff:
if remote == 'X-Forwarded-For':
# See http://bob.pythonmac.org/archives/2005/09/23/apache-x-forwarded-for-caveat/
xff = xff.split(',')[-1].strip()
request.remote.ip = xff
def ignore_headers(headers=('Range',), debug=False):
"""Delete request headers whose field names are included in 'headers'.
This is a useful tool for working behind certain HTTP servers;
for example, Apache duplicates the work that CP does for 'Range'
headers, and will doubly-truncate the response.
"""
request = cherrypy.serving.request
for name in headers:
if name in request.headers:
if debug:
cherrypy.log('Ignoring request header %r' % name,
'TOOLS.IGNORE_HEADERS')
del request.headers[name]
def response_headers(headers=None, debug=False):
"""Set headers on the response."""
if debug:
cherrypy.log('Setting response headers: %s' % repr(headers),
'TOOLS.RESPONSE_HEADERS')
for name, value in (headers or []):
cherrypy.serving.response.headers[name] = value
response_headers.failsafe = True
def referer(pattern, accept=True, accept_missing=False, error=403,
message='Forbidden Referer header.', debug=False):
"""Raise HTTPError if Referer header does/does not match the given pattern.
pattern: a regular expression pattern to test against the Referer.
accept: if True, the Referer must match the pattern; if False,
the Referer must NOT match the pattern.
accept_missing: if True, permit requests with no Referer header.
error: the HTTP error code to return to the client on failure.
message: a string to include in the response body on failure.
"""
try:
ref = cherrypy.serving.request.headers['Referer']
match = bool(re.match(pattern, ref))
if debug:
cherrypy.log('Referer %r matches %r' % (ref, pattern),
'TOOLS.REFERER')
if accept == match:
return
except KeyError:
if debug:
cherrypy.log('No Referer header', 'TOOLS.REFERER')
if accept_missing:
return
raise cherrypy.HTTPError(error, message)
class SessionAuth(object):
"""Assert that the user is logged in."""
session_key = "username"
debug = False
def check_username_and_password(self, username, password):
pass
def anonymous(self):
"""Provide a temporary user name for anonymous users."""
pass
def on_login(self, username):
pass
def on_logout(self, username):
pass
def on_check(self, username):
pass
def login_screen(self, from_page='..', username='', error_msg='', **kwargs):
return """<html><body>
Message: %(error_msg)s
<form method="post" action="do_login">
Login: <input type="text" name="username" value="%(username)s" size="10" /><br />
Password: <input type="password" name="password" size="10" /><br />
<input type="hidden" name="from_page" value="%(from_page)s" /><br />
<input type="submit" />
</form>
</body></html>""" % {'from_page': from_page, 'username': username,
'error_msg': error_msg}
def do_login(self, username, password, from_page='..', **kwargs):
"""Login. May raise redirect, or return True if request handled."""
response = cherrypy.serving.response
error_msg = self.check_username_and_password(username, password)
if error_msg:
body = self.login_screen(from_page, username, error_msg)
response.body = body
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
return True
else:
cherrypy.serving.request.login = username
cherrypy.session[self.session_key] = username
self.on_login(username)
raise cherrypy.HTTPRedirect(from_page or "/")
def do_logout(self, from_page='..', **kwargs):
"""Logout. May raise redirect, or return True if request handled."""
sess = cherrypy.session
username = sess.get(self.session_key)
sess[self.session_key] = None
if username:
cherrypy.serving.request.login = None
self.on_logout(username)
raise cherrypy.HTTPRedirect(from_page)
def do_check(self):
"""Assert username. May raise redirect, or return True if request handled."""
sess = cherrypy.session
request = cherrypy.serving.request
response = cherrypy.serving.response
username = sess.get(self.session_key)
if not username:
sess[self.session_key] = username = self.anonymous()
if self.debug:
cherrypy.log('No session[username], trying anonymous', 'TOOLS.SESSAUTH')
if not username:
url = cherrypy.url(qs=request.query_string)
if self.debug:
cherrypy.log('No username, routing to login_screen with '
'from_page %r' % url, 'TOOLS.SESSAUTH')
response.body = self.login_screen(url)
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
return True
if self.debug:
cherrypy.log('Setting request.login to %r' % username, 'TOOLS.SESSAUTH')
request.login = username
self.on_check(username)
def run(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
path = request.path_info
if path.endswith('login_screen'):
if self.debug:
cherrypy.log('routing %r to login_screen' % path, 'TOOLS.SESSAUTH')
return self.login_screen(**request.params)
elif path.endswith('do_login'):
if request.method != 'POST':
response.headers['Allow'] = "POST"
if self.debug:
cherrypy.log('do_login requires POST', 'TOOLS.SESSAUTH')
raise cherrypy.HTTPError(405)
if self.debug:
cherrypy.log('routing %r to do_login' % path, 'TOOLS.SESSAUTH')
return self.do_login(**request.params)
elif path.endswith('do_logout'):
if request.method != 'POST':
response.headers['Allow'] = "POST"
raise cherrypy.HTTPError(405)
if self.debug:
cherrypy.log('routing %r to do_logout' % path, 'TOOLS.SESSAUTH')
return self.do_logout(**request.params)
else:
if self.debug:
cherrypy.log('No special path, running do_check', 'TOOLS.SESSAUTH')
return self.do_check()
def session_auth(**kwargs):
sa = SessionAuth()
for k, v in kwargs.items():
setattr(sa, k, v)
return sa.run()
session_auth.__doc__ = """Session authentication hook.
Any attribute of the SessionAuth class may be overridden via a keyword arg
to this function:
""" + "\n".join(["%s: %s" % (k, type(getattr(SessionAuth, k)).__name__)
for k in dir(SessionAuth) if not k.startswith("__")])
def log_traceback(severity=logging.ERROR, debug=False):
"""Write the last error's traceback to the cherrypy error log."""
cherrypy.log("", "HTTP", severity=severity, traceback=True)
def log_request_headers(debug=False):
"""Write request headers to the cherrypy error log."""
h = [" %s: %s" % (k, v) for k, v in cherrypy.serving.request.header_list]
cherrypy.log('\nRequest Headers:\n' + '\n'.join(h), "HTTP")
def log_hooks(debug=False):
"""Write request.hooks to the cherrypy error log."""
request = cherrypy.serving.request
msg = []
# Sort by the standard points if possible.
from cherrypy import _cprequest
points = _cprequest.hookpoints
for k in request.hooks.keys():
if k not in points:
points.append(k)
for k in points:
msg.append(" %s:" % k)
v = request.hooks.get(k, [])
v.sort()
for h in v:
msg.append(" %r" % h)
cherrypy.log('\nRequest Hooks for ' + cherrypy.url() +
':\n' + '\n'.join(msg), "HTTP")
def redirect(url='', internal=True, debug=False):
"""Raise InternalRedirect or HTTPRedirect to the given url."""
if debug:
cherrypy.log('Redirecting %sto: %s' %
({True: 'internal ', False: ''}[internal], url),
'TOOLS.REDIRECT')
if internal:
raise cherrypy.InternalRedirect(url)
else:
raise cherrypy.HTTPRedirect(url)
def trailing_slash(missing=True, extra=False, status=None, debug=False):
"""Redirect if path_info has (missing|extra) trailing slash."""
request = cherrypy.serving.request
pi = request.path_info
if debug:
cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' %
(request.is_index, missing, extra, pi),
'TOOLS.TRAILING_SLASH')
if request.is_index is True:
if missing:
if not pi.endswith('/'):
new_url = cherrypy.url(pi + '/', request.query_string)
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
elif request.is_index is False:
if extra:
# If pi == '/', don't redirect to ''!
if pi.endswith('/') and pi != '/':
new_url = cherrypy.url(pi[:-1], request.query_string)
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
def flatten(debug=False):
"""Wrap response.body in a generator that recursively iterates over body.
This allows cherrypy.response.body to consist of 'nested generators';
that is, a set of generators that yield generators.
"""
import types
def flattener(input):
numchunks = 0
for x in input:
if not isinstance(x, types.GeneratorType):
numchunks += 1
yield x
else:
for y in flattener(x):
numchunks += 1
yield y
if debug:
cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')
response = cherrypy.serving.response
response.body = flattener(response.body)
def accept(media=None, debug=False):
"""Return the client's preferred media-type (from the given Content-Types).
If 'media' is None (the default), no test will be performed.
If 'media' is provided, it should be the Content-Type value (as a string)
or values (as a list or tuple of strings) which the current resource
can emit. The client's acceptable media ranges (as declared in the
Accept request header) will be matched in order to these Content-Type
values; the first such string is returned. That is, the return value
will always be one of the strings provided in the 'media' arg (or None
if 'media' is None).
If no match is found, then HTTPError 406 (Not Acceptable) is raised.
Note that most web browsers send */* as a (low-quality) acceptable
media range, which should match any Content-Type. In addition, "...if
no Accept header field is present, then it is assumed that the client
accepts all media types."
Matching types are checked in order of client preference first,
and then in the order of the given 'media' values.
Note that this function does not honor accept-params (other than "q").
"""
if not media:
return
if isinstance(media, basestring):
media = [media]
request = cherrypy.serving.request
# Parse the Accept request header, and try to match one
# of the requested media-ranges (in order of preference).
ranges = request.headers.elements('Accept')
if not ranges:
# Any media type is acceptable.
if debug:
cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT')
return media[0]
else:
# Note that 'ranges' is sorted in order of preference
for element in ranges:
if element.qvalue > 0:
if element.value == "*/*":
# Matches any type or subtype
if debug:
cherrypy.log('Match due to */*', 'TOOLS.ACCEPT')
return media[0]
elif element.value.endswith("/*"):
# Matches any subtype
mtype = element.value[:-1] # Keep the slash
for m in media:
if m.startswith(mtype):
if debug:
cherrypy.log('Match due to %s' % element.value,
'TOOLS.ACCEPT')
return m
else:
# Matches exact value
if element.value in media:
if debug:
cherrypy.log('Match due to %s' % element.value,
'TOOLS.ACCEPT')
return element.value
# No suitable media-range found.
ah = request.headers.get('Accept')
if ah is None:
msg = "Your client did not send an Accept header."
else:
msg = "Your client sent this Accept header: %s." % ah
msg += (" But this resource only emits these media types: %s." %
", ".join(media))
raise cherrypy.HTTPError(406, msg)
class MonitoredHeaderMap(_httputil.HeaderMap):
def __init__(self):
self.accessed_headers = set()
def __getitem__(self, key):
self.accessed_headers.add(key)
return _httputil.HeaderMap.__getitem__(self, key)
def __contains__(self, key):
self.accessed_headers.add(key)
return _httputil.HeaderMap.__contains__(self, key)
def get(self, key, default=None):
self.accessed_headers.add(key)
return _httputil.HeaderMap.get(self, key, default=default)
def has_key(self, key):
self.accessed_headers.add(key)
return _httputil.HeaderMap.has_key(self, key)
def autovary(ignore=None, debug=False):
"""Auto-populate the Vary response header based on request.header access."""
request = cherrypy.serving.request
req_h = request.headers
request.headers = MonitoredHeaderMap()
request.headers.update(req_h)
if ignore is None:
ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])
def set_response_header():
resp_h = cherrypy.serving.response.headers
v = set([e.value for e in resp_h.elements('Vary')])
if debug:
cherrypy.log('Accessed headers: %s' % request.headers.accessed_headers,
'TOOLS.AUTOVARY')
v = v.union(request.headers.accessed_headers)
v = v.difference(ignore)
v = list(v)
v.sort()
resp_h['Vary'] = ', '.join(v)
request.hooks.attach('before_finalize', set_response_header, 95)
| 22,834 | Python | .py | 487 | 35.577002 | 98 | 0.593928 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,628 | caching.py | midgetspy_Sick-Beard/cherrypy/lib/caching.py | import datetime
import threading
import time
import cherrypy
from cherrypy.lib import cptools, httputil
class Cache(object):
def get(self):
raise NotImplemented
def put(self, obj, size):
raise NotImplemented
def delete(self):
raise NotImplemented
def clear(self):
raise NotImplemented
# ------------------------------- Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
def wait(self, key, timeout=5, debug=False):
"""Return the cached value for the given key, or None.
If timeout is not None (the default), and the value is already
being calculated by another thread, wait until the given timeout has
elapsed. If the value is available before the timeout expires, it is
returned. If not, None is returned, and a sentinel placed in the cache
to signal other threads to wait.
If timeout is None, no waiting is performed nor sentinels used.
"""
value = self.get(key)
if isinstance(value, threading._Event):
if timeout is None:
# Ignore the other thread and recalc it ourselves.
if debug:
cherrypy.log('No timeout', 'TOOLS.CACHING')
return None
# Wait until it's done or times out.
if debug:
cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING')
value.wait(timeout)
if value.result is not None:
# The other thread finished its calculation. Use it.
if debug:
cherrypy.log('Result!', 'TOOLS.CACHING')
return value.result
# Timed out. Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return None
elif value is None:
# Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return value
def __setitem__(self, key, value):
"""Set the cached value for the given key."""
existing = self.get(key)
dict.__setitem__(self, key, value)
if isinstance(existing, threading._Event):
# Set Event.result so other threads waiting on it have
# immediate access without needing to poll the cache again.
existing.result = value
existing.set()
class MemoryCache(Cache):
"""An in-memory cache for varying response content.
Each key in self.store is a URI, and each value is an AntiStampedeCache.
The response for any given URI may vary based on the values of
"selecting request headers"; that is, those named in the Vary
response header. We assume the list of header names to be constant
for each URI throughout the lifetime of the application, and store
that list in self.store[uri].selecting_headers.
The items contained in self.store[uri] have keys which are tuples of request
header values (in the same order as the names in its selecting_headers),
and values which are the actual responses.
"""
maxobjects = 1000
maxobj_size = 100000
maxsize = 10000000
delay = 600
antistampede_timeout = 5
expire_freq = 0.1
debug = False
def __init__(self):
self.clear()
# Run self.expire_cache in a separate daemon thread.
t = threading.Thread(target=self.expire_cache, name='expire_cache')
self.expiration_thread = t
if hasattr(threading.Thread, "daemon"):
# Python 2.6+
t.daemon = True
else:
t.setDaemon(True)
t.start()
def clear(self):
"""Reset the cache to its initial, empty state."""
self.store = {}
self.expirations = {}
self.tot_puts = 0
self.tot_gets = 0
self.tot_hist = 0
self.tot_expires = 0
self.tot_non_modified = 0
self.cursize = 0
def expire_cache(self):
# expire_cache runs in a separate thread which the servers are
# not aware of. It's possible that "time" will be set to None
# arbitrarily, so we check "while time" to avoid exceptions.
# See tickets #99 and #180 for more information.
while time:
now = time.time()
# Must make a copy of expirations so it doesn't change size
# during iteration
for expiration_time, objects in self.expirations.items():
if expiration_time <= now:
for obj_size, uri, sel_header_values in objects:
try:
del self.store[uri][sel_header_values]
self.tot_expires += 1
self.cursize -= obj_size
except KeyError:
# the key may have been deleted elsewhere
pass
del self.expirations[expiration_time]
time.sleep(self.expire_freq)
def get(self):
"""Return the current variant if in the cache, else None."""
request = cherrypy.serving.request
self.tot_gets += 1
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
return None
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
header_values.sort()
variant = uricache.wait(key=tuple(header_values),
timeout=self.antistampede_timeout,
debug=self.debug)
if variant is not None:
self.tot_hist += 1
return variant
def put(self, variant, size):
"""Store the current variant in the cache."""
request = cherrypy.serving.request
response = cherrypy.serving.response
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
uricache = AntiStampedeCache()
uricache.selecting_headers = [
e.value for e in response.headers.elements('Vary')]
self.store[uri] = uricache
if len(self.store) < self.maxobjects:
total_size = self.cursize + size
# checks if there's space for the object
if (size < self.maxobj_size and total_size < self.maxsize):
# add to the expirations list
expiration_time = response.time + self.delay
bucket = self.expirations.setdefault(expiration_time, [])
bucket.append((size, uri, uricache.selecting_headers))
# add to the cache
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
header_values.sort()
uricache[tuple(header_values)] = variant
self.tot_puts += 1
self.cursize = total_size
def delete(self):
"""Remove ALL cached variants of the current resource."""
uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
self.store.pop(uri, None)
def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
If POST, PUT, or DELETE:
* invalidates (deletes) any cached response for this resource
* sets request.cached = False
* sets request.cacheable = False
else if a cached copy exists:
* sets request.cached = True
* sets request.cacheable = False
* sets response.headers to the cached values
* checks the cached Last-Modified response header against the
current If-(Un)Modified-Since request headers; raises 304
if necessary.
* sets response.status and response.body to the cached values
* returns True
otherwise:
* sets request.cached = False
* sets request.cacheable = True
* returns False
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
if not hasattr(cherrypy, "_cache"):
# Make a process-wide Cache object.
cherrypy._cache = kwargs.pop("cache_class", MemoryCache)()
# Take all remaining kwargs and set them on the Cache object.
for k, v in kwargs.items():
setattr(cherrypy._cache, k, v)
cherrypy._cache.debug = debug
# POST, PUT, DELETE should invalidate (delete) the cached copy.
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
if request.method in invalid_methods:
if debug:
cherrypy.log('request.method %r in invalid_methods %r' %
(request.method, invalid_methods), 'TOOLS.CACHING')
cherrypy._cache.delete()
request.cached = False
request.cacheable = False
return False
if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
request.cached = False
request.cacheable = True
return False
cache_data = cherrypy._cache.get()
request.cached = bool(cache_data)
request.cacheable = not request.cached
if request.cached:
# Serve the cached copy.
max_age = cherrypy._cache.delay
for v in [e.value for e in request.headers.elements('Cache-Control')]:
atoms = v.split('=', 1)
directive = atoms.pop(0)
if directive == 'max-age':
if len(atoms) != 1 or not atoms[0].isdigit():
raise cherrypy.HTTPError(400, "Invalid Cache-Control header")
max_age = int(atoms[0])
break
elif directive == 'no-cache':
if debug:
cherrypy.log('Ignoring cache due to Cache-Control: no-cache',
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
if debug:
cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
s, h, b, create_time = cache_data
age = int(response.time - create_time)
if (age > max_age):
if debug:
cherrypy.log('Ignoring cache due to age > %d' % max_age,
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
# Copy the response headers. See http://www.cherrypy.org/ticket/721.
response.headers = rh = httputil.HeaderMap()
for k in h:
dict.__setitem__(rh, k, dict.__getitem__(h, k))
# Add the required Age header
response.headers["Age"] = str(age)
try:
# Note that validate_since depends on a Last-Modified header;
# this was put into the cached copy, and should have been
# resurrected just above (response.headers = cache_data[1]).
cptools.validate_since()
except cherrypy.HTTPRedirect, x:
if x.status == 304:
cherrypy._cache.tot_non_modified += 1
raise
# serve it & get out from the request
response.status = s
response.body = b
else:
if debug:
cherrypy.log('request is not cached', 'TOOLS.CACHING')
return request.cached
def tee_output():
request = cherrypy.serving.request
if 'no-store' in request.headers.values('Cache-Control'):
return
def tee(body):
"""Tee response.body into a list."""
if ('no-cache' in response.headers.values('Pragma') or
'no-store' in response.headers.values('Cache-Control')):
for chunk in body:
yield chunk
return
output = []
for chunk in body:
output.append(chunk)
yield chunk
# save the cache data
body = ''.join(output)
cherrypy._cache.put((response.status, response.headers or {},
body, response.time), len(body))
response = cherrypy.serving.response
response.body = tee(response.body)
def expires(secs=0, force=False, debug=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
'secs' must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to (response.time + secs).
If 'secs' is zero, the 'Expires' header is set one year in the past, and
the following "cache prevention" headers are also set:
'Pragma': 'no-cache'
'Cache-Control': 'no-cache, must-revalidate'
If 'force' is False (the default), the following headers are checked:
'Etag', 'Last-Modified', 'Age', 'Expires'. If any are already present,
none of the above response headers are set.
"""
response = cherrypy.serving.response
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable and not force:
if debug:
cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
else:
if debug:
cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
if isinstance(secs, datetime.timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or ("Pragma" not in headers):
headers["Pragma"] = "no-cache"
if cherrypy.serving.request.protocol >= (1, 1):
if force or "Cache-Control" not in headers:
headers["Cache-Control"] = "no-cache, must-revalidate"
# Set an explicit Expires date in the past.
expiry = httputil.HTTPDate(1169942400.0)
else:
expiry = httputil.HTTPDate(response.time + secs)
if force or "Expires" not in headers:
headers["Expires"] = expiry
| 15,405 | Python | .py | 335 | 32.925373 | 84 | 0.578699 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,629 | covercp.py | midgetspy_Sick-Beard/cherrypy/lib/covercp.py | """Code-coverage tools for CherryPy.
To use this module, or the coverage tools in the test suite,
you need to download 'coverage.py', either Gareth Rees' original
implementation:
http://www.garethrees.org/2001/12/04/python-coverage/
or Ned Batchelder's enhanced version:
http://www.nedbatchelder.com/code/modules/coverage.html
To turn on coverage tracing, use the following code:
cherrypy.engine.subscribe('start', covercp.start)
DO NOT subscribe anything on the 'start_thread' channel, as previously
recommended. Calling start once in the main thread should be sufficient
to start coverage on all threads. Calling start again in each thread
effectively clears any coverage data gathered up to that point.
Run your code, then use the covercp.serve() function to browse the
results in a web browser. If you run this module from the command line,
it will call serve() for you.
"""
import re
import sys
import cgi
from urllib import quote_plus
import os, os.path
localFile = os.path.join(os.path.dirname(__file__), "coverage.cache")
try:
from coverage import the_coverage as coverage
def start():
coverage.start()
except ImportError:
# Setting coverage to None will raise errors
# that need to be trapped downstream.
coverage = None
import warnings
warnings.warn("No code coverage will be performed; coverage.py could not be imported.")
def start():
pass
start.priority = 20
TEMPLATE_MENU = """<html>
<head>
<title>CherryPy Coverage Menu</title>
<style>
body {font: 9pt Arial, serif;}
#tree {
font-size: 8pt;
font-family: Andale Mono, monospace;
white-space: pre;
}
#tree a:active, a:focus {
background-color: black;
padding: 1px;
color: white;
border: 0px solid #9999FF;
-moz-outline-style: none;
}
.fail { color: red;}
.pass { color: #888;}
#pct { text-align: right;}
h3 {
font-size: small;
font-weight: bold;
font-style: italic;
margin-top: 5px;
}
input { border: 1px solid #ccc; padding: 2px; }
.directory {
color: #933;
font-style: italic;
font-weight: bold;
font-size: 10pt;
}
.file {
color: #400;
}
a { text-decoration: none; }
#crumbs {
color: white;
font-size: 8pt;
font-family: Andale Mono, monospace;
width: 100%;
background-color: black;
}
#crumbs a {
color: #f88;
}
#options {
line-height: 2.3em;
border: 1px solid black;
background-color: #eee;
padding: 4px;
}
#exclude {
width: 100%;
margin-bottom: 3px;
border: 1px solid #999;
}
#submit {
background-color: black;
color: white;
border: 0;
margin-bottom: -9px;
}
</style>
</head>
<body>
<h2>CherryPy Coverage</h2>"""
TEMPLATE_FORM = """
<div id="options">
<form action='menu' method=GET>
<input type='hidden' name='base' value='%(base)s' />
Show percentages <input type='checkbox' %(showpct)s name='showpct' value='checked' /><br />
Hide files over <input type='text' id='pct' name='pct' value='%(pct)s' size='3' />%%<br />
Exclude files matching<br />
<input type='text' id='exclude' name='exclude' value='%(exclude)s' size='20' />
<br />
<input type='submit' value='Change view' id="submit"/>
</form>
</div>"""
TEMPLATE_FRAMESET = """<html>
<head><title>CherryPy coverage data</title></head>
<frameset cols='250, 1*'>
<frame src='menu?base=%s' />
<frame name='main' src='' />
</frameset>
</html>
"""
TEMPLATE_COVERAGE = """<html>
<head>
<title>Coverage for %(name)s</title>
<style>
h2 { margin-bottom: .25em; }
p { margin: .25em; }
.covered { color: #000; background-color: #fff; }
.notcovered { color: #fee; background-color: #500; }
.excluded { color: #00f; background-color: #fff; }
table .covered, table .notcovered, table .excluded
{ font-family: Andale Mono, monospace;
font-size: 10pt; white-space: pre; }
.lineno { background-color: #eee;}
.notcovered .lineno { background-color: #000;}
table { border-collapse: collapse;
</style>
</head>
<body>
<h2>%(name)s</h2>
<p>%(fullpath)s</p>
<p>Coverage: %(pc)s%%</p>"""
TEMPLATE_LOC_COVERED = """<tr class="covered">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_LOC_NOT_COVERED = """<tr class="notcovered">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_LOC_EXCLUDED = """<tr class="excluded">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_ITEM = "%s%s<a class='file' href='report?name=%s' target='main'>%s</a>\n"
def _percent(statements, missing):
s = len(statements)
e = s - len(missing)
if s > 0:
return int(round(100.0 * e / s))
return 0
def _show_branch(root, base, path, pct=0, showpct=False, exclude=""):
# Show the directory name and any of our children
dirs = [k for k, v in root.items() if v]
dirs.sort()
for name in dirs:
newpath = os.path.join(path, name)
if newpath.lower().startswith(base):
relpath = newpath[len(base):]
yield "| " * relpath.count(os.sep)
yield "<a class='directory' href='menu?base=%s&exclude=%s'>%s</a>\n" % \
(newpath, quote_plus(exclude), name)
for chunk in _show_branch(root[name], base, newpath, pct, showpct, exclude):
yield chunk
# Now list the files
if path.lower().startswith(base):
relpath = path[len(base):]
files = [k for k, v in root.items() if not v]
files.sort()
for name in files:
newpath = os.path.join(path, name)
pc_str = ""
if showpct:
try:
_, statements, _, missing, _ = coverage.analysis2(newpath)
except:
# Yes, we really want to pass on all errors.
pass
else:
pc = _percent(statements, missing)
pc_str = ("%3d%% " % pc).replace(' ', ' ')
if pc < float(pct) or pc == -1:
pc_str = "<span class='fail'>%s</span>" % pc_str
else:
pc_str = "<span class='pass'>%s</span>" % pc_str
yield TEMPLATE_ITEM % ("| " * (relpath.count(os.sep) + 1),
pc_str, newpath, name)
def _skip_file(path, exclude):
if exclude:
return bool(re.search(exclude, path))
def _graft(path, tree):
d = tree
p = path
atoms = []
while True:
p, tail = os.path.split(p)
if not tail:
break
atoms.append(tail)
atoms.append(p)
if p != "/":
atoms.append("/")
atoms.reverse()
for node in atoms:
if node:
d = d.setdefault(node, {})
def get_tree(base, exclude):
"""Return covered module names as a nested dict."""
tree = {}
coverage.get_ready()
runs = list(coverage.cexecuted.keys())
if runs:
for path in runs:
if not _skip_file(path, exclude) and not os.path.isdir(path):
_graft(path, tree)
return tree
class CoverStats(object):
def __init__(self, root=None):
if root is None:
# Guess initial depth. Files outside this path will not be
# reachable from the web interface.
import cherrypy
root = os.path.dirname(cherrypy.__file__)
self.root = root
def index(self):
return TEMPLATE_FRAMESET % self.root.lower()
index.exposed = True
def menu(self, base="/", pct="50", showpct="",
exclude=r'python\d\.\d|test|tut\d|tutorial'):
# The coverage module uses all-lower-case names.
base = base.lower().rstrip(os.sep)
yield TEMPLATE_MENU
yield TEMPLATE_FORM % locals()
# Start by showing links for parent paths
yield "<div id='crumbs'>"
path = ""
atoms = base.split(os.sep)
atoms.pop()
for atom in atoms:
path += atom + os.sep
yield ("<a href='menu?base=%s&exclude=%s'>%s</a> %s"
% (path, quote_plus(exclude), atom, os.sep))
yield "</div>"
yield "<div id='tree'>"
# Then display the tree
tree = get_tree(base, exclude)
if not tree:
yield "<p>No modules covered.</p>"
else:
for chunk in _show_branch(tree, base, "/", pct,
showpct == 'checked', exclude):
yield chunk
yield "</div>"
yield "</body></html>"
menu.exposed = True
def annotated_file(self, filename, statements, excluded, missing):
source = open(filename, 'r')
buffer = []
for lineno, line in enumerate(source.readlines()):
lineno += 1
line = line.strip("\n\r")
empty_the_buffer = True
if lineno in excluded:
template = TEMPLATE_LOC_EXCLUDED
elif lineno in missing:
template = TEMPLATE_LOC_NOT_COVERED
elif lineno in statements:
template = TEMPLATE_LOC_COVERED
else:
empty_the_buffer = False
buffer.append((lineno, line))
if empty_the_buffer:
for lno, pastline in buffer:
yield template % (lno, cgi.escape(pastline))
buffer = []
yield template % (lineno, cgi.escape(line))
def report(self, name):
coverage.get_ready()
filename, statements, excluded, missing, _ = coverage.analysis2(name)
pc = _percent(statements, missing)
yield TEMPLATE_COVERAGE % dict(name=os.path.basename(name),
fullpath=name,
pc=pc)
yield '<table>\n'
for line in self.annotated_file(filename, statements, excluded,
missing):
yield line
yield '</table>'
yield '</body>'
yield '</html>'
report.exposed = True
def serve(path=localFile, port=8080, root=None):
if coverage is None:
raise ImportError("The coverage module could not be imported.")
coverage.cache_default = path
import cherrypy
cherrypy.config.update({'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': "production",
})
cherrypy.quickstart(CoverStats(root))
if __name__ == "__main__":
serve(*tuple(sys.argv[1:]))
| 11,672 | Python | .py | 316 | 26.21519 | 96 | 0.539195 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,630 | auth_basic.py | midgetspy_Sick-Beard/cherrypy/lib/auth_basic.py | # This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
__doc__ = """Module auth_basic.py provides a CherryPy 3.x tool which implements
the server-side of HTTP Basic Access Authentication, as described in RFC 2617.
Example usage, using the built-in checkpassword_dict function which uses a dict
as the credentials store:
userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'}
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict)
basic_auth = {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'earth',
'tools.auth_basic.checkpassword': checkpassword,
}
app_config = { '/' : basic_auth }
"""
__author__ = 'visteya'
__date__ = 'April 2009'
import binascii
import base64
import cherrypy
def checkpassword_dict(user_password_dict):
"""Returns a checkpassword function which checks credentials
against a dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, use
checkpassword_dict(my_credentials_dict) as the value for the
checkpassword argument to basic_auth().
"""
def checkpassword(realm, user, password):
p = user_password_dict.get(user)
return p and p == password or False
return checkpassword
def basic_auth(realm, checkpassword, debug=False):
"""basic_auth is a CherryPy tool which hooks at before_handler to perform
HTTP Basic Access Authentication, as specified in RFC 2617.
If the request has an 'authorization' header with a 'Basic' scheme, this
tool attempts to authenticate the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not 'Basic', or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Basic header.
Arguments:
realm: a string containing the authentication realm.
checkpassword: a callable which checks the authentication credentials.
Its signature is checkpassword(realm, username, password). where
username and password are the values obtained from the request's
'authorization' header. If authentication succeeds, checkpassword
returns True, else it returns False.
"""
if '"' in realm:
raise ValueError('Realm cannot contain the " (quote) character.')
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
if auth_header is not None:
try:
scheme, params = auth_header.split(' ', 1)
if scheme.lower() == 'basic':
# since CherryPy claims compability with Python 2.3, we must use
# the legacy API of base64
username_password = base64.decodestring(params)
username, password = username_password.split(':', 1)
if checkpassword(realm, username, password):
if debug:
cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
request.login = username
return # successful authentication
except (ValueError, binascii.Error): # split() error, base64.decodestring() error
raise cherrypy.HTTPError(400, 'Bad Request')
# Respond with 401 status and a WWW-Authenticate header
cherrypy.serving.response.headers['www-authenticate'] = 'Basic realm="%s"' % realm
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
| 3,639 | Python | .py | 69 | 44.188406 | 90 | 0.680295 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,631 | xmlrpc.py | midgetspy_Sick-Beard/cherrypy/lib/xmlrpc.py | import sys
import cherrypy
def process_body():
"""Return (params, method) from request body."""
try:
import xmlrpclib
return xmlrpclib.loads(cherrypy.request.body.read())
except Exception:
return ('ERROR PARAMS',), 'ERRORMETHOD'
def patched_path(path):
"""Return 'path', doctored for RPC."""
if not path.endswith('/'):
path += '/'
if path.startswith('/RPC2/'):
# strip the first /rpc2
path = path[5:]
return path
def _set_response(body):
# The XML-RPC spec (http://www.xmlrpc.com/spec) says:
# "Unless there's a lower-level error, always return 200 OK."
# Since Python's xmlrpclib interprets a non-200 response
# as a "Protocol Error", we'll just return 200 every time.
response = cherrypy.response
response.status = '200 OK'
response.body = body
response.headers['Content-Type'] = 'text/xml'
response.headers['Content-Length'] = len(body)
def respond(body, encoding='utf-8', allow_none=0):
from xmlrpclib import Fault, dumps
if not isinstance(body, Fault):
body = (body,)
_set_response(dumps(body, methodresponse=1,
encoding=encoding,
allow_none=allow_none))
def on_error(*args, **kwargs):
body = str(sys.exc_info()[1])
from xmlrpclib import Fault, dumps
_set_response(dumps(Fault(1, body)))
| 1,445 | Python | .py | 38 | 30.394737 | 66 | 0.625993 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,632 | http.py | midgetspy_Sick-Beard/cherrypy/lib/http.py | import warnings
warnings.warn('cherrypy.lib.http has been deprecated and will be removed '
'in CherryPy 3.3 use cherrypy.lib.httputil instead.',
DeprecationWarning)
from cherrypy.lib.httputil import *
| 238 | Python | .py | 5 | 39.2 | 75 | 0.707424 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,633 | win32.py | midgetspy_Sick-Beard/cherrypy/process/win32.py | """Windows service. Requires pywin32."""
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=40)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=40)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=40)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=40)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
"WSPBus %s Event (pid=%r)" %
(state.name, os.getpid()))
self.events[state] = event
return event
def _get_state(self):
return self._state
def _set_state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
state = property(_get_state, _set_state)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError("The given object could not be found: %r" % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = "Python Web Service"
_svc_display_name_ = "Python Web Service"
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = "pywebsvc"
_exe_args_ = None # Default to no arguments
# Only exists on Windows 2000 or later, ignored on windows NT
_svc_description_ = "Python Web Service"
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
def SvcOther(self, control):
process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
| 6,047 | Python | .py | 136 | 32.632353 | 82 | 0.601996 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,634 | plugins.py | midgetspy_Sick-Beard/cherrypy/process/plugins.py | """Site services for use with a Web Site Process Bus."""
import os
import re
try:
set
except NameError:
from sets import Set as set
import signal as _signal
import sys
import time
import thread
import threading
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file has
# "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
"""Plugin base class which auto-subscribes methods for known channels."""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
class SignalHandler(object):
"""Register bus channels (and listeners) for system signals.
By default, instantiating this object subscribes the following signals
and listeners:
TERM: bus.exit
HUP : bus.restart
USR1: bus.graceful
"""
# Map from signal numbers to names
signals = {}
for k, v in vars(_signal).items():
if k.startswith('SIG') and not k.startswith('SIG_'):
signals[v] = k
del k, v
def __init__(self, bus):
self.bus = bus
# Set default handlers
self.handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
self._previous_handlers = {}
def subscribe(self):
for sig, func in self.handlers.items():
try:
self.set_handler(sig, func)
except ValueError:
pass
def unsubscribe(self):
for signum, handler in self._previous_handlers.items():
signame = self.signals[signum]
if handler is None:
self.bus.log("Restoring %s handler to SIG_DFL." % signame)
handler = _signal.SIG_DFL
else:
self.bus.log("Restoring %s handler %r." % (signame, handler))
try:
our_handler = _signal.signal(signum, handler)
if our_handler is None:
self.bus.log("Restored old %s handler %r, but our "
"handler was not registered." %
(signame, handler), level=30)
except ValueError:
self.bus.log("Unable to restore %s handler %r." %
(signame, handler), level=40, traceback=True)
def set_handler(self, signal, listener=None):
"""Subscribe a handler for the given signal (number or name).
If the optional 'listener' argument is provided, it will be
subscribed as a listener for the given signal's channel.
If the given signal name or number is not available on the current
platform, ValueError is raised.
"""
if isinstance(signal, basestring):
signum = getattr(_signal, signal, None)
if signum is None:
raise ValueError("No such signal: %r" % signal)
signame = signal
else:
try:
signame = self.signals[signal]
except KeyError:
raise ValueError("No such signal: %r" % signal)
signum = signal
prev = _signal.signal(signum, self._handle_signal)
self._previous_handlers[signum] = prev
if listener is not None:
self.bus.log("Listening for %s." % signame)
self.bus.subscribe(signame, listener)
def _handle_signal(self, signum=None, frame=None):
"""Python signal handler (self.set_handler subscribes it for you)."""
signame = self.signals[signum]
self.bus.log("Caught signal %s." % signame)
self.bus.publish(signame)
def handle_SIGHUP(self):
if os.isatty(sys.stdin.fileno()):
# not daemonized (may be foreground or background)
self.bus.log("SIGHUP caught but not daemonized. Exiting.")
self.bus.exit()
else:
self.bus.log("SIGHUP caught while daemonized. Restarting.")
self.bus.restart()
try:
import pwd, grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to Gavin Baker: http://antonym.org/node/100.
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
def _get_uid(self):
return self._uid
def _set_uid(self, val):
if val is not None:
if pwd is None:
self.bus.log("pwd module not available; ignoring uid.",
level=30)
val = None
elif isinstance(val, basestring):
val = pwd.getpwnam(val)[2]
self._uid = val
uid = property(_get_uid, _set_uid, doc="The uid under which to run.")
def _get_gid(self):
return self._gid
def _set_gid(self, val):
if val is not None:
if grp is None:
self.bus.log("grp module not available; ignoring gid.",
level=30)
val = None
elif isinstance(val, basestring):
val = grp.getgrnam(val)[2]
self._gid = val
gid = property(_get_gid, _set_gid, doc="The gid under which to run.")
def _get_umask(self):
return self._umask
def _set_umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log("umask function not available; ignoring umask.",
level=30)
val = None
self._umask = val
umask = property(_get_umask, _set_umask, doc="The umask under which to run.")
def start(self):
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log('Already running as uid: %r gid: %r' %
current_ids())
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log('umask old: %03o, new: %03o' %
(old_umask, self.umask))
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
class Daemonizer(SimplePlugin):
"""Daemonize the running script.
Use this with a Web Site Process Bus via:
Daemonizer(bus).subscribe()
When this component finishes, the process is completely decoupled from
the parent environment. Please note that when this component is used,
the return code from the parent process will still be 0 if a startup
error occurs in the forked children. Errors in the initial daemonizing
process still return proper exit codes. Therefore, if you use this
plugin to daemonize, don't use the return code as an accurate indicator
of whether the process fully started. In fact, that return code only
indicates if the process succesfully finished the first fork.
"""
def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
SimplePlugin.__init__(self, bus)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.finalized = False
def start(self):
if self.finalized:
self.bus.log('Already deamonized.')
# forking has issues with threads:
# http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
# "The general problem with making fork() work in a multi-threaded
# world is what to do with all of the threads..."
# So we check for active threads:
if threading.activeCount() != 1:
self.bus.log('There are %r active threads. '
'Daemonizing now may cause strange failures.' %
threading.enumerate(), level=30)
# See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
# (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
# and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# Finish up with the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
# Do first fork.
try:
pid = os.fork()
if pid == 0:
# This is the child process. Continue.
pass
else:
# This is the first parent. Exit, now that we've forked.
self.bus.log('Forking once.')
os._exit(0)
except OSError, exc:
# Python raises OSError rather than returning negative numbers.
sys.exit("%s: fork #1 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.setsid()
# Do second fork
try:
pid = os.fork()
if pid > 0:
self.bus.log('Forking twice.')
os._exit(0) # Exit second parent
except OSError, exc:
sys.exit("%s: fork #2 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.chdir("/")
os.umask(0)
si = open(self.stdin, "r")
so = open(self.stdout, "a+")
se = open(self.stderr, "a+")
# os.dup2(fd, fd2) will close fd2 if necessary,
# so we don't explicitly close stdin/out/err.
# See http://docs.python.org/lib/os-fd-ops.html
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
self.bus.log('Daemonized to PID: %s' % os.getpid())
self.finalized = True
start.priority = 65
class PIDFile(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
open(self.pidfile, "wb").write(str(pid))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
class PerpetualTimer(threading._Timer):
"""A subclass of threading._Timer whose run() method repeats."""
def run(self):
while True:
self.finished.wait(self.interval)
if self.finished.isSet():
return
try:
self.function(*self.args, **self.kwargs)
except Exception, x:
self.bus.log("Error in perpetual timer thread function %r." %
self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class Monitor(SimplePlugin):
"""WSPBus listener to periodically run a callback in its own thread.
bus: a Web Site Process Bus object.
callback: the function to call at intervals.
frequency: the time in seconds between callback runs.
"""
frequency = 60
def __init__(self, bus, callback, frequency=60, name=None):
SimplePlugin.__init__(self, bus)
self.callback = callback
self.frequency = frequency
self.thread = None
self.name = name
def start(self):
"""Start our callback in its own perpetual timer thread."""
if self.frequency > 0:
threadname = self.name or self.__class__.__name__
if self.thread is None:
self.thread = PerpetualTimer(self.frequency, self.callback)
self.thread.bus = self.bus
self.thread.setName(threadname)
self.thread.start()
self.bus.log("Started monitor thread %r." % threadname)
else:
self.bus.log("Monitor thread %r already started." % threadname)
start.priority = 70
def stop(self):
"""Stop our callback's perpetual timer thread."""
if self.thread is None:
self.bus.log("No thread running for %s." % self.name or self.__class__.__name__)
else:
if self.thread is not threading.currentThread():
name = self.thread.getName()
self.thread.cancel()
self.thread.join()
self.bus.log("Stopped thread %r." % name)
self.thread = None
def graceful(self):
"""Stop the callback's perpetual timer thread and restart it."""
self.stop()
self.start()
class Autoreloader(Monitor):
"""Monitor which re-executes the process when files change."""
frequency = 1
match = '.*'
def __init__(self, bus, frequency=1, match='.*'):
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own perpetual timer thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of filenames which the Autoreloader will monitor."""
files = set()
for k, m in sys.modules.items():
if re.match(self.match, k):
if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
f = m.__loader__.archive
else:
f = getattr(m, '__file__', None)
if f is not None and not os.path.isabs(f):
# ensure absolute paths so a os.chdir() in the app doesn't break me
f = os.path.normpath(os.path.join(_module__file__base, f))
files.add(f)
return files
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log("Restarting because %s changed." % filename)
self.thread.cancel()
self.bus.log("Stopped thread %r." % self.thread.getName())
self.bus.restart()
return
class ThreadManager(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each thread).
This will register/unregister the current thread and publish to
'start_thread' and 'stop_thread' listeners in the bus as needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether
the thread will be re-used or not. The bus will call
'stop_thread' listeners for you when it stops.
"""
def __init__(self, bus):
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('release_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = thread.get_ident()
if thread_ident not in self.threads:
# We can't just use _get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = threading._get_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
| 21,151 | Python | .py | 470 | 32.048936 | 93 | 0.565992 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,635 | servers.py | midgetspy_Sick-Beard/cherrypy/process/servers.py | """Adapt an HTTP server."""
import time
class ServerAdapter(object):
"""Adapter for an HTTP server.
If you need to start more than one HTTP server (to serve on multiple
ports, or protocols, etc.), you can manually register each one and then
start them all with bus.start:
s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
s1.subscribe()
s2.subscribe()
bus.start()
"""
def __init__(self, bus, httpserver=None, bind_addr=None):
self.bus = bus
self.httpserver = httpserver
self.bind_addr = bind_addr
self.interrupt = None
self.running = False
def subscribe(self):
self.bus.subscribe('start', self.start)
self.bus.subscribe('stop', self.stop)
def unsubscribe(self):
self.bus.unsubscribe('start', self.start)
self.bus.unsubscribe('stop', self.stop)
def start(self):
"""Start the HTTP server."""
if self.bind_addr is None:
on_what = "unknown interface (dynamic?)"
elif isinstance(self.bind_addr, tuple):
host, port = self.bind_addr
on_what = "%s:%s" % (host, port)
else:
on_what = "socket file: %s" % self.bind_addr
if self.running:
self.bus.log("Already serving on %s" % on_what)
return
self.interrupt = None
if not self.httpserver:
raise ValueError("No HTTP server has been created.")
# Start the httpserver in a new thread.
if isinstance(self.bind_addr, tuple):
wait_for_free_port(*self.bind_addr)
import threading
t = threading.Thread(target=self._start_http_thread)
t.setName("HTTPServer " + t.getName())
t.start()
self.wait()
self.running = True
self.bus.log("Serving on %s" % on_what)
start.priority = 75
def _start_http_thread(self):
"""HTTP servers MUST be running in new threads, so that the
main thread persists to receive KeyboardInterrupt's. If an
exception is raised in the httpserver's thread then it's
trapped here, and the bus (and therefore our httpserver)
are shut down.
"""
try:
self.httpserver.start()
except KeyboardInterrupt, exc:
self.bus.log("<Ctrl-C> hit: shutting down HTTP server")
self.interrupt = exc
self.bus.exit()
except SystemExit, exc:
self.bus.log("SystemExit raised: shutting down HTTP server")
self.interrupt = exc
self.bus.exit()
raise
except:
import sys
self.interrupt = sys.exc_info()[1]
self.bus.log("Error in HTTP server: shutting down",
traceback=True, level=40)
self.bus.exit()
raise
def wait(self):
"""Wait until the HTTP server is ready to receive requests."""
while not getattr(self.httpserver, "ready", False):
if self.interrupt:
raise self.interrupt
time.sleep(.1)
# Wait for port to be occupied
if isinstance(self.bind_addr, tuple):
host, port = self.bind_addr
wait_for_occupied_port(host, port)
def stop(self):
"""Stop the HTTP server."""
if self.running:
# stop() MUST block until the server is *truly* stopped.
self.httpserver.stop()
# Wait for the socket to be truly freed.
if isinstance(self.bind_addr, tuple):
wait_for_free_port(*self.bind_addr)
self.running = False
self.bus.log("HTTP Server %s shut down" % self.httpserver)
else:
self.bus.log("HTTP Server %s already shut down" % self.httpserver)
stop.priority = 25
def restart(self):
"""Restart the HTTP server."""
self.stop()
self.start()
class FlupFCGIServer(object):
"""Adapter for a flup.server.fcgi.WSGIServer."""
def __init__(self, *args, **kwargs):
if kwargs.get('bindAddress', None) is None:
import socket
if not hasattr(socket.socket, 'fromfd'):
raise ValueError(
'Dynamic FCGI server not available on this platform. '
'You must use a static or external one by providing a '
'legal bindAddress.')
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the FCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.fcgi import WSGIServer
self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.fcgiserver._installSignalHandlers = lambda: None
self.fcgiserver._oldSIGs = []
self.ready = True
self.fcgiserver.run()
def stop(self):
"""Stop the HTTP server."""
# Forcibly stop the fcgi server main event loop.
self.fcgiserver._keepGoing = False
# Force all worker threads to die off.
self.fcgiserver._threadPool.maxSpare = self.fcgiserver._threadPool._idleCount
self.ready = False
class FlupSCGIServer(object):
"""Adapter for a flup.server.scgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the SCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.scgi import WSGIServer
self.scgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.scgiserver._installSignalHandlers = lambda: None
self.scgiserver._oldSIGs = []
self.ready = True
self.scgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
# Forcibly stop the scgi server main event loop.
self.scgiserver._keepGoing = False
# Force all worker threads to die off.
self.scgiserver._threadPool.maxSpare = 0
def client_host(server_host):
"""Return the host on which a client can connect to the given listener."""
if server_host == '0.0.0.0':
# 0.0.0.0 is INADDR_ANY, which should answer on localhost.
return '127.0.0.1'
if server_host == '::':
# :: is IN6ADDR_ANY, which should answer on localhost.
return '::1'
return server_host
def check_port(host, port, timeout=1.0):
"""Raise an error if the given port is not free on the given host."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
host = client_host(host)
port = int(port)
import socket
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
except socket.gaierror:
if ':' in host:
info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, "", (host, port, 0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))]
for res in info:
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(timeout)
s.connect((host, port))
s.close()
raise IOError("Port %s is in use on %s; perhaps the previous "
"httpserver did not shut down properly." %
(repr(port), repr(host)))
except socket.error:
if s:
s.close()
def wait_for_free_port(host, port):
"""Wait for the specified port to become free (drop requests)."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
for trial in range(50):
try:
# we are expecting a free port, so reduce the timeout
check_port(host, port, timeout=0.1)
except IOError:
# Give the old server thread time to free the port.
time.sleep(0.1)
else:
return
raise IOError("Port %r not free on %r" % (port, host))
def wait_for_occupied_port(host, port):
"""Wait for the specified port to become active (receive requests)."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
for trial in range(50):
try:
check_port(host, port)
except IOError:
return
else:
time.sleep(.1)
raise IOError("Port %r not bound on %r" % (port, host))
| 10,560 | Python | .py | 242 | 32.289256 | 86 | 0.582706 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,636 | __init__.py | midgetspy_Sick-Beard/cherrypy/process/__init__.py | """Site container for an HTTP server.
A Web Site Process Bus object is used to connect applications, servers,
and frameworks with site-wide services such as daemonization, process
reload, signal handling, drop privileges, PID file management, logging
for all of these, and many more.
The 'plugins' module defines a few abstract and concrete services for
use with the bus. Some use tool-specific channels; see the documentation
for each class.
"""
from cherrypy.process.wspbus import bus
from cherrypy.process import plugins, servers
| 550 | Python | .py | 11 | 47.454545 | 73 | 0.803002 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,637 | wspbus.py | midgetspy_Sick-Beard/cherrypy/process/wspbus.py | """An implementation of the Web Site Process Bus.
This module is completely standalone, depending only on the stdlib.
Web Site Process Bus
--------------------
A Bus object is used to contain and manage site-wide behavior:
daemonization, HTTP server start/stop, process reload, signal handling,
drop privileges, PID file management, logging for all of these,
and many more.
In addition, a Bus object provides a place for each web framework
to register code that runs in response to site-wide events (like
process start and stop), or which controls or otherwise interacts with
the site-wide components mentioned above. For example, a framework which
uses file-based templates would add known template filenames to an
autoreload component.
Ideally, a Bus object will be flexible enough to be useful in a variety
of invocation scenarios:
1. The deployer starts a site from the command line via a framework-
neutral deployment script; applications from multiple frameworks
are mixed in a single site. Command-line arguments and configuration
files are used to define site-wide components such as the HTTP server,
WSGI component graph, autoreload behavior, signal handling, etc.
2. The deployer starts a site via some other process, such as Apache;
applications from multiple frameworks are mixed in a single site.
Autoreload and signal handling (from Python at least) are disabled.
3. The deployer starts a site via a framework-specific mechanism;
for example, when running tests, exploring tutorials, or deploying
single applications from a single framework. The framework controls
which site-wide components are enabled as it sees fit.
The Bus object in this package uses topic-based publish-subscribe
messaging to accomplish all this. A few topic channels are built in
('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
site containers are free to define their own. If a message is sent to a
channel that has not been defined or has no listeners, there is no effect.
In general, there should only ever be a single Bus object per process.
Frameworks and site containers share a single Bus object by publishing
messages and subscribing listeners.
The Bus object works as a finite state machine which models the current
state of the process. Bus methods move it from one state to another;
those methods then publish to subscribed listeners on the channel for
the new state.
O
|
V
STOPPING --> STOPPED --> EXITING -> X
A A |
| \___ |
| \ |
| V V
STARTED <-- STARTING
"""
import atexit
import os
try:
set
except NameError:
from sets import Set as set
import sys
import threading
import time
import traceback as _traceback
import warnings
# Here I save the value of os.getcwd(), which, if I am imported early enough,
# will be the directory from which the startup script was run. This is needed
# by _do_execv(), to change back to the original directory before execv()ing a
# new process. This is a defense against the application having changed the
# current working directory (which could make sys.executable "not found" if
# sys.executable is a relative-path, and/or cause other problems).
_startup_cwd = os.getcwd()
class ChannelFailures(Exception):
delimiter = '\n'
def __init__(self, *args, **kwargs):
# Don't use 'super' here; Exceptions are old-style in Py2.4
# See http://www.cherrypy.org/ticket/959
Exception.__init__(self, *args, **kwargs)
self._exceptions = list()
def handle_exception(self):
self._exceptions.append(sys.exc_info())
def get_instances(self):
return [instance for cls, instance, traceback in self._exceptions]
def __str__(self):
exception_strings = map(repr, self.get_instances())
return self.delimiter.join(exception_strings)
def __nonzero__(self):
return bool(self._exceptions)
# Use a flag to indicate the state of the bus.
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return "states.%s" % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
class Bus(object):
"""Process state-machine and messenger for HTTP site deployment.
All listeners for a given channel are guaranteed to be called even
if others at the same channel fail. Each failure is logged, but
execution proceeds on to the next listener. The only way to stop all
processing from inside a listener is to raise SystemExit and stop the
whole server.
"""
states = states
state = states.STOPPED
execv = False
def __init__(self):
self.execv = False
self.state = states.STOPPED
self.listeners = dict(
[(channel, set()) for channel
in ('start', 'stop', 'exit', 'graceful', 'log', 'main')])
self._priorities = {}
def subscribe(self, channel, callback, priority=None):
"""Add the given callback at the given channel (if not present)."""
if channel not in self.listeners:
self.listeners[channel] = set()
self.listeners[channel].add(callback)
if priority is None:
priority = getattr(callback, 'priority', 50)
self._priorities[(channel, callback)] = priority
def unsubscribe(self, channel, callback):
"""Discard the given callback (if present)."""
listeners = self.listeners.get(channel)
if listeners and callback in listeners:
listeners.discard(callback)
del self._priorities[(channel, callback)]
def publish(self, channel, *args, **kwargs):
"""Return output of all subscribers for the given channel."""
if channel not in self.listeners:
return []
exc = ChannelFailures()
output = []
items = [(self._priorities[(channel, listener)], listener)
for listener in self.listeners[channel]]
items.sort()
for priority, listener in items:
try:
output.append(listener(*args, **kwargs))
except KeyboardInterrupt:
raise
except SystemExit, e:
# If we have previous errors ensure the exit code is non-zero
if exc and e.code == 0:
e.code = 1
raise
except:
exc.handle_exception()
if channel == 'log':
# Assume any further messages to 'log' will fail.
pass
else:
self.log("Error in %r listener %r" % (channel, listener),
level=40, traceback=True)
if exc:
raise exc
return output
def _clean_exit(self):
"""An atexit handler which asserts the Bus is not running."""
if self.state != states.EXITING:
warnings.warn(
"The main thread is exiting, but the Bus is in the %r state; "
"shutting it down automatically now. You must either call "
"bus.block() after start(), or call bus.exit() before the "
"main thread exits." % self.state, RuntimeWarning)
self.exit()
def start(self):
"""Start all services."""
atexit.register(self._clean_exit)
self.state = states.STARTING
self.log('Bus STARTING')
try:
self.publish('start')
self.state = states.STARTED
self.log('Bus STARTED')
except (KeyboardInterrupt, SystemExit):
raise
except:
self.log("Shutting down due to error in start listener:",
level=40, traceback=True)
e_info = sys.exc_info()
try:
self.exit()
except:
# Any stop/exit errors will be logged inside publish().
pass
raise e_info[0], e_info[1], e_info[2]
def exit(self):
"""Stop all services and prepare to exit the process."""
exitstate = self.state
try:
self.stop()
self.state = states.EXITING
self.log('Bus EXITING')
self.publish('exit')
# This isn't strictly necessary, but it's better than seeing
# "Waiting for child threads to terminate..." and then nothing.
self.log('Bus EXITED')
except:
# This method is often called asynchronously (whether thread,
# signal handler, console handler, or atexit handler), so we
# can't just let exceptions propagate out unhandled.
# Assume it's been logged and just die.
os._exit(70) # EX_SOFTWARE
if exitstate == states.STARTING:
# exit() was called before start() finished, possibly due to
# Ctrl-C because a start listener got stuck. In this case,
# we could get stuck in a loop where Ctrl-C never exits the
# process, so we just call os.exit here.
os._exit(70) # EX_SOFTWARE
def restart(self):
"""Restart the process (may close connections).
This method does not restart the process from the calling thread;
instead, it stops the bus and asks the main thread to call execv.
"""
self.execv = True
self.exit()
def graceful(self):
"""Advise all services to reload."""
self.log('Bus graceful')
self.publish('graceful')
def block(self, interval=0.1):
"""Wait for the EXITING state, KeyboardInterrupt or SystemExit.
This function is intended to be called only by the main thread.
After waiting for the EXITING state, it also waits for all threads
to terminate, and then calls os.execv if self.execv is True. This
design allows another thread to call bus.restart, yet have the main
thread perform the actual execv call (required on some platforms).
"""
try:
self.wait(states.EXITING, interval=interval, channel='main')
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.log('Keyboard Interrupt: shutting down bus')
self.exit()
except SystemExit:
self.log('SystemExit raised: shutting down bus')
self.exit()
raise
# Waiting for ALL child threads to finish is necessary on OS X.
# See http://www.cherrypy.org/ticket/581.
# It's also good to let them all shut down before allowing
# the main thread to call atexit handlers.
# See http://www.cherrypy.org/ticket/751.
self.log("Waiting for child threads to terminate...")
for t in threading.enumerate():
if t != threading.currentThread() and t.isAlive():
# Note that any dummy (external) threads are always daemonic.
if hasattr(threading.Thread, "daemon"):
# Python 2.6+
d = t.daemon
else:
d = t.isDaemon()
if not d:
t.join()
if self.execv:
self._do_execv()
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s)."""
if isinstance(state, (tuple, list)):
states = state
else:
states = [state]
def _wait():
while self.state not in states:
time.sleep(interval)
self.publish(channel)
# From http://psyco.sourceforge.net/psycoguide/bugs.html:
# "The compiled machine code does not include the regular polling
# done by Python, meaning that a KeyboardInterrupt will not be
# detected before execution comes back to the regular Python
# interpreter. Your program cannot be interrupted if caught
# into an infinite Psyco-compiled loop."
try:
sys.modules['psyco'].cannotcompile(_wait)
except (KeyError, AttributeError):
pass
_wait()
def _do_execv(self):
"""Re-execute the current process.
This must be called from the main thread, because certain platforms
(OS X) don't allow execv to be called in a child thread very well.
"""
args = sys.argv[:]
self.log('Re-spawning %s' % ' '.join(args))
args.insert(0, sys.executable)
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
os.execv(sys.executable, args)
def stop(self):
"""Stop all services."""
self.state = states.STOPPING
self.log('Bus STOPPING')
self.publish('stop')
self.state = states.STOPPED
self.log('Bus STOPPED')
def start_with_callback(self, func, args=None, kwargs=None):
"""Start 'func' in a new thread T, then start self (and return T)."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
args = (func,) + args
def _callback(func, *a, **kw):
self.wait(states.STARTED)
func(*a, **kw)
t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
t.setName('Bus Callback ' + t.getName())
t.start()
self.start()
return t
def log(self, msg="", level=20, traceback=False):
"""Log the given message. Append the last traceback if requested."""
if traceback:
exc = sys.exc_info()
msg += "\n" + "".join(_traceback.format_exception(*exc))
self.publish('log', msg, level)
bus = Bus()
| 14,846 | Python | .py | 325 | 34.655385 | 79 | 0.609131 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,638 | ssl_pyopenssl.py | midgetspy_Sick-Beard/cherrypy/wsgiserver/ssl_pyopenssl.py | """A library for integrating pyOpenSSL with CherryPy.
The OpenSSL module must be importable for SSL functionality.
You can obtain it from http://pyopenssl.sourceforge.net/
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
SSLAdapter. There are two ways to use SSL:
Method One:
ssl_adapter.context: an instance of SSL.Context.
If this is not None, it is assumed to be an SSL.Context instance,
and will be passed to SSL.Connection on bind(). The developer is
responsible for forming a valid Context object. This approach is
to be preferred for more flexibility, e.g. if the cert and key are
streams instead of files, or need decryption, or SSL.SSLv3_METHOD
is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
the pyOpenSSL documentation for complete options.
Method Two (shortcut):
ssl_adapter.certificate: the filename of the server SSL certificate.
ssl_adapter.private_key: the filename of the server's private key file.
Both are None by default. If ssl_adapter.context is None, but .private_key
and .certificate are both given and valid, they will be read, and the
context will be automatically created from them.
ssl_adapter.certificate_chain: (optional) the filename of CA's intermediate
certificate bundle. This is needed for cheaper "chained root" SSL
certificates, and should be left as None if not required.
"""
import socket
import threading
import time
from cherrypy import wsgiserver
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
class SSL_fileobject(wsgiserver.CP_fileobject):
"""SSL file object attached to a socket object."""
ssl_timeout = 3
ssl_retry = .01
def _safe_call(self, is_reader, call, *args, **kwargs):
"""Wrap the given call with SSL error-trapping.
is_reader: if False EOF errors will be raised. If True, EOF errors
will return "" (to emulate normal sockets).
"""
start = time.time()
while True:
try:
return call(*args, **kwargs)
except SSL.WantReadError:
# Sleep and try again. This is dangerous, because it means
# the rest of the stack has no way of differentiating
# between a "new handshake" error and "client dropped".
# Note this isn't an endless loop: there's a timeout below.
time.sleep(self.ssl_retry)
except SSL.WantWriteError:
time.sleep(self.ssl_retry)
except SSL.SysCallError, e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return ""
errnum = e.args[0]
if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
return ""
raise socket.error(errnum)
except SSL.Error, e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return ""
thirdarg = None
try:
thirdarg = e.args[0][0][2]
except IndexError:
pass
if thirdarg == 'http request':
# The client is talking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError()
raise wsgiserver.FatalSSLAlert(*e.args)
except:
raise
if time.time() - start > self.ssl_timeout:
raise socket.timeout("timed out")
def recv(self, *args, **kwargs):
buf = []
r = super(SSL_fileobject, self).recv
while True:
data = self._safe_call(True, r, *args, **kwargs)
buf.append(data)
p = self._sock.pending()
if not p:
return "".join(buf)
def sendall(self, *args, **kwargs):
return self._safe_call(False, super(SSL_fileobject, self).sendall,
*args, **kwargs)
def send(self, *args, **kwargs):
return self._safe_call(False, super(SSL_fileobject, self).send,
*args, **kwargs)
class SSLConnection:
"""A thread-safe wrapper for an SSL.Connection.
*args: the arguments to create the wrapped SSL.Connection(*args).
"""
def __init__(self, *args):
self._ssl_conn = SSL.Connection(*args)
self._lock = threading.RLock()
for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
'renegotiate', 'bind', 'listen', 'connect', 'accept',
'setblocking', 'fileno', 'close', 'get_cipher_list',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'makefile', 'get_app_data', 'set_app_data', 'state_string',
'sock_shutdown', 'get_peer_certificate', 'want_read',
'want_write', 'set_connect_state', 'set_accept_state',
'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
exec("""def %s(self, *args):
self._lock.acquire()
try:
return self._ssl_conn.%s(*args)
finally:
self._lock.release()
""" % (f, f))
def shutdown(self, *args):
self._lock.acquire()
try:
# pyOpenSSL.socket.shutdown takes no args
return self._ssl_conn.shutdown()
finally:
self._lock.release()
class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating pyOpenSSL with CherryPy."""
def __init__(self, certificate, private_key, certificate_chain=None):
if SSL is None:
raise ImportError("You must install pyOpenSSL to use HTTPS.")
self.context = None
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
self._environ = None
def bind(self, sock):
"""Wrap and return the given socket."""
if self.context is None:
self.context = self.get_context()
conn = SSLConnection(self.context, sock)
self._environ = self.get_environ()
return conn
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
return sock, self._environ.copy()
def get_context(self):
"""Return an SSL.Context from self attributes."""
# See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
c = SSL.Context(SSL.SSLv23_METHOD)
c.use_privatekey_file(self.private_key)
if self.certificate_chain:
c.load_verify_locations(self.certificate_chain)
c.use_certificate_file(self.certificate)
return c
def get_environ(self):
"""Return WSGI environ entries to be merged into each request."""
ssl_environ = {
"HTTPS": "on",
# pyOpenSSL doesn't provide access to any of these AFAICT
## 'SSL_PROTOCOL': 'SSLv2',
## SSL_CIPHER string The cipher specification name
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
if self.certificate:
# Server certificate attributes
cert = open(self.certificate, 'rb').read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
ssl_environ.update({
'SSL_SERVER_M_VERSION': cert.get_version(),
'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
## 'SSL_SERVER_V_START': Validity of server's certificate (start time),
## 'SSL_SERVER_V_END': Validity of server's certificate (end time),
})
for prefix, dn in [("I", cert.get_issuer()),
("S", cert.get_subject())]:
# X509Name objects don't seem to have a way to get the
# complete DN string. Use str() and slice it instead,
# because str(dn) == "<X509Name object '/C=US/ST=...'>"
dnstr = str(dn)[18:-2]
wsgikey = 'SSL_SERVER_%s_DN' % prefix
ssl_environ[wsgikey] = dnstr
# The DN should be of the form: /k1=v1/k2=v2, but we must allow
# for any value to contain slashes itself (in a URL).
while dnstr:
pos = dnstr.rfind("=")
dnstr, value = dnstr[:pos], dnstr[pos + 1:]
pos = dnstr.rfind("/")
dnstr, key = dnstr[:pos], dnstr[pos + 1:]
if key and value:
wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
ssl_environ[wsgikey] = value
return ssl_environ
def makefile(self, sock, mode='r', bufsize= -1):
if SSL and isinstance(sock, SSL.ConnectionType):
timeout = sock.gettimeout()
f = SSL_fileobject(sock, mode, bufsize)
f.ssl_timeout = timeout
return f
else:
return wsgiserver.CP_fileobject(sock, mode, bufsize)
| 9,379 | Python | .py | 197 | 35.568528 | 86 | 0.588136 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,639 | ssl_builtin.py | midgetspy_Sick-Beard/cherrypy/wsgiserver/ssl_builtin.py | """A library for integrating pyOpenSSL with CherryPy.
The ssl module must be importable for SSL functionality.
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
BuiltinSSLAdapter.
ssl_adapter.certificate: the filename of the server SSL certificate.
ssl_adapter.private_key: the filename of the server's private key file.
"""
try:
import ssl
except ImportError:
ssl = None
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating Python's builtin ssl module with CherryPy."""
def __init__(self, certificate, private_key, certificate_chain=None):
if ssl is None:
raise ImportError("You must install the ssl module to use HTTPS.")
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def bind(self, sock):
"""Wrap and return the given socket."""
return sock
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
try:
s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
server_side=True, certfile=self.certificate,
keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23)
except ssl.SSLError, e:
if e.errno == ssl.SSL_ERROR_EOF:
# This is almost certainly due to the cherrypy engine
# 'pinging' the socket to assert it's connectable;
# the 'ping' isn't SSL.
return None, {}
elif e.errno == ssl.SSL_ERROR_SSL:
if e.args[1].endswith('http request'):
# The client is speaking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError
raise
return s, self.get_environ(s)
# TODO: fill this out more with mod ssl env
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
return ssl_environ
def makefile(self, sock, mode='r', bufsize= -1):
return wsgiserver.CP_fileobject(sock, mode, bufsize)
| 2,542 | Python | .py | 56 | 35.875 | 78 | 0.631879 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,640 | __init__.py | midgetspy_Sick-Beard/cherrypy/wsgiserver/__init__.py | """A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery):
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!\n']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher:
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue:
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop:
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
CRLF = '\r\n'
import os
import Queue
import re
quoted_slash = re.compile("(?i)%2F")
import rfc822
import socket
import sys
if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
try:
import cStringIO as StringIO
except ImportError:
import StringIO
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
import threading
import time
import traceback
from urllib import unquote
from urlparse import urlparse
import warnings
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return dict.fromkeys(nums).keys()
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(":", 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v))
hdict[hname] = v
return hdict
class MaxSizeExceeded(Exception):
pass
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
def read(self, size=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See http://www.cherrypy.org/ticket/421
if len(data) < 256 or data[-1:] == "\n":
return ''.join(res)
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
self._check_length()
return data
class KnownLengthRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted."""
def __init__(self, rfile, content_length):
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.remaining -= len(data)
return data
class MaxSizeExceeded(Exception):
pass
class ChunkedRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
"""
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = ''
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
line = line.strip().split(";", 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
if chunk_size <= 0:
self.closed = True
return
## if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError("Request Entity Too Large")
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
"got " + repr(crlf) + ")")
def read(self, size=None):
data = ''
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
data += self.buffer
def readline(self, size=None):
data = ''
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find('\n')
if size:
if newline_pos == -1:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
remaining = min(size - len(data), newline_pos)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
if newline_pos == -1:
data += self.buffer
else:
data += self.buffer[:newline_pos]
self.buffer = self.buffer[newline_pos:]
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
if not self.closed:
raise ValueError(
"Cannot read trailers until the request body has been read.")
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError("Request Entity Too Large")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
yield line
def close(self):
self.rfile.close()
def __iter__(self):
# Shamelessly stolen from StringIO
total = 0
line = self.readline(sizehint)
while line:
yield line
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
server: the Server object which is receiving this request.
conn: the HTTPConnection object on which this request connected.
inheaders: a dict of request headers.
outheaders: a list of header tuples to write in the response.
ready: when True, the request has been parsed and is ready to begin
generating the response. When False, signals the calling Connection
that the response should not be generated and the connection should
close.
close_connection: signals the calling Connection that the request
should close. This does not imply an error! The client and/or
server may each request that the connection be closed.
chunked_write: if True, output will be encoded with the "chunked"
transfer-coding. This value is set automatically inside
send_headers.
"""
def __init__(self, server, conn):
self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = "http"
if self.server.ssl_adapter is not None:
self.scheme = "https"
self.inheaders = {}
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = False
self.chunked_write = False
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
self._parse_request()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large")
return
def _parse_request(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
# Force self.ready = False so the connection will close.
self.ready = False
return
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
self.ready = False
return
if not request_line.endswith(CRLF):
self.simple_response(400, "HTTP requires CRLF terminators")
return
try:
method, uri, req_protocol = request_line.strip().split(" ", 2)
except ValueError:
self.simple_response(400, "Malformed Request-Line")
return
self.uri = uri
self.method = method
# uri may be an abs_path (including "http://host.domain.tld");
scheme, authority, path = self.parse_request_uri(uri)
if '#' in path:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return
if scheme:
self.scheme = scheme
qs = ''
if '?' in path:
path, qs = path.split('?', 1)
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
try:
atoms = [unquote(x) for x in quoted_slash.split(path)]
except ValueError, ex:
self.simple_response("400 Bad Request", ex.args[0])
return
path = "%2F".join(atoms)
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
rp = int(req_protocol[5]), int(req_protocol[7])
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return
self.request_protocol = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
# then all the http headers
try:
read_headers(self.rfile, self.inheaders)
except ValueError, ex:
self.simple_response("400 Bad Request", ex.args[0])
return
mrbs = self.server.max_request_body_size
if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
self.simple_response("413 Request Entity Too Large")
return
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if self.inheaders.get("Connection", "") == "close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get("Connection", "") != "Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = self.inheaders.get("Transfer-Encoding")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == "chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get("Expect", "") == "100-continue":
# Don't use simple_response here, because it emits headers
# we don't want. See http://www.cherrypy.org/ticket/951
msg = self.server.protocol + " 100 Continue\r\n\r\n"
try:
self.conn.wfile.sendall(msg)
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
self.ready = True
def parse_request_uri(self, uri):
"""Parse a Request-URI into (scheme, authority, path).
Note that Request-URI's must be one of:
Request-URI = "*" | absoluteURI | abs_path | authority
Therefore, a Request-URI which starts with a double forward-slash
cannot be a "net_path":
net_path = "//" authority [ abs_path ]
Instead, it must be interpreted as an "abs_path" with an empty first
path segment:
abs_path = "/" path_segments
path_segments = segment *( "/" segment )
segment = *pchar *( ";" param )
param = *pchar
"""
if uri == "*":
return None, None, uri
i = uri.find('://')
if i > 0 and '?' not in uri[:i]:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
scheme, remainder = uri[:i].lower(), uri[i + 3:]
authority, path = remainder.split("/", 1)
return scheme, authority, path
if uri.startswith('/'):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get("Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.sendall("0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = [self.server.protocol + " " +
status + CRLF,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n"]
if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
# Request Entity Too Large
self.close_connection = True
buf.append("Connection: close\r\n")
buf.append(CRLF)
if msg:
if isinstance(msg, unicode):
msg = msg.encode("ISO-8859-1")
buf.append(msg)
try:
self.conn.wfile.sendall("".join(buf))
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
self.conn.wfile.sendall("".join(buf))
else:
self.conn.wfile.sendall(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if "date" not in hkeys:
self.outheaders.append(("Date", rfc822.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.server.server_name))
buf = [self.server.protocol + " " + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + ": " + v + CRLF)
buf.append(CRLF)
self.conn.wfile.sendall("".join(buf))
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
pass
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
pass
if not _fileobject_uses_str_type:
class CP_fileobject(socket._fileobject):
"""Faux file object attached to a socket object."""
def sendall(self, data):
"""Sendall for non-blocking sockets."""
while data:
try:
bytes_sent = self.send(data)
data = data[bytes_sent:]
except socket.error, e:
if e.args[0] not in socket_errors_nonblocking:
raise
def send(self, data):
return self._sock.send(data)
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self.sendall(buffer)
def recv(self, size):
while True:
try:
return self._sock.recv(size)
except socket.error, e:
if (e.args[0] not in socket_errors_nonblocking
and e.args[0] not in socket_error_eintr):
raise
def read(self, size= -1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(rbufsize)
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
data = self.recv(left)
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size= -1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
else:
class CP_fileobject(socket._fileobject):
"""Faux file object attached to a socket object."""
def sendall(self, data):
"""Sendall for non-blocking sockets."""
while data:
try:
bytes_sent = self.send(data)
data = data[bytes_sent:]
except socket.error, e:
if e.args[0] not in socket_errors_nonblocking:
raise
def send(self, data):
return self._sock.send(data)
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self.sendall(buffer)
def recv(self, size):
while True:
try:
return self._sock.recv(size)
except socket.error, e:
if (e.args[0] not in socket_errors_nonblocking
and e.args[0] not in socket_error_eintr):
raise
def read(self, size= -1):
if size < 0:
# Read until EOF
buffers = [self._rbuf]
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
data = self._rbuf
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size= -1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
while data != "\n":
data = self.recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
class HTTPConnection(object):
"""An HTTP connection (active socket).
server: the Server object which received this connection.
socket: the raw socket object (usually TCP) for this connection.
makefile: a fileobject class for reading from the socket.
"""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = -1
RequestHandlerClass = HTTPRequest
def __init__(self, server, sock, makefile=CP_fileobject):
self.server = server
self.socket = sock
self.rfile = makefile(sock, "rb", self.rbufsize)
self.wfile = makefile(sock, "wb", -1)
def communicate(self):
"""Read each request and respond appropriately."""
request_seen = False
try:
while True:
# (re)set req to None so that if something goes wrong in
# the RequestHandlerClass constructor, the error doesn't
# get written to the previous request.
req = None
req = self.RequestHandlerClass(self.server, self)
# This order of operations should guarantee correct pipelining.
req.parse_request()
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error, e:
errnum = e.args[0]
if errnum == 'timed out':
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See http://www.cherrypy.org/ticket/853
if (not request_seen) or (req and req.started_request):
# Don't bother writing the 408 if the response
# has already started being written.
if req and not req.sent_headers:
try:
req.simple_response("408 Request Timeout")
except FatalSSLAlert:
# Close the connection.
return
elif errnum not in socket_errors_to_ignore:
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error",
format_exc())
except FatalSSLAlert:
# Close the connection.
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
# Close the connection.
return
except NoSSLError:
if req and not req.sent_headers:
# Unwrap our wfile
self.wfile = CP_fileobject(self.socket._sock, "wb", -1)
req.simple_response("400 Bad Request",
"The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
self.linger = True
except Exception:
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error", format_exc())
except FatalSSLAlert:
# Close the connection.
return
linger = False
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
# Python's socket module does NOT call close on the kernel socket
# when you call socket.close(). We do so manually here because we
# want this server to send a FIN TCP segment immediately. Note this
# must be called *before* calling socket.close(), because the latter
# drops its reference to the kernel socket.
if hasattr(self.socket, '_sock'):
self.socket._sock.close()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
"""Thread which continuously polls a Queue for Connection objects.
server: the HTTP Server which spawned this thread, and which owns the
Queue and is placing active connections into it.
ready: a simple flag for the calling server to know when this thread
has begun polling the Queue.
Due to the timing issues of polling a Queue, a WorkerThread does not
check its own 'ready' flag after it has started. To stop the thread,
it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
(one for each running WorkerThread).
"""
conn = None
def __init__(self, server):
self.ready = False
self.server = server
threading.Thread.__init__(self)
def run(self):
try:
self.ready = True
while True:
conn = self.server.requests.get()
if conn is _SHUTDOWNREQUEST:
return
self.conn = conn
try:
conn.communicate()
finally:
conn.close()
self.conn = None
except (KeyboardInterrupt, SystemExit), exc:
self.server.interrupt = exc
class ThreadPool(object):
"""A Request Queue for the CherryPyWSGIServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(self, server, min=10, max= -1):
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = Queue.Queue()
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName("CP Server " + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
def _get_idle(self):
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
idle = property(_get_idle, doc=_get_idle.__doc__)
def put(self, obj):
self._queue.put(obj)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
for i in range(amount):
if self.max > 0 and len(self._threads) >= self.max:
break
worker = WorkerThread(self.server)
worker.setName("CP Server " + worker.getName())
self._threads.append(worker)
worker.start()
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
if amount > 0:
for i in range(min(amount, len(self._threads) - self.min)):
# Put a number of shutdown requests on the queue equal
# to 'amount'. Once each of those is processed by a worker,
# that worker will terminate and be culled from our list
# in self.put.
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (AssertionError,
# Ignore repeated Ctrl-C.
# See http://www.cherrypy.org/ticket/691.
KeyboardInterrupt), exc1:
pass
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
except ImportError:
def prevent_socket_inheritance(sock):
"""Dummy function, since neither fcntl nor ctypes are available."""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class SSLAdapter(object):
def __init__(self, certificate, private_key, certificate_chain=None):
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def wrap(self, sock):
raise NotImplemented
def makefile(self, sock, mode='r', bufsize= -1):
raise NotImplemented
class HTTPServer(object):
"""An HTTP server.
bind_addr: The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.
gateway: a Gateway instance.
minthreads: the minimum number of worker threads to create (default 10).
maxthreads: the maximum number of worker threads to create (default -1 = no limit).
server_name: defaults to socket.gethostname().
request_queue_size: the 'backlog' argument to socket.listen();
specifies the maximum number of queued connections (default 5).
timeout: the timeout in seconds for accepted connections (default 10).
nodelay: if True (the default since 3.1), sets the TCP_NODELAY socket
option.
protocol: the version string to write in the Status-Line of all
HTTP responses. For example, "HTTP/1.1" (the default). This
also limits the supported features used in the response.
SSL/HTTPS
---------
You must have an ssl library installed and set self.ssl_adapter to an
instance of SSLAdapter (or a subclass) which provides the methods:
wrap(sock) -> wrapped socket, ssl environ dict
makefile(sock, mode='r', bufsize=-1) -> socket file object
"""
protocol = "HTTP/1.1"
_bind_addr = "127.0.0.1"
version = "CherryPy/3.2.0rc1"
response_header = None
ready = False
_interrupt = None
max_request_header_size = 0
max_request_body_size = 0
nodelay = True
ConnectionClass = HTTPConnection
ssl_adapter = None
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads= -1,
server_name=None):
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
def __str__(self):
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
self.bind_addr)
def _get_bind_addr(self):
return self._bind_addr
def _set_bind_addr(self, value):
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError("Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
"to listen on all active interfaces.")
self._bind_addr = value
bind_addr = property(_get_bind_addr, _set_bind_addr,
doc="""The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.""")
def start(self):
"""Run the server forever."""
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrpy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
# SSL backward compatibility
if (self.ssl_adapter is None and
getattr(self, 'ssl_certificate', None) and
getattr(self, 'ssl_private_key', None)):
warnings.warn(
"SSL attributes are deprecated in CherryPy 3.2, and will "
"be removed in CherryPy 3.3. Use an ssl_adapter attribute "
"instead.",
DeprecationWarning
)
try:
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
except ImportError:
pass
else:
self.ssl_adapter = pyOpenSSLAdapter(
self.ssl_certificate, self.ssl_private_key,
getattr(self, 'ssl_certificate_chain', None))
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try: os.unlink(self.bind_addr)
except: pass
# So everyone can access the socket...
try: os.chmod(self.bind_addr, 0777)
except: pass
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in self.bind_addr[0]:
info = [(socket.AF_INET6, socket.SOCK_STREAM,
0, "", self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM,
0, "", self.bind_addr)]
self.socket = None
msg = "No socket could be created"
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
except socket.error, msg:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
while self.ready:
self.tick()
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay and not isinstance(self.bind_addr, str):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_adapter is not None:
self.socket = self.ssl_adapter.bind(self.socket)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See http://www.cherrypy.org/ticket/871.
if (family == socket.AF_INET6
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
def tick(self):
"""Accept a new connection and put it on the Queue."""
try:
s, addr = self.socket.accept()
if not self.ready:
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
if self.response_header is None:
self.response_header = "%s Server" % self.version
makefile = CP_fileobject
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.ssl_adapter is not None:
try:
s, ssl_env = self.ssl_adapter.wrap(s)
except NoSSLError:
msg = ("The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
buf = ["%s 400 Bad Request\r\n" % self.protocol,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n\r\n",
msg]
wfile = CP_fileobject(s, "wb", -1)
try:
wfile.sendall("".join(buf))
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
return
if not s:
return
makefile = self.ssl_adapter.makefile
conn = self.ConnectionClass(self, s, makefile)
if not isinstance(self.bind_addr, basestring):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
self.requests.put(conn)
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error, x:
if x.args[0] in socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See http://www.cherrypy.org/ticket/707.
return
if x.args[0] in socket_errors_nonblocking:
# Just try again. See http://www.cherrypy.org/ticket/479.
return
if x.args[0] in socket_errors_to_ignore:
# Our socket was closed.
# See http://www.cherrypy.org/ticket/686.
return
raise
def _get_interrupt(self):
return self._interrupt
def _set_interrupt(self, interrupt):
self._interrupt = True
self.stop()
self._interrupt = interrupt
interrupt = property(_get_interrupt, _set_interrupt,
doc="Set this to an Exception instance to "
"interrupt the server.")
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
sock = getattr(self, "socket", None)
if sock:
if not isinstance(self.bind_addr, basestring):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
# Changed to use error code and not message
# See http://www.cherrypy.org/ticket/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, "close"):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
class Gateway(object):
def __init__(self, req):
self.req = req
def respond(self):
raise NotImplemented
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='pyopenssl'):
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
# -------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
wsgi_version = (1, 1)
def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max= -1, request_queue_size=5, timeout=10, shutdown_timeout=5):
self.requests = ThreadPool(self, min=numthreads or 1, max=max)
self.wsgi_app = wsgi_app
self.gateway = wsgi_gateways[self.wsgi_version]
self.bind_addr = bind_addr
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
def _get_numthreads(self):
return self.requests.min
def _set_numthreads(self, value):
self.requests.min = value
numthreads = property(_get_numthreads, _set_numthreads)
class WSGIGateway(Gateway):
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
raise NotImplemented
def respond(self):
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicode):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info=None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.req.status = status
for k, v in headers:
if not isinstance(k, str):
raise TypeError("WSGI response header key %r is not a byte string." % k)
if not isinstance(v, str):
raise TypeError("WSGI response header value %r is not a byte string." % v)
self.req.outheaders.extend(headers)
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
class WSGIGateway_10(WSGIGateway):
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': req.path,
'QUERY_STRING': req.qs,
'REMOTE_ADDR': req.conn.remote_addr or '',
'REMOTE_PORT': str(req.conn.remote_port or ''),
'REQUEST_METHOD': req.method,
'REQUEST_URI': req.uri,
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': req.request_protocol,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': req.scheme,
'wsgi.version': (1, 0),
}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env["SERVER_PORT"] = ""
else:
env["SERVER_PORT"] = str(req.server.bind_addr[1])
# CONTENT_TYPE/CONTENT_LENGTH
for k, v in req.inheaders.iteritems():
env["HTTP_" + k.upper().replace("-", "_")] = v
ct = env.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
env["CONTENT_TYPE"] = ct
cl = env.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
env["CONTENT_LENGTH"] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
class WSGIGateway_11(WSGIGateway_10):
def get_environ(self):
env = WSGIGateway_10.get_environ(self)
env['wsgi.version'] = (1, 1)
return env
class WSGIGateway_u0(WSGIGateway_10):
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
env[u'wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault(u'wsgi.url_encoding', u'utf-8')
try:
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env[u'wsgi.url_encoding'] = u'ISO-8859-1'
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
for k, v in sorted(env.items()):
if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
env[k] = v.decode('ISO-8859-1')
return env
wsgi_gateways = {
(1, 0): WSGIGateway_10,
(1, 1): WSGIGateway_11,
('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
"""A WSGI dispatcher for dispatch based on the PATH_INFO.
apps: a dict or list of (path_prefix, app) pairs.
"""
def __init__(self, apps):
try:
apps = apps.items()
except AttributeError:
pass
# Sort the apps by len(path), descending
apps.sort(cmp=lambda x, y: cmp(len(x[0]), len(y[0])))
apps.reverse()
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip("/"), a) for p, a in apps]
def __call__(self, environ, start_response):
path = environ["PATH_INFO"] or "/"
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith(p + "/") or path == p:
environ = environ.copy()
environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
environ["PATH_INFO"] = path[len(p):]
return app(environ, start_response)
start_response('404 Not Found', [('Content-Type', 'text/plain'),
('Content-Length', '0')])
return ['']
| 80,807 | Python | .py | 1,771 | 30.530774 | 93 | 0.525916 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,641 | tv_tests.py | midgetspy_Sick-Beard/tests/tv_tests.py | # coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import unittest
import test_lib as test
import sickbeard
from sickbeard.tv import TVEpisode, TVShow
class TVShowTests(test.SickbeardTestDBCase):
def setUp(self):
super(TVShowTests, self).setUp()
sickbeard.showList = []
def test_init_tvdbid(self):
show = TVShow(0001, "en")
self.assertEqual(show.tvdbid, 0001)
def test_change_tvdbid(self):
show = TVShow(0001, "en")
show.name = "show name"
show.tvrname = "show name"
show.network = "cbs"
show.genre = "crime"
show.runtime = 40
show.status = "5"
show.airs = "monday"
show.startyear = 1987
show.saveToDB()
show.loadFromDB(skipNFO=True)
show.tvdbid = 0002
show.saveToDB()
show.loadFromDB(skipNFO=True)
self.assertEqual(show.tvdbid, 0002)
def test_set_name(self):
show = TVShow(0001, "en")
show.name = "newName"
show.saveToDB()
show.loadFromDB(skipNFO=True)
self.assertEqual(show.name, "newName")
class TVEpisodeTests(test.SickbeardTestDBCase):
def setUp(self):
super(TVEpisodeTests, self).setUp()
sickbeard.showList = []
def test_init_empty_db(self):
show = TVShow(0001, "en")
ep = TVEpisode(show, 1, 1)
ep.name = "asdasdasdajkaj"
ep.saveToDB()
ep.loadFromDB(1, 1)
self.assertEqual(ep.name, "asdasdasdajkaj")
class TVTests(test.SickbeardTestDBCase):
def setUp(self):
super(TVTests, self).setUp()
sickbeard.showList = []
def test_getEpisode(self):
show = TVShow(0001, "en")
show.name = "show name"
show.tvrname = "show name"
show.network = "cbs"
show.genre = "crime"
show.runtime = 40
show.status = "5"
show.airs = "monday"
show.startyear = 1987
show.saveToDB()
sickbeard.showList = [show]
#TODO: implement
if __name__ == '__main__':
print "=================="
print "STARTING - TV TESTS"
print "=================="
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(TVShowTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(TVEpisodeTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(TVTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 3,474 | Python | .py | 92 | 31.663043 | 82 | 0.615591 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,642 | config_tests.py | midgetspy_Sick-Beard/tests/config_tests.py | import unittest
import sys
import os.path
sys.path.append(os.path.abspath('..'))
from sickbeard import config
class QualityTests(unittest.TestCase):
def test_clean_url(self):
self.assertEqual(config.clean_url("https://subdomain.domain.tld/endpoint"), "https://subdomain.domain.tld/endpoint")
self.assertEqual(config.clean_url("google.com/xml.rpc"), "http://google.com/xml.rpc")
self.assertEqual(config.clean_url("google.com"), "http://google.com/")
self.assertEqual(config.clean_url("http://www.example.com/folder/"), "http://www.example.com/folder/")
self.assertEqual(config.clean_url("scgi:///home/user/.config/path/socket"), "scgi:///home/user/.config/path/socket")
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(QualityTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 889 | Python | .py | 15 | 53 | 125 | 0.706497 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,643 | db_tests.py | midgetspy_Sick-Beard/tests/db_tests.py | # coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import unittest
import test_lib as test
class DBBasicTests(test.SickbeardTestDBCase):
def setUp(self):
super(DBBasicTests, self).setUp()
self.db = test.db.DBConnection()
def test_select(self):
self.db.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [0000])
if __name__ == '__main__':
print "=================="
print "STARTING - DB TESTS"
print "=================="
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(DBBasicTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 1,415 | Python | .py | 33 | 39.969697 | 95 | 0.675636 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,644 | snatch_tests.py | midgetspy_Sick-Beard/tests/snatch_tests.py | # coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import random
import unittest
import test_lib as test
import sys, os.path
import sickbeard.search as search
import sickbeard
from sickbeard.tv import TVEpisode, TVShow
import sickbeard.common as c
tests = {"Dexter": {"a": 1, "q": c.HD, "s": 5, "e": [7], "b": 'Dexter.S05E07.720p.BluRay.X264-REWARD', "i": ['Dexter.S05E07.720p.BluRay.X264-REWARD', 'Dexter.S05E07.720p.X264-REWARD']},
"House": {"a": 1, "q": c.HD, "s": 4, "e": [5], "b": 'House.4x5.720p.BluRay.X264-REWARD', "i": ['Dexter.S05E04.720p.X264-REWARD', 'House.4x5.720p.BluRay.X264-REWARD']},
"Hells Kitchen": {"a": 1, "q": c.SD, "s": 6, "e": [14, 15], "b": 'Hells.Kitchen.s6e14e15.HDTV.XviD-ASAP', "i": ['Hells.Kitchen.S06E14.HDTV.XviD-ASAP', 'Hells.Kitchen.6x14.HDTV.XviD-ASAP', 'Hells.Kitchen.s6e14e15.HDTV.XviD-ASAP']}
}
def _create_fake_xml(items):
xml = '<?xml version="1.0" encoding="UTF-8" ?><rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:newznab="http://www.newznab.com/DTD/2010/feeds/attributes/" encoding="utf-8"><channel>'
for item in items:
xml += '<item><title>' + item + '</title>\n'
xml += '<link>http://fantasy.com/' + item + '</link></item>'
xml += '</channel></rss>'
return xml
searchItems = []
class SearchTest(test.SickbeardTestDBCase):
def _fake_getURL(self, url, headers=None):
global searchItems
return _create_fake_xml(searchItems)
def _fake_isActive(self):
return True
def __init__(self, something):
for provider in sickbeard.providers.sortedProviderList():
provider.getURL = self._fake_getURL
#provider.isActive = self._fake_isActive
super(SearchTest, self).__init__(something)
def test_generator(tvdbdid, show_name, curData, forceSearch):
def test(self):
global searchItems
searchItems = curData["i"]
show = TVShow(tvdbdid)
show.name = show_name
show.quality = curData["q"]
show.saveToDB()
sickbeard.showList.append(show)
for epNumber in curData["e"]:
episode = TVEpisode(show, curData["s"], epNumber)
episode.status = c.WANTED
episode.saveToDB()
bestResult = search.findEpisode(episode, forceSearch)
if not bestResult:
self.assertEqual(curData["b"], bestResult)
self.assertEqual(curData["b"], bestResult.name) #first is expected, second is choosen one
return test
if __name__ == '__main__':
print "=================="
print "STARTING - Snatch TESTS"
print "=================="
print "######################################################################"
# create the test methods
tvdbdid = 1
for forceSearch in (True, False):
for name, curData in tests.items():
if not curData["a"]:
continue
fname = name.replace(' ', '_')
if forceSearch:
test_name = 'test_manual_%s_%s' % (fname, tvdbdid)
else:
test_name = 'test_%s_%s' % (fname, tvdbdid)
test = test_generator(tvdbdid, name, curData, forceSearch)
setattr(SearchTest, test_name, test)
tvdbdid += 1
suite = unittest.TestLoader().loadTestsFromTestCase(SearchTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| 4,114 | Python | .py | 88 | 40.340909 | 238 | 0.634274 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,645 | scene_helpers_tests.py | midgetspy_Sick-Beard/tests/scene_helpers_tests.py | import unittest
import test_lib as test
import sys, os.path
sys.path.append(os.path.abspath('..'))
from sickbeard import show_name_helpers, scene_exceptions, common, name_cache
import sickbeard
from sickbeard import db
from sickbeard.databases import cache_db
from sickbeard.tv import TVShow as Show
class SceneTests(test.SickbeardTestDBCase):
def _test_sceneToNormalShowNames(self, name, expected):
result = show_name_helpers.sceneToNormalShowNames(name)
self.assertTrue(len(set(expected).intersection(set(result))) == len(expected))
dot_result = show_name_helpers.sceneToNormalShowNames(name.replace(' ', '.'))
dot_expected = [x.replace(' ', '.') for x in expected]
self.assertTrue(len(set(dot_expected).intersection(set(dot_result))) == len(dot_expected))
def _test_allPossibleShowNames(self, name, tvdbid=0, tvrname=None, expected=[]):
s = Show(tvdbid)
s.name = name
s.tvrname = tvrname
result = show_name_helpers.allPossibleShowNames(s)
self.assertTrue(len(set(expected).intersection(set(result))) == len(expected))
def _test_filterBadReleases(self, name, expected):
result = show_name_helpers.filterBadReleases(name)
self.assertEqual(result, expected)
def _test_isGoodName(self, name, show):
self.assertTrue(show_name_helpers.isGoodResult(name, show))
def test_isGoodName(self):
listOfcases = [('Show.Name.S01E02.Test-Test', 'Show/Name'),
('Show.Name.S01E02.Test-Test', 'Show. Name'),
('Show.Name.S01E02.Test-Test', 'Show- Name'),
('Show.Name.Part.IV.Test-Test', 'Show Name'),
('Show.Name.S01.Test-Test', 'Show Name'),
('Show.Name.E02.Test-Test', 'Show: Name'),
('Show Name Season 2 Test', 'Show: Name'),
]
for testCase in listOfcases:
scene_name, show_name = testCase
s = Show(0)
s.name = show_name
self._test_isGoodName(scene_name, s)
def test_sceneToNormalShowNames(self):
self._test_sceneToNormalShowNames('Show Name 2010', ['Show Name 2010', 'Show Name (2010)'])
self._test_sceneToNormalShowNames('Show Name US', ['Show Name US', 'Show Name (US)'])
self._test_sceneToNormalShowNames('Show Name AU', ['Show Name AU', 'Show Name (AU)'])
self._test_sceneToNormalShowNames('Show Name CA', ['Show Name CA', 'Show Name (CA)'])
self._test_sceneToNormalShowNames('Show and Name', ['Show and Name', 'Show & Name'])
self._test_sceneToNormalShowNames('Show and Name 2010', ['Show and Name 2010', 'Show & Name 2010', 'Show and Name (2010)', 'Show & Name (2010)'])
self._test_sceneToNormalShowNames('show name us', ['show name us', 'show name (us)'])
self._test_sceneToNormalShowNames('Show And Name', ['Show And Name', 'Show & Name'])
# failure cases
self._test_sceneToNormalShowNames('Show Name 90210', ['Show Name 90210'])
self._test_sceneToNormalShowNames('Show Name YA', ['Show Name YA'])
def test_allPossibleShowNames(self):
#common.sceneExceptions[-1] = ['Exception Test']
myDB = db.DBConnection("cache.db")
myDB.action("INSERT INTO scene_exceptions (tvdb_id, show_name) VALUES (?,?)", [-1, 'Exception Test'])
common.countryList['Full Country Name'] = 'FCN'
self._test_allPossibleShowNames('Show Name', expected=['Show Name'])
self._test_allPossibleShowNames('Show Name', -1, expected=['Show Name', 'Exception Test'])
self._test_allPossibleShowNames('Show Name', tvrname='TVRage Name', expected=['Show Name', 'TVRage Name'])
self._test_allPossibleShowNames('Show Name FCN', expected=['Show Name FCN', 'Show Name (Full Country Name)'])
self._test_allPossibleShowNames('Show Name (FCN)', expected=['Show Name (FCN)', 'Show Name (Full Country Name)'])
self._test_allPossibleShowNames('Show Name Full Country Name', expected=['Show Name Full Country Name', 'Show Name (FCN)'])
self._test_allPossibleShowNames('Show Name (Full Country Name)', expected=['Show Name (Full Country Name)', 'Show Name (FCN)'])
self._test_allPossibleShowNames('Show Name (FCN)', -1, 'TVRage Name', expected=['Show Name (FCN)', 'Show Name (Full Country Name)', 'Exception Test', 'TVRage Name'])
def test_filterBadReleases(self):
self._test_filterBadReleases('Show.S02.German.Stuff-Grp', False)
self._test_filterBadReleases('Show.S02.Some.Stuff-Core2HD', False)
self._test_filterBadReleases('Show.S02.Some.German.Stuff-Grp', False)
self._test_filterBadReleases('German.Show.S02.Some.Stuff-Grp', True)
self._test_filterBadReleases('Show.S02.This.Is.German', False)
class SceneExceptionTestCase(test.SickbeardTestDBCase):
def setUp(self):
super(SceneExceptionTestCase, self).setUp()
scene_exceptions.retrieve_exceptions()
def test_sceneExceptionsEmpty(self):
self.assertEqual(scene_exceptions.get_scene_exceptions(0), [])
def test_sceneExceptionsBabylon5(self):
self.assertEqual(sorted(scene_exceptions.get_scene_exceptions(70726)), ['Babylon 5', 'Babylon5'])
def test_sceneExceptionByName(self):
self.assertEqual(scene_exceptions.get_scene_exception_by_name('Babylon5'), 70726)
self.assertEqual(scene_exceptions.get_scene_exception_by_name('babylon 5'), 70726)
self.assertEqual(scene_exceptions.get_scene_exception_by_name('Carlos 2010'), 164451)
def test_sceneExceptionByNameEmpty(self):
self.assertEqual(scene_exceptions.get_scene_exception_by_name('nothing useful'), None)
def test_sceneExceptionsResetNameCache(self):
# clear the exceptions
myDB = db.DBConnection("cache.db")
myDB.action("DELETE FROM scene_exceptions")
# put something in the cache
name_cache.addNameToCache('Cached Name', 0)
# updating should clear the cache so our previously "Cached Name" won't be in there
scene_exceptions.retrieve_exceptions()
self.assertEqual(name_cache.retrieveNameFromCache('Cached Name'), None)
# put something in the cache
name_cache.addNameToCache('Cached Name', 0)
# updating should not clear the cache this time since our exceptions didn't change
scene_exceptions.retrieve_exceptions()
self.assertEqual(name_cache.retrieveNameFromCache('Cached Name'), 0)
if __name__ == '__main__':
if len(sys.argv) > 1:
suite = unittest.TestLoader().loadTestsFromName('scene_helpers_tests.SceneExceptionTestCase.test_' + sys.argv[1])
unittest.TextTestRunner(verbosity=2).run(suite)
else:
suite = unittest.TestLoader().loadTestsFromTestCase(SceneTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(SceneExceptionTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| 7,197 | Python | .py | 109 | 55.733945 | 174 | 0.667284 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,646 | test_lib.py | midgetspy_Sick-Beard/tests/test_lib.py | # coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import unittest
import sqlite3
import sys
import os.path
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../lib'))
import sickbeard
import shutil
from sickbeard import encodingKludge as ek, providers, tvcache
from sickbeard import db
from sickbeard.databases import mainDB
from sickbeard.databases import cache_db
#=================
# test globals
#=================
TESTDIR = os.path.abspath('.')
TESTDBNAME = "sickbeard.db"
TESTCACHEDBNAME = "cache.db"
SHOWNAME = u"show name"
SEASON = 4
EPISODE = 2
FILENAME = u"show name - s0" + str(SEASON) + "e0" + str(EPISODE) + ".mkv"
FILEDIR = os.path.join(TESTDIR, SHOWNAME)
FILEPATH = os.path.join(FILEDIR, FILENAME)
SHOWDIR = os.path.join(TESTDIR, SHOWNAME + " final")
#sickbeard.logger.sb_log_instance = sickbeard.logger.SBRotatingLogHandler(os.path.join(TESTDIR, 'sickbeard.log'), sickbeard.logger.NUM_LOGS, sickbeard.logger.LOG_SIZE)
sickbeard.logger.SBRotatingLogHandler.log_file = os.path.join(os.path.join(TESTDIR, 'Logs'), 'test_sickbeard.log')
#=================
# prepare env functions
#=================
def createTestLogFolder():
if not os.path.isdir(sickbeard.LOG_DIR):
os.mkdir(sickbeard.LOG_DIR)
# call env functions at appropriate time during sickbeard var setup
#=================
# sickbeard globals
#=================
sickbeard.SYS_ENCODING = 'UTF-8'
sickbeard.showList = []
sickbeard.QUALITY_DEFAULT = 4 # hdtv
sickbeard.FLATTEN_FOLDERS_DEFAULT = 0
sickbeard.NAMING_PATTERN = ''
sickbeard.NAMING_ABD_PATTERN = ''
sickbeard.NAMING_MULTI_EP = 1
sickbeard.PROVIDER_ORDER = ["sick_beard_index"]
sickbeard.newznabProviderList = providers.getNewznabProviderList("Sick Beard Index|http://lolo.sickbeard.com/|0|5030,5040|0!!!NZBs.org|http://nzbs.org/||5030,5040,5070,5090|0!!!Usenet-Crawler|http://www.usenet-crawler.com/||5030,5040|0")
sickbeard.providerList = providers.makeProviderList()
sickbeard.PROG_DIR = os.path.abspath('..')
sickbeard.DATA_DIR = sickbeard.PROG_DIR
sickbeard.LOG_DIR = os.path.join(TESTDIR, 'Logs')
createTestLogFolder()
sickbeard.logger.sb_log_instance.initLogging(False)
#=================
# dummy functions
#=================
def _dummy_saveConfig():
return True
# this overrides the sickbeard save_config which gets called during a db upgrade
# this might be considered a hack
mainDB.sickbeard.save_config = _dummy_saveConfig
# the real one tries to contact tvdb just stop it from getting more info on the ep
def _fake_specifyEP(self, season, episode):
pass
sickbeard.tv.TVEpisode.specifyEpisode = _fake_specifyEP
#=================
# test classes
#=================
class SickbeardTestDBCase(unittest.TestCase):
def setUp(self):
sickbeard.showList = []
setUp_test_db()
setUp_test_episode_file()
setUp_test_show_dir()
def tearDown(self):
sickbeard.showList = []
tearDown_test_db()
tearDown_test_episode_file()
tearDown_test_show_dir()
class TestDBConnection(db.DBConnection, object):
def __init__(self, dbFileName=TESTDBNAME, row_type=None):
dbFileName = os.path.join(TESTDIR, dbFileName)
super(TestDBConnection, self).__init__(dbFileName, row_type)
class TestCacheDBConnection(TestDBConnection, object):
def __init__(self, providerName):
db.DBConnection.__init__(self, os.path.join(TESTDIR, TESTCACHEDBNAME))
# Create the table if it's not already there
try:
sql = "CREATE TABLE " + providerName + " (name TEXT, season NUMERIC, episodes TEXT, tvrid NUMERIC, tvdbid NUMERIC, url TEXT, time NUMERIC, quality TEXT);"
self.connection.execute(sql)
self.connection.commit()
except sqlite3.OperationalError, e:
if str(e) != "table " + providerName + " already exists":
raise
# Create the table if it's not already there
try:
sql = "CREATE TABLE lastUpdate (provider TEXT, time NUMERIC);"
self.connection.execute(sql)
self.connection.commit()
except sqlite3.OperationalError, e:
if str(e) != "table lastUpdate already exists":
raise
# this will override the normal db connection
sickbeard.db.DBConnection = TestDBConnection
sickbeard.tvcache.CacheDBConnection = TestCacheDBConnection
#=================
# test functions
#=================
def setUp_test_db():
"""upgrades the db to the latest version
"""
# upgrading the db
db.upgradeDatabase(db.DBConnection(), mainDB.InitialSchema)
# fix up any db problems
db.sanityCheckDatabase(db.DBConnection(), mainDB.MainSanityCheck)
#and for cache.b too
db.upgradeDatabase(db.DBConnection("cache.db"), cache_db.InitialSchema)
def tearDown_test_db():
"""Deletes the test db
although this seams not to work on my system it leaves me with an zero kb file
"""
# uncomment next line so leave the db intact between test and at the end
#return False
if os.path.exists(os.path.join(TESTDIR, TESTDBNAME)):
os.remove(os.path.join(TESTDIR, TESTDBNAME))
if os.path.exists(os.path.join(TESTDIR, TESTCACHEDBNAME)):
os.remove(os.path.join(TESTDIR, TESTCACHEDBNAME))
def setUp_test_episode_file():
if not os.path.exists(FILEDIR):
os.makedirs(FILEDIR)
try:
with open(FILEPATH, 'w') as f:
f.write("foo bar")
except EnvironmentError:
print "Unable to set up test episode"
raise
def tearDown_test_episode_file():
shutil.rmtree(FILEDIR)
def setUp_test_show_dir():
if not os.path.exists(SHOWDIR):
os.makedirs(SHOWDIR)
def tearDown_test_show_dir():
shutil.rmtree(SHOWDIR)
tearDown_test_db()
if __name__ == '__main__':
print "=================="
print "Dont call this directly"
print "=================="
print "you might want to call"
dirList = os.listdir(TESTDIR)
for fname in dirList:
if (fname.find("_test") > 0) and (fname.find("pyc") < 0):
print "- " + fname
print "=================="
print "or just call all_tests.py"
| 6,916 | Python | .py | 173 | 35.728324 | 237 | 0.691583 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,647 | all_tests.py | midgetspy_Sick-Beard/tests/all_tests.py | #!/usr/bin/env python
# coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
if __name__ == "__main__":
import glob
import unittest
test_file_strings = [ x for x in glob.glob('*_tests.py') if not x in __file__]
module_strings = [file_string[0:len(file_string) - 3] for file_string in test_file_strings]
suites = [unittest.defaultTestLoader.loadTestsFromName(file_string) for file_string in module_strings]
testSuite = unittest.TestSuite(suites)
print "=================="
print "STARTING - ALL TESTS"
print "=================="
print "this will include"
for includedfiles in test_file_strings:
print "- " + includedfiles
text_runner = unittest.TextTestRunner().run(testSuite)
| 1,452 | Python | .py | 33 | 41.121212 | 106 | 0.713376 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,648 | name_parser_tests.py | midgetspy_Sick-Beard/tests/name_parser_tests.py | import datetime
import unittest
import sys, os.path
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../lib'))
from sickbeard.name_parser import parser
import sickbeard
sickbeard.SYS_ENCODING = 'UTF-8'
DEBUG = VERBOSE = False
simple_test_cases = {
'standard': {
'Mr.Show.Name.S01E02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Mr Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name - S01E02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show.1.0.Name.S01.E03.My.Ep.Name-Group': parser.ParseResult(None, 'Show 1.0 Name', 1, [3], 'My.Ep.Name', 'Group'),
'Show.Name.S01E02E03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2,3], 'Source.Quality.Etc', 'Group'),
'Mr. Show Name - S01E02-03 - My Ep Name': parser.ParseResult(None, 'Mr. Show Name', 1, [2,3], 'My Ep Name'),
'Show.Name.S01.E02.E03': parser.ParseResult(None, 'Show Name', 1, [2,3]),
'Show.Name-0.2010.S01E02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name-0 2010', 1, [2], 'Source.Quality.Etc', 'Group'),
'S01E02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show Name - S06E01 - 2009-12-20 - Ep Name': parser.ParseResult(None, 'Show Name', 6, [1], '2009-12-20 - Ep Name'),
'Show Name - S06E01 - -30-': parser.ParseResult(None, 'Show Name', 6, [1], '30-' ),
'Show-Name-S06E01-720p': parser.ParseResult(None, 'Show-Name', 6, [1], '720p' ),
'Show-Name-S06E01-1080i': parser.ParseResult(None, 'Show-Name', 6, [1], '1080i' ),
'Show.Name.S06E01.Other.WEB-DL': parser.ParseResult(None, 'Show Name', 6, [1], 'Other.WEB-DL' ),
'Show.Name.S06E01 Some-Stuff Here': parser.ParseResult(None, 'Show Name', 6, [1], 'Some-Stuff Here' ),
},
'fov': {
'Show_Name.1x02.Source_Quality_Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2], 'Source_Quality_Etc', 'Group'),
'Show Name 1x02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name 1x02 x264 Test': parser.ParseResult(None, 'Show Name', 1, [2], 'x264 Test'),
'Show Name - 1x02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show_Name.1x02x03x04.Source_Quality_Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2,3,4], 'Source_Quality_Etc', 'Group'),
'Show Name - 1x02-03-04 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2,3,4], 'My Ep Name'),
'1x02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show-Name-1x02-720p': parser.ParseResult(None, 'Show-Name', 1, [2], '720p'),
'Show-Name-1x02-1080i': parser.ParseResult(None, 'Show-Name', 1, [2], '1080i'),
'Show Name [05x12] Ep Name': parser.ParseResult(None, 'Show Name', 5, [12], 'Ep Name'),
'Show.Name.1x02.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2], 'WEB-DL'),
},
'standard_repeat': {
'Show.Name.S01E02.S01E03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2,3], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02.S01E03': parser.ParseResult(None, 'Show Name', 1, [2,3]),
'Show Name - S01E02 - S01E03 - S01E04 - Ep Name': parser.ParseResult(None, 'Show Name', 1, [2,3,4], 'Ep Name'),
'Show.Name.S01E02.S01E03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2,3], 'WEB-DL'),
},
'fov_repeat': {
'Show.Name.1x02.1x03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2,3], 'Source.Quality.Etc', 'Group'),
'Show.Name.1x02.1x03': parser.ParseResult(None, 'Show Name', 1, [2,3]),
'Show Name - 1x02 - 1x03 - 1x04 - Ep Name': parser.ParseResult(None, 'Show Name', 1, [2,3,4], 'Ep Name'),
'Show.Name.1x02.1x03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2,3], 'WEB-DL'),
},
'bare': {
'Show.Name.102.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'show.name.2010.123.source.quality.etc-group': parser.ParseResult(None, 'show name 2010', 1, [23], 'source.quality.etc', 'group'),
'show.name.2010.222.123.source.quality.etc-group': parser.ParseResult(None, 'show name 2010.222', 1, [23], 'source.quality.etc', 'group'),
'Show.Name.102': parser.ParseResult(None, 'Show Name', 1, [2]),
'the.event.401.hdtv-lol': parser.ParseResult(None, 'the event', 4, [1], 'hdtv', 'lol'),
'show.name.2010.special.hdtv-blah': None,
},
'stupid': {
'tpz-abc102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
'tpz-abc.102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
},
'no_season': {
'Show Name - 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'01 - Ep Name': parser.ParseResult(None, None, None, [1], 'Ep Name'),
'Show Name - 01 - Ep Name - WEB-DL': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name - WEB-DL'),
},
'no_season_general': {
'Show.Name.E23.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [23], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'Show.Name.Part.3.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [3], 'Source.Quality.Etc', 'Group'),
'Show.Name.Part.1.and.Part.2.Blah-Group': parser.ParseResult(None, 'Show Name', None, [1,2], 'Blah', 'Group'),
'Show.Name.Part.IV.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [4], 'Source.Quality.Etc', 'Group'),
'Deconstructed.E07.1080i.HDTV.DD5.1.MPEG2-TrollHD': parser.ParseResult(None, 'Deconstructed', None, [7], '1080i.HDTV.DD5.1.MPEG2', 'TrollHD'),
'Show.Name.E23.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23], 'WEB-DL'),
},
'no_season_multi_ep': {
'Show.Name.E23-24.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [23,24], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01-02 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1,2], 'Ep Name'),
'Show.Name.E23-24.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23,24], 'WEB-DL'),
},
'season_only': {
'Show.Name.S02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 2, [], 'Source.Quality.Etc', 'Group'),
'Show Name Season 2': parser.ParseResult(None, 'Show Name', 2),
'Season 02': parser.ParseResult(None, None, 2),
},
'scene_date_format': {
'Show.Name.2010.11.23.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010,11,23)),
'Show Name - 2010.11.23': parser.ParseResult(None, 'Show Name', air_date = datetime.date(2010,11,23)),
'Show.Name.2010.23.11.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010,11,23)),
'Show Name - 2010-11-23 - Ep Name': parser.ParseResult(None, 'Show Name', extra_info = 'Ep Name', air_date = datetime.date(2010,11,23)),
'2010-11-23 - Ep Name': parser.ParseResult(None, extra_info = 'Ep Name', air_date = datetime.date(2010,11,23)),
'Show.Name.2010.11.23.WEB-DL': parser.ParseResult(None, 'Show Name', None, [], 'WEB-DL', None, datetime.date(2010,11,23)),
}
}
combination_test_cases = [
('/test/path/to/Season 02/03 - Ep Name.avi',
parser.ParseResult(None, None, 2, [3], 'Ep Name'),
['no_season', 'season_only']),
('Show.Name.S02.Source.Quality.Etc-Group/tpz-sn203.avi',
parser.ParseResult(None, 'Show Name', 2, [3], 'Source.Quality.Etc', 'Group'),
['stupid', 'season_only']),
('MythBusters.S08E16.720p.HDTV.x264-aAF/aaf-mb.s08e16.720p.mkv',
parser.ParseResult(None, 'MythBusters', 8, [16], '720p.HDTV.x264', 'aAF'),
['standard']),
('/home/drop/storage/TV/Terminator The Sarah Connor Chronicles/Season 2/S02E06 The Tower is Tall, But the Fall is Short.mkv',
parser.ParseResult(None, None, 2, [6], 'The Tower is Tall, But the Fall is Short'),
['standard']),
(r'/Test/TV/Jimmy Fallon/Season 2/Jimmy Fallon - 2010-12-15 - blah.avi',
parser.ParseResult(None, 'Jimmy Fallon', extra_info = 'blah', air_date = datetime.date(2010,12,15)),
['scene_date_format']),
(r'/X/30 Rock/Season 4/30 Rock - 4x22 -.avi',
parser.ParseResult(None, '30 Rock', 4, [22]),
['fov']),
('Season 2/Show Name - 03-04 - Ep Name.avi',
parser.ParseResult(None, 'Show Name', 2, [3,4], extra_info = 'Ep Name'),
['no_season', 'season_only']),
('Season 02/03-04-05 - Ep Name.avi',
parser.ParseResult(None, None, 2, [3,4,5], extra_info = 'Ep Name'),
['no_season', 'season_only']),
]
unicode_test_cases = [
(u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(None, 'The.Big.Bang.Theory', 2, [7], '720p.HDTV.x264.AC3', 'SHELDON')
),
('The.Big.Bang.Theory.2x07.The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(None, 'The.Big.Bang.Theory', 2, [7], '720p.HDTV.x264.AC3', 'SHELDON')
),
]
failure_cases = ['7sins-jfcs01e09-720p-bluray-x264']
class UnicodeTests(unittest.TestCase):
def _test_unicode(self, name, result):
np = parser.NameParser(True)
parse_result = np.parse(name)
# this shouldn't raise an exception
a = repr(str(parse_result))
def test_unicode(self):
for (name, result) in unicode_test_cases:
self._test_unicode(name, result)
class FailureCaseTests(unittest.TestCase):
def _test_name(self, name):
np = parser.NameParser(True)
try:
parse_result = np.parse(name)
except parser.InvalidNameException:
return True
if VERBOSE:
print 'Actual: ', parse_result.which_regex, parse_result
return False
def test_failures(self):
for name in failure_cases:
self.assertTrue(self._test_name(name))
class ComboTests(unittest.TestCase):
def _test_combo(self, name, result, which_regexes):
if VERBOSE:
print
print 'Testing', name
np = parser.NameParser(True)
test_result = np.parse(name)
if DEBUG:
print test_result, test_result.which_regex
print result, which_regexes
self.assertEqual(test_result, result)
for cur_regex in which_regexes:
self.assertTrue(cur_regex in test_result.which_regex)
self.assertEqual(len(which_regexes), len(test_result.which_regex))
def test_combos(self):
for (name, result, which_regexes) in combination_test_cases:
# Normalise the paths. Converts UNIX-style paths into Windows-style
# paths when test is run on Windows.
self._test_combo(os.path.normpath(name), result, which_regexes)
class BasicTests(unittest.TestCase):
def _test_names(self, np, section, transform=None, verbose=False):
if VERBOSE or verbose:
print
print 'Running', section, 'tests'
for cur_test_base in simple_test_cases[section]:
if transform:
cur_test = transform(cur_test_base)
else:
cur_test = cur_test_base
if VERBOSE or verbose:
print 'Testing', cur_test
result = simple_test_cases[section][cur_test_base]
if not result:
self.assertRaises(parser.InvalidNameException, np.parse, cur_test)
return
else:
test_result = np.parse(cur_test)
if DEBUG or verbose:
print 'air_by_date:', test_result.air_by_date, 'air_date:', test_result.air_date
print test_result
print result
self.assertEqual(test_result.which_regex, [section])
self.assertEqual(test_result, result)
def test_standard_names(self):
np = parser.NameParser(False)
self._test_names(np, 'standard')
def test_standard_repeat_names(self):
np = parser.NameParser(False)
self._test_names(np, 'standard_repeat')
def test_fov_names(self):
np = parser.NameParser(False)
self._test_names(np, 'fov')
def test_fov_repeat_names(self):
np = parser.NameParser(False)
self._test_names(np, 'fov_repeat')
def test_bare_names(self):
np = parser.NameParser(False)
self._test_names(np, 'bare')
def test_stupid_names(self):
np = parser.NameParser(False)
self._test_names(np, 'stupid')
def test_no_season_names(self):
np = parser.NameParser(False)
self._test_names(np, 'no_season')
def test_no_season_general_names(self):
np = parser.NameParser(False)
self._test_names(np, 'no_season_general')
def test_no_season_multi_ep_names(self):
np = parser.NameParser(False)
self._test_names(np, 'no_season_multi_ep')
def test_season_only_names(self):
np = parser.NameParser(False)
self._test_names(np, 'season_only')
def test_scene_date_format_names(self):
np = parser.NameParser(False)
self._test_names(np, 'scene_date_format')
def test_standard_file_names(self):
np = parser.NameParser()
self._test_names(np, 'standard', lambda x: x + '.avi')
def test_standard_repeat_file_names(self):
np = parser.NameParser()
self._test_names(np, 'standard_repeat', lambda x: x + '.avi')
def test_fov_file_names(self):
np = parser.NameParser()
self._test_names(np, 'fov', lambda x: x + '.avi')
def test_fov_repeat_file_names(self):
np = parser.NameParser()
self._test_names(np, 'fov_repeat', lambda x: x + '.avi')
def test_bare_file_names(self):
np = parser.NameParser()
self._test_names(np, 'bare', lambda x: x + '.avi')
def test_stupid_file_names(self):
np = parser.NameParser()
self._test_names(np, 'stupid', lambda x: x + '.avi')
def test_no_season_file_names(self):
np = parser.NameParser()
self._test_names(np, 'no_season', lambda x: x + '.avi')
def test_no_season_general_file_names(self):
np = parser.NameParser()
self._test_names(np, 'no_season_general', lambda x: x + '.avi')
def test_no_season_multi_ep_file_names(self):
np = parser.NameParser()
self._test_names(np, 'no_season_multi_ep', lambda x: x + '.avi')
def test_season_only_file_names(self):
np = parser.NameParser()
self._test_names(np, 'season_only', lambda x: x + '.avi')
def test_scene_date_format_file_names(self):
np = parser.NameParser()
self._test_names(np, 'scene_date_format', lambda x: x + '.avi')
def test_combination_names(self):
pass
if __name__ == '__main__':
if len(sys.argv) > 1:
suite = unittest.TestLoader().loadTestsFromName('name_parser_tests.BasicTests.test_'+sys.argv[1])
else:
suite = unittest.TestLoader().loadTestsFromTestCase(BasicTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(ComboTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(UnicodeTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(FailureCaseTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 17,744 | Python | .py | 277 | 49.018051 | 170 | 0.570489 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,649 | pp_tests.py | midgetspy_Sick-Beard/tests/pp_tests.py | # coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import random
import unittest
import test_lib as test
import sys, os.path
from sickbeard.postProcessor import PostProcessor
import sickbeard
from sickbeard.tv import TVEpisode, TVShow
class PPInitTests(unittest.TestCase):
def setUp(self):
self.pp = PostProcessor(test.FILEPATH)
def test_init_file_name(self):
self.assertEqual(self.pp.file_name, test.FILENAME)
def test_init_folder_name(self):
self.assertEqual(self.pp.folder_name, test.SHOWNAME)
class PPBasicTests(test.SickbeardTestDBCase):
def test_process(self):
show = TVShow(3)
show.name = test.SHOWNAME
show.location = test.SHOWDIR
show.saveToDB()
sickbeard.showList = [show]
ep = TVEpisode(show, test.SEASON, test.EPISODE)
ep.name = "some ep name"
ep.saveToDB()
pp = PostProcessor(test.FILEPATH)
self.assertTrue(pp.process())
if __name__ == '__main__':
print "=================="
print "STARTING - PostProcessor TESTS"
print "=================="
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(PPInitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(PPPrivateTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(PPBasicTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 2,436 | Python | .py | 57 | 38.508772 | 82 | 0.657215 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,650 | common_tests.py | midgetspy_Sick-Beard/tests/common_tests.py | import unittest
import sys
import os.path
sys.path.append(os.path.abspath('..'))
from sickbeard import common
class QualityTests(unittest.TestCase):
# TODO: repack / proper ? air-by-date ? season rip? multi-ep?
def test_SDTV(self):
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.PDTV.XViD-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.PDTV.x264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.HDTV.XViD-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.HDTV.x264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.DSR.XViD-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.DSR.x264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.TVRip.XViD-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.TVRip.x264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.WEBRip.XViD-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.WEBRip.x264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.WEB-DL.x264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.WEB-DL.AAC2.0.H.264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02 WEB-DL H 264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02_WEB-DL_H_264-GROUP"))
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test.Show.S01E02.WEB-DL.AAC2.0.H264-GROUP"))
def test_SDDVD(self):
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.DVDRiP.XViD-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.DVDRiP.DiVX-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.DVDRiP.x264-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.DVDRip.WS.XViD-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.DVDRip.WS.DiVX-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.DVDRip.WS.x264-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.BDRIP.XViD-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.BDRIP.DiVX-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.BDRIP.x264-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.BDRIP.WS.XViD-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.BDRIP.WS.DiVX-GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test.Show.S01E02.BDRIP.WS.x264-GROUP"))
def test_HDTV(self):
self.assertEqual(common.Quality.HDTV, common.Quality.nameQuality("Test.Show.S01E02.720p.HDTV.x264-GROUP"))
self.assertEqual(common.Quality.HDTV, common.Quality.nameQuality("Test.Show.S01E02.HR.WS.PDTV.x264-GROUP"))
def test_RAWHDTV(self):
self.assertEqual(common.Quality.RAWHDTV, common.Quality.nameQuality("Test.Show.S01E02.720p.HDTV.DD5.1.MPEG2-GROUP"))
self.assertEqual(common.Quality.RAWHDTV, common.Quality.nameQuality("Test.Show.S01E02.1080i.HDTV.DD2.0.MPEG2-GROUP"))
self.assertEqual(common.Quality.RAWHDTV, common.Quality.nameQuality("Test.Show.S01E02.1080i.HDTV.H.264.DD2.0-GROUP"))
self.assertEqual(common.Quality.RAWHDTV, common.Quality.nameQuality("Test Show - S01E02 - 1080i HDTV MPA1.0 H.264 - GROUP"))
self.assertEqual(common.Quality.RAWHDTV, common.Quality.nameQuality("Test.Show.S01E02.1080i.HDTV.DD.5.1.h264-GROUP"))
self.assertEqual(common.Quality.RAWHDTV, common.Quality.nameQuality("Test.Show.S01E02.1080i.HDTV.MPEG2.DD5.1-GROUP"))
self.assertEqual(common.Quality.RAWHDTV, common.Quality.nameQuality("Test Show S01E02 1080p HDTV DD5.1 H.264-GROUP"))
def test_FULLHDTV(self):
self.assertEqual(common.Quality.FULLHDTV, common.Quality.nameQuality("Test.Show.S01E02.1080p.HDTV.x264-GROUP"))
self.assertEqual(common.Quality.FULLHDTV, common.Quality.nameQuality("Test.Show.S01E02.1080p.HDTV.DD5.1.x264-GROUP"))
def test_HDWEBDL(self):
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.720p.WEB-DL-GROUP"))
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.720p.WEBRip-GROUP"))
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.WEBRip.720p.H.264.AAC.2.0-GROUP"))
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.720p.WEB-DL.AAC2.0.H.264-GROUP"))
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test Show S01E02 720p WEB-DL AAC2 0 H 264-GROUP"))
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test_Show.S01E02_720p_WEB-DL_AAC2.0_H264-GROUP"))
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.720p.WEB-DL.AAC2.0.H264-GROUP"))
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.720p.iTunes.Rip.H264.AAC-GROUP"))
def test_FULLHDWEBDL(self):
self.assertEqual(common.Quality.FULLHDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.1080p.WEB-DL-GROUP"))
self.assertEqual(common.Quality.FULLHDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.1080p.WEBRip-GROUP"))
self.assertEqual(common.Quality.FULLHDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.WEBRip.1080p.H.264.AAC.2.0-GROUP"))
self.assertEqual(common.Quality.FULLHDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.WEBRip.1080p.H264.AAC.2.0-GROUP"))
self.assertEqual(common.Quality.FULLHDWEBDL, common.Quality.nameQuality("Test.Show.S01E02.1080p.iTunes.H.264.AAC-GROUP"))
self.assertEqual(common.Quality.FULLHDWEBDL, common.Quality.nameQuality("Test Show S01E02 1080p iTunes H 264 AAC-GROUP"))
self.assertEqual(common.Quality.FULLHDWEBDL, common.Quality.nameQuality("Test_Show_S01E02_1080p_iTunes_H_264_AAC-GROUP"))
def test_HDBLURAY(self):
self.assertEqual(common.Quality.HDBLURAY, common.Quality.nameQuality("Test.Show.S01E02.720p.BluRay.x264-GROUP"))
self.assertEqual(common.Quality.HDBLURAY, common.Quality.nameQuality("Test.Show.S01E02.720p.HDDVD.x264-GROUP"))
def test_FULLHDBLURAY(self):
self.assertEqual(common.Quality.FULLHDBLURAY, common.Quality.nameQuality("Test.Show.S01E02.1080p.BluRay.x264-GROUP"))
self.assertEqual(common.Quality.FULLHDBLURAY, common.Quality.nameQuality("Test.Show.S01E02.1080p.HDDVD.x264-GROUP"))
def test_UNKNOWN(self):
self.assertEqual(common.Quality.UNKNOWN, common.Quality.nameQuality("Test.Show.S01E02-SiCKBEARD"))
def test_reverse_parsing(self):
self.assertEqual(common.Quality.SDTV, common.Quality.nameQuality("Test Show - S01E02 - SD TV - GROUP"))
self.assertEqual(common.Quality.SDDVD, common.Quality.nameQuality("Test Show - S01E02 - SD DVD - GROUP"))
self.assertEqual(common.Quality.HDTV, common.Quality.nameQuality("Test Show - S01E02 - HD TV - GROUP"))
self.assertEqual(common.Quality.RAWHDTV, common.Quality.nameQuality("Test Show - S01E02 - RawHD TV - GROUP"))
self.assertEqual(common.Quality.FULLHDTV, common.Quality.nameQuality("Test Show - S01E02 - 1080p HD TV - GROUP"))
self.assertEqual(common.Quality.HDWEBDL, common.Quality.nameQuality("Test Show - S01E02 - 720p WEB-DL - GROUP"))
self.assertEqual(common.Quality.FULLHDWEBDL, common.Quality.nameQuality("Test Show - S01E02 - 1080p WEB-DL - GROUP"))
self.assertEqual(common.Quality.HDBLURAY, common.Quality.nameQuality("Test Show - S01E02 - 720p BluRay - GROUP"))
self.assertEqual(common.Quality.FULLHDBLURAY, common.Quality.nameQuality("Test Show - S01E02 - 1080p BluRay - GROUP"))
self.assertEqual(common.Quality.UNKNOWN, common.Quality.nameQuality("Test Show - S01E02 - Unknown - SiCKBEARD"))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(QualityTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| 8,908 | Python | .py | 89 | 90.966292 | 134 | 0.748321 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,651 | _cpwsgi_server.py | midgetspy_Sick-Beard/cherrypy/_cpwsgi_server.py | """WSGI server interface (see PEP 333). This adds some CP-specific bits to
the framework-agnostic wsgiserver package.
"""
import sys
import cherrypy
from cherrypy import wsgiserver
class CPHTTPRequest(wsgiserver.HTTPRequest):
pass
class CPHTTPConnection(wsgiserver.HTTPConnection):
pass
class CPWSGIServer(wsgiserver.CherryPyWSGIServer):
"""Wrapper for wsgiserver.CherryPyWSGIServer.
wsgiserver has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications. Therefore,
we wrap it here, so we can set our own mount points from cherrypy.tree
and apply some attributes from config -> cherrypy.server -> wsgiserver.
"""
def __init__(self, server_adapter=cherrypy.server):
self.server_adapter = server_adapter
self.max_request_header_size = self.server_adapter.max_request_header_size or 0
self.max_request_body_size = self.server_adapter.max_request_body_size or 0
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
self.wsgi_version = self.server_adapter.wsgi_version
s = wsgiserver.CherryPyWSGIServer
s.__init__(self, server_adapter.bind_addr, cherrypy.tree,
self.server_adapter.thread_pool,
server_name,
max=self.server_adapter.thread_pool_max,
request_queue_size=self.server_adapter.socket_queue_size,
timeout=self.server_adapter.socket_timeout,
shutdown_timeout=self.server_adapter.shutdown_timeout,
)
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
| 2,686 | Python | .wsgi | 50 | 41.26 | 88 | 0.660465 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,652 | _cpwsgi.py | midgetspy_Sick-Beard/cherrypy/_cpwsgi.py | """WSGI interface (see PEP 333)."""
import sys as _sys
import cherrypy as _cherrypy
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from cherrypy import _cperror
from cherrypy.lib import httputil
def downgrade_wsgi_ux_to_1x(environ):
"""Return a new environ dict for WSGI 1.x from the given WSGI u.x environ."""
env1x = {}
url_encoding = environ[u'wsgi.url_encoding']
for k, v in environ.items():
if k in [u'PATH_INFO', u'SCRIPT_NAME', u'QUERY_STRING']:
v = v.encode(url_encoding)
elif isinstance(v, unicode):
v = v.encode('ISO-8859-1')
env1x[k.encode('ISO-8859-1')] = v
return env1x
class VirtualHost(object):
"""Select a different WSGI application based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different applications. For example:
root = Root()
RootApp = cherrypy.Application(root)
Domain2App = cherrypy.Application(root)
SecureApp = cherrypy.Application(Secure())
vhost = cherrypy._cpwsgi.VirtualHost(RootApp,
domains={'www.domain2.example': Domain2App,
'www.domain2.example:443': SecureApp,
})
cherrypy.tree.graft(vhost)
default: required. The default WSGI application.
use_x_forwarded_host: if True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying.
domains: a dict of {host header value: application} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding WSGI application
will be called instead of the default. Note that you often need
separate entries for "example.com" and "www.example.com".
In addition, "Host" headers may contain the port number.
"""
def __init__(self, default, domains=None, use_x_forwarded_host=True):
self.default = default
self.domains = domains or {}
self.use_x_forwarded_host = use_x_forwarded_host
def __call__(self, environ, start_response):
domain = environ.get('HTTP_HOST', '')
if self.use_x_forwarded_host:
domain = environ.get("HTTP_X_FORWARDED_HOST", domain)
nextapp = self.domains.get(domain)
if nextapp is None:
nextapp = self.default
return nextapp(environ, start_response)
class InternalRedirector(object):
"""WSGI middleware that handles raised cherrypy.InternalRedirect."""
def __init__(self, nextapp, recursive=False):
self.nextapp = nextapp
self.recursive = recursive
def __call__(self, environ, start_response):
redirections = []
while True:
environ = environ.copy()
try:
return self.nextapp(environ, start_response)
except _cherrypy.InternalRedirect, ir:
sn = environ.get('SCRIPT_NAME', '')
path = environ.get('PATH_INFO', '')
qs = environ.get('QUERY_STRING', '')
# Add the *previous* path_info + qs to redirections.
old_uri = sn + path
if qs:
old_uri += "?" + qs
redirections.append(old_uri)
if not self.recursive:
# Check to see if the new URI has been redirected to already
new_uri = sn + ir.path
if ir.query_string:
new_uri += "?" + ir.query_string
if new_uri in redirections:
ir.request.close()
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % new_uri)
# Munge the environment and try again.
environ['REQUEST_METHOD'] = "GET"
environ['PATH_INFO'] = ir.path
environ['QUERY_STRING'] = ir.query_string
environ['wsgi.input'] = StringIO()
environ['CONTENT_LENGTH'] = "0"
environ['cherrypy.previous_request'] = ir.request
class ExceptionTrapper(object):
def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):
self.nextapp = nextapp
self.throws = throws
def __call__(self, environ, start_response):
return _TrappedResponse(self.nextapp, environ, start_response, self.throws)
class _TrappedResponse(object):
response = iter([])
def __init__(self, nextapp, environ, start_response, throws):
self.nextapp = nextapp
self.environ = environ
self.start_response = start_response
self.throws = throws
self.started_response = False
self.response = self.trap(self.nextapp, self.environ, self.start_response)
self.iter_response = iter(self.response)
def __iter__(self):
self.started_response = True
return self
def next(self):
return self.trap(self.iter_response.next)
def close(self):
if hasattr(self.response, 'close'):
self.response.close()
def trap(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except self.throws:
raise
except StopIteration:
raise
except:
tb = _cperror.format_exc()
#print('trapped (started %s):' % self.started_response, tb)
_cherrypy.log(tb, severity=40)
if not _cherrypy.request.show_tracebacks:
tb = ""
s, h, b = _cperror.bare_error(tb)
if self.started_response:
# Empty our iterable (so future calls raise StopIteration)
self.iter_response = iter([])
else:
self.iter_response = iter(b)
try:
self.start_response(s, h, _sys.exc_info())
except:
# "The application must not trap any exceptions raised by
# start_response, if it called start_response with exc_info.
# Instead, it should allow such exceptions to propagate
# back to the server or gateway."
# But we still log and call close() to clean up ourselves.
_cherrypy.log(traceback=True, severity=40)
raise
if self.started_response:
return "".join(b)
else:
return b
# WSGI-to-CP Adapter #
class AppResponse(object):
"""WSGI response iterable for CherryPy applications."""
def __init__(self, environ, start_response, cpapp):
if environ.get(u'wsgi.version') == (u'u', 0):
environ = downgrade_wsgi_ux_to_1x(environ)
self.environ = environ
self.cpapp = cpapp
try:
self.run()
except:
self.close()
raise
r = _cherrypy.serving.response
self.iter_response = iter(r.body)
self.write = start_response(r.output_status, r.header_list)
def __iter__(self):
return self
def next(self):
return self.iter_response.next()
def close(self):
"""Close and de-reference the current request and response. (Core)"""
self.cpapp.release_serving()
def run(self):
"""Create a Request object using environ."""
env = self.environ.get
local = httputil.Host('', int(env('SERVER_PORT', 80)),
env('SERVER_NAME', ''))
remote = httputil.Host(env('REMOTE_ADDR', ''),
int(env('REMOTE_PORT', -1)),
env('REMOTE_HOST', ''))
scheme = env('wsgi.url_scheme')
sproto = env('ACTUAL_SERVER_PROTOCOL', "HTTP/1.1")
request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
# LOGON_USER is served by IIS, and is the name of the
# user after having been mapped to a local account.
# Both IIS and Apache set REMOTE_USER, when possible.
request.login = env('LOGON_USER') or env('REMOTE_USER') or None
request.multithread = self.environ['wsgi.multithread']
request.multiprocess = self.environ['wsgi.multiprocess']
request.wsgi_environ = self.environ
request.prev = env('cherrypy.previous_request', None)
meth = self.environ['REQUEST_METHOD']
path = httputil.urljoin(self.environ.get('SCRIPT_NAME', ''),
self.environ.get('PATH_INFO', ''))
qs = self.environ.get('QUERY_STRING', '')
rproto = self.environ.get('SERVER_PROTOCOL')
headers = self.translate_headers(self.environ)
rfile = self.environ['wsgi.input']
request.run(meth, path, qs, rproto, headers, rfile)
headerNames = {'HTTP_CGI_AUTHORIZATION': 'Authorization',
'CONTENT_LENGTH': 'Content-Length',
'CONTENT_TYPE': 'Content-Type',
'REMOTE_HOST': 'Remote-Host',
'REMOTE_ADDR': 'Remote-Addr',
}
def translate_headers(self, environ):
"""Translate CGI-environ header names to HTTP header names."""
for cgiName in environ:
# We assume all incoming header keys are uppercase already.
if cgiName in self.headerNames:
yield self.headerNames[cgiName], environ[cgiName]
elif cgiName[:5] == "HTTP_":
# Hackish attempt at recovering original header names.
translatedHeader = cgiName[5:].replace("_", "-")
yield translatedHeader, environ[cgiName]
class CPWSGIApp(object):
"""A WSGI application object for a CherryPy Application.
pipeline: a list of (name, wsgiapp) pairs. Each 'wsgiapp' MUST be a
constructor that takes an initial, positional 'nextapp' argument,
plus optional keyword arguments, and returns a WSGI application
(that takes environ and start_response arguments). The 'name' can
be any you choose, and will correspond to keys in self.config.
head: rather than nest all apps in the pipeline on each call, it's only
done the first time, and the result is memoized into self.head. Set
this to None again if you change self.pipeline after calling self.
config: a dict whose keys match names listed in the pipeline. Each
value is a further dict which will be passed to the corresponding
named WSGI callable (from the pipeline) as keyword arguments.
"""
pipeline = [('ExceptionTrapper', ExceptionTrapper),
('InternalRedirector', InternalRedirector),
]
head = None
config = {}
response_class = AppResponse
def __init__(self, cpapp, pipeline=None):
self.cpapp = cpapp
self.pipeline = self.pipeline[:]
if pipeline:
self.pipeline.extend(pipeline)
self.config = self.config.copy()
def tail(self, environ, start_response):
"""WSGI application callable for the actual CherryPy application.
You probably shouldn't call this; call self.__call__ instead,
so that any WSGI middleware in self.pipeline can run first.
"""
return self.response_class(environ, start_response, self.cpapp)
def __call__(self, environ, start_response):
head = self.head
if head is None:
# Create and nest the WSGI apps in our pipeline (in reverse order).
# Then memoize the result in self.head.
head = self.tail
for name, callable in self.pipeline[::-1]:
conf = self.config.get(name, {})
head = callable(head, **conf)
self.head = head
return head(environ, start_response)
def namespace_handler(self, k, v):
"""Config handler for the 'wsgi' namespace."""
if k == "pipeline":
# Note this allows multiple 'wsgi.pipeline' config entries
# (but each entry will be processed in a 'random' order).
# It should also allow developers to set default middleware
# in code (passed to self.__init__) that deployers can add to
# (but not remove) via config.
self.pipeline.extend(v)
elif k == "response_class":
self.response_class = v
else:
name, arg = k.split(".", 1)
bucket = self.config.setdefault(name, {})
bucket[arg] = v
| 13,366 | Python | .wsgi | 273 | 35.666667 | 84 | 0.583379 | midgetspy/Sick-Beard | 2,890 | 1,507 | 113 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,653 | setup.py | gabrielfalcao_lettuce/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from setuptools import setup
def get_packages():
# setuptools can't do the job :(
packages = []
for root, dirnames, filenames in os.walk('lettuce'):
if '__init__.py' in filenames:
packages.append(".".join(os.path.split(root)).strip("."))
return packages
required_modules = ['sure', 'fuzzywuzzy', 'python-subunit']
if os.name.lower() == 'nt':
required_modules.append('colorama')
setup(
name='lettuce',
version='0.2.23',
description='Behaviour Driven Development for python',
author='Gabriel Falcao',
author_email='[email protected]',
url='http://lettuce.it',
packages=get_packages(),
install_requires=required_modules,
entry_points={
'console_scripts': ['lettuce = lettuce.bin:main'],
},
package_data={
'lettuce': ['COPYING', '*.md'],
},
)
| 1,685 | Python | .py | 46 | 33.152174 | 71 | 0.701776 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,654 | bin.py | gabrielfalcao_lettuce/lettuce/bin.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import optparse
import lettuce
def main(args=sys.argv[1:]):
base_path = os.path.join(os.path.dirname(os.curdir), 'features')
parser = optparse.OptionParser(
usage="%prog or type %prog -h (--help) for help",
version=lettuce.version)
parser.add_option("-v", "--verbosity",
dest="verbosity",
default=3,
help='The verbosity level')
parser.add_option("--no-color",
action="store_true",
dest="no_color",
default=False,
help="Don't colorize the command output.")
parser.add_option("-s", "--scenarios",
dest="scenarios",
default=None,
help='Comma separated list of scenarios to run')
parser.add_option("-t", "--tag",
dest="tags",
default=None,
action='append',
help='Tells lettuce to run the specified tags only; '
'can be used multiple times to define more tags'
'(prefixing tags with "-" will exclude them and '
'prefixing with "~" will match approximate words)')
parser.add_option("-r", "--random",
dest="random",
action="store_true",
default=False,
help="Run scenarios in a more random order to avoid interference")
parser.add_option("--root-dir",
dest="root_dir",
default="/",
type="string",
help="Tells lettuce not to search for features/steps "
" above this directory.")
parser.add_option("--with-xunit",
dest="enable_xunit",
action="store_true",
default=False,
help='Output JUnit XML test results to a file')
parser.add_option("--xunit-file",
dest="xunit_file",
default=None,
type="string",
help='Write JUnit XML to this file. Defaults to '
'lettucetests.xml')
parser.add_option("--with-subunit",
dest="enable_subunit",
action="store_true",
default=False,
help='Output Subunit test results to a file')
parser.add_option("--subunit-file",
dest="subunit_filename",
default=None,
help='Write Subunit data to this file. Defaults to '
'subunit.bin')
parser.add_option("--failfast",
dest="failfast",
default=False,
action="store_true",
help='Stop running in the first failure')
parser.add_option("--pdb",
dest="auto_pdb",
default=False,
action="store_true",
help='Launches an interactive debugger upon error')
options, args = parser.parse_args(args)
if args:
base_path = os.path.abspath(args[0])
try:
options.verbosity = int(options.verbosity)
except ValueError:
pass
tags = None
if options.tags:
tags = [tag.strip('@') for tag in options.tags]
runner = lettuce.Runner(
base_path,
scenarios=options.scenarios,
verbosity=options.verbosity,
no_color=options.no_color,
random=options.random,
enable_xunit=options.enable_xunit,
xunit_filename=options.xunit_file,
enable_subunit=options.enable_subunit,
subunit_filename=options.subunit_filename,
failfast=options.failfast,
auto_pdb=options.auto_pdb,
tags=tags,
root_dir=options.root_dir,
)
result = runner.run()
failed = result is None or result.steps != result.steps_passed
raise SystemExit(int(failed))
if __name__ == '__main__':
main()
| 5,004 | Python | .py | 119 | 29.294118 | 88 | 0.54852 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,655 | terminal.py | gabrielfalcao_lettuce/lettuce/terminal.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import platform
import struct
def get_size():
if platform.system() == "Windows":
size = get_terminal_size_win()
else:
size = get_terminal_size_unix()
if not all(size):
size = (1, 1)
return size
def get_terminal_size_win():
#Windows specific imports
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr, left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
else: # can't determine actual size - return default values
sizex, sizey = 80, 25
return sizex, sizey
def get_terminal_size_unix():
# Unix/Posix specific imports
import fcntl
import termios
def ioctl_GWINSZ(fd):
try:
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.getenv('LINES', 25), os.getenv('COLUMNS', 80))
return int(cr[1]), int(cr[0])
| 2,339 | Python | .py | 66 | 29.818182 | 72 | 0.656637 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,656 | fs.py | gabrielfalcao_lettuce/lettuce/fs.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import imp
import sys
import codecs
import fnmatch
import zipfile
from functools import wraps
from glob import glob
from os.path import abspath, join, dirname, curdir, exists
class FeatureLoader(object):
"""Loader class responsible for findind features and step
definitions along a given path on filesystem"""
def __init__(self, base_dir, root_dir=None):
self.base_dir = FileSystem.abspath(base_dir)
if root_dir is None:
root_dir = '/'
self.root_dir = FileSystem.abspath(root_dir)
def find_and_load_step_definitions(self):
# find steps, possibly up several directories
base_dir = self.base_dir
while base_dir != self.root_dir:
files = FileSystem.locate(base_dir, '*.py')
if files:
break
base_dir = FileSystem.join(base_dir, '..')
else:
# went as far as root_dir, also discover files under root_dir
files = FileSystem.locate(base_dir, '*.py')
for filename in files:
root = FileSystem.dirname(filename)
sys.path.insert(0, root)
to_load = FileSystem.filename(filename, with_extension=False)
try:
module = __import__(to_load)
except ValueError as e:
import traceback
err_msg = traceback.format_exc(e)
if 'empty module name' in err_msg.lower():
continue
else:
e.args = ('{0} when importing {1}'
.format(e, filename)),
raise e
reload(module) # always take fresh meat :)
sys.path.remove(root)
def find_feature_files(self):
paths = FileSystem.locate(self.base_dir, "*.feature")
paths.sort()
return paths
class FileSystem(object):
"""File system abstraction, mainly used for indirection, so that
lettuce can be well unit-tested :)
"""
stack = []
def __init__(self):
self.stack = []
@classmethod
def _import(cls, name):
sys.path.insert(0, cls.current_dir())
fp, pathname, description = imp.find_module(name)
try:
module = imp.load_module(name, fp, pathname, description)
sys.path.remove(cls.current_dir())
return module
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
@classmethod
def pushd(cls, *path):
"""Change current dir to `path`, adding it to a stack. Can be
undone by calling FileSystem.popd()"""
path = cls.join(*path)
if not len(cls.stack):
cls.stack.append(cls.current_dir())
cls.stack.append(path)
os.chdir(path)
@classmethod
def popd(cls):
"""Go back one path in path stack"""
if cls.stack:
cls.stack.pop()
if cls.stack:
os.chdir(cls.stack[-1])
@classmethod
def filename(cls, path, with_extension=True):
"""Returns only the filename from a full path. If the argument
with_extension is False, return the filename without
extension.
Examples::
>>> from lettuce.fs import FileSystem
>>> assert FileSystem.filename('/full/path/to/some_file.py') == 'some_file.py'
>>> assert FileSystem.filename('/full/path/to/some_file.py', False) == 'some_file'
"""
fname = os.path.split(path)[1]
if not with_extension:
fname = os.path.splitext(fname)[0]
return fname
@classmethod
def exists(cls, path):
"""Return True if `path`exists"""
return exists(path)
@classmethod
def mkdir(cls, path):
"""Create paths recursively, ignore already created dirs
Example:
>>> from lettuce.fs import FileSystem
>>> FileSystem.mkdir('~/a/lot/of/nested/dirs')
"""
try:
os.makedirs(path)
except OSError as e:
# ignore if path already exists
if e.errno not in (17, ):
raise e
else:
if not os.path.isdir(path):
# but the path must be a dir to ignore its creation
raise e
@classmethod
def current_dir(cls, path=""):
'''Returns the absolute path for current dir, also join the
current path with the given, if so.'''
to_return = cls.abspath(curdir)
if path:
return cls.join(to_return, path)
return to_return
@classmethod
def abspath(cls, path):
'''Returns the absolute path for the given path.'''
return abspath(path)
@classmethod
def relpath(cls, path):
'''Returns the absolute path for the given path.'''
current_path = cls.current_dir()
absolute_path = cls.abspath(path)
return re.sub("^" + re.escape(current_path), '', absolute_path).lstrip("/")
@classmethod
def join(cls, *args):
'''Returns the concatenated path for the given arguments.'''
return join(*args)
@classmethod
def dirname(cls, path):
'''Returns the directory name for the given file.'''
return cls.abspath(dirname(path))
@classmethod
def walk(cls, path):
'''Walks through filesystem'''
return os.walk(path)
@classmethod
def locate(cls, path, match, recursive=True):
"""Locate files recursively in a given path"""
root_path = cls.abspath(path)
if recursive:
return_files = []
for path, dirs, files in cls.walk(root_path):
for filename in fnmatch.filter(files, match):
return_files.append(cls.join(path, filename))
return return_files
else:
return glob(cls.join(root_path, match))
@classmethod
def extract_zip(cls, filename, base_path='.', verbose=False):
"""Extracts a zip file into `base_path`"""
base_path = cls.abspath(base_path)
output = lambda x: verbose and sys.stdout.write("%s\n" % x)
cls.pushd(base_path)
zfile = zipfile.ZipFile(filename)
output("Extracting files to %s" % base_path)
for file_name in zfile.namelist():
try:
output(" -> Unpacking %s" % file_name)
f = cls.open_raw(file_name, 'w')
f.write(zfile.read(file_name))
f.close()
except IOError:
output("---> Creating directory %s" % file_name)
cls.mkdir(file_name)
cls.popd()
@classmethod
def open(cls, name, mode):
"""Opens a file as utf-8"""
path = name
if not os.path.isabs(path):
path = cls.current_dir(name)
return codecs.open(path, mode, 'utf-8')
@classmethod
def open_raw(cls, name, mode):
"""Opens a file without specifying encoding"""
path = name
if not os.path.isabs(path):
path = cls.current_dir(name)
return open(path, mode)
@classmethod
def in_directory(cls, *directories):
"""Decorator to set the working directory around a function"""
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
cls.pushd(*directories)
try:
return func(*args, **kwargs)
finally:
cls.popd()
return inner
return decorator
| 8,441 | Python | .py | 223 | 28.412556 | 90 | 0.589605 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,657 | strings.py | gabrielfalcao_lettuce/lettuce/strings.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import time
import unicodedata
def utf8_string(s):
if isinstance(s, str):
s = s.decode("utf-8")
return s
def escape_if_necessary(what):
what = unicode(what)
if len(what) is 1:
what = u"[%s]" % what
return what
def get_stripped_lines(string, ignore_lines_starting_with=''):
"""Split lines at newline char, then return the array of stripped lines"""
# used e.g. to separate out all the steps in a scenario
string = unicode(string)
lines = [unicode(l.strip()) for l in string.splitlines()]
if ignore_lines_starting_with:
filter_func = lambda x: x and not x.startswith(
ignore_lines_starting_with)
else:
# by using an "identity" filter function, blank lines
# will not be included in the returned list
filter_func = lambda x: x
lines = filter(filter_func, lines)
return lines
def split_wisely(string, sep, strip=False):
string = unicode(string)
if strip:
string = string.strip()
else:
string = string.strip("\n")
sep = unicode(sep)
regex = re.compile(escape_if_necessary(sep), re.UNICODE | re.M | re.I)
items = filter(lambda x: x, regex.split(string))
if strip:
items = [i.strip() for i in items]
else:
items = [i.strip("\n") for i in items]
return [unicode(i) for i in items]
def wise_startswith(string, seed):
string = unicode(string).strip()
seed = unicode(seed)
regex = u"^%s" % re.escape(seed)
return bool(re.search(regex, string, re.I))
def remove_it(string, what):
return unicode(re.sub(unicode(what), "", unicode(string)).strip())
def column_width(string):
l = 0
for c in unicode(string):
if unicodedata.east_asian_width(c) in "WF":
l += 2
else:
l += 1
return l
def rfill(string, times, char=u" ", append=u""):
string = unicode(string)
missing = times - column_width(string)
for x in range(missing):
string += char
return unicode(string) + unicode(append)
def getlen(string):
return column_width(unicode(string)) + 1
def dicts_to_string(dicts, order):
'''
Makes dictionary ready for comparison to strings
'''
escape = "#{%s}" % unicode(time.time())
def enline(line):
return unicode(line).replace("|", escape)
def deline(line):
return line.replace(escape, '\\|')
keys_and_sizes = dict([(k, getlen(k)) for k in dicts[0].keys()])
for key in keys_and_sizes:
for data in dicts:
current_size = keys_and_sizes[key]
value = unicode(data.get(key, ''))
size = getlen(value)
if size > current_size:
keys_and_sizes[key] = size
names = []
for key in order:
size = keys_and_sizes[key]
name = u" %s" % rfill(key, size)
names.append(enline(name))
table = [u"|%s|" % "|".join(names)]
for data in dicts:
names = []
for key in order:
value = data.get(key, '')
size = keys_and_sizes[key]
names.append(enline(u" %s" % rfill(value, size)))
table.append(u"|%s|" % "|".join(names))
return deline(u"\n".join(table) + u"\n")
def parse_hashes(lines, json_format=None):
escape = "#{%s}" % unicode(time.time())
def enline(line):
return unicode(line.replace("\\|", escape)).strip()
def deline(line):
return line.replace(escape, '|')
def discard_comments(lines):
return [line for line in lines if not line.startswith('#')]
lines = discard_comments(lines)
lines = map(enline, lines)
keys = []
hashes = []
if lines:
first_line = lines.pop(0)
keys = split_wisely(first_line, u"|", True)
keys = map(deline, keys)
for line in lines:
values = split_wisely(line, u"|", True)
values = map(deline, values)
hashes.append(dict(zip(keys, values)))
return keys, hashes
def json_to_string(json_list, order):
'''
This is for aesthetic reasons, it will get the width of the largest column and
rfill the rest with spaces
'''
escape = "#{%s}" % unicode(time.time())
def enline(line):
return unicode(line).replace("|", escape)
def deline(line):
return line.replace(escape, '\\|')
nu_keys_and_sizes = list([[k.keys()[0], getlen(k.keys()[0])] for k in json_list])
maxlen = 0
for key_list in nu_keys_and_sizes:
current_size = key_list[1]
counter = 0
temp_list = json_list[counter].values()[0]
temp_maxlen = len(temp_list)
if temp_maxlen > maxlen:
maxlen = temp_maxlen
for data in temp_list:
value = unicode(data)
size = getlen(value)
if size > current_size:
key_list[1] = size
counter += 1
names = []
idx = 0
for key in nu_keys_and_sizes:
size = key[1]
name = u" %s" % rfill(key[0], size)
names.append(enline(name))
table = [u"|%s|" % "|".join(names)]
for idx in xrange(maxlen):
names = []
for data, key in zip(json_list, nu_keys_and_sizes):
try:
value = data.values()[0][idx]
except IndexError:
value = ''
size = key[1]
names.append(enline(u" %s" % rfill(value, size)))
table.append(u"|%s|" % "|".join(names))
return deline(u"\n".join(table) + u"\n")
def parse_as_json(lines):
'''
Parse lines into json objects
'''
escape = "#{%s}" % unicode(time.time())
def enline(line):
return unicode(line.replace("\\|", escape)).strip()
def deline(line):
return line.replace(escape, '|')
def discard_comments(lines):
return [line for line in lines if not line.startswith('#')]
lines = discard_comments(lines)
lines = map(enline, lines)
non_unique_keys = []
json_map = []
if lines:
first_line = lines.pop(0)
non_unique_keys = split_wisely(first_line, u"|", True)
non_unique_keys = map(deline, non_unique_keys)
rng_idx = len(non_unique_keys)
json_map = list(non_unique_keys)
for idx in xrange(rng_idx):
json_map[idx] = dict([(non_unique_keys[idx], [])])
for line in lines:
values = split_wisely(line, u"|", True)
values = map(deline, values)
for idx in xrange(rng_idx):
json_map[idx].values()[0].append(values[idx])
return non_unique_keys, json_map
def parse_multiline(lines):
multilines = []
in_multiline = False
for line in lines:
if line == '"""':
in_multiline = not in_multiline
elif in_multiline:
if line.startswith('"'):
line = line[1:]
if line.endswith('"'):
line = line[:-1]
multilines.append(line)
return u'\n'.join(multilines)
| 7,805 | Python | .py | 216 | 28.953704 | 85 | 0.598539 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,658 | languages.py | gabrielfalcao_lettuce/lettuce/languages.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
LANGUAGES = {
'en': {
'examples': u'Examples|Scenarios',
'feature': u'Feature',
'name': u'English',
'native': u'English',
'scenario': u'Scenario',
'scenario_outline': u'Scenario Outline',
'scenario_separator': u'(Scenario Outline|Scenario)',
'background': u'(?:Background)',
},
'pt-br': {
'examples': u'Exemplos|Cenários',
'feature': u'Funcionalidade',
'name': u'Portuguese',
'native': u'Português',
'scenario': u'Cenário|Cenario',
'scenario_outline': u'Esquema do Cenário|Esquema do Cenario',
'scenario_separator': u'(Esquema do Cenário|Esquema do Cenario|Cenario|Cenário)',
'background': u'(?:Contexto|Considerações)',
},
'pl': {
'examples': u'Przykład',
'feature': u'Właściwość',
'name': u'Polish',
'native': u'Polski',
'scenario': u'Scenariusz',
'scenario_outline': u'Zarys Scenariusza',
'scenario_separator': u'(Zarys Scenariusza|Scenariusz)',
'background': u'(?:Background)',
},
'ca': {
'examples': u'Exemples',
'feature': u'Funcionalitat',
'name': u'Catalan',
'native': u'Català',
'scenario': u'Escenari',
'scenario_outline': u"Esquema d'Escenari",
'scenario_separator': u"(Esquema d'Escenari|Escenari)",
'background': u'(?:Background)',
},
'es': {
'examples': u'Ejemplos',
'feature': u'Funcionalidad',
'name': u'Spanish',
'native': u'Español',
'scenario': u'Escenario',
'scenario_outline': u'Esquema de Escenario',
'scenario_separator': u'(Esquema de Escenario|Escenario)',
'background': u'(?:Contexto|Consideraciones)',
},
'hu': {
'examples': u'Példák',
'feature': u'Jellemző',
'name': u'Hungarian',
'native': u'Magyar',
'scenario': u'Forgatókönyv',
'scenario_outline': u'Forgatókönyv vázlat',
'scenario_separator': u'(Forgatókönyv|Forgatókönyv vázlat)',
'background': u'(?:Háttér)',
},
'fr': {
'examples': u'Exemples|Scénarios',
'feature': u'Fonctionnalité|Fonction',
'name': u'French',
'native': u'Français',
'scenario': u'Scénario',
'scenario_outline': u'Plan de Scénario|Plan du Scénario',
'scenario_separator': u'(Plan de Scénario|Plan du Scénario|Scénario)',
'background': u'(?:Background|Contexte)',
},
'de': {
'examples': u'Beispiele|Szenarios',
'feature': u'Funktionalität|Funktion',
'name': u'German',
'native': u'Deutsch',
'scenario': u'Szenario',
'scenario_outline': u'Szenario-Zusammenfassung|Zusammenfassung',
'scenario_separator': u'(Szenario-Zusammenfassung|Zusammenfassung)',
'background': u'(?:Background)',
},
'ja': {
'examples': u'例',
'feature': u'フィーチャ|機能',
'name': u'Japanese',
'native': u'日本語',
'scenario': u'シナリオ',
'scenario_outline': u'シナリオアウトライン|シナリオテンプレート|テンプレ|シナリオテンプレ',
'scenario_separator': u'(シナリオ|シナリオアウトライン|シナリオテンプレート|テンプレ|シナリオテンプレ)',
'background': u'背景',
},
'tr': {
'examples': u'Örnekler',
'feature': u'Özellik',
'name': u'Turkish',
'native': u'Türkçe',
'scenario': u'Senaryo',
'scenario_outline': u'Senaryo taslağı|Senaryo Taslağı',
'scenario_separator': u'(Senaryo taslağı|Senaryo Taslağı|Senaryo)',
'background': u'(?:Background)',
},
'zh-CN': {
'examples': u'例如|场景集',
'feature': u'特性',
'name': u'Simplified Chinese',
'native': u'简体中文',
'scenario': u'场景',
'scenario_outline': u'场景模板',
'scenario_separator': u'(场景模板|场景)',
'background': u'(?:背景)',
},
'zh-TW': {
'examples': u'例如|場景集',
'feature': u'特性',
'name': u'Traditional Chinese',
'native': u'繁體中文',
'scenario': u'場景',
'scenario_outline': u'場景模板',
'scenario_separator': u'(場景模板|場景)',
'background': u'(?:背景)',
},
'ru': {
'examples': u'Примеры|Сценарии',
'feature': u'Функционал',
'name': u'Russian',
'native': u'Русский',
'scenario': u'Сценарий',
'scenario_outline': u'Структура сценария',
'scenario_separator': u'(Структура сценария|Сценарий)',
'background': u'(?:Background|Предыстория)',
},
'uk': {
'examples': u'Приклади|Сценарії',
'feature': u'Функціонал',
'name': u'Ukrainian',
'native': u'Українська',
'scenario': u'Сценарій',
'scenario_outline': u'Структура сценарію',
'scenario_separator': u'(Структура сценарію|Сценарій)',
'background': u'(?:Background)',
},
'it': {
'examples': u'Esempi|Scenari|Scenarii',
'feature': u'Funzionalità|Funzione',
'name': u'Italian',
'native': u'Italiano',
'scenario': u'Scenario',
'scenario_outline': u'Schema di Scenario|Piano di Scenario',
'scenario_separator': u'(Schema di Scenario|Piano di Scenario|Scenario)',
'background': u'(?:Background)',
},
'no': {
'examples': u'Eksempler',
'feature': u'Egenskaper',
'name': u'Norwegian',
'native': u'Norsk',
'scenario': u'Situasjon',
'scenario_outline': u'Situasjon Oversikt',
'scenario_separator': u'(Situasjon Oversikt|Situasjon)',
'background': u'(?:Bakgrunn)',
},
'sv': {
'examples': u'Exempel|Scenarion',
'feature': u'Egenskaper',
'name': u'Swedish',
'native': u'Svenska',
'scenario': u'Scenario',
'scenario_outline': u'Scenarioöversikt',
'scenario_separator': u'(Scenarioöversikt|Scenario)',
'background': u'(?:Context)',
},
'cz': {
'examples': u'Příklady',
'feature': u'Požadavek',
'name': u'Czech',
'native': u'Čeština',
'scenario': u'Scénář|Požadavek',
'scenario_outline': u'Náčrt scénáře',
'scenario_separator': u'(Náčrt scénáře|Scénář)',
'background': u'(?:Background)',
},
}
| 7,581 | Python | .py | 198 | 28.065657 | 89 | 0.581935 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,659 | __init__.py | gabrielfalcao_lettuce/lettuce/__init__.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__version__ = version = '0.2.23'
release = 'kryptonite'
import os
import sys
import traceback
try:
from imp import reload
except ImportError:
# python 2.5 fallback
pass
import random
from lettuce.core import Feature, TotalResult
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.terrain import world
from lettuce.decorators import step, steps
from lettuce.registry import call_hook
from lettuce.registry import STEP_REGISTRY
from lettuce.registry import CALLBACK_REGISTRY
from lettuce.exceptions import StepLoadingError, LettuceRunnerError
from lettuce.plugins import (
xunit_output,
subunit_output,
autopdb,
smtp_mail_queue,
jsonreport_output,
)
from lettuce import fs
from lettuce import exceptions
try:
from colorama import init as ms_windows_workaround
ms_windows_workaround()
except ImportError:
pass
__all__ = [
'after',
'before',
'step',
'steps',
'world',
'STEP_REGISTRY',
'CALLBACK_REGISTRY',
'call_hook',
]
try:
terrain = fs.FileSystem._import("terrain")
reload(terrain)
except Exception as e:
if not "No module named terrain" in str(e):
string = 'Lettuce has tried to load the conventional environment ' \
'module "terrain"\nbut it has errors, check its contents and ' \
'try to run lettuce again.\n\nOriginal traceback below:\n\n'
sys.stderr.write(string)
sys.stderr.write(exceptions.traceback.format_exc(e))
raise LettuceRunnerError(string)
class Runner(object):
""" Main lettuce's test runner
Takes a base path as parameter (string), so that it can look for
features and step definitions on there.
"""
def __init__(self, base_path, scenarios=None,
verbosity=0, no_color=False, random=False,
enable_xunit=False, xunit_filename=None,
enable_subunit=False, subunit_filename=None,
enable_jsonreport=False, jsonreport_filename=None,
tags=None, failfast=False, auto_pdb=False,
smtp_queue=None, root_dir=None):
""" lettuce.Runner will try to find a terrain.py file and
import it from within `base_path`
"""
self.tags = tags
self.single_feature = None
if os.path.isfile(base_path) and os.path.exists(base_path):
self.single_feature = base_path
base_path = os.path.dirname(base_path)
sys.path.insert(0, base_path)
self.loader = fs.FeatureLoader(base_path, root_dir)
self.verbosity = verbosity
self.scenarios = scenarios and map(int, scenarios.split(",")) or None
self.failfast = failfast
if auto_pdb:
autopdb.enable(self)
sys.path.remove(base_path)
if verbosity is 0:
from lettuce.plugins import non_verbose as output
elif verbosity is 1:
from lettuce.plugins import dots as output
elif verbosity is 2:
from lettuce.plugins import scenario_names as output
elif verbosity is 3:
if no_color:
from lettuce.plugins import shell_output as output
else:
from lettuce.plugins import colored_shell_output as output
self.random = random
if enable_xunit:
xunit_output.enable(filename=xunit_filename, tags=tags)
if smtp_queue:
smtp_mail_queue.enable()
if enable_subunit:
subunit_output.enable(filename=subunit_filename)
if enable_jsonreport:
jsonreport_output.enable(filename=jsonreport_filename)
reload(output)
self.output = output
def run(self):
""" Find and load step definitions, and them find and load
features under `base_path` specified on constructor
"""
results = []
if self.single_feature:
features_files = [self.single_feature]
else:
features_files = self.loader.find_feature_files()
if self.random:
random.shuffle(features_files)
if not features_files:
self.output.print_no_features_found(self.loader.base_dir)
return
# only load steps if we've located some features.
# this prevents stupid bugs when loading django modules
# that we don't even want to test.
try:
self.loader.find_and_load_step_definitions()
except StepLoadingError as e:
print "Error loading step definitions:\n", e
return
call_hook('before', 'all')
failed = False
try:
for filename in features_files:
feature = Feature.from_file(filename)
results.append(
feature.run(self.scenarios,
tags=self.tags,
random=self.random,
failfast=self.failfast))
except exceptions.LettuceSyntaxError as e:
sys.stderr.write(e.msg)
failed = True
except exceptions.NoDefinitionFound, e:
sys.stderr.write(e.msg)
failed = True
except:
if not self.failfast:
e = sys.exc_info()[1]
print "Died with %s" % str(e)
traceback.print_exc()
else:
print
print ("Lettuce aborted running any more tests "
"because was called with the `--failfast` option")
failed = True
finally:
total = TotalResult(results)
total.output_format()
call_hook('after', 'all', total)
if failed:
raise LettuceRunnerError("Test failed.")
return total
| 6,642 | Python | .py | 176 | 29 | 77 | 0.63478 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,660 | core.py | gabrielfalcao_lettuce/lettuce/core.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import codecs
import unicodedata
from copy import deepcopy
from fuzzywuzzy import fuzz
from itertools import chain
from random import shuffle
from lettuce import strings
from lettuce import languages
from lettuce.fs import FileSystem
from lettuce.registry import STEP_REGISTRY
from lettuce.registry import call_hook
from lettuce.exceptions import ReasonToFail
from lettuce.exceptions import NoDefinitionFound
from lettuce.exceptions import LettuceSyntaxError
fs = FileSystem()
class REP(object):
"RegEx Pattern"
first_of = re.compile(ur'^first_of_')
last_of = re.compile(ur'^last_of_')
language = re.compile(u"\s*#\s*language:[ ]*([^\s]+)")
within_double_quotes = re.compile(r'("[^"]+")')
within_single_quotes = re.compile(r"('[^']+')")
only_whitespace = re.compile('^\s*$')
last_tag_extraction_regex = re.compile(ur'(?:\s|^)[@](\S+)\s*$')
first_tag_extraction_regex = re.compile(ur'^\s*[@](\S+)(?:\s|$)')
tag_strip_regex = re.compile(ur'(?:(?:^\s*|\s+)[@]\S+\s*)+$', re.DOTALL)
comment_strip1 = re.compile(ur'(^[^\'"]*)[#]([^\'"]*)$')
comment_strip2 = re.compile(ur'(^[^\'"]+)[#](.*)$')
class HashList(list):
__base_msg = 'The step "%s" have no table defined, so ' \
'that you can\'t use step.hashes.%s'
def __init__(self, step, *args, **kw):
self.step = step
super(HashList, self).__init__(*args, **kw)
def values_under(self, key):
msg = 'The step "%s" have no table column with the key "%s". ' \
'Could you check your step definition for that ? ' \
'Maybe there is a typo :)'
try:
return [h[key] for h in self]
except KeyError:
raise AssertionError(msg % (self.step.sentence, key))
@property
def first(self):
if len(self) > 0:
return self[0]
raise AssertionError(self.__base_msg % (self.step.sentence, 'first'))
@property
def last(self):
if len(self) > 0:
return self[-1]
raise AssertionError(self.__base_msg % (self.step.sentence, 'last'))
class Language(object):
code = 'en'
name = 'English'
native = 'English'
feature = 'Feature'
scenario = 'Scenario'
examples = 'Examples|Scenarios'
scenario_outline = 'Scenario Outline'
scenario_separator = 'Scenario( Outline)?'
background = "Background"
def __init__(self, code=u'en'):
self.code = code
for attr, value in languages.LANGUAGES[code].items():
setattr(self, attr, unicode(value))
def __repr__(self):
return '<Language "%s">' % self.code
def __getattr__(self, attr):
for pattern in [REP.first_of, REP.last_of]:
if pattern.match(attr):
name = pattern.sub(u'', attr)
return unicode(getattr(self, name, u'').split(u"|")[0])
return super(Language, self).__getattribute__(attr)
@property
def non_capturable_scenario_separator(self):
return re.sub(r'^[(]', '(?:', self.scenario_separator)
@classmethod
def guess_from_string(cls, string):
match = re.search(REP.language, string)
if match:
instance = cls(match.group(1))
else:
instance = cls()
return instance
class StepDefinition(object):
"""A step definition is a wrapper for user-defined callbacks. It
gets a few metadata from file, such as filename and line number"""
def __init__(self, step, function):
self.function = function
self.file = fs.relpath(function.func_code.co_filename)
self.line = function.func_code.co_firstlineno + 1
self.step = step
def __call__(self, *args, **kw):
"""Method that actually wrapps the call to step definition
callback. Sends step object as first argument
"""
try:
ret = self.function(self.step, *args, **kw)
self.step.passed = True
self.step.failed = False
except Exception as e:
self.step.failed = True
self.step.passed = False
self.step.why = ReasonToFail(self.step, e)
raise
return ret
class StepDescription(object):
"""A simple object that holds filename and line number of a step
description (step within feature file)"""
def __init__(self, line, filename):
self.file = filename
if self.file:
self.file = fs.relpath(self.file)
else:
self.file = "unknown file"
self.line = line or 0
class ScenarioDescription(object):
"""A simple object that holds filename and line number of a scenario
description (scenario within feature file)"""
def __init__(self, scenario, filename, string, language):
self.file = fs.relpath(filename)
self.line = None
for pline, part in enumerate(string.splitlines()):
part = part.strip()
# for performance reasons, avoid using the regex on all lines:
# first check if the scenario name is present, and use regex to verify this is the scenario definition
if (scenario.name in part) and re.match(u"%s:[ ]+" % language.scenario_separator + re.escape(scenario.name), part):
self.line = pline + 1
break
class FeatureDescription(object):
"""A simple object that holds filename and line number of a feature
description"""
def __init__(self, feature, filename, string, language):
lines = [l.strip() for l in string.splitlines()]
self.file = fs.relpath(filename)
self.line = None
described_at = []
description_lines = strings.get_stripped_lines(feature.description)
for pline, part in enumerate(lines):
part = part.strip()
line = pline + 1
if re.match(u"(?:%s): " % language.feature, part):
self.line = line
else:
for description in description_lines:
if part == description:
described_at.append(line)
self.description_at = tuple(described_at)
class Step(object):
""" Object that represents each step on feature files."""
has_definition = False
indentation = 4
table_indentation = indentation + 2
defined_at = None
why = None
ran = False
passed = None
failed = None
related_outline = None
scenario = None
background = None
display = True
columns = None
matrix = None
def __init__(self, sentence, remaining_lines, line=None, filename=None):
self.sentence = sentence
self.original_sentence = sentence
self._remaining_lines = remaining_lines
keys, hashes, self.multiline, columns, nukeys = self._parse_remaining_lines(remaining_lines)
self.keys = tuple(keys)
self.non_unique_keys = nukeys
self.hashes = HashList(self, hashes)
self.columns = columns
self.described_at = StepDescription(line, filename)
self.proposed_method_name, self.proposed_sentence = self.propose_definition()
def propose_definition(self):
sentence = unicode(self.original_sentence)
method_name = sentence
groups = [
('"', REP.within_double_quotes, r'"([^"]*)"'),
("'", REP.within_single_quotes, r"\'([^\']*)\'"),
]
attribute_names = []
for char, group, template in groups:
match_groups = group.search(self.original_sentence)
if match_groups:
for index, match in enumerate(group.findall(sentence)):
sentence = sentence.replace(match, template)
group_name = u"group%d" % (index + 1)
method_name = method_name.replace(match, group_name)
attribute_names.append(group_name)
method_name = unicodedata.normalize('NFKD', method_name) \
.encode('ascii', 'ignore')
method_name = '%s(step%s)' % (
"_".join(re.findall("\w+", method_name)).lower(),
attribute_names and (", %s" % ", ".join(attribute_names)) or "")
return method_name, sentence
def solve_and_clone(self, data, display_step):
sentence = self.sentence
multiline = self.multiline
hashes = self.hashes[:] # deep copy
for k, v in data.items():
def evaluate(stuff):
return stuff.replace(u'<%s>' % unicode(k), unicode(v))
def evaluate_hash_value(hash_row):
new_row = {}
for rkey, rvalue in hash_row.items():
new_row[rkey] = evaluate(rvalue)
return new_row
sentence = evaluate(sentence)
multiline = evaluate(multiline)
hashes = map(evaluate_hash_value, hashes)
new = deepcopy(self)
new.sentence = sentence
new.multiline = multiline
new.hashes = hashes
new.display = display_step
return new
def _calc_list_length(self, lst):
length = self.table_indentation + 2
for item in lst:
length += strings.column_width(item) + 2
if len(lst) > 1:
length += 1
return length
def _calc_key_length(self, data):
return self._calc_list_length(data.keys())
def _calc_value_length(self, data):
return self._calc_list_length(data.values())
@property
def max_length(self):
max_length_sentence = strings.column_width(self.sentence) + \
self.indentation
max_length_original = strings.column_width(self.original_sentence) + \
self.indentation
max_length = max([max_length_original, max_length_sentence])
for data in self.hashes:
key_size = self._calc_key_length(data)
if key_size > max_length:
max_length = key_size
value_size = self._calc_value_length(data)
if value_size > max_length:
max_length = value_size
return max_length
@property
def parent(self):
return self.scenario or self.background
def represent_string(self, string):
head = ' ' * self.indentation + string
where = self.described_at
if self.defined_at:
where = self.defined_at
return strings.rfill(head, self.parent.feature.max_length + 1, append=u'# %s:%d\n' % (where.file, where.line))
def represent_hashes(self):
lines = strings.dicts_to_string(self.hashes, self.keys).splitlines()
return u"\n".join([(u" " * self.table_indentation) + line for line in lines]) + "\n"
def represent_columns(self):
lines = strings.json_to_string(self.columns, self.non_unique_keys).splitlines()
return u"\n".join([(u" " * self.table_indentation) + line for line in lines]) + "\n"
def __unicode__(self):
return u'<Step: "%s">' % self.sentence
def __repr__(self):
return unicode(self).encode('utf-8')
def _parse_remaining_lines(self, lines):
multiline = strings.parse_multiline(lines)
keys, hashes = strings.parse_hashes(lines)
non_unique_keys, columns = strings.parse_as_json(lines)
return keys, hashes, multiline, columns, non_unique_keys
def _get_match(self, ignore_case):
matched, func = None, lambda: None
for step, func in STEP_REGISTRY.items():
regex = STEP_REGISTRY.get_regex(step, ignore_case)
matched = regex.search(self.sentence)
if matched:
break
return matched, StepDefinition(self, func)
def pre_run(self, ignore_case, with_outline=None):
matched, step_definition = self._get_match(ignore_case)
self.related_outline = with_outline
if not self.defined_at:
if not matched:
raise NoDefinitionFound(self)
self.has_definition = True
self.defined_at = step_definition
return matched, step_definition
def given(self, string):
return self.behave_as(string)
def when(self, string):
return self.behave_as(string)
def then(self, string):
return self.behave_as(string)
def behave_as(self, string):
""" Parses and runs steps given in string form.
In your step definitions, you can use this to run one step from another.
e.g.
@step('something ordinary')
def something(step):
step.behave_as('Given something defined elsewhere')
@step('something defined elsewhere')
def elsewhere(step):
# actual step behavior, maybe.
This will raise the error of the first failing step (thus halting
execution of the step) if a subordinate step fails.
"""
lines = string.split('\n')
steps = self.many_from_lines(lines)
if hasattr(self, 'scenario'):
for step in steps:
step.scenario = self.scenario
(_, _, steps_failed, steps_undefined) = self.run_all(steps)
if not steps_failed and not steps_undefined:
self.passed = True
self.failed = False
return self.passed
self.passed = False
self.failed = True
assert not steps_failed, steps_failed[0].why.exception
assert not steps_undefined, "Undefined step: %s" % steps_undefined[0].sentence
def run(self, ignore_case):
"""Runs a step, trying to resolve it on available step
definitions"""
matched, step_definition = self.pre_run(ignore_case)
self.ran = True
kw = matched.groupdict()
if kw:
step_definition(**kw)
else:
groups = matched.groups()
step_definition(*groups)
self.passed = True
return True
@classmethod
def _handle_inline_comments(klass, line):
line = REP.comment_strip1.sub(r'\g<1>\g<2>', line)
line = REP.comment_strip2.sub(r'\g<1>', line)
return line
@staticmethod
def run_all(steps, outline=None, run_callbacks=False, ignore_case=True, failfast=False, display_steps=True, reasons_to_fail=None):
"""Runs each step in the given list of steps.
Returns a tuple of five lists:
- The full set of steps executed
- The steps that passed
- The steps that failed
- The steps that were undefined
- The reason for each failing step (indices matching per above)
"""
all_steps = []
steps_passed = []
steps_failed = []
steps_undefined = []
if reasons_to_fail is None:
reasons_to_fail = []
for step in steps:
if outline:
step = step.solve_and_clone(outline, display_steps)
try:
step.pre_run(ignore_case, with_outline=outline)
if run_callbacks:
call_hook('before_each', 'step', step)
call_hook('before_output', 'step', step)
if not steps_failed and not steps_undefined:
step.run(ignore_case)
steps_passed.append(step)
except NoDefinitionFound as e:
steps_undefined.append(e.step)
except Exception as e:
steps_failed.append(step)
reasons_to_fail.append(step.why)
if failfast:
raise
finally:
all_steps.append(step)
call_hook('after_output', 'step', step)
if run_callbacks:
call_hook('after_each', 'step', step)
return (all_steps, steps_passed, steps_failed, steps_undefined)
@classmethod
def many_from_lines(klass, lines, filename=None, original_string=None):
"""Parses a set of steps from lines of input.
This will correctly parse and produce a list of steps from lines without
any Scenario: heading at the top. Examples in table form are correctly
parsed, but must be well-formed under a regular step sentence.
"""
invalid_first_line_error = '\nFirst line of step "%s" is in %s form.'
if lines and strings.wise_startswith(lines[0], u'|'):
raise LettuceSyntaxError(
None,
invalid_first_line_error % (lines[0], 'table'))
if lines and strings.wise_startswith(lines[0], u'"""'):
raise LettuceSyntaxError(
None,
invalid_first_line_error % (lines[0], 'multiline'))
# Select only lines that aren't end-to-end whitespace and aren't tags
# Tags could be included as steps if the first scenario following a background is tagged
# This then causes the test to fail, because lettuce looks for the step's definition (which doesn't exist)
lines = filter(lambda x: not (REP.only_whitespace.match(x) or re.match(r'^\s*@', x)), lines)
step_strings = []
in_multiline = False
for line in lines:
if strings.wise_startswith(line, u'"""'):
in_multiline = not in_multiline
step_strings[-1] += "\n%s" % line
elif strings.wise_startswith(line, u"|") or in_multiline:
step_strings[-1] += "\n%s" % line
elif '#' in line:
step_strings.append(klass._handle_inline_comments(line))
else:
step_strings.append(line)
mkargs = lambda s: [s, filename, original_string]
return [klass.from_string(*mkargs(s)) for s in step_strings]
@classmethod
def from_string(cls, string, with_file=None, original_string=None):
"""Creates a new step from string"""
lines = strings.get_stripped_lines(string)
sentence = lines.pop(0)
line = None
if with_file and original_string:
for pline, line in enumerate(original_string.splitlines()):
if sentence in line:
line = pline + 1
break
return cls(sentence,
remaining_lines=lines,
line=line,
filename=with_file)
class Scenario(object):
""" Object that represents each scenario on feature files."""
described_at = None
indentation = 2
table_indentation = indentation + 2
def __init__(self, name, remaining_lines, keys, outlines,
with_file=None,
original_string=None,
language=None,
tags=None):
self.feature = None
if not language:
language = language()
self.name = name
self.language = language
self.tags = tags
self.remaining_lines = remaining_lines
self.steps = self._parse_remaining_lines(remaining_lines,
with_file,
original_string)
self.keys = keys
self.outlines = outlines
self.with_file = with_file
self.original_string = original_string
if with_file and original_string:
scenario_definition = ScenarioDescription(self, with_file,
original_string,
language)
self._set_definition(scenario_definition)
self.solved_steps = list(self._resolve_steps(
self.steps, self.outlines, with_file, original_string))
self._add_myself_to_steps()
@property
def max_length(self):
if self.outlines:
prefix = self.language.first_of_scenario_outline + ":"
else:
prefix = self.language.first_of_scenario + ":"
max_length = strings.column_width(
u"%s %s" % (prefix, self.name)) + self.indentation
for step in self.steps:
if step.max_length > max_length:
max_length = step.max_length
for outline in self.outlines:
key_size = self._calc_key_length(outline)
if key_size > max_length:
max_length = key_size
value_size = self._calc_value_length(outline)
if value_size > max_length:
max_length = value_size
return max_length
def _calc_list_length(self, lst):
length = self.table_indentation + 2
for item in lst:
length += len(item) + 2
if len(lst) > 1:
length += 2
return length
def _calc_key_length(self, data):
return self._calc_list_length(data.keys())
def _calc_value_length(self, data):
return self._calc_list_length(data.values())
def __unicode__(self):
return u'<Scenario: "%s">' % self.name
def __repr__(self):
return unicode(self).encode('utf-8')
def matches_tags(self, tags):
if tags is None:
return True
has_exclusionary_tags = any([t.startswith('-') for t in tags])
if not self.tags and not has_exclusionary_tags:
return False
matched = []
if isinstance(self.tags, list):
for tag in self.tags:
if tag in tags:
return True
else:
self.tags = []
for tag in tags:
exclude = tag.startswith('-')
if exclude:
tag = tag[1:]
fuzzable = tag.startswith('~')
if fuzzable:
tag = tag[1:]
result = tag in self.tags
if fuzzable:
fuzzed = []
for internal_tag in self.tags:
ratio = fuzz.ratio(tag, internal_tag)
if exclude:
fuzzed.append(ratio <= 80)
else:
fuzzed.append(ratio > 80)
result = any(fuzzed)
elif exclude:
result = tag not in self.tags
matched.append(result)
return all(matched)
@property
def evaluated(self):
for outline_idx, outline in enumerate(self.outlines):
steps = []
for step in self.steps:
new_step = step.solve_and_clone(outline, display_step=(outline_idx == 0))
new_step.original_sentence = step.sentence
new_step.scenario = self
steps.append(new_step)
yield (outline, steps)
@property
def ran(self):
return all([step.ran for step in self.steps])
@property
def passed(self):
return self.ran and all([step.passed for step in self.steps])
@property
def failed(self):
return any([step.failed for step in self.steps])
def run(self, ignore_case, failfast=False):
"""Runs a scenario, running each of its steps. Also call
before_each and after_each callbacks for steps and scenario"""
results = []
call_hook('before_each', 'scenario', self)
def run_scenario(almost_self, order=-1, outline=None, run_callbacks=False):
try:
if outline:
self._report_outline_hook(outline, True)
if self.background:
self.background.run(ignore_case)
reasons_to_fail = []
all_steps, steps_passed, steps_failed, steps_undefined = Step.run_all(self.steps, outline, run_callbacks, ignore_case, failfast=failfast, display_steps=(order < 1), reasons_to_fail=reasons_to_fail)
except:
if outline:
# Can't use "finally" here since we need to call it before after_each_scenario
self._report_outline_hook(outline, False)
call_hook('after_each', 'scenario', self)
raise
finally:
if outline:
call_hook('outline', 'scenario', self, order, outline,
reasons_to_fail)
if outline:
self._report_outline_hook(outline, False)
skip = lambda x: x not in steps_passed and x not in steps_undefined and x not in steps_failed
steps_skipped = filter(skip, all_steps)
return ScenarioResult(
self,
all_steps,
steps_passed,
steps_failed,
steps_skipped,
steps_undefined,
outline
)
if self.outlines:
for index, outline in enumerate(self.outlines):
results.append(run_scenario(self, index, outline, run_callbacks=True))
else:
results.append(run_scenario(self, run_callbacks=True))
call_hook('after_each', 'scenario', self)
return results
def _add_myself_to_steps(self):
for step in self.steps:
step.scenario = self
for step in self.solved_steps:
step.scenario = self
def _report_outline_hook(self, outline, started):
"""
Function called before each outline and after each outline to provide hooks
:param outline: dict with examples row
:type outline dict
:param started: is outline started or finished
:type started bool
"""
call_hook('before_each' if started else 'after_each', 'outline', self, outline)
def _resolve_steps(self, steps, outlines, with_file, original_string):
for outline_idx, outline in enumerate(outlines):
for step in steps:
yield step.solve_and_clone(outline, display_step=(outline_idx == 0))
def _parse_remaining_lines(self, lines, with_file, original_string):
invalid_first_line_error = '\nInvalid step on scenario "%s".\n' \
'Maybe you killed the first step text of that scenario\n'
if lines and strings.wise_startswith(lines[0], u'|'):
raise LettuceSyntaxError(
with_file,
invalid_first_line_error % self.name)
return Step.many_from_lines(lines, with_file, original_string)
def _set_definition(self, definition):
self.described_at = definition
def represented(self):
make_prefix = lambda x: u"%s%s: " % (u' ' * self.indentation, x)
if self.outlines:
prefix = make_prefix(self.language.first_of_scenario_outline)
else:
prefix = make_prefix(self.language.first_of_scenario)
head_parts = []
if self.tags:
tags = ['@%s' % t for t in self.tags]
head_parts.append(u' ' * self.indentation)
head_parts.append(' '.join(tags) + '\n')
head_parts.append(prefix + self.name)
head = ''.join(head_parts)
appendix = ''
if self.described_at:
fmt = (self.described_at.file, self.described_at.line)
appendix = u'# %s:%d\n' % fmt
max_length = self.max_length
if self.feature:
max_length = self.feature.max_length
return strings.rfill(
head, max_length + 1,
append=appendix)
def represent_examples(self):
lines = strings.dicts_to_string(self.outlines, self.keys).splitlines()
return "\n".join([(u" " * self.table_indentation) + line for line in lines]) + '\n'
@classmethod
def from_string(new_scenario, string,
with_file=None,
original_string=None,
language=None,
tags=None):
""" Creates a new scenario from string"""
# ignoring comments
string = "\n".join(strings.get_stripped_lines(string, ignore_lines_starting_with='#'))
if not language:
language = Language()
splitted = strings.split_wisely(string, u"(%s):" % language.examples, True)
string = splitted[0]
keys = []
outlines = []
if len(splitted) > 1:
parts = [l for l in splitted[1:] if l not in language.examples]
part = "".join(parts)
keys, outlines = strings.parse_hashes(strings.get_stripped_lines(part))
lines = strings.get_stripped_lines(string)
scenario_line = lines.pop(0).strip()
for repl in (language.scenario_outline, language.scenario):
scenario_line = strings.remove_it(scenario_line, u"(%s): " % repl).strip()
scenario = new_scenario(
name=scenario_line,
remaining_lines=lines,
keys=keys,
outlines=outlines,
with_file=with_file,
original_string=original_string,
language=language,
tags=tags,
)
return scenario
class Background(object):
indentation = 2
def __init__(self, lines, feature,
with_file=None,
original_string=None,
language=None):
self.steps = map(self.add_self_to_step, Step.many_from_lines(
lines, with_file, original_string))
self.feature = feature
self.original_string = original_string
self.language = language
def add_self_to_step(self, step):
step.background = self
return step
def run(self, ignore_case):
call_hook('before_each', 'background', self)
results = []
for step in self.steps:
matched, step_definition = step.pre_run(ignore_case)
call_hook('before_each', 'step', step)
try:
results.append(step.run(ignore_case))
except Exception as e:
print e
pass
call_hook('after_each', 'step', step)
call_hook('after_each', 'background', self, results)
return results
def __repr__(self):
return '<Background for feature: {0}>'.format(self.feature.name)
@property
def max_length(self):
max_length = 0
for step in self.steps:
if step.max_length > max_length:
max_length = step.max_length
return max_length
def represented(self):
return ((' ' * self.indentation) + 'Background:')
@classmethod
def from_string(new_background,
lines,
feature,
with_file=None,
original_string=None,
language=None):
return new_background(
lines,
feature,
with_file=with_file,
original_string=original_string,
language=language)
class Feature(object):
""" Object that represents a feature."""
described_at = None
def __init__(self, name, remaining_lines, with_file, original_string,
language=None):
if not language:
language = language()
self.name = name
self.language = language
self.original_string = original_string
(self.background,
self.scenarios,
self.description) = self._parse_remaining_lines(
remaining_lines,
original_string,
with_file)
if with_file:
feature_definition = FeatureDescription(self,
with_file,
original_string,
language)
self._set_definition(feature_definition)
if original_string and '@' in self.original_string:
self.tags = self._find_tags_in(original_string)
else:
self.tags = None
self._add_myself_to_scenarios()
@property
def max_length(self):
max_length = strings.column_width(u"%s: %s" % (
self.language.first_of_feature, self.name))
if max_length == 0:
# in case feature has two keywords
max_length = strings.column_width(u"%s: %s" % (
self.language.last_of_feature, self.name))
for line in self.description.splitlines():
length = strings.column_width(line.strip()) + Scenario.indentation
if length > max_length:
max_length = length
for scenario in self.scenarios:
if scenario.max_length > max_length:
max_length = scenario.max_length
return max_length
def _add_myself_to_scenarios(self):
for scenario in self.scenarios:
scenario.feature = self
if scenario.tags is not None and self.tags:
scenario.tags.extend(self.tags)
def _find_tags_in(self, original_string):
broad_regex = re.compile(ur"([@].*)%s: (%s)" % (
self.language.feature,
re.escape(self.name)), re.DOTALL)
regexes = [broad_regex]
def try_finding_with(regex):
found = regex.search(original_string)
if found:
tag_lines = found.group().splitlines()
tags = list(chain(*map(self._extract_tag, tag_lines)))
return tags
for regex in regexes:
found = try_finding_with(regex)
if found:
return found
return []
def _extract_tag(self, item):
regex = re.compile(r'(?:(?:^|\s+)[@]([^@\s]+))')
found = regex.findall(item)
return found
def __unicode__(self):
return u'<%s: "%s">' % (self.language.first_of_feature, self.name)
def __repr__(self):
return unicode(self).encode('utf-8')
def get_head(self):
return u"%s: %s" % (self.language.first_of_feature, self.name)
def represented(self):
length = self.max_length + 1
filename = self.described_at.file
line = self.described_at.line
head = strings.rfill(self.get_head(), length,
append=u"# %s:%d\n" % (filename, line))
for description, line in zip(self.description.splitlines(),
self.described_at.description_at):
head += strings.rfill(
u" %s" % description, length, append=u"# %s:%d\n" % (filename, line))
return head
@classmethod
def from_string(new_feature, string, with_file=None, language=None):
"""Creates a new feature from string"""
lines = strings.get_stripped_lines(
string,
ignore_lines_starting_with='#',
)
if not language:
language = Language()
found = len(re.findall(r'^[ \t]*(?:%s):[ ]*\w+' % language.feature, "\n".join(lines), re.U + re.M))
if found > 1:
raise LettuceSyntaxError(with_file,
'A feature file must contain ONLY ONE feature!')
elif found == 0:
raise LettuceSyntaxError(with_file,
'Features must have a name. e.g: "Feature: This is my name"')
while lines:
matched = re.search(r'^[ \t]*(?:%s):(.*)' % language.feature, lines[0], re.I)
if matched:
name = matched.groups()[0].strip()
break
lines.pop(0)
feature = new_feature(name=name,
remaining_lines=lines,
with_file=with_file,
original_string=string,
language=language)
return feature
@classmethod
def from_file(new_feature, filename):
"""Creates a new feature from filename"""
f = codecs.open(filename, "r", "utf-8")
string = f.read()
f.close()
language = Language.guess_from_string(string)
feature = new_feature.from_string(string, with_file=filename, language=language)
return feature
def _set_definition(self, definition):
self.described_at = definition
def _extract_tags(self, string, extract_regex=REP.last_tag_extraction_regex):
tags = []
while True:
m = extract_regex.search(string)
if not m:
return tags, string
tags.insert(0, m.groups()[0])
string = extract_regex.sub('', string)
def _strip_next_scenario_tags(self, string):
stripped = REP.tag_strip_regex.sub('', string)
return stripped
def _extract_desc_and_bg(self, joined):
if not re.search(self.language.background, joined):
return joined, None
parts = strings.split_wisely(
joined, "(%s):\s*" % self.language.background)
description = parts.pop(0)
if not re.search(self.language.background, description):
if parts:
parts = parts[1:]
else:
description = ""
background_string = "".join(parts).splitlines()
return description, background_string
def _check_scenario_syntax(self, lines, filename):
empty_scenario = ('%s:' % (self.language.first_of_scenario)).lower()
for line in lines:
if line.lower() == empty_scenario:
raise LettuceSyntaxError(
filename,
('In the feature "%s", scenarios '
'must have a name, make sure to declare a scenario like '
'this: `Scenario: name of your scenario`' % self.name),
)
def _parse_remaining_lines(self, lines, original_string, with_file=None):
joined = u"\n".join(lines[1:])
self._check_scenario_syntax(lines, filename=with_file)
# replacing occurrences of Scenario Outline, with just "Scenario"
scenario_prefix = u'%s:' % self.language.first_of_scenario
regex = re.compile(
ur"%s:[\t\r\f\v]*" % self.language.scenario_separator, re.U | re.I | re.DOTALL)
joined = regex.sub(scenario_prefix, joined)
parts = strings.split_wisely(joined, scenario_prefix)
description = u""
background = None
tags_scenario = []
if not re.search("^" + scenario_prefix, joined):
if not parts:
raise LettuceSyntaxError(
with_file,
(u"Features must have scenarios.\n"
"Please refer to the documentation available at http://lettuce.it for more information.")
)
tags_scenario, description_and_background = self._extract_tags(parts[0])
description, background_lines = self._extract_desc_and_bg(description_and_background)
background = background_lines and Background.from_string(
background_lines,
self,
with_file=with_file,
original_string=original_string,
language=self.language,
) or None
parts.pop(0)
prefix = self.language.first_of_scenario
upcoming_scenarios = [
u"%s: %s" % (prefix, s) for s in parts if s.strip()]
kw = dict(
original_string=original_string,
with_file=with_file,
language=self.language,
)
scenarios = []
while upcoming_scenarios:
tags_next_scenario, current = self._extract_tags(upcoming_scenarios[0])
current = self._strip_next_scenario_tags(upcoming_scenarios.pop(0))
params = dict(
tags=tags_scenario,
)
params.update(kw)
current_scenario = Scenario.from_string(current, **params)
current_scenario.background = background
scenarios.append(current_scenario)
tags_scenario = tags_next_scenario
return background, scenarios, description
def run(self, scenarios=None, ignore_case=True, tags=None, random=False, failfast=False):
scenarios_ran = []
if random:
shuffle(self.scenarios)
scenario_nums_to_run = None
if isinstance(scenarios, (tuple, list)):
if all(map(lambda x: isinstance(x, int), scenarios)):
scenario_nums_to_run = scenarios
def should_run_scenario(num, scenario):
return scenario.matches_tags(tags) and \
(scenario_nums_to_run is None or num in scenario_nums_to_run)
scenarios_to_run = [scenario for num, scenario in enumerate(self.scenarios, start=1)
if should_run_scenario(num, scenario)]
# If no scenarios in this feature will run, don't run the feature hooks.
if not scenarios_to_run:
return FeatureResult(self)
call_hook('before_each', 'feature', self)
try:
for scenario in scenarios_to_run:
scenarios_ran.extend(scenario.run(ignore_case, failfast=failfast))
except:
call_hook('after_each', 'feature', self)
raise
else:
call_hook('after_each', 'feature', self)
return FeatureResult(self, *scenarios_ran)
class FeatureResult(object):
"""Object that holds results of each scenario ran from within a feature"""
def __init__(self, feature, *scenario_results):
self.feature = feature
self.scenario_results = scenario_results
@property
def passed(self):
return all([result.passed for result in self.scenario_results])
class ScenarioResult(object):
"""Object that holds results of each step ran from within a scenario"""
def __init__(self, scenario, all_steps, steps_passed, steps_failed, steps_skipped,
steps_undefined, outline=None):
self.scenario = scenario
self.all_steps = all_steps
self.steps_passed = steps_passed
self.steps_failed = steps_failed
self.steps_skipped = steps_skipped
self.steps_undefined = steps_undefined
self.outline = outline
self.total_steps = len(all_steps)
@property
def passed(self):
return self.total_steps is len(self.steps_passed)
class TotalResult(object):
def __init__(self, feature_results=None):
self.feature_results = feature_results
self.scenario_results = []
self.steps_passed = 0
self.steps_failed = 0
self.steps_skipped = 0
self.steps_undefined = 0
self._proposed_definitions = []
self.steps = 0
# store the scenario names that failed, with their location
self.failed_scenario_locations = []
def output_format(self):
for feature_result in self.feature_results:
for scenario_result in feature_result.scenario_results:
self.scenario_results.append(scenario_result)
self.steps_passed += len(scenario_result.steps_passed)
self.steps_failed += len(scenario_result.steps_failed)
self.steps_skipped += len(scenario_result.steps_skipped)
self.steps_undefined += len(scenario_result.steps_undefined)
self.steps += scenario_result.total_steps
self._proposed_definitions.extend(scenario_result.steps_undefined)
if len(scenario_result.steps_failed) > 0:
self.failed_scenario_locations.append(scenario_result.scenario.represented())
def _filter_proposed_definitions(self):
sentences = []
for step in self._proposed_definitions:
if step.proposed_sentence not in sentences:
sentences.append(step.proposed_sentence)
yield step
@property
def proposed_definitions(self):
return list(self._filter_proposed_definitions())
@property
def features_ran(self):
return len(self.feature_results)
@property
def features_passed(self):
return len([result for result in self.feature_results if result.passed])
@property
def scenarios_ran(self):
return len(self.scenario_results)
@property
def scenarios_passed(self):
return len([result for result in self.scenario_results if result.passed])
@property
def is_success(self):
return not bool(self.failed_scenario_locations)
class SummaryTotalResults(TotalResult):
def __init__(self, total_results):
"""Aggregates results per total results into a summary
@params: list of total result objects
"""
super(SummaryTotalResults, self).__init__()
self.total_results = total_results
self.features_ran_overall = 0
self.features_passed_overall = 0
def __len__(self):
""" Overloaded len() to be able to use with tests
"""
return len(self.total_results)
def __getitem__(self, item):
""" Needed for tests.
"""
return self.total_results[item]
def summarize_all(self):
"""Outputs the aggregated results for the TotalResult list
"""
for partial_result in filter(None, self.total_results):
self.features_ran_overall += len(partial_result.feature_results)
self.features_passed_overall += len([feat for feat in partial_result.feature_results if feat.passed])
self.feature_results = partial_result.feature_results
for feature_result in self.feature_results:
for scenario_result in feature_result.scenario_results:
self.scenario_results.append(scenario_result)
self.steps_passed += len(scenario_result.steps_passed)
self.steps_failed += len(scenario_result.steps_failed)
self.steps_skipped += len(scenario_result.steps_skipped)
self.steps_undefined += len(scenario_result.steps_undefined)
self.steps += scenario_result.total_steps
self._proposed_definitions.extend(scenario_result.steps_undefined)
if len(scenario_result.steps_failed) > 0:
self.failed_scenario_locations.append(scenario_result.scenario.represented())
| 47,056 | Python | .py | 1,085 | 32.23318 | 213 | 0.585694 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,661 | exceptions.py | gabrielfalcao_lettuce/lettuce/exceptions.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import traceback
import sys
from lettuce.strings import utf8_string
class NoDefinitionFound(Exception):
""" Exception raised by lettuce.core.Step, when trying to solve a
Step, but does not find a suitable step definition.
This exception should never blow on user's face. It used merely yo
lettuce can filter undefined steps.
"""
def __init__(self, step):
self.step = step
error = filter(lambda x : 0 <= ord(x) <= 127,
'The step r"%s" is not defined' % self.step.sentence)
super(NoDefinitionFound, self).__init__(error)
class ReasonToFail(object):
""" Exception that contains detailed information about a
AssertionError raised within a step definition. With these data
lettuce show detailed traceback to user in a nice representation.
"""
def __init__(self, step, exc):
self.step = step
self.exception = exc
if sys.version_info[:2] < (2, 6):
msg = exc.message
else:
msg = exc.args[0] if exc.args else ''
if isinstance(msg, basestring):
self.cause = utf8_string(msg)
self.traceback = utf8_string(traceback.format_exc(exc))
class LettuceSyntaxError(SyntaxError):
def __init__(self, filename, string):
self.filename = filename
self.msg = "Syntax error at: %s\n%s\n" % (filename, string)
class StepLoadingError(Exception):
"""Raised when a step cannot be loaded."""
pass
class LettuceRunnerError(Exception):
"""Raised when the Lettuce runner experiences failures/errors."""
| 2,376 | Python | .py | 54 | 38.888889 | 76 | 0.69961 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,662 | terrain.py | gabrielfalcao_lettuce/lettuce/terrain.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce.registry import world
from lettuce.registry import CALLBACK_REGISTRY
world._set = True
def absorb(thing, name=None):
if not isinstance(name, basestring):
name = thing.__name__
setattr(world, name, thing)
return thing
world.absorb = absorb
@world.absorb
def spew(name):
if hasattr(world, name):
item = getattr(world, name)
delattr(world, name)
return item
class Main(object):
def __init__(self, callback):
self.name = callback
@classmethod
def _add_method(cls, name, where, when):
def method(self, fn):
CALLBACK_REGISTRY.append_to(where, when % {'0': self.name}, fn)
return fn
method.__name__ = method.fn_name = name
setattr(cls, name, method)
for name, where, when in (
('all', 'all', '%(0)s'),
('each_step', 'step', '%(0)s_each'),
('step_output', 'step', '%(0)s_output'),
('each_scenario', 'scenario', '%(0)s_each'),
('each_outline', 'outline', '%(0)s_each'),
('each_background', 'background', '%(0)s_each'),
('each_feature', 'feature', '%(0)s_each'),
('harvest', 'harvest', '%(0)s'),
('each_app', 'app', '%(0)s_each'),
('runserver', 'runserver', '%(0)s'),
('handle_request', 'handle_request', '%(0)s'),
('outline', 'scenario', 'outline')):
Main._add_method(name, where, when)
before = Main('before')
after = Main('after')
| 2,250 | Python | .py | 57 | 34.473684 | 75 | 0.641907 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,663 | registry.py | gabrielfalcao_lettuce/lettuce/registry.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import threading
import traceback
from lettuce.exceptions import StepLoadingError
world = threading.local()
world._set = False
def _function_matches(one, other):
return (os.path.abspath(one.func_code.co_filename) == os.path.abspath(other.func_code.co_filename) and
one.func_code.co_firstlineno == other.func_code.co_firstlineno)
class CallbackDict(dict):
def append_to(self, where, when, function):
if not any(_function_matches(o, function) for o in self[where][when]):
self[where][when].append(function)
def clear(self):
for name, action_dict in self.items():
for callback_list in action_dict.values():
callback_list[:] = []
class StepDict(dict):
def __init__(self, *args, **kwargs):
super(StepDict, self).__init__(*args, **kwargs)
self._compiled = {}
self._compiled_ignore_case = {}
def get_regex(self, step, ignore_case=False):
if ignore_case:
regex = self._compiled_ignore_case.get(step, None)
if not regex:
regex = re.compile(step, re.I)
self._compiled_ignore_case[step] = regex
else:
regex = self._compiled.get(step, None)
if not regex:
regex = re.compile(step)
self._compiled[step] = regex
return regex
def load(self, step, func):
self._assert_is_step(step, func)
self[step] = func
return func
def load_func(self, func):
regex = self._extract_sentence(func)
return self.load(regex, func)
def load_steps(self, obj):
exclude = getattr(obj, "exclude", [])
for attr in dir(obj):
if self._attr_is_step(attr, obj) and attr not in exclude:
step_method = getattr(obj, attr)
self.load_func(step_method)
return obj
def _extract_sentence(self, func):
func = getattr(func, '__func__', func)
sentence = getattr(func, '__doc__', None)
if sentence is None:
sentence = func.func_name.replace('_', ' ')
sentence = sentence[0].upper() + sentence[1:]
return sentence
def _assert_is_step(self, step, func):
try:
re.compile(step)
except re.error as e:
raise StepLoadingError("Error when trying to compile:\n"
" regex: %r\n"
" for function: %s\n"
" error: %s" % (step, func, e))
def _attr_is_step(self, attr, obj):
return attr[0] != '_' and self._is_func_or_method(getattr(obj, attr))
def _is_func_or_method(self, func):
func_dir = dir(func)
return callable(func) and ("func_name" in func_dir or "__func__" in func_dir)
STEP_REGISTRY = StepDict()
CALLBACK_REGISTRY = CallbackDict(
{
'all': {
'before': [],
'after': [],
},
'step': {
'before_each': [],
'after_each': [],
'before_output': [],
'after_output': [],
},
'scenario': {
'before_each': [],
'after_each': [],
'outline': [],
},
'outline': {
'before_each': [],
'after_each': [],
},
'background': {
'before_each': [],
'after_each': [],
},
'feature': {
'before_each': [],
'after_each': [],
},
'app': {
'before_each': [],
'after_each': [],
},
'harvest': {
'before': [],
'after': [],
},
'handle_request': {
'before': [],
'after': [],
},
'runserver': {
'before': [],
'after': [],
},
},
)
def call_hook(situation, kind, *args, **kw):
for callback in CALLBACK_REGISTRY[kind][situation]:
try:
callback(*args, **kw)
except Exception as e:
print "=" * 1000
traceback.print_exc(e)
print
raise
def clear():
STEP_REGISTRY.clear()
CALLBACK_REGISTRY.clear()
| 5,051 | Python | .py | 145 | 25.813793 | 106 | 0.547297 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,664 | decorators.py | gabrielfalcao_lettuce/lettuce/decorators.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce.core import STEP_REGISTRY
def _is_step_sentence(sentence):
return isinstance(sentence, str) or isinstance(sentence, basestring)
def step(step_func_or_sentence):
"""Decorates a function, so that it will become a new step
definition.
You give step sentence either (by priority):
* with step function argument (first example)
* with function doc (second example)
* with the function name exploded by underscores (third example)
Example::
>>> from lettuce import step
>>> from models import contact
>>>
>>> # First Example
>>> step(r'Given I delete the contact "(?P<name>.*)" from my address book')
... def given_i_do_something(step, name):
... contact.delete_by_name(name)
... assert step.sentence == 'Given I delete the contact "John Doe" from my address book'
...
>>> # Second Example
>>> @step
... def given_i_delete_a_contact_from_my_address_book(step, name):
... '''Given I delete the contact "(?P<name>.*)" from my address book'''
... contact.delete_by_name(name)
... assert step.sentence == 'Given I delete the contact "(?P<name>.*)" from my address book'
...
>>> # Third Example
>>> @step
... def given_I_delete_the_contact_John_Doe_from_my_address_book(step):
... contact.delete_by_name("John Doe")
... assert step.sentence == 'Given I delete the contact John Doe from my address book'
Notice that all step definitions take a step object as argument.
"""
if _is_step_sentence(step_func_or_sentence):
return lambda func: STEP_REGISTRY.load(step_func_or_sentence, func)
else:
return STEP_REGISTRY.load_func(step_func_or_sentence)
def steps(steps_class):
"""Decorates a class, and set steps definitions from methods
except those in the attribute "exclude" or starting by underscore.
Steps sentences are taken from methods names or docs if exist.
Example::
>>> from lettuce import steps
>>> from models import contact
>>>
>>> @steps
>>> class ListOfSteps(object):
... exclude = ["delete_by_name"]
...
... def __init__(self, contact):
... self.contact = contact
...
... def given_i_delete_a_contact_from_my_address_book(self, step, name):
... '''Given I delete the contact "(?P<name>.*)" from my address book'''
... self.delete_by_name(name)
... assert step.sentence == 'Given I delete the contact "(?P<name>.*)" from my address book'
...
... def given_I_delete_the_contact_John_Doe_from_my_address_book(self, step):
... self.delete_by_name("John Doe")
... assert step.sentence == 'Given I delete the contact John Doe from my address book'
...
... def delete_by_name(self, name):
... self.contact.delete_by_name(name)
...
>>> ListOfSteps(contact)
Notice steps are added when an object of the class is created.
"""
if hasattr(steps_class, '__init__'):
_init_ = getattr(steps_class, '__init__')
def init(self, *args, **kwargs):
_init_(self, *args, **kwargs)
STEP_REGISTRY.load_steps(self)
else:
def init(self, *args, **kwargs):
STEP_REGISTRY.load_steps(self)
setattr(steps_class, '__init__', init)
return steps_class
| 4,357 | Python | .py | 94 | 39.734043 | 109 | 0.623383 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,665 | apps.py | gabrielfalcao_lettuce/lettuce/django/apps.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import join, dirname
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from django.conf import settings
def _filter_bultins(module):
"returns only those apps that are not builtin django.contrib"
name = module.__name__
return not name.startswith("django.contrib") and name != 'lettuce.django'
def _filter_configured_apps(module):
"returns only those apps that are in django.conf.settings.LETTUCE_APPS"
app_found = True
if hasattr(settings, 'LETTUCE_APPS') and isinstance(settings.LETTUCE_APPS, (list, tuple)):
app_found = False
for appname in settings.LETTUCE_APPS:
if module.__name__.startswith(appname):
app_found = True
return app_found
def _filter_configured_avoids(module):
"returns apps that are not within django.conf.settings.LETTUCE_AVOID_APPS"
run_app = False
if hasattr(settings, 'LETTUCE_AVOID_APPS') and isinstance(settings.LETTUCE_AVOID_APPS, (list, tuple)):
for appname in settings.LETTUCE_AVOID_APPS:
if module.__name__.startswith(appname):
run_app = True
return not run_app
def get_apps():
"""
Import Django apps. It ignores ImportErrors.
(Django will take care of it)
"""
apps = []
for app in settings.INSTALLED_APPS:
try:
apps.append(import_module(app))
except ImportError:
pass
return apps
def harvest_lettuces(only_the_apps=None, avoid_apps=None, path="features"):
"""gets all installed apps that are not from django.contrib
returns a list of tuples with (path_to_app, app_module)
"""
apps = get_apps()
if isinstance(only_the_apps, (list, tuple)) and any(only_the_apps):
def _filter_only_specified(module):
return module.__name__ in only_the_apps
apps = filter(_filter_only_specified, apps)
else:
apps = filter(_filter_bultins, apps)
apps = filter(_filter_configured_apps, apps)
apps = filter(_filter_configured_avoids, apps)
if isinstance(avoid_apps, (list, tuple)) and any(avoid_apps):
def _filter_avoid(module):
return module.__name__ not in avoid_apps
apps = filter(_filter_avoid, apps)
joinpath = lambda app: (join(dirname(app.__file__), path), app)
return map(joinpath, apps)
| 3,195 | Python | .py | 74 | 37.635135 | 106 | 0.696227 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,666 | __init__.py | gabrielfalcao_lettuce/lettuce/django/__init__.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce.django.apps import harvest_lettuces
server = None
django_url = None
def get_server(*args, **kwargs):
"""
Look up the server we are using and set it as the global
"""
from django.conf import settings
server_name = getattr(settings, 'LETTUCE_TEST_SERVER',
'lettuce.django.server.DefaultServer')
module, klass = server_name.rsplit('.', 1)
Server = getattr(__import__(module, fromlist=[klass]), klass)
global server, django_url
server = Server(*args, **kwargs)
django_url = server.url
return server
__all__ = ['harvest_lettuces', 'server', 'django_url']
| 1,429 | Python | .py | 33 | 39.818182 | 71 | 0.721098 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,667 | server.py | gabrielfalcao_lettuce/lettuce/django/server.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import socket
import httplib
import urlparse
import tempfile
import multiprocessing
from StringIO import StringIO
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.core.servers.basehttp import WSGIServer
from django.core.servers.basehttp import ServerHandler
from django.core.servers.basehttp import WSGIRequestHandler
try:
from django.core.servers.basehttp import AdminMediaHandler
except ImportError:
AdminMediaHandler = None
if 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
except ImportError:
StaticFilesHandler = None
else:
StaticFilesHandler = None
try:
from django.utils.six.moves import socketserver
except ImportError:
import SocketServer as socketserver
try:
import SocketServer
SocketServer.BaseServer.handle_error = lambda *args, **kw: None
except ImportError:
pass
from lettuce.django import mail
from lettuce.registry import call_hook
def create_mail_queue():
mail.queue = multiprocessing.Queue()
return mail.queue
class LettuceServerException(socket.error):
pass
keep_running = True
class MutedRequestHandler(WSGIRequestHandler):
""" A RequestHandler that silences output, in order to don't
mess with Lettuce's output"""
dev_null = StringIO()
def log_message(self, *args, **kw):
pass # do nothing
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = LettuceServerHandler(
self.rfile,
self.wfile,
self.dev_null,
self.get_environ(),
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
class LettuceServerHandler(ServerHandler):
def handle_error(self, request, client_address):
pass
def finish_response(self):
try:
ServerHandler.finish_response(self)
# avoiding broken pipes
# http://code.djangoproject.com/ticket/4444
except Exception:
exc_type, exc_value = sys.exc_info()[:2]
if not issubclass(exc_type, socket.error) or \
exc_value.args[0] is 32:
raise
class ThreadedServer(multiprocessing.Process):
"""
Runs django's builtin in background
"""
daemon = True
def __init__(self, address, port, mail_queue, threading=True, *args, **kw):
multiprocessing.Process.__init__(self)
self.address = address
self.port = port
self.lock = multiprocessing.Lock()
self.mail_queue = mail_queue
self.threading = threading
def configure_mail_queue(self):
mail.queue = self.mail_queue
settings.EMAIL_BACKEND = \
'lettuce.django.mail.backends.QueueEmailBackend'
@staticmethod
def get_real_address(address):
if address == '0.0.0.0' or address == 'localhost':
address = '127.0.0.1'
return address
def wait(self):
address = ThreadedServer.get_real_address(self.address)
while True:
time.sleep(0.1)
http = httplib.HTTPConnection(address, self.port, timeout=1)
try:
http.request("GET", "/")
except socket.error:
http.close()
continue
break
self.lock.acquire()
def should_serve_static_files(self):
try:
return (StaticFilesHandler is not None and
getattr(settings, 'STATIC_URL', False))
except ImportError:
return False
def should_serve_admin_media(self):
try:
return (('django.contrib.admin' in settings.INSTALLED_APPS and
AdminMediaHandler) or
getattr(settings, 'LETTUCE_SERVE_ADMIN_MEDIA', False))
except ImportError:
return False
def run(self):
self.lock.acquire()
pidfile = os.path.join(tempfile.gettempdir(), 'lettuce-django.pid')
if os.path.exists(pidfile):
pid = int(open(pidfile).read())
try:
os.kill(pid, 9)
except OSError:
pass
finally:
os.unlink(pidfile)
open(pidfile, 'w').write(unicode(os.getpid()))
self.configure_mail_queue()
connector = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.threading:
httpd_cls = type(str('WSGIServer'), (socketserver.ThreadingMixIn, WSGIServer), {})
else:
httpd_cls = WSGIServer
try:
s = connector.connect((self.address, self.port))
self.lock.release()
os.kill(os.getpid(), 9)
except socket.error:
pass
finally:
self.lock.release()
try:
server_address = (self.address, self.port)
httpd = httpd_cls(server_address, MutedRequestHandler)
except socket.error:
raise LettuceServerException(
"the port %d already being used, could not start " \
"django's builtin server on it" % self.port,
)
handler = WSGIHandler()
if self.should_serve_admin_media():
if not AdminMediaHandler:
raise LettuceServerException(
"AdminMediaHandler is not available in this version of "
"Django. Please set LETTUCE_SERVE_ADMIN_MEDIA = False "
"in your Django settings.")
admin_media_path = ''
handler = AdminMediaHandler(handler, admin_media_path)
if self.should_serve_static_files():
handler = StaticFilesHandler(handler)
httpd.set_app(handler)
global keep_running
while keep_running:
call_hook('before', 'handle_request', httpd, self)
httpd.handle_request()
call_hook('after', 'handle_request', httpd, self)
try:
self.lock.release()
except ValueError:
pass
class BaseServer(object):
"""
Base class for Lettuce's internal server
"""
def __init__(self, address='0.0.0.0', port=None, threading=True):
self.port = int(port or getattr(settings, 'LETTUCE_SERVER_PORT', 8000))
self.address = unicode(address)
self.threading = threading
def start(self):
"""
Starts the webserver and waits it to be available
Chain this method up before your implementation
"""
call_hook('before', 'runserver', self._server)
def stop(self):
"""
Stops the webserver
Chain this method up after your implementation
"""
call_hook('after', 'runserver', self._server)
def url(self, url=''):
"""The url to access a server on"""
raise NotImplemented()
class DefaultServer(BaseServer):
"""A silenced, lightweight and simple django's builtin server so
that lettuce can be used with selenium, webdriver, windmill or any
browser tool"""
def __init__(self, *args, **kwargs):
super(DefaultServer, self).__init__(*args, **kwargs)
queue = create_mail_queue()
self._server = ThreadedServer(self.address, self.port, queue, threading=self.threading)
def start(self):
super(DefaultServer, self).start()
if self._server.should_serve_admin_media():
msg = "Preparing to serve django's admin site static files"
if getattr(settings, 'LETTUCE_SERVE_ADMIN_MEDIA', False):
msg += ' (as per settings.LETTUCE_SERVE_ADMIN_MEDIA=True)'
print "%s..." % msg
self._server.start()
self._server.wait()
addrport = self.address, self._server.port
if not self._server.is_alive():
raise LettuceServerException(
'Lettuce could not run the builtin Django server at %s:%d"\n'
'maybe you forgot a "runserver" instance running ?\n\n'
'well if you really do not want lettuce to run the server '
'for you, then just run:\n\n'
'python manage.py --no-server' % addrport,
)
print "Django's builtin server is running at %s:%d" % addrport
def stop(self, fail=False):
pid = self._server.pid
if pid:
os.kill(pid, 9)
super(DefaultServer, self).stop()
code = int(fail)
return sys.exit(code)
def url(self, url=""):
base_url = "http://%s" % ThreadedServer.get_real_address(self.address)
if self.port is not 80:
base_url += ':%d' % self.port
return urlparse.urljoin(base_url, url)
try:
try:
from django.contrib.staticfiles.testing import \
StaticLiveServerTestCase as LiveServerTestCase
except ImportError:
from django.test.testcases import LiveServerTestCase
class DjangoServer(BaseServer):
"""
A server that uses Django's LiveServerTestCase to implement the Server class.
"""
_server = None
def start(self):
super(DjangoServer, self).start()
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = \
'{address}:{port}'.format(address=self.address,
port=self.port)
LiveServerTestCase.setUpClass()
print "Django's builtin server is running at {address}:{port}".format(
address=self.address,
port=self.port)
def stop(self, fail=False):
LiveServerTestCase.tearDownClass()
super(DjangoServer, self).stop()
return 0
def url(self, url=''):
return urlparse.urljoin(
'http://{address}:{port}/'.format(address=self.address,
port=self.port),
url)
except ImportError:
pass
| 11,074 | Python | .py | 284 | 29.676056 | 95 | 0.622006 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,668 | backends.py | gabrielfalcao_lettuce/lettuce/django/mail/backends.py | """
Email backend that sends mails to a multiprocessing queue
"""
from lettuce.django import mail
from django.core.mail.backends.base import BaseEmailBackend
class QueueEmailBackend(BaseEmailBackend):
def send_messages(self, messages):
for message in messages:
mail.queue.put(message)
return len(messages)
| 341 | Python | .py | 10 | 29.6 | 59 | 0.759146 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,669 | djangoapp.py | gabrielfalcao_lettuce/lettuce/django/tests/functional/djangoapp.py | """
A minimal Django app, just one file.
See: http://olifante.blogs.com/covil/2010/04/minimal-django.html
"""
import os
from django.conf.urls.defaults import patterns
from django.core.mail import send_mail
from django.http import HttpResponse
filepath, extension = os.path.splitext(__file__)
ROOT_URLCONF = os.path.basename(filepath)
INSTALLED_APPS = (
"lettuce.django"
)
def mail(request):
send_mail('Subject here', 'Here is the message.', '[email protected]',
['[email protected]'], fail_silently=False)
return HttpResponse('Mail has been sent')
urlpatterns = patterns('', (r'^mail/$', mail))
| 627 | Python | .py | 18 | 31.833333 | 73 | 0.727575 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,670 | steps.py | gabrielfalcao_lettuce/lettuce/django/tests/functional/steps.py | from lettuce import world, step
from lettuce.django import mail
from lettuce.django import django_url
from nose.tools import assert_equals
@step(u'I visit "([^"]*)"')
def visit(step, url):
world.browser.visit(django_url(url))
@step(u'I see "([^"]*)"')
def see(step, text):
assert world.browser.is_text_present(text)
@step(u'an email is sent to "([^"]*?)" with subject "([^"]*)"')
def email_sent(step, to, subject):
message = mail.queue.get(True, timeout=5)
assert_equals(message.subject, subject)
assert_equals(message.recipients(), [to])
| 566 | Python | .py | 15 | 34.933333 | 63 | 0.698529 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,671 | terrain.py | gabrielfalcao_lettuce/lettuce/django/tests/functional/terrain.py | from splinter.browser import Browser
from lettuce import before, after, world
@before.harvest
def setup(server):
world.browser = Browser()
@after.harvest
def cleanup(server):
world.browser.quit()
| 208 | Python | .py | 8 | 23.5 | 40 | 0.790816 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,672 | test_email.py | gabrielfalcao_lettuce/lettuce/django/tests/functional/test_email.py | import os
import sys
import commands
from tests.asserts import assert_not_equals
from lettuce.fs import FileSystem
current_directory = FileSystem.dirname(__file__)
OLD_PYTHONPATH = os.getenv('PYTHONPATH', ':'.join(sys.path))
def teardown():
os.environ['PYTHONPATH'] = OLD_PYTHONPATH
def test_email():
'lettuce should be able to receive emails sent from django server'
os.environ['PYTHONPATH'] = current_directory
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangoapp'
status, out = commands.getstatusoutput(
"django-admin.py harvest email.feature --verbosity=2")
assert_not_equals(status, 0)
| 629 | Python | .py | 16 | 35.75 | 70 | 0.751656 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,673 | __init__.py | gabrielfalcao_lettuce/lettuce/django/management/__init__.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| 784 | Python | .py | 16 | 48 | 71 | 0.765625 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,674 | harvest.py | gabrielfalcao_lettuce/lettuce/django/management/commands/harvest.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import traceback
from distutils.version import StrictVersion
import django
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.test.utils import setup_test_environment, teardown_test_environment
from lettuce import Runner
from lettuce import registry
from lettuce.core import SummaryTotalResults
from lettuce.exceptions import LettuceRunnerError
from lettuce.django import harvest_lettuces, get_server
from lettuce.django.server import LettuceServerException
DJANGO_VERSION = StrictVersion(django.get_version())
class Command(BaseCommand):
help = u'Run lettuce tests all along installed apps'
if DJANGO_VERSION < StrictVersion('1.7'):
requires_model_validation = False
else:
requires_system_checks = False
def add_arguments(self, parser):
parser.set_defaults(verbosity=3) # default verbosity is 3
parser.add_argument('features', nargs='*', help='A list of features to run (files or folders)')
parser.add_argument(
'-a', '--apps', action='store', dest='apps', default='',
help='Run ONLY the django apps that are listed here. Comma separated'
)
parser.add_argument(
'-A', '--avoid-apps', action='store', dest='avoid_apps', default='',
help='AVOID running the django apps that are listed here. Comma separated'
)
parser.add_argument(
'-S', '--no-server', action='store_true', dest='no_server', default=False,
help="will not run django's builtin HTTP server"
)
parser.add_argument(
'--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.'
)
parser.add_argument(
'-T', '--test-server', action='store_true', dest='test_database',
default=getattr(settings, "LETTUCE_USE_TEST_DATABASE", False),
help="will run django's builtin HTTP server using the test databases"
)
parser.add_argument(
'-P', '--port', type=int, dest='port',
help="the port in which the HTTP server will run at"
)
parser.add_argument(
'-d', '--debug-mode', action='store_true', dest='debug', default=False,
help="when put together with builtin HTTP server, forces django to run with settings.DEBUG=True"
)
parser.add_argument(
'-s', '--scenarios', action='store', dest='scenarios', default=None,
help='Comma separated list of scenarios to run'
)
parser.add_argument(
"-t", "--tag", dest="tags", type=str, action='append', default=None,
help='Tells lettuce to run the specified tags only; '
'can be used multiple times to define more tags'
'(prefixing tags with "-" will exclude them and '
'prefixing with "~" will match approximate words)'
)
parser.add_argument(
'--with-xunit', action='store_true', dest='enable_xunit', default=False,
help='Output JUnit XML test results to a file'
)
parser.add_argument(
'--smtp-queue', action='store_true', dest='smtp_queue', default=False,
help='Use smtp for mail queue (usefull with --no-server option'
)
parser.add_argument(
'--xunit-file', action='store', dest='xunit_file', default=None,
help='Write JUnit XML to this file. Defaults to lettucetests.xml'
)
parser.add_argument(
'--with-subunit', action='store_true', dest='enable_subunit',
default=False, help='Output Subunit test results to a file'
)
parser.add_argument(
'--subunit-file', action='store', dest='subunit_file', default=None,
help='Write Subunit to this file. Defaults to subunit.bin'
)
parser.add_argument(
'--with-jsonreport', action='store_true', dest='enable_jsonreport',
default=False, help='Output JSON test results to a file'
)
parser.add_argument(
'--jsonreport-file', action='store', dest='jsonreport_file',
default=None, help='Write JSON report to this file. Defaults to lettucetests.json'
)
parser.add_argument(
"--failfast", dest="failfast", default=False,
action="store_true", help='Stop running in the first failure'
)
parser.add_argument(
"--pdb", dest="auto_pdb", default=False, action="store_true",
help='Launches an interactive debugger upon error'
)
if DJANGO_VERSION < StrictVersion('1.7'):
# Django 1.7 introduces the --no-color flag. We must add the flag
# to be compatible with older django versions
parser.add_argument(
'--no-color', action='store_true', dest='no_color',
default=False, help="Don't colorize the command output."
)
def get_paths(self, feature_paths, apps_to_run, apps_to_avoid):
if feature_paths:
for path, exists in zip(feature_paths, map(os.path.exists, feature_paths)):
if not exists:
sys.stderr.write("You passed the path '%s', but it does not exist.\n" % path)
sys.exit(1)
else:
paths = feature_paths
else:
paths = harvest_lettuces(apps_to_run, apps_to_avoid) # list of tuples with (path, app_module)
return paths
def handle(self, *args, **options):
setup_test_environment()
verbosity = options['verbosity']
no_color = options.get('no_color', False)
apps_to_run = tuple(options['apps'].split(","))
apps_to_avoid = tuple(options['avoid_apps'].split(","))
run_server = not options['no_server']
test_database = options['test_database']
smtp_queue = options['smtp_queue']
tags = options['tags']
failfast = options['failfast']
auto_pdb = options['auto_pdb']
threading = options['use_threading']
if test_database:
migrate_south = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
try:
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
except:
migrate_south = False
pass
from django.test.utils import get_runner
self._testrunner = get_runner(settings)(interactive=False)
self._testrunner.setup_test_environment()
self._old_db_config = self._testrunner.setup_databases()
if DJANGO_VERSION < StrictVersion('1.7'):
call_command('syncdb', verbosity=0, interactive=False,)
if migrate_south:
call_command('migrate', verbosity=0, interactive=False,)
else:
call_command('migrate', verbosity=0, interactive=False,)
settings.DEBUG = options.get('debug', False)
paths = self.get_paths(options['features'], apps_to_run, apps_to_avoid)
server = get_server(port=options['port'], threading=threading)
if run_server:
try:
server.start()
except LettuceServerException as e:
raise CommandError("Couldn't start Django server: %s" % e)
os.environ['SERVER_NAME'] = str(server.address)
os.environ['SERVER_PORT'] = str(server.port)
failed = False
registry.call_hook('before', 'harvest', locals())
results = []
try:
for path in paths:
app_module = None
if isinstance(path, tuple) and len(path) is 2:
path, app_module = path
if app_module is not None:
registry.call_hook('before_each', 'app', app_module)
runner = Runner(path, options.get('scenarios'),
verbosity, no_color,
enable_xunit=options.get('enable_xunit'),
enable_subunit=options.get('enable_subunit'),
enable_jsonreport=options.get('enable_jsonreport'),
xunit_filename=options.get('xunit_file'),
subunit_filename=options.get('subunit_file'),
jsonreport_filename=options.get('jsonreport_file'),
tags=tags, failfast=failfast, auto_pdb=auto_pdb,
smtp_queue=smtp_queue)
result = runner.run()
if app_module is not None:
registry.call_hook('after_each', 'app', app_module, result)
results.append(result)
if not result or result.steps != result.steps_passed:
failed = True
except LettuceRunnerError:
failed = True
except Exception as e:
failed = True
traceback.print_exc(e)
finally:
summary = SummaryTotalResults(results)
summary.summarize_all()
registry.call_hook('after', 'harvest', summary)
if test_database:
self._testrunner.teardown_databases(self._old_db_config)
teardown_test_environment()
server.stop(failed)
if failed:
raise CommandError("Lettuce tests failed.")
| 10,466 | Python | .py | 217 | 36.847926 | 108 | 0.601879 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,675 | __init__.py | gabrielfalcao_lettuce/lettuce/django/management/commands/__init__.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| 784 | Python | .py | 16 | 48 | 71 | 0.765625 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,676 | models.py | gabrielfalcao_lettuce/lettuce/django/steps/models.py | """
Step definitions for working with Django models.
"""
from datetime import datetime
import re
from django.core.management import call_command
from django.core.management.color import no_style
from django.db import connection
try:
from django.db.models.loading import get_models
except ImportError:
from django.apps import apps
get_models = apps.get_models
from django.utils.functional import curry
from functools import wraps
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for model in get_models():
yield (unicode(model._meta.verbose_name), model)
yield (unicode(model._meta.verbose_name_plural), model)
MODELS = dict(_models_generator())
_WRITE_MODEL = {}
def creates_models(model):
"""
Register a model-specific creation function. Wrapper around writes_models
that removes the field parameter (always a create operation).
"""
def decorated(func):
@wraps(func)
@writes_models(model)
def wrapped(data, field):
if field:
raise NotImplementedError(
"Must use the writes_models decorator to update models")
return func(data)
return decorated
def writes_models(model):
"""
Register a model-specific create and update function.
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_MODEL_EXISTS = {}
def checks_existence(model):
"""
Register a model-specific existence check function.
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_MODEL_EXISTS[model] = func
return func
return decorated
def hash_data(hash_):
"""
Convert strings from a Lettuce hash to appropriate types
"""
res = {}
for key, value in hash_.items():
if type(value) in (str, unicode):
if value == "true":
value = True
elif value == "false":
value = False
elif value == "null":
value = None
elif value.isdigit() and not re.match("^0[0-9]+", value):
value = int(value)
elif re.match(r'^\d{4}-\d{2}-\d{2}$', value):
value = datetime.strptime(value, "%Y-%m-%d")
res[key] = value
return res
def hashes_data(step):
"""
Convert strings from step hashes to appropriate types
"""
return [hash_data(hash_) for hash_ in step.hashes]
def get_model(model):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
name = model.lower()
model = MODELS.get(model, None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def create_models(model, data):
"""
Create models for each data hash. Wrapper around write_models.
"""
return write_models(model, data, None)
def write_models(model, data, field=None):
"""
Create or update models for each data hash. If field is present, it is the
field that is used to get the existing models out of the database to update
them; otherwise, new models are created.
"""
if hasattr(data, 'hashes'):
data = hashes_data(data)
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
for field in model._meta.fields:
print '%s=%s,' % (field.name, str(getattr(model, field.name))),
if attrs is not None:
for attr in attrs:
print '%s=%s,' % (attr, str(getattr(model, attr))),
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
print '%s=%s (%i),' % (
field.name,
', '.join(map(str, vals.all())),
vals.count()),
print
def models_exist(model, data, queryset=None):
"""
Check whether the models defined by @data exist in the @queryset.
"""
if hasattr(data, 'hashes'):
data = hashes_data(data)
if not queryset:
queryset = model.objects
failed = 0
try:
for hash_ in data:
fields = {}
extra_attrs = {}
for k, v in hash_.iteritems():
if k.startswith('@'):
# this is an attribute
extra_attrs[k[1:]] = v
else:
fields[k] = v
filtered = queryset.filter(**fields)
match = False
if filtered.exists():
for obj in filtered.all():
if all(getattr(obj, k) == v
for k, v in extra_attrs.iteritems()):
match = True
break
assert match, \
"%s does not exist: %s\n%s" % (
model.__name__, hash_, filtered.query)
except AssertionError as exc:
print exc
failed += 1
if failed:
print "Rows in DB are:"
for model in queryset.all():
_dump_model(model, extra_attrs.keys())
raise AssertionError("%i rows missing" % failed)
for txt in (
(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:'),
(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:'),
):
@step(txt)
def write_models_generic(step, model, field=None):
"""
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
The generic method can be overridden for a specific model by defining a
function write_badgers(step, field), which creates and updates
the Badger model and decorating it with the writes_models(model_class)
decorator.
@writes_models(Profile)
def write_profile(step, field):
'''Creates a Profile model'''
for hash_ in hashes_data(step):
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
"""
model = get_model(model)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = curry(write_models, model)
func(step, field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"' +
r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def create_models_for_relation(step, rel_model_name,
rel_key, rel_value, model):
"""
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
for hash_ in step.hashes:
hash_['%s' % rel_model_name] = rel_model
write_models_generic(step, model)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"' +
r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def create_m2m_links(step, rel_model_name, rel_key, rel_value, relation_name):
"""
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in step.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(STEP_PREFIX + r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present ' +
r'in the database')
def models_exist_generic(step, model):
"""
And objectives should be present in the database:
| description |
| Make a mess |
"""
model = get_model(model)
try:
func = _MODEL_EXISTS[model]
except KeyError:
func = curry(models_exist, model)
func(step)
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def model_count(step, count, model):
"""
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
def clean_db(scenario):
"""
Clean the DB after each scenario
Usage: after.each_scenario(clean_db)
"""
call_command('flush', interactive=False)
| 10,131 | Python | .py | 293 | 26.34471 | 79 | 0.574684 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,677 | mail.py | gabrielfalcao_lettuce/lettuce/django/steps/mail.py | """
Step definitions for working with Django email.
"""
from smtplib import SMTPException
from django.core import mail
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(step, count):
"""
Then I have sent 2 emails
"""
count = int(count)
assert len(mail.outbox) == count, "Length of outbox is {0}".format(count)
@step(r'I have not sent any emails')
def mail_not_sent(step):
"""
I have not sent any emails
"""
return mail_sent_count(step, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})'
'').format('|'.join(EMAIL_PARTS)))
def mail_sent_content(step, text, part):
"""
Then I have sent an email with "pandas" in the body
"""
assert any(text in getattr(email, part)
for email
in mail.outbox
), "An email contained expected text in the {0}".format(part)
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(step):
"""
I have sent an email with the following in the body:
\"""
Name: Mr. Panda
\"""
"""
return mail_sent_content(step, step.multiline, 'body')
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(step):
"""
I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by lettuce")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(step):
"""
Break email sending
"""
mail.EmailMessage.send = broken_send
| 1,903 | Python | .py | 60 | 27.1 | 77 | 0.643288 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,678 | smtp_mail_queue.py | gabrielfalcao_lettuce/lettuce/plugins/smtp_mail_queue.py | import asyncore
import threading
from email.header import decode_header
from email import message_from_string
from smtpd import SMTPServer
from lettuce import after, before
def _parse_header(val):
result = ''
elements = decode_header(val)
for el, enc in elements:
result += el.decode(enc) if enc and el else el
return result
def _decode_part(part):
content = part.get_payload(decode=True)
charset = part.get_content_charset()
return content.decode(charset) if charset else content
def _get_content(msg):
alternatives = []
if msg.get_content_type() == 'multipart/alternative':
payload = msg.get_payload()
body = _decode_part(payload.pop(0))
alternatives = [_decode_part(part) for part in payload]
else:
body = _decode_part(msg)
return body, alternatives
def _convert_to_django_msg(msg):
from django.core.mail import EmailMessage, EmailMultiAlternatives
body, alternatives = _get_content(msg)
if alternatives:
email = EmailMultiAlternatives(body=body,
alternatives=alternatives)
else:
email = EmailMessage(body=body)
email.subject = _parse_header(msg['Subject'])
email.to = _parse_header(msg['To'])
email.cc = _parse_header(msg.get('Cc', None))
email.bcc = _parse_header(msg.get('Bcc', None))
email.from_email = _parse_header(msg['From'])
return email
def enable():
from django.conf import settings
from lettuce.django import mail
smtp_queue_server = None
@before.each_scenario
def start_server(*args, **kwargs):
global smtp_queue_server
smtp_queue_server = QueueSMTPServer((settings.LETTUCE_SMTP_QUEUE_HOST,
settings.LETTUCE_SMTP_QUEUE_PORT),
None)
smtp_queue_server.start()
@after.each_scenario
def stop_server(*args, **kwargs):
global smtp_queue_server
smtp_queue_server.stop()
class QueueSMTPServer(SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped into a thread.
It pushes incoming email messages into lettuce email queue.
Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
SMTPServer.__init__(self, *args, **kwargs)
self.active_lock = threading.Lock()
self.active = False
self.daemon = True
def process_message(self, peer, mailfrom, rcpttos, data):
msg = message_from_string(data)
django_msg = _convert_to_django_msg(msg)
mail.queue.put(django_msg)
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all()
def stop(self):
assert self.active
self.active = False
self.join()
| 3,409 | Python | .py | 87 | 29.942529 | 106 | 0.621777 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,679 | jsonreport_output.py | gabrielfalcao_lettuce/lettuce/plugins/jsonreport_output.py | from datetime import datetime
import json
from lettuce import world
from lettuce.terrain import after, before
def enable(filename=None):
filename = filename or "lettucetests.json"
@before.all
def before_all():
"""
Set `world._started` to `datetime.now()` to track total duration.
"""
world._started = datetime.now()
@after.all
def generate_json_output(total):
"""
This callback is called after all the features are
ran.
"""
world._stopped = datetime.now()
total_dict = total_result_to_dict(total)
with open(filename, "w") as handle:
json.dump(total_dict, handle)
@before.each_feature
@before.each_scenario
@before.each_step
def before_each_element(*args):
"""
Set `step._started`, `scenario._started` or `feature._started` to `datetime.now()`
to track step/scenario/feature duration.
"""
element = args[0]
element._started = datetime.now()
@after.each_feature
@after.each_scenario
@after.each_step
def after_each_element(*args):
"""
Set `step._stopped`, `scenario._stopped` or `feature._stopped` to `datetime.now()`
to track step/scenario/feature duration.
"""
element = args[0]
element._stopped = datetime.now()
def total_result_to_dict(total):
"""
Transform a `TotalResult` to a json-serializable Python dictionary.
:param total: a `TotalResult` instance
:return: a Python dictionary
"""
return {
"meta": extract_meta(total),
"duration": _get_duration(world),
"features": [
extract_feature_data(feature_result)
for feature_result in total.feature_results
]
}
def extract_feature_data(feature_result):
"""
Extract data from a `FeatureResult` instance.
:param feature_result: a `FeatureResult` instance
:return: a Python dictionary
"""
scenarios = []
meta = {
"steps": {
"total": 0,
"success": 0,
"failures": 0,
"skipped": 0,
"undefined": 0,
},
"scenarios": {
"total": 0,
"success": 0,
"failures": 0,
"skipped": 0,
"undefined": 0,
}
}
for scenario_result in feature_result.scenario_results:
scenario_data = extract_scenario_data(scenario_result)
scenarios.append(scenario_data)
# scenarios
success = (
not scenario_data["meta"]["failures"] and
not scenario_data["meta"]["skipped"] and
not scenario_data["meta"]["undefined"]
)
meta["scenarios"]["total"] += 1 if scenario_data["meta"]["total"] else 0
meta["scenarios"]["success"] += 1 if success else 0
meta["scenarios"]["failures"] += 1 if scenario_data["meta"]["failures"] else 0
meta["scenarios"]["skipped"] += 1 if scenario_data["meta"]["skipped"] else 0
meta["scenarios"]["undefined"] += 1 if scenario_data["meta"]["undefined"] else 0
# steps
meta["steps"]["total"] += scenario_data["meta"]["total"]
meta["steps"]["success"] += scenario_data["meta"]["success"]
meta["steps"]["failures"] += scenario_data["meta"]["failures"]
meta["steps"]["skipped"] += scenario_data["meta"]["skipped"]
meta["steps"]["undefined"] += scenario_data["meta"]["undefined"]
return {
"name": feature_result.feature.name,
"duration": _get_duration(feature_result.feature),
"meta": meta,
"scenarios": scenarios,
"background": extract_background_data(feature_result.feature.background)
}
def extract_background_data(background):
"""
Extract data from a `Background` instance.
:param background: a `Background` instance, possibly None
:return: a Python dictionary
"""
if not background:
return None
step_data = [extract_step_data(step) for step in background.steps]
return {
"meta": {
"total": len(background.steps),
"success": sum([s["meta"]["success"] for s in step_data]),
"failures": sum([s["meta"]["failed"] for s in step_data]),
"skipped": sum([s["meta"]["skipped"] for s in step_data]),
"undefined": sum([s["meta"]["undefined"] for s in step_data]),
},
"steps": step_data
}
def extract_scenario_data(scenario_result):
"""
Extract data from a `ScenarioResult` instance.
:param scenario_result: a `ScenarioResult` instance
:return: a Python dictionary
"""
return {
"name": scenario_result.scenario.name,
"duration": _get_duration(scenario_result.scenario),
"outline": scenario_result.outline,
"meta": {
"total": scenario_result.total_steps,
"success": len(scenario_result.steps_passed),
"failures": len(scenario_result.steps_failed),
"skipped": len(scenario_result.steps_skipped),
"undefined": len(scenario_result.steps_undefined),
},
"steps": [extract_step_data(step) for step in scenario_result.all_steps]
}
def extract_step_data(step):
"""
Extract data from a `Step` instance.
:param step: a `Step` instance
:return a Python dictionary
"""
step_data = {
"name": step.sentence,
"duration": _get_duration(step),
"meta": {
"success": bool(step.passed),
"failed": bool(step.failed),
"skipped": not step.passed and not step.failed and step.has_definition,
"undefined": not step.has_definition,
},
"failure": {}
}
if step.why:
step_data["failure"] = {
"exception": repr(step.why.exception),
"traceback": step.why.traceback
}
return step_data
def extract_meta(total):
"""
Extract metadata from the `TotalResult`.
:param total: a `TotalResult` instance
:return: a Python dictionary
"""
return {
"features": {
"total": total.features_ran,
"success": total.features_passed,
"failures": total.features_ran - total.features_passed,
},
"scenarios": {
"total": total.scenarios_ran,
"success": total.scenarios_passed,
"failures": total.scenarios_ran - total.scenarios_passed,
},
"steps": {
"total": total.steps,
"success": total.steps_passed,
"failures": total.steps_failed,
"skipped": total.steps_skipped,
"undefined": total.steps_undefined,
},
"is_success": total.is_success,
}
def _get_duration(element):
"""
Return the duration of an element.
:param element: either a step or a scenario or a feature
"""
return (element._stopped - element._started).seconds if hasattr(element, '_started') else None
| 7,304 | Python | .py | 199 | 28.542714 | 98 | 0.568824 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,680 | non_verbose.py | gabrielfalcao_lettuce/lettuce/plugins/non_verbose.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from lettuce import core
from lettuce.terrain import after
from lettuce.terrain import before
@before.each_step
def print_step_running(step):
logging.info(step.represent_string(step.sentence))
@after.each_step
def print_step_ran(step):
logging.info("\033[A" + step.represent_string(step.sentence))
@before.each_scenario
def print_scenario_running(scenario):
logging.info(scenario.represented())
@before.each_feature
def print_feature_running(feature):
logging.info("\n")
logging.info(feature.represented())
logging.info("\n")
@after.all
def print_end(total):
logging.info("\n")
word = total.features_ran > 1 and "features" or "feature"
logging.info("%d %s (%d passed)\n" % (
total.features_ran,
word,
total.features_passed))
word = total.scenarios_ran > 1 and "scenarios" or "scenario"
logging.info("%d %s (%d passed)\n" % (
total.scenarios_ran,
word,
total.scenarios_passed))
word = total.steps > 1 and "steps" or "step"
logging.info("%d %s (%d passed)\n" % (
total.steps,
word,
total.steps_passed))
def print_no_features_found(where):
where = core.fs.relpath(where)
if not where.startswith(os.sep):
where = '.%s%s' % (os.sep, where)
logging.info('\033[1;31mOops!\033[0m\n')
logging.info(
'\033[1;37mcould not find features at '
'\033[1;33m%s\033[0m\n' % where)
| 2,251 | Python | .py | 61 | 32.967213 | 71 | 0.702069 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,681 | autopdb.py | gabrielfalcao_lettuce/lettuce/plugins/autopdb.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from lettuce.terrain import after
def enable(runner):
@after.each_step
def failfast_or_pdb(step):
has_traceback = step.why
if not has_traceback:
return
sys.stdout.write(step.why.traceback + '\n')
try:
from IPython.core.debugger import Pdb
pdb = Pdb()
except ImportError:
try:
from IPython.Debugger import Pdb
from IPython.Shell import IPShell
IPShell(argv=[''])
pdb = Pdb()
except ImportError:
import pdb
matched, defined = step.pre_run(False)
if matched:
args = matched.groups()
kwargs = matched.groupdict()
pdb.runcall(defined.function, step, *args, **kwargs)
| 865 | Python | .py | 27 | 21.925926 | 64 | 0.554087 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,682 | shell_output.py | gabrielfalcao_lettuce/lettuce/plugins/shell_output.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERsteps.pyCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from lettuce import core
from lettuce import strings
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.terrain import world
def wrt(what):
if isinstance(what, unicode):
what = what.encode('utf-8')
sys.stdout.write(what)
@after.each_step
def print_step_running(step):
if not step.display:
return
wrt(step.represent_string(step.original_sentence).rstrip())
if not step.defined_at:
wrt(" (undefined)")
wrt('\n')
if step.hashes:
wrt(step.represent_hashes())
if step.failed:
print_spaced = lambda x: wrt("%s%s\n" % (" " * step.indentation, x))
for line in step.why.traceback.splitlines():
print_spaced(line)
@before.each_scenario
def print_scenario_running(scenario):
if scenario.background:
# Only print the background on the first scenario run
# So, we determine if this was called previously with the attached background.
# If so, skip the print_scenario() since we'll call it again in the after_background.
if not hasattr(world, 'background_scenario_holder'):
world.background_scenario_holder = {}
if scenario.background not in world.background_scenario_holder:
# We haven't seen this background before, add our 1st scenario
world.background_scenario_holder[scenario.background] = scenario
return
wrt('\n')
wrt(scenario.represented())
@before.each_background
def print_background_running(background):
wrt('\n')
wrt(background.represented())
wrt('\n')
@after.each_background
def print_first_scenario_running(background, results):
scenario = world.background_scenario_holder[background]
print_scenario_running(scenario)
@after.outline
def print_outline(scenario, order, outline, reasons_to_fail):
table = strings.dicts_to_string(scenario.outlines, scenario.keys)
lines = table.splitlines()
head = lines.pop(0)
wline = lambda x: wrt("%s%s\n" % (" " * scenario.table_indentation, x))
if order is 0:
wrt("\n")
wrt("%s%s:\n" % (" " * scenario.indentation, scenario.language.first_of_examples))
wline(head)
line = lines[order]
wline(line)
if reasons_to_fail:
print_spaced = lambda x: wrt("%s%s\n" % (" " * scenario.table_indentation, x))
elines = reasons_to_fail[0].traceback.splitlines()
for line in elines:
print_spaced(line)
@before.each_feature
def print_feature_running(feature):
wrt("\n")
wrt(feature.represented())
@after.harvest
@after.all
def print_end(total=None):
wrt("\n")
if isinstance(total, core.SummaryTotalResults):
wrt("Test Suite Summary:\n")
word = total.features_ran_overall > 1 and "features" or "feature"
wrt("%d %s (%d passed)\n" % (
total.features_ran_overall,
word,
total.features_passed_overall))
else:
word = total.features_ran > 1 and "features" or "feature"
wrt("%d %s (%d passed)\n" % (
total.features_ran,
word,
total.features_passed))
word = total.scenarios_ran > 1 and "scenarios" or "scenario"
wrt("%d %s (%d passed)\n" % (
total.scenarios_ran,
word,
total.scenarios_passed))
steps_details = []
for kind in ("failed","skipped", "undefined"):
attr = 'steps_%s' % kind
stotal = getattr(total, attr)
if stotal:
steps_details.append("%d %s" % (stotal, kind))
steps_details.append("%d passed" % total.steps_passed)
word = total.steps > 1 and "steps" or "step"
wrt("%d %s (%s)\n" % (
total.steps,
word,
", ".join(steps_details)))
if total.proposed_definitions:
wrt("\nYou can implement step definitions for undefined steps with these snippets:\n\n")
wrt("# -*- coding: utf-8 -*-\n")
wrt("from lettuce import step\n\n")
for step in total.proposed_definitions:
method_name = step.proposed_method_name
wrt("@step(u'%s')\n" % step.proposed_sentence)
wrt("def %s:\n" % method_name)
wrt(" assert False, 'This step must be implemented'\n")
if total.failed_scenario_locations:
# print list of failed scenarios, with their file and line number
wrt("\nList of failed scenarios:\n")
for scenario in total.failed_scenario_locations:
wrt(scenario)
wrt("\n")
def print_no_features_found(where):
where = core.fs.relpath(where)
if not where.startswith(os.sep):
where = '.%s%s' % (os.sep, where)
wrt('Oops!\n')
wrt('could not find features at %s\n' % where)
| 5,545 | Python | .py | 140 | 33.364286 | 96 | 0.657302 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,683 | subunit_output.py | gabrielfalcao_lettuce/lettuce/plugins/subunit_output.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERsteps.pyCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sys
from StringIO import StringIO
from lettuce.terrain import before, after
from subunit.v2 import StreamResultToBytes
from subunit.iso8601 import Utc
def open_file(filename):
"""
open a subunit file
this is not a context manager because it is used asynchronously by
hooks
out of the scope of enable() because we want to patch it in our tests
"""
filename = filename or 'subunit.bin'
return open(filename, 'wb')
def close_file(file_):
"""
"""
file_.close()
def enable(filename=None):
file_ = open_file(filename)
streamresult = StreamResultToBytes(file_)
streamresult.startTestRun()
real_stdout = sys.stdout
real_stderr = sys.stderr
@before.each_scenario
def before_scenario(scenario):
# create redirects for stdout and stderr
scenario.stdout = StringIO()
scenario.stderr = StringIO()
try:
test_tags = scenario.tags
except AttributeError:
test_tags = ()
streamresult.status(test_id=get_test_id(scenario),
test_status='inprogress',
test_tags=test_tags,
timestamp=now())
@before.step_output
def capture_output(step):
# only consider steps for background
if not step.scenario:
return
sys.stdout = step.scenario.stdout
sys.stderr = step.scenario.stderr
@after.step_output
def uncapture_output(step):
sys.stdout = real_stdout
sys.stderr = real_stderr
@after.each_scenario
def after_scenario(scenario):
streamresult.status(test_id=get_test_id(scenario),
file_name='stdout',
file_bytes=scenario.stdout.getvalue().encode('utf-8'),
mime_type='text/plain; charset=utf8',
eof=True)
streamresult.status(test_id=get_test_id(scenario),
file_name='stderr',
file_bytes=scenario.stderr.getvalue().encode('utf-8'),
mime_type='text/plain; charset=utf8',
eof=True)
if scenario.passed:
streamresult.status(test_id=get_test_id(scenario),
test_status='success',
timestamp=now())
else:
streamresult.status(test_id=get_test_id(scenario),
test_status='fail',
timestamp=now())
@after.each_step
def after_step(step):
# only consider steps for background
if not step.scenario:
return
test_id = get_test_id(step.scenario)
if step.passed:
marker = u'✔'
elif not step.defined_at:
marker = u'?'
elif step.failed:
marker = u'❌'
try:
streamresult.status(test_id=test_id,
file_name='traceback',
file_bytes=step.why.traceback.encode('utf-8'),
mime_type='text/plain; charset=utf8')
except AttributeError:
pass
elif not step.ran:
marker = u' '
else:
raise AssertionError("Internal error")
steps = u'{marker} {sentence}\n'.format(
marker=marker,
sentence=step.sentence)
streamresult.status(test_id=test_id,
file_name='steps',
file_bytes=steps.encode('utf-8'),
mime_type='text/plain; charset=utf8')
@after.all
def after_all(total):
streamresult.stopTestRun()
close_file(file_)
def get_test_id(scenario):
try:
return '{feature}: {scenario}'.format(
feature=scenario.feature.name,
scenario=scenario.name)
except AttributeError:
return '{feature}: Background'.format(
feature=scenario.feature.name)
def now():
"""
A timestamp suitable for subunit
"""
return datetime.datetime.now(tz=Utc())
| 5,048 | Python | .py | 132 | 27.477273 | 82 | 0.587836 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,684 | __init__.py | gabrielfalcao_lettuce/lettuce/plugins/__init__.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| 784 | Python | .py | 16 | 48 | 71 | 0.765625 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,685 | xunit_output.py | gabrielfalcao_lettuce/lettuce/plugins/xunit_output.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERsteps.pyCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime, timedelta
from lettuce.terrain import after
from lettuce.terrain import before
from xml.dom import minidom
from lettuce.strings import utf8_string
def wrt_output(filename, content):
f = open(filename, "w")
if isinstance(content, unicode):
content = content.encode('utf-8')
f.write(content)
f.close()
def write_xml_doc(filename, doc):
wrt_output(filename, doc.toxml())
def total_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def enable(filename=None, tags=None):
doc = minidom.Document()
root = doc.createElement("testsuite")
if tags:
root.setAttribute("name", "_".join(tags))
else:
root.setAttribute("name", "lettuce")
root.setAttribute("hostname", "localhost")
root.setAttribute("timestamp", datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))
output_filename = filename or "lettucetests.xml"
@before.each_step
def time_step(step):
step.started = datetime.now()
@after.each_step
def create_test_case_step(step):
parent = step.scenario or step.background
if getattr(parent, 'outlines', None):
return
name = getattr(parent, 'name', 'Background') # Background sections are nameless
classname = u"%s : %s" % (parent.feature.name, name)
tc = doc.createElement("testcase")
tc.setAttribute("classname", classname)
tc.setAttribute("name", step.sentence)
try:
tc.setAttribute("time", str(total_seconds((datetime.now() - step.started))))
except AttributeError:
tc.setAttribute("time", str(total_seconds(timedelta(seconds=0))))
if not step.ran:
skip = doc.createElement("skipped")
skip.setAttribute("type", "UndefinedStep(%s)" % step.sentence)
tc.appendChild(skip)
if step.failed:
cdata = doc.createCDATASection(step.why.traceback)
failure = doc.createElement("failure")
if hasattr(step.why, 'cause'):
failure.setAttribute("message", step.why.cause)
failure.setAttribute("type", step.why.exception.__class__.__name__)
failure.appendChild(cdata)
tc.appendChild(failure)
root.appendChild(tc)
@before.outline
def time_outline(scenario, order, outline, reasons_to_fail):
scenario.outline_started = datetime.now()
pass
@after.outline
def create_test_case_outline(scenario, order, outline, reasons_to_fail):
classname = "%s : %s" % (scenario.feature.name, scenario.name)
tc = doc.createElement("testcase")
tc.setAttribute("classname", classname)
tc.setAttribute("name", u'| %s |' % u' | '.join(outline.values()))
tc.setAttribute("time", str(total_seconds((datetime.now() - scenario.outline_started))))
for reason_to_fail in reasons_to_fail:
cdata = doc.createCDATASection(reason_to_fail.traceback)
failure = doc.createElement("failure")
failure.setAttribute("message", reason_to_fail.cause if hasattr(reason_to_fail,"cause") else "")
failure.appendChild(cdata)
tc.appendChild(failure)
root.appendChild(tc)
@after.all
def output_xml(total):
root.setAttribute("tests", str(total.steps))
root.setAttribute("failures", str(total.steps_failed))
root.setAttribute("errors", '0')
root.setAttribute("time", '0')
doc.appendChild(root)
write_xml_doc(output_filename, doc)
| 4,401 | Python | .py | 97 | 38.247423 | 108 | 0.667759 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,686 | dots.py | gabrielfalcao_lettuce/lettuce/plugins/dots.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERsteps.pyCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from lettuce import core
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.plugins.reporter import Reporter
class DotReporter(Reporter):
def print_scenario_ran(self, scenario):
if scenario.passed:
self.wrt(".")
elif scenario.failed:
reason = self.scenarios_and_its_fails[scenario]
if isinstance(reason.exception, AssertionError):
self.wrt("F")
else:
self.wrt("E")
reporter = DotReporter()
before.each_scenario(reporter.print_scenario_running)
after.each_scenario(reporter.print_scenario_ran)
after.each_step(reporter.store_failed_step)
after.all(reporter.print_end)
def print_no_features_found(where):
where = core.fs.relpath(where)
if not where.startswith(os.sep):
where = '.%s%s' % (os.sep, where)
reporter.wrt('Oops!\n')
reporter.wrt('could not find features at %s\n' % where)
| 1,745 | Python | .py | 42 | 37.428571 | 71 | 0.727005 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,687 | colored_shell_output.py | gabrielfalcao_lettuce/lettuce/plugins/colored_shell_output.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sys
from lettuce import core
from lettuce import strings
from lettuce import terminal
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.terrain import world
def wrt(what):
if isinstance(what, unicode):
what = what.encode('utf-8')
sys.stdout.write(what)
def wrap_file_and_line(string, start, end):
return re.sub(r'([#] [^:]+[:]\d+)', '%s\g<1>%s' % (start, end), string)
def wp(l):
if l.startswith("\033[1;32m"):
l = l.replace(" |", "\033[1;37m |\033[1;32m")
if l.startswith("\033[1;36m"):
l = l.replace(" |", "\033[1;37m |\033[1;36m")
if l.startswith("\033[0;36m"):
l = l.replace(" |", "\033[1;37m |\033[0;36m")
if l.startswith("\033[0;31m"):
l = l.replace(" |", "\033[1;37m |\033[0;31m")
if l.startswith("\033[1;30m"):
l = l.replace(" |", "\033[1;37m |\033[1;30m")
if l.startswith("\033[1;31m"):
l = l.replace(" |", "\033[1;37m |\033[0;31m")
return l
def write_out(what):
wrt(wp(what))
@before.each_step
def print_step_running(step):
if not step.defined_at or not step.display:
return
color = '\033[1;30m'
if step.scenario and step.scenario.outlines:
color = '\033[0;36m'
string = step.represent_string(step.original_sentence)
string = wrap_file_and_line(string, '\033[1;30m', '\033[0m')
write_out("%s%s" % (color, string))
if step.hashes and step.defined_at:
for line in step.represent_hashes().splitlines():
write_out("\033[1;30m%s\033[0m\n" % line)
@after.each_step
def print_step_ran(step):
if not step.display:
return
if step.scenario and step.scenario.outlines and (step.failed or step.passed or step.defined_at):
return
if step.hashes and step.defined_at:
write_out("\033[A" * (len(step.hashes) + 1))
string = step.represent_string(step.original_sentence)
if not step.failed:
string = wrap_file_and_line(string, '\033[1;30m', '\033[0m')
prefix = '\033[A'
width, height = terminal.get_size()
lines_up = len(string) / float(width)
if lines_up < 1:
lines_up = 1
else:
lines_up = int(lines_up) + 1
#prefix = prefix * lines_up
if step.failed:
color = "\033[0;31m"
string = wrap_file_and_line(string, '\033[1;41;33m', '\033[0m')
elif step.passed:
color = "\033[1;32m"
elif step.defined_at:
color = "\033[0;36m"
else:
color = "\033[0;33m"
prefix = ""
write_out("%s%s%s" % (prefix, color, string))
if step.hashes:
for line in step.represent_hashes().splitlines():
write_out("%s%s\033[0m\n" % (color, line))
if step.failed:
wrt("\033[1;31m")
pspaced = lambda x: wrt("%s%s" % (" " * step.indentation, x))
lines = step.why.traceback.splitlines()
for pindex, line in enumerate(lines):
pspaced(line)
if pindex + 1 < len(lines):
wrt("\n")
wrt("\033[0m\n")
@before.each_scenario
def print_scenario_running(scenario):
if scenario.background:
# Only print the background on the first scenario run
# So, we determine if this was called previously with the attached background.
# If so, skip the print_scenario() since we'll call it again in the after_background.
if not hasattr(world, 'background_scenario_holder'):
world.background_scenario_holder = {}
if scenario.background not in world.background_scenario_holder:
# We haven't seen this background before, add our 1st scenario
world.background_scenario_holder[scenario.background] = scenario
return
string = scenario.represented()
string = wrap_file_and_line(string, '\033[1;30m', '\033[0m')
write_out("\n\033[1;37m%s" % string)
@after.outline
def print_outline(scenario, order, outline, reasons_to_fail):
table = strings.dicts_to_string(scenario.outlines, scenario.keys)
lines = table.splitlines()
head = lines.pop(0)
wline = lambda x: write_out("\033[0;36m%s%s\033[0m\n" % (" " * scenario.table_indentation, x))
wline_success = lambda x: write_out("\033[1;32m%s%s\033[0m\n" % (" " * scenario.table_indentation, x))
wline_red_outline = lambda x: write_out("\033[1;31m%s%s\033[0m\n" % (" " * scenario.table_indentation, x))
wline_red = lambda x: write_out("%s%s" % (" " * scenario.table_indentation, x))
if order is 0:
wrt("\n")
wrt("\033[1;37m%s%s:\033[0m\n" % (" " * scenario.indentation, scenario.language.first_of_examples))
wline(head)
line = lines[order]
if reasons_to_fail:
wline_red_outline(line)
else:
wline_success(line)
if reasons_to_fail:
elines = reasons_to_fail[0].traceback.splitlines()
wrt("\033[1;31m")
for pindex, line in enumerate(elines):
wline_red(line)
if pindex + 1 < len(elines):
wrt("\n")
wrt("\033[0m\n")
@before.each_feature
def print_feature_running(feature):
string = feature.represented()
lines = string.splitlines()
write_out("\n")
for line in lines:
line = wrap_file_and_line(line, '\033[1;30m', '\033[0m')
write_out("\033[1;37m%s\n" % line)
@after.harvest
@after.all
def print_end(total=None):
if total is None:
return
write_out("\n")
if isinstance(total, core.SummaryTotalResults):
word = total.features_ran_overall > 1 and "features" or "feature"
color = "\033[1;32m"
if total.features_passed_overall is 0:
color = "\033[0;31m"
write_out("\033[1;37mTest Suite Summary:\n")
write_out("\033[1;37m%d %s (%s%d passed\033[1;37m)\033[0m\n" % (
total.features_ran_overall,
word,
color,
total.features_passed_overall))
else:
word = total.features_ran > 1 and "features" or "feature"
color = "\033[1;32m"
if total.features_passed is 0:
color = "\033[0;31m"
write_out("\033[1;37m%d %s (%s%d passed\033[1;37m)\033[0m\n" % (
total.features_ran,
word,
color,
total.features_passed))
color = "\033[1;32m"
if total.scenarios_passed is 0:
color = "\033[0;31m"
word = total.scenarios_ran > 1 and "scenarios" or "scenario"
write_out("\033[1;37m%d %s (%s%d passed\033[1;37m)\033[0m\n" % (
total.scenarios_ran,
word,
color,
total.scenarios_passed))
steps_details = []
kinds_and_colors = (
('failed', '\033[0;31m'),
('skipped', '\033[0;36m'),
('undefined', '\033[0;33m'),
)
for kind, color in kinds_and_colors:
attr = 'steps_%s' % kind
stotal = getattr(total, attr)
if stotal:
steps_details.append("%s%d %s" % (color, stotal, kind))
steps_details.append("\033[1;32m%d passed\033[1;37m" % total.steps_passed)
word = total.steps > 1 and "steps" or "step"
content = "\033[1;37m, ".join(steps_details)
word = total.steps > 1 and "steps" or "step"
write_out("\033[1;37m%d %s (%s)\033[0m\n" % (
total.steps,
word,
content))
if total.proposed_definitions:
wrt("\n\033[0;33mYou can implement step definitions for undefined steps with these snippets:\n\n")
wrt("# -*- coding: utf-8 -*-\n")
wrt("from lettuce import step\n\n")
last = len(total.proposed_definitions) - 1
for current, step in enumerate(total.proposed_definitions):
method_name = step.proposed_method_name
wrt("@step(u'%s')\n" % step.proposed_sentence)
wrt("def %s:\n" % method_name)
wrt(" assert False, 'This step must be implemented'")
if current is last:
wrt("\033[0m")
wrt("\n")
if total.failed_scenario_locations:
# print list of failed scenarios, with their file and line number
wrt("\n")
wrt("\033[1;31m")
wrt("List of failed scenarios:\n")
wrt("\033[0;31m")
for scenario in total.failed_scenario_locations:
wrt(scenario)
wrt("\033[0m")
wrt("\n")
def print_no_features_found(where):
where = core.fs.relpath(where)
if not where.startswith(os.sep):
where = '.%s%s' % (os.sep, where)
write_out('\033[1;31mOops!\033[0m\n')
write_out(
'\033[1;37mcould not find features at '
'\033[1;33m%s\033[0m\n' % where)
@before.each_background
def print_background_running(background):
wrt('\n')
wrt('\033[1;37m')
wrt(background.represented())
wrt('\033[0m\n')
@after.each_background
def print_first_scenario_running(background, results):
scenario = world.background_scenario_holder[background]
print_scenario_running(scenario)
| 9,740 | Python | .py | 247 | 32.502024 | 110 | 0.615532 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,688 | scenario_names.py | gabrielfalcao_lettuce/lettuce/plugins/scenario_names.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERsteps.pyCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from lettuce import core
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.plugins.reporter import Reporter
class NameReporter(Reporter):
def print_scenario_running(self, scenario):
self.wrt('%s ... ' % scenario.name)
def print_scenario_ran(self, scenario):
if scenario.passed:
self.wrt("OK")
elif scenario.failed:
reason = self.scenarios_and_its_fails[scenario]
if isinstance(reason.exception, AssertionError):
self.wrt("FAILED")
else:
self.wrt("ERROR")
self.wrt("\n")
reporter = NameReporter()
before.each_scenario(reporter.print_scenario_running)
after.each_scenario(reporter.print_scenario_ran)
after.each_step(reporter.store_failed_step)
after.all(reporter.print_end)
def print_no_features_found(where):
where = core.fs.relpath(where)
if not where.startswith(os.sep):
where = '.%s%s' % (os.sep, where)
reporter.wrt('Oops!\n')
reporter.wrt('could not find features at %s\n' % where)
| 1,873 | Python | .py | 45 | 37.244444 | 71 | 0.719231 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,689 | reporter.py | gabrielfalcao_lettuce/lettuce/plugins/reporter.py | import sys
class Reporter(object):
def __init__(self):
self.failed_scenarios = []
self.scenarios_and_its_fails = {}
def wrt(self, what):
if isinstance(what, unicode):
what = what.encode('utf-8')
sys.stdout.write(what)
def store_failed_step(self, step):
if step.failed and step.scenario not in self.failed_scenarios:
self.scenarios_and_its_fails[step.scenario] = step.why
self.failed_scenarios.append(step.scenario)
def print_scenario_running(self, scenario):
pass
def print_scenario_ran(self, scenario):
pass
def print_end(self, total):
if total.scenarios_passed < total.scenarios_ran:
self.wrt("\n")
self.wrt("\n")
for scenario in self.failed_scenarios:
reason = self.scenarios_and_its_fails[scenario]
self.wrt(unicode(reason.step))
self.wrt("\n")
self.wrt(reason.traceback)
self.wrt("\n")
word = total.features_ran > 1 and "features" or "feature"
self.wrt("%d %s (%d passed)\n" % (
total.features_ran,
word,
total.features_passed))
word = total.scenarios_ran > 1 and "scenarios" or "scenario"
self.wrt("%d %s (%d passed)\n" % (
total.scenarios_ran,
word,
total.scenarios_passed))
steps_details = []
for kind in "failed", "skipped", "undefined":
attr = 'steps_%s' % kind
stotal = getattr(total, attr)
if stotal:
steps_details.append("%d %s" % (stotal, kind))
steps_details.append("%d passed" % total.steps_passed)
word = total.steps > 1 and "steps" or "step"
self.wrt("%d %s (%s)\n" % (
total.steps,
word,
", ".join(steps_details)))
if total.failed_scenario_locations:
self.wrt("\n")
self.wrt("List of failed scenarios:\n")
for scenario in total.failed_scenario_locations:
self.wrt(scenario)
self.wrt("\n")
| 2,156 | Python | .py | 55 | 28.236364 | 70 | 0.550981 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,690 | conf.py | gabrielfalcao_lettuce/docs/conf.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
here = os.path.dirname(__file__)
parent = os.path.dirname(here)
sys.path.append(parent)
sys.path.append(os.path.join(here, '_ext'))
from lettuce import version, release
copyright = u'Gabriel Falcão <[email protected]>'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage'
]
templates_path = ["_templates"]
source_suffix = '.rst'
master_doc = 'contents'
project = 'Lettuce'
rst_epilog = '''
.. _Lettuce: http://lettuce.it
.. _Python: http://python.org
.. _Ruby: http://ruby-lang.org/
.. _Agile: http://agilemanifesto.org
.. _Cucumber: http://cukes.info
.. _Pyccuracy: http://github.com/heynemann/pyccuracy
.. _TDD: http://en.wikipedia.org/wiki/Test_Driven_Development
.. _BDD: http://en.wikipedia.org/wiki/Behavior_Driven_Development
'''
release = "%s (%s release)" % (version, release)
today_fmt = '%B %d, %Y'
add_function_parentheses = True
add_module_names = True
show_authors = True
html_use_smartypants = True
html_copy_source = True
pygments_style = 'bw'
exclude_dirnames = ['.git']
html_style = 'lettuce-docs.css'
html_static_path = ["_static"]
html_last_updated_fmt = '%b %d, %Y'
html_translator_class = "adjusts.LettuceHTMLTranslator"
html_additional_pages = {}
| 2,054 | Python | .py | 58 | 33.931034 | 71 | 0.738911 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,691 | adjusts.py | gabrielfalcao_lettuce/docs/_ext/adjusts.py | from sphinx.writers import html as sphinx_htmlwriter
class LettuceHTMLTranslator(sphinx_htmlwriter.SmartyPantsHTMLTranslator):
"""
Lettuce-customized HTML transformations of documentation. Based on
djangodocs.DjangoHTMLTranslator
"""
def visit_section(self, node):
node['ids'] = map(lambda x: "lettuce-%s" % x, node['ids'])
sphinx_htmlwriter.SmartyPantsHTMLTranslator.visit_section(self, node)
| 432 | Python | .py | 9 | 42.777778 | 77 | 0.752969 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,692 | util.py | gabrielfalcao_lettuce/tests/util.py | """
Utils for testing
"""
import commands
def run_scenario(application='', feature='', scenario='', **opts):
"""
Runs a Django scenario and returns it's output vars
"""
if application:
application = ' {0}/features/'.format(application)
if feature:
feature = '{0}.feature'.format(feature)
if scenario:
scenario = ' -s {0:d}'.format(scenario)
opts_string = ''
for opt, val in opts.iteritems():
if not val:
val = ''
opts_string = ' '.join((opts_string, opt, val))
cmd = 'python manage.py harvest -v 3 -T {0}{1}{2}{3}'.format(opts_string,
application,
feature,
scenario,
)
return commands.getstatusoutput(cmd)
| 965 | Python | .py | 25 | 23.24 | 77 | 0.446945 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,693 | asserts.py | gabrielfalcao_lettuce/tests/asserts.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
from StringIO import StringIO
from nose.tools import assert_equals, assert_not_equals
from lettuce import registry
from difflib import Differ
def prepare_stdout():
registry.clear()
if isinstance(sys.stdout, StringIO):
del sys.stdout
std = StringIO()
sys.stdout = std
def prepare_stderr():
registry.clear()
if isinstance(sys.stderr, StringIO):
del sys.stderr
std = StringIO()
sys.stderr = std
def assert_lines(original, expected):
original = original.decode('utf-8') if isinstance(original, basestring) else original
assert_lines_unicode(original, expected)
def assert_lines_unicode(original, expected):
if isinstance(expected, unicode):
expected = expected.encode('utf-8')
if isinstance(original, unicode):
original = original.encode('utf-8')
expected_lines = expected.splitlines(1)
original_lines = original.splitlines(1)
if original != expected:
comparison = Differ().compare(expected_lines, original_lines)
if isinstance(comparison, unicode):
expected = expected.encode('utf-8')
diff = u''.encode('utf-8').join(comparison)
msg = (u'Output differed as follows:\n{0}\n'
'Output was:\n{1}\nExpected was:\n{2}'.encode('utf-8'))
raise AssertionError(repr(msg.format(diff, original, expected)).replace(r'\n', '\n'))
assert_equals(
len(expected), len(original),
u'Output appears equal, but of different lengths.')
def assert_lines_with_traceback(one, other):
lines_one = one.splitlines()
lines_other = other.splitlines()
regex = re.compile('File "([^"]+)", line \d+, in.*')
error = '%r should be in traceback line %r.\nFull output was:\n' + one
for line1, line2 in zip(lines_one, lines_other):
if regex.search(line1) and regex.search(line2):
found = regex.search(line2)
filename = found.group(1)
params = filename, line1
assert filename in line1, error % params
else:
assert_unicode_equals(line1, line2)
assert_unicode_equals(len(lines_one), len(lines_other))
def assert_unicode_equals(original, expected):
if isinstance(original, basestring):
original = original.decode('utf-8')
assert_equals.im_class.maxDiff = None
assert_equals(original, expected)
def assert_stderr(expected):
string = sys.stderr.getvalue()
assert_unicode_equals(string, expected)
def assert_stdout(expected):
string = sys.stdout.getvalue()
assert_unicode_equals(string, expected)
def assert_stdout_lines(other):
assert_lines(sys.stdout.getvalue(), other)
def assert_stderr_lines(other):
assert_lines(sys.stderr.getvalue(), other)
def assert_stdout_lines_with_traceback(other):
assert_lines_with_traceback(sys.stdout.getvalue(), other)
def assert_stderr_lines_with_traceback(other):
assert_lines_with_traceback(sys.stderr.getvalue(), other)
| 3,772 | Python | .py | 88 | 37.784091 | 93 | 0.708425 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,694 | test_language_ptbr.py | gabrielfalcao_lettuce/tests/unit/test_language_ptbr.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import assert_equals
from lettuce.core import Language, Scenario, Feature
SCENARIO = u"""
Cenário: Consolidar o banco de dados de cursos universitários em arquivo texto
Dados os seguintes cursos cadastrados no banco de dados da universidade:
| Nome | Duração |
| Ciência da Computação | 5 anos |
| Nutrição | 4 anos |
Quando eu consolido os dados no arquivo 'cursos.txt'
Então a 1a linha do arquivo 'cursos.txt' contém 'Ciência da Computação:5'
E a 2a linha do arquivo 'cursos.txt' contém 'Nutrição:4'
"""
SCENARIO_OUTLINE1 = u'''
Esquema do Cenário: Cadastrar um aluno no banco de dados
Dado que eu preencho o campo "nome" com "<nome>"
E que eu preencho o campo "idade" com "<idade>"
Quando eu salvo o formulário
Então vejo a mensagem "Aluno <nome>, de <idade> anos foi cadastrado com sucesso!"
Exemplos:
| nome | idade |
| Gabriel | 22 |
| João | 30 |
'''
SCENARIO_OUTLINE2 = u'''
Esquema do Cenário: Cadastrar um aluno no banco de dados
Dado que eu preencho o campo "nome" com "<nome>"
E que eu preencho o campo "idade" com "<idade>"
Quando eu salvo o formulário
Então vejo a mensagem "Aluno <nome>, de <idade> anos foi cadastrado com sucesso!"
Cenários:
| nome | idade |
| Gabriel | 99 |
| João | 100 |
'''
FEATURE = u'''
Funcionalidade: Pesquisar alunos com matrícula vencida
Como gerente financeiro
Eu quero pesquisar alunos com matrícula vencida
Para propor um financiamento
Cenário: Pesquisar por nome do curso
Dado que eu preencho o campo "nome do curso" com "Nutrição"
Quando eu clico em "pesquisar"
Então vejo os resultados:
| nome | valor devido |
| João | R$ 512,66 |
| Maria | R$ 998,41 |
| Ana | R$ 231,00 |
'''
def test_language_portuguese():
'Language: PT-BR -> Language class supports portuguese through code "pt-br"'
lang = Language('pt-br')
assert_equals(lang.code, u'pt-br')
assert_equals(lang.name, u'Portuguese')
assert_equals(lang.native, u'Português')
assert_equals(lang.feature, u'Funcionalidade')
assert_equals(lang.scenario, u'Cenário|Cenario')
assert_equals(lang.examples, u'Exemplos|Cenários')
assert_equals(lang.scenario_outline, u'Esquema do Cenário|Esquema do Cenario')
def test_scenario_ptbr_from_string():
'Language: PT-BR -> Scenario.from_string'
ptbr = Language('pt-br')
scenario = Scenario.from_string(SCENARIO, language=ptbr)
assert_equals(
scenario.name,
u'Consolidar o banco de dados de cursos universitários em arquivo texto'
)
assert_equals(
scenario.steps[0].hashes,
[
{'Nome': u'Ciência da Computação', u'Duração': '5 anos'},
{'Nome': u'Nutrição', u'Duração': '4 anos'},
]
)
def test_scenario_outline1_ptbr_from_string():
'Language: PT-BR -> Scenario.from_string, with scenario outline, first case'
ptbr = Language('pt-br')
scenario = Scenario.from_string(SCENARIO_OUTLINE1, language=ptbr)
assert_equals(
scenario.name,
'Cadastrar um aluno no banco de dados'
)
assert_equals(
scenario.outlines,
[
{'nome': u'Gabriel', u'idade': '22'},
{'nome': u'João', u'idade': '30'},
]
)
def test_scenario_outline2_ptbr_from_string():
'Language: PT-BR -> Scenario.from_string, with scenario outline, second case'
ptbr = Language('pt-br')
scenario = Scenario.from_string(SCENARIO_OUTLINE2, language=ptbr)
assert_equals(
scenario.name,
'Cadastrar um aluno no banco de dados'
)
assert_equals(
scenario.outlines,
[
{'nome': u'Gabriel', u'idade': '99'},
{'nome': u'João', u'idade': '100'},
]
)
def test_feature_ptbr_from_string():
'Language: PT-BR -> Feature.from_string'
ptbr = Language('pt-br')
feature = Feature.from_string(FEATURE, language=ptbr)
assert_equals(
feature.name,
u'Pesquisar alunos com matrícula vencida'
)
assert_equals(
feature.description,
u"Como gerente financeiro\n"
u"Eu quero pesquisar alunos com matrícula vencida\n"
u"Para propor um financiamento"
)
(scenario, ) = feature.scenarios
assert_equals(
scenario.name,
'Pesquisar por nome do curso'
)
assert_equals(
scenario.steps[-1].hashes,
[
{'nome': u'João', u'valor devido': 'R$ 512,66'},
{'nome': u'Maria', u'valor devido': 'R$ 998,41'},
{'nome': u'Ana', u'valor devido': 'R$ 231,00'},
]
)
| 5,564 | Python | .py | 146 | 32.342466 | 85 | 0.657121 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,695 | test_step_parsing.py | gabrielfalcao_lettuce/tests/unit/test_step_parsing.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
I_LIKE_VEGETABLES = "I hold a special love for green vegetables"
I_HAVE_TASTY_BEVERAGES = """I have the following tasty beverages in my freezer:
| Name | Type | Price |
| Skol | Beer | 3.80 |
| Nestea | Ice-tea | 2.10 |
""".strip()
I_DIE_HAPPY = "I shall die with love in my heart"
BACKGROUND_WITH_TAGGED_SCENARIO = '''
Background:
background line 1
@wip
Scenario:
Scenario line 1
'''
MULTI_LINE = '''
I have a string like so:
"""
This is line one
and this is line two
and this is line three
and this is line four,
with spaces at the beginning
"""
'''.strip()
MULTI_LINE_WHITESPACE = '''
I have a string like so:
"""
This is line one
and this is line two
and this is line three
" and this is line four,
"
" with spaces at the beginning
and spaces at the end "
"""
'''.strip()
INVALID_MULTI_LINE = '''
"""
invalid one...
"""
'''.strip()
import string
from lettuce.core import Step
from lettuce.exceptions import LettuceSyntaxError
from lettuce import strings
from nose.tools import assert_equals
from tests.asserts import *
def test_step_has_repr():
"Step implements __repr__ nicely"
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(
repr(step),
'<Step: "' + string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0] + '">'
)
def test_can_get_sentence_from_string():
"It should extract the sentence string from the whole step"
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert isinstance(step, Step)
assert_equals(
step.sentence,
string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0]
)
def test_can_parse_keys_from_table():
"It should take the keys from the step, if it has a table"
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(step.keys, ('Name', 'Type', 'Price'))
def test_can_parse_tables():
"It should have a list of data from a given step, if it has a table"
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert isinstance(step.hashes, list)
assert_equals(len(step.hashes), 2)
assert_equals(
step.hashes[0],
{
'Name': 'Skol',
'Type': 'Beer',
'Price': '3.80'
}
)
assert_equals(
step.hashes[1],
{
'Name': 'Nestea',
'Type': 'Ice-tea',
'Price': '2.10'
}
)
def test_can_parse_a_unary_array_from_single_step():
"It should extract a single ordinary step correctly into an array of steps"
steps = Step.many_from_lines(I_HAVE_TASTY_BEVERAGES.splitlines())
assert_equals(len(steps), 1)
assert isinstance(steps[0], Step)
assert_equals(steps[0].sentence, string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0])
def test_can_parse_a_unary_array_from_complicated_step():
"It should extract a single tabular step correctly into an array of steps"
steps = Step.many_from_lines(I_LIKE_VEGETABLES.splitlines())
assert_equals(len(steps), 1)
assert isinstance(steps[0], Step)
assert_equals(steps[0].sentence, I_LIKE_VEGETABLES)
def test_can_parse_regular_step_followed_by_tabular_step():
"It should correctly extract two steps (one regular, one tabular) into an array."
steps = Step.many_from_lines(I_LIKE_VEGETABLES.splitlines() + I_HAVE_TASTY_BEVERAGES.splitlines())
assert_equals(len(steps), 2)
assert isinstance(steps[0], Step)
assert isinstance(steps[1], Step)
assert_equals(steps[0].sentence, I_LIKE_VEGETABLES)
assert_equals(steps[1].sentence, string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0])
def test_can_parse_tabular_step_followed_by_regular_step():
"It should correctly extract two steps (one tabular, one regular) into an array."
steps = Step.many_from_lines(I_HAVE_TASTY_BEVERAGES.splitlines() + I_LIKE_VEGETABLES.splitlines())
assert_equals(len(steps), 2)
assert isinstance(steps[0], Step)
assert isinstance(steps[1], Step)
assert_equals(steps[0].sentence, string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0])
assert_equals(steps[1].sentence, I_LIKE_VEGETABLES)
def test_can_parse_two_ordinary_steps():
"It should correctly extract two ordinary steps into an array."
steps = Step.many_from_lines(I_DIE_HAPPY.splitlines() + I_LIKE_VEGETABLES.splitlines())
assert_equals(len(steps), 2)
assert isinstance(steps[0], Step)
assert isinstance(steps[1], Step)
assert_equals(steps[0].sentence, I_DIE_HAPPY)
assert_equals(steps[1].sentence, I_LIKE_VEGETABLES)
def test_can_parse_background_and_ignore_tag():
"It should correctly parse and ignore tags between the background and first step."
steps = Step.many_from_lines(BACKGROUND_WITH_TAGGED_SCENARIO.splitlines())
steps_without_tags = filter(lambda x: not x.sentence == '@wip', steps)
assert_equals(len(steps), len(steps_without_tags))
def test_cannot_start_with_multiline():
"It should raise an error when a step starts with a multiline string"
lines = strings.get_stripped_lines(INVALID_MULTI_LINE)
try:
step = Step.many_from_lines(lines)
except LettuceSyntaxError:
return
assert False, "LettuceSyntaxError not raised"
def test_multiline_is_part_of_previous_step():
"It should correctly parse a multi-line string as part of the preceding step"
lines = strings.get_stripped_lines(MULTI_LINE)
steps = Step.many_from_lines(lines)
print steps
assert_equals(len(steps), 1)
assert isinstance(steps[0], Step)
assert_equals(steps[0].sentence, 'I have a string like so:')
def test_multiline_is_parsed():
step = Step.from_string(MULTI_LINE)
assert_equals(step.sentence, 'I have a string like so:')
assert_equals(step.multiline, u"""This is line one
and this is line two
and this is line three
and this is line four,
with spaces at the beginning""")
def test_multiline_with_whitespace():
step = Step.from_string(MULTI_LINE_WHITESPACE)
assert_equals(step.sentence, 'I have a string like so:')
assert_equals(step.multiline, u"""This is line one
and this is line two
and this is line three
and this is line four,
with spaces at the beginning
and spaces at the end """)
def test_handy_attribute_for_first_occurrence_of_hashes():
'Step.hashes objects should have a ".first" attribute that gives the first row (dict) of the "hashes" list'
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(
step.hashes.first,
{'Name': 'Skol', 'Type': 'Beer', 'Price': '3.80'}
)
def test_hashes__first_attr_raises_assertion_error_if_empty():
'Step().hashes.first should raise a assertion error if the list is empty'
step = Step.from_string(I_DIE_HAPPY)
try:
step.hashes.first
failed = False
except AssertionError as e:
failed = True
assert_equals(
unicode(e),
'The step "%s" have no table defined, so that you can\'t use step.hashes.first' % I_DIE_HAPPY
)
assert failed, 'it should fail'
def test_handy_attribute_for_last_occurrence_of_hashes():
'Step.hashes objects should have a ".last" attribute that gives the last row (dict) of the "hashes" list'
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(
step.hashes.last,
{'Name': 'Nestea', 'Type': 'Ice-tea', 'Price': '2.10'}
)
def test_hashes__last_attr_raises_assertion_error_if_empty():
'Step().hashes.last should raise a assertion error if the list is empty'
step = Step.from_string(I_DIE_HAPPY)
try:
step.hashes.last
failed = False
except AssertionError as e:
failed = True
assert_equals(
unicode(e),
'The step "%s" have no table defined, so that you can\'t use step.hashes.last' % I_DIE_HAPPY
)
assert failed, 'it should fail'
def test_handy_function_for_table_members():
'Step.hashes.values_under should be a method that gives a list of members'
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(step.hashes.values_under('Name'), ['Skol', 'Nestea'])
def test_handy_function_for_table_members_fail_giving_assertionerror():
'Step.hashes.values_under raises AssertionError if the key does not exist'
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
try:
step.hashes.values_under('Foobar')
failed = False
except AssertionError as e:
failed = True
assert_equals(
unicode(e),
'The step "I have the following tasty beverages in my freezer:" ' \
'have no table column with the key "Foobar". ' \
'Could you check your step definition for that ? ' \
'Maybe there is a typo :)'
)
assert failed, 'it should fail'
| 9,574 | Python | .py | 237 | 35.312236 | 111 | 0.687264 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,696 | test_scenario_parsing.py | gabrielfalcao_lettuce/tests/unit/test_scenario_parsing.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sure import expect
from lettuce.core import Step
from lettuce.core import Scenario
from lettuce.core import Feature
from lettuce.exceptions import LettuceSyntaxError
from nose.tools import assert_equals
from nose.tools import assert_raises
SCENARIO1 = """
Scenario: Adding some students to my university database
Given I have the following courses in my university:
| Name | Duration |
| Computer Science | 5 years |
| Nutrition | 4 years |
When I consolidate the database into 'courses.txt'
Then I see the 1st line of 'courses.txt' has 'Computer Science:5'
And I see the 2nd line of 'courses.txt' has 'Nutrition:4'
"""
OUTLINED_SCENARIO = """
Scenario Outline: Add two numbers
Given I have entered <input_1> into the calculator
And I have entered <input_2> into the calculator
When I press <button>
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | button | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
"""
OUTLINED_SCENARIO_WITH_SUBSTITUTIONS_IN_TABLE = """
Scenario Outline: Bad configuration should fail
Given I provide the following configuration:
| Parameter | Value |
| a | <a> |
| b | <b> |
When I run the program
Then it should fail hard-core
Examples:
| a | b |
| 1 | 2 |
| 2 | 4 |
"""
OUTLINED_SCENARIO_WITH_SUBSTITUTIONS_IN_MULTILINE = '''
Scenario Outline: Parsing HTML
When I parse the HTML:
"""
<div><v></div>
"""
I should see "outline value"
Examples:
| v |
| outline value |
'''
OUTLINED_FEATURE = """
Feature: Do many things at once
In order to automate tests
As a automation freaky
I want to use scenario outlines
Scenario Outline: Add two numbers wisely
Given I have entered <input_1> into the calculator
And I have entered <input_2> into the calculator
When I press <button>
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | button | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
"""
OUTLINED_FEATURE_WITH_MANY = """
Feature: Full-featured feature
Scenario Outline: Do something
Given I have entered <input_1> into the <input_2>
Examples:
| input_1 | input_2 |
| ok | fail |
| fail | ok |
Scenario: Do something else
Given I am fine
Scenario: Worked!
Given it works
When I look for something
Then I find it
Scenario Outline: Add two numbers wisely
Given I have entered <input_1> into the calculator
And I have entered <input_2> into the calculator
When I press <button>
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | button | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
Examples:
| input_1 | input_2 | button | output |
| 5 | 7 | add | 12 |
"""
SCENARIO_FAILED = """
Scenario: Adding some students to my university database
| Name | Duration |
| Computer Science | 5 years |
| Nutrition | 4 years |
When I consolidate the database into 'courses.txt'
Then I see the 1st line of 'courses.txt' has 'Computer Science:5'
And I see the 2nd line of 'courses.txt' has 'Nutrition:4'
"""
OUTLINED_SCENARIO_WITH_COMMENTS_ON_EXAMPLES = """
Scenario Outline: Add two numbers
Given I have entered <input_1> into the calculator
And I have entered <input_2> into the calculator
When I press <button>
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | button | output |
| 20 | 30 | add | 50 |
#| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
# end of the scenario
"""
OUTLINED_SCENARIO_WITH_MORE_THAN_ONE_EXAMPLES_BLOCK = """
Scenario Outline: Add two numbers
Given I have entered <input_1> into the calculator
And I have entered <input_2> into the calculator
When I press <button>
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | button | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
Examples:
| input_1 | input_2 | button | output |
| 20 | 33 | add | 53 |
| 12 | 40 | add | 52 |
"""
COMMENTED_SCENARIO = """
Scenario: Adding some students to my university database
Given I have the following courses in my university:
| Name | Duration |
| Computer Science | 5 years |
| Nutrition | 4 years |
When I consolidate the database into 'courses.txt'
Then I see the 1st line of 'courses.txt' has 'Computer Science:5'
And I see the 2nd line of 'courses.txt' has 'Nutrition:4'
# Scenario: Adding some students to my university database
# Given I have the following courses in my university:
# | Name | Duration |
# | Computer Science | 5 years |
# | Nutrition | 4 years |
# When I consolidate the database into 'courses.txt'
# Then I see the 1st line of 'courses.txt' has 'Computer Science:5'
# And I see the 2nd line of 'courses.txt' has 'Nutrition:4'
"""
INLINE_COMMENTS = '''
Scenario: Making a sword
Given I am using an anvil
And I am using a hammer # informational "comment"
'''
INLINE_COMMENTS_IGNORED_WITHIN_DOUBLE_QUOTES = '''
Scenario: Tweeting
Given I am logged in on twitter
When I search for the hashtag "#hammer"
'''
INLINE_COMMENTS_IGNORED_WITHIN_SINGLE_QUOTES = """
Scenario: Tweeting
Given I am logged in on twitter
When I search for the hashtag '#hammer'
"""
def test_scenario_has_name():
"It should extract the name of the scenario"
scenario = Scenario.from_string(SCENARIO1)
assert isinstance(scenario, Scenario)
assert_equals(
scenario.name,
"Adding some students to my university database"
)
def test_scenario_has_repr():
"Scenario implements __repr__ nicely"
scenario = Scenario.from_string(SCENARIO1)
assert_equals(
repr(scenario),
'<Scenario: "Adding some students to my university database">'
)
def test_scenario_has_steps():
"A scenario object should have a list of steps"
scenario = Scenario.from_string(SCENARIO1)
assert_equals(type(scenario.steps), list)
assert_equals(len(scenario.steps), 4, "It should have 4 steps")
expected_sentences = [
"Given I have the following courses in my university:",
"When I consolidate the database into 'courses.txt'",
"Then I see the 1st line of 'courses.txt' has 'Computer Science:5'",
"And I see the 2nd line of 'courses.txt' has 'Nutrition:4'",
]
for step, expected_sentence in zip(scenario.steps, expected_sentences):
assert_equals(type(step), Step)
assert_equals(step.sentence, expected_sentence)
assert_equals(scenario.steps[0].keys, ('Name', 'Duration'))
assert_equals(
scenario.steps[0].hashes,
[
{'Name': 'Computer Science', 'Duration': '5 years'},
{'Name': 'Nutrition', 'Duration': '4 years'},
]
)
def test_scenario_may_own_outlines():
"A scenario may own outlines"
scenario = Scenario.from_string(OUTLINED_SCENARIO)
assert_equals(len(scenario.steps), 4)
expected_sentences = [
'Given I have entered <input_1> into the calculator',
'And I have entered <input_2> into the calculator',
'When I press <button>',
'Then the result should be <output> on the screen',
]
for step, expected_sentence in zip(scenario.steps, expected_sentences):
assert_equals(type(step), Step)
assert_equals(step.sentence, expected_sentence)
assert_equals(scenario.name, "Add two numbers")
assert_equals(
scenario.outlines,
[
{'input_1': '20', 'input_2': '30', 'button': 'add', 'output': '50'},
{'input_1': '2', 'input_2': '5', 'button': 'add', 'output': '7'},
{'input_1': '0', 'input_2': '40', 'button': 'add', 'output': '40'},
]
)
def test_steps_parsed_by_scenarios_has_scenarios():
"Steps parsed by scenarios has scenarios"
scenario = Scenario.from_string(SCENARIO1)
for step in scenario.steps:
assert_equals(step.scenario, scenario)
def test_scenario_sentences_can_be_solved():
"A scenario with outlines may solve its sentences"
scenario = Scenario.from_string(OUTLINED_SCENARIO)
assert_equals(len(scenario.solved_steps), 12)
expected_sentences = [
'Given I have entered 20 into the calculator',
'And I have entered 30 into the calculator',
'When I press add',
'Then the result should be 50 on the screen',
'Given I have entered 2 into the calculator',
'And I have entered 5 into the calculator',
'When I press add',
'Then the result should be 7 on the screen',
'Given I have entered 0 into the calculator',
'And I have entered 40 into the calculator',
'When I press add',
'Then the result should be 40 on the screen',
]
for step, expected_sentence in zip(scenario.solved_steps, expected_sentences):
assert_equals(type(step), Step)
assert_equals(step.sentence, expected_sentence)
def test_scenario_tables_are_solved_against_outlines():
"Outline substitution should apply to tables within a scenario"
expected_hashes_per_step = [
# a = 1, b = 2
[{'Parameter': 'a', 'Value': '1'}, {'Parameter': 'b', 'Value': '2'}], # Given ...
[], # When I run the program
[], # Then I crash hard-core
# a = 2, b = 4
[{'Parameter': 'a', 'Value': '2'}, {'Parameter': 'b', 'Value': '4'}],
[],
[]
]
scenario = Scenario.from_string(OUTLINED_SCENARIO_WITH_SUBSTITUTIONS_IN_TABLE)
for step, expected_hashes in zip(scenario.solved_steps, expected_hashes_per_step):
assert_equals(type(step), Step)
assert_equals(step.hashes, expected_hashes)
def test_scenario_tables_are_solved_against_outlines():
"Outline substitution should apply to multiline strings within a scenario"
expected_multiline = '<div>outline value</div>'
scenario = Scenario.from_string(OUTLINED_SCENARIO_WITH_SUBSTITUTIONS_IN_MULTILINE)
step = scenario.solved_steps[0]
assert_equals(type(step), Step)
assert_equals(step.multiline, expected_multiline)
def test_solved_steps_also_have_scenario_as_attribute():
"Steps solved in scenario outlines also have scenario as attribute"
scenario = Scenario.from_string(OUTLINED_SCENARIO)
for step in scenario.solved_steps:
assert_equals(step.scenario, scenario)
def test_scenario_outlines_within_feature():
"Solving scenario outlines within a feature"
feature = Feature.from_string(OUTLINED_FEATURE)
scenario = feature.scenarios[0]
assert_equals(len(scenario.solved_steps), 12)
expected_sentences = [
'Given I have entered 20 into the calculator',
'And I have entered 30 into the calculator',
'When I press add',
'Then the result should be 50 on the screen',
'Given I have entered 2 into the calculator',
'And I have entered 5 into the calculator',
'When I press add',
'Then the result should be 7 on the screen',
'Given I have entered 0 into the calculator',
'And I have entered 40 into the calculator',
'When I press add',
'Then the result should be 40 on the screen',
]
for step, expected_sentence in zip(scenario.solved_steps, expected_sentences):
assert_equals(type(step), Step)
assert_equals(step.sentence, expected_sentence)
def test_full_featured_feature():
"Solving scenarios within a full-featured feature"
feature = Feature.from_string(OUTLINED_FEATURE_WITH_MANY)
scenario1, scenario2, scenario3, scenario4 = feature.scenarios
assert_equals(scenario1.name, 'Do something')
assert_equals(scenario2.name, 'Do something else')
assert_equals(scenario3.name, 'Worked!')
assert_equals(scenario4.name, 'Add two numbers wisely')
assert_equals(len(scenario1.solved_steps), 2)
expected_sentences = [
'Given I have entered ok into the fail',
'Given I have entered fail into the ok',
]
for step, expected_sentence in zip(scenario1.solved_steps, expected_sentences):
assert_equals(step.sentence, expected_sentence)
expected_evaluated = (
(
{'button': 'add', 'input_1': '20', 'input_2': '30', 'output': '50'}, [
'Given I have entered 20 into the calculator',
'And I have entered 30 into the calculator',
'When I press add',
'Then the result should be 50 on the screen',
]
),
(
{'button': 'add', 'input_1': '2', 'input_2': '5', 'output': '7'}, [
'Given I have entered 2 into the calculator',
'And I have entered 5 into the calculator',
'When I press add',
'Then the result should be 7 on the screen',
]
),
(
{'button': 'add', 'input_1': '0', 'input_2': '40', 'output': '40'}, [
'Given I have entered 0 into the calculator',
'And I have entered 40 into the calculator',
'When I press add',
'Then the result should be 40 on the screen',
],
),
(
{'button': 'add', 'input_1': '5', 'input_2': '7', 'output': '12'}, [
'Given I have entered 5 into the calculator',
'And I have entered 7 into the calculator',
'When I press add',
'Then the result should be 12 on the screen',
],
)
)
for ((got_examples, got_steps), (expected_examples, expected_steps)) in zip(scenario4.evaluated, expected_evaluated):
sentences_of = lambda x: x.sentence
assert_equals(got_examples, expected_examples)
assert_equals(map(sentences_of, got_steps), expected_steps)
def test_scenario_with_table_and_no_step_fails():
"A step table imediately after the scenario line, without step line fails"
assert_raises(LettuceSyntaxError, Scenario.from_string, SCENARIO_FAILED)
def test_scenario_ignore_commented_lines_from_examples():
"Comments on scenario example should be ignored"
scenario = Scenario.from_string(OUTLINED_SCENARIO_WITH_COMMENTS_ON_EXAMPLES)
assert_equals(
scenario.outlines,
[
{'input_1': '20', 'input_2': '30', 'button': 'add', 'output': '50'},
{'input_1': '0', 'input_2': '40', 'button': 'add', 'output': '40'},
]
)
def test_scenario_aggregate_all_examples_blocks():
"All scenario's examples block should be translated to outlines"
scenario = Scenario.from_string(OUTLINED_SCENARIO_WITH_MORE_THAN_ONE_EXAMPLES_BLOCK)
assert_equals(
scenario.outlines,
[
{'input_1': '20', 'input_2': '30', 'button': 'add', 'output': '50'},
{'input_1': '2', 'input_2': '5', 'button': 'add', 'output': '7'},
{'input_1': '0', 'input_2': '40', 'button': 'add', 'output': '40'},
{'input_1': '20', 'input_2': '33', 'button': 'add', 'output': '53'},
{'input_1': '12', 'input_2': '40', 'button': 'add', 'output': '52'},
]
)
def test_commented_scenarios():
"A scenario string that contains lines starting with '#' will be commented"
scenario = Scenario.from_string(COMMENTED_SCENARIO)
assert_equals(scenario.name, u'Adding some students to my university database')
assert_equals(len(scenario.steps), 4)
def test_scenario_matches_tags():
("A scenario with tags should respond with True when "
".matches_tags() is called with a valid list of tags")
scenario = Scenario.from_string(
SCENARIO1,
original_string=SCENARIO1.strip(),
tags=['onetag', 'another-one'])
expect(scenario.tags).to.equal(['onetag', 'another-one'])
assert scenario.matches_tags(['onetag'])
assert scenario.matches_tags(['another-one'])
def test_scenario_matches_tags_fuzzywuzzy():
("When Scenario#matches_tags is called with a member starting with ~ "
"it will consider a fuzzywuzzy match")
scenario = Scenario.from_string(
SCENARIO1,
original_string=SCENARIO1.strip(),
tags=['anothertag', 'another-tag'])
assert scenario.matches_tags(['~another'])
def test_scenario_matches_tags_excluding():
("When Scenario#matches_tags is called with a member starting with - "
"it will exclude that tag from the matching")
scenario = Scenario.from_string(
SCENARIO1,
original_string=SCENARIO1.strip(),
tags=['anothertag', 'another-tag'])
assert not scenario.matches_tags(['-anothertag'])
assert scenario.matches_tags(['-foobar'])
def test_scenario_matches_tags_excluding_when_scenario_has_no_tags():
("When Scenario#matches_tags is called for a scenario "
"that has no tags and the given match is a exclusionary tag")
scenario = Scenario.from_string(
SCENARIO1,
original_string=(SCENARIO1.strip()))
assert scenario.matches_tags(['-nope', '-neither'])
def test_scenario_matches_tags_excluding_fuzzywuzzy():
("When Scenario#matches_tags is called with a member starting with -~ "
"it will exclude that tag from that fuzzywuzzy match")
scenario = Scenario.from_string(
SCENARIO1,
original_string=('@anothertag\n@another-tag\n' + SCENARIO1.strip()))
assert not scenario.matches_tags(['-~anothertag'])
def test_scenario_show_tags_in_its_representation():
("Scenario#represented should show its tags")
scenario = Scenario.from_string(
SCENARIO1,
original_string=SCENARIO1.strip(),
tags=['slow', 'firefox', 'chrome'])
expect(scenario.represented()).to.equal(
u' @slow @firefox @chrome\n '
'Scenario: Adding some students to my university database')
def test_scenario_with_inline_comments():
("Scenarios can have steps with inline comments")
scenario = Scenario.from_string(INLINE_COMMENTS)
step1, step2 = scenario.steps
expect(step1.sentence).to.equal(u'Given I am using an anvil')
expect(step2.sentence).to.equal(u'And I am using a hammer')
def test_scenario_with_hash_within_double_quotes():
("Scenarios have hashes within double quotes and yet don't "
"consider them as comments")
scenario = Scenario.from_string(
INLINE_COMMENTS_IGNORED_WITHIN_DOUBLE_QUOTES)
step1, step2 = scenario.steps
expect(step1.sentence).to.equal(u'Given I am logged in on twitter')
expect(step2.sentence).to.equal(u'When I search for the hashtag "#hammer"')
def test_scenario_with_hash_within_single_quotes():
("Scenarios have hashes within single quotes and yet don't "
"consider them as comments")
scenario = Scenario.from_string(
INLINE_COMMENTS_IGNORED_WITHIN_SINGLE_QUOTES)
step1, step2 = scenario.steps
expect(step1.sentence).to.equal(u'Given I am logged in on twitter')
expect(step2.sentence).to.equal(u"When I search for the hashtag '#hammer'")
| 20,897 | Python | .py | 475 | 37.176842 | 121 | 0.631944 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,697 | test_main.py | gabrielfalcao_lettuce/tests/unit/test_main.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import lettuce
import lettuce.fs
from lettuce.exceptions import LettuceRunnerError
from nose.tools import assert_equals
from mox import Mox
def test_has_version():
"A nice python module is supposed to have a version"
assert_equals(lettuce.version, '0.2.23')
def test_has_release():
"A nice python module is supposed to have a release name"
assert_equals(lettuce.release, 'kryptonite')
def test_import():
"lettuce importer does import"
import os
module = lettuce.fs.FileSystem._import('os')
assert_equals(os, module)
def test_terrain_import_exception():
"lettuce error tries to import "
string = 'Lettuce has tried to load the conventional environment ' \
'module "terrain"\nbut it has errors, check its contents and ' \
'try to run lettuce again.\n\nOriginal traceback below:\n\n'
mox = Mox()
mox.StubOutWithMock(lettuce.fs, 'FileSystem')
mox.StubOutWithMock(lettuce.exceptions, 'traceback')
mox.StubOutWithMock(lettuce.sys, 'stderr')
exc = Exception('foo bar')
lettuce.fs.FileSystem._import('terrain').AndRaise(exc)
lettuce.exceptions.traceback.format_exc(exc). \
AndReturn('I AM THE TRACEBACK FOR IMPORT ERROR')
lettuce.sys.stderr.write(string)
lettuce.sys.stderr.write('I AM THE TRACEBACK FOR IMPORT ERROR')
mox.ReplayAll()
try:
reload(lettuce)
except LettuceRunnerError:
mox.VerifyAll()
finally:
mox.UnsetStubs()
| 2,257 | Python | .py | 54 | 37.907407 | 72 | 0.737083 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,698 | test_language_fr.py | gabrielfalcao_lettuce/tests/unit/test_language_fr.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import assert_equals
from lettuce.core import Language, Scenario, Feature
SCENARIO = u"""
Scénario: Ajout de plusieurs cursus dans la base de mon université
Soit Une liste de cursus disponibles dans mon université
| Nom | Durée |
| Science de l'Informatique | 5 ans |
| Nutrition | 4 ans |
Quand je consolide la base dans 'cursus.txt'
Alors je vois que la 1er ligne de 'cursus.txt' contient 'Science de l'Informatique:5
Et je vois que la 2em ligne de 'cursus.txt' contient 'Nutrition:4'
"""
OUTLINED_SCENARIO = u"""
Plan de Scénario: Ajouter 2 nombres
Soit <input_1> entré dans la calculatrice
Et <input_2> entré dans la calculatrice
Quand je presse <bouton>
Alors je doit avoir <output> à l'écran
Exemples:
| input_1 | input_2 | bouton | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
"""
OUTLINED_SCENARIO2 = u"""
Plan de Scénario: Ajouter 2 nombres
Soit <input_1> entré dans la calculatrice
Et <input_2> entré dans la calculatrice
Quand je presse <bouton>
Alors je doit avoir <output> à l'écran
Scénarios:
| input_1 | input_2 | bouton | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
"""
OUTLINED_SCENARIO3 = u"""
Plan du Scénario: Ajouter 2 nombres
Soit <input_1> entré dans la calculatrice
Et <input_2> entré dans la calculatrice
Quand je presse <bouton>
Alors je doit avoir <output> à l'écran
Scénarios:
| input_1 | input_2 | bouton | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
"""
OUTLINED_FEATURE = u"""
Fonctionnalité: Faire plusieur choses en même temps
De façon à automatiser les tests
En tant que fainéant
J'utilise les plans de scénario
Plan de Scénario: Ajouter 2 nombres
Soit <input_1> entré dans la calculatrice
Et <input_2> entré dans la calculatrice
Quand je presse <bouton>
Alors je doit avoir <output> à l'écran
Exemples:
| input_1 | input_2 | bouton | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
"""
OUTLINED_FEATURE2 = u"""
Fonction: Faire plusieur choses en même temps
De façon à automatiser les tests
En tant que fainéant
J'utilise les plans de scénario
Plan de Scénario: Ajouter 2 nombres
Soit <input_1> entré dans la calculatrice
Et <input_2> entré dans la calculatrice
Quand je presse <bouton>
Alors je doit avoir <output> à l'écran
Exemples:
| input_1 | input_2 | bouton | output |
| 20 | 30 | add | 50 |
| 2 | 5 | add | 7 |
| 0 | 40 | add | 40 |
"""
def test_language_french():
'Language: FR -> Language class supports french through code "fr"'
lang = Language('fr')
assert_equals(lang.code, u'fr')
assert_equals(lang.name, u'French')
assert_equals(lang.native, u'Français')
assert_equals(lang.feature, u'Fonctionnalité|Fonction')
assert_equals(lang.scenario, u'Scénario')
assert_equals(lang.examples, u'Exemples|Scénarios')
assert_equals(lang.scenario_outline, u'Plan de Scénario|Plan du Scénario')
assert_equals(lang.scenario_separator, u'(Plan de Scénario|Plan du Scénario|Scénario)')
def test_scenario_fr_from_string():
'Language: FR -> Scenario.from_string'
lang = Language('fr')
scenario = Scenario.from_string(SCENARIO, language=lang)
assert_equals(
scenario.name,
u'Ajout de plusieurs cursus dans la base de mon université'
)
assert_equals(
scenario.steps[0].hashes,
[
{'Nom': u"Science de l'Informatique", u'Durée': '5 ans'},
{'Nom': u'Nutrition', u'Durée': '4 ans'},
]
)
def test_scenario_outline1_fr_from_string():
'Language: FR -> Scenario.from_string, with scenario outline, first case'
lang = Language('fr')
scenario = Scenario.from_string(OUTLINED_SCENARIO, language=lang)
assert_equals(
scenario.name,
'Ajouter 2 nombres'
)
assert_equals(
scenario.outlines,
[
{u'input_1':u'20',u'input_2':u'30',u'bouton':u'add',u'output':u'50'},
{u'input_1':u'2',u'input_2':u'5',u'bouton':u'add',u'output':u'7'},
{u'input_1':u'0',u'input_2':u'40',u'bouton':u'add',u'output':u'40'},
]
)
def test_scenario_outline2_fr_from_string():
'Language: FR -> Scenario.from_string, with scenario outline, second case'
lang = Language('fr')
scenario = Scenario.from_string(OUTLINED_SCENARIO2, language=lang)
assert_equals(
scenario.name,
'Ajouter 2 nombres'
)
assert_equals(
scenario.outlines,
[
{u'input_1':u'20',u'input_2':u'30',u'bouton':u'add',u'output':u'50'},
{u'input_1':u'2',u'input_2':u'5',u'bouton':u'add',u'output':u'7'},
{u'input_1':u'0',u'input_2':u'40',u'bouton':u'add',u'output':u'40'},
]
)
def test_scenario_outline3_fr_from_string():
'Language: FR -> Scenario.from_string, with scenario outline, third case'
lang = Language('fr')
scenario = Scenario.from_string(OUTLINED_SCENARIO2, language=lang)
assert_equals(
scenario.name,
'Ajouter 2 nombres'
)
assert_equals(
scenario.outlines,
[
{u'input_1':u'20',u'input_2':u'30',u'bouton':u'add',u'output':u'50'},
{u'input_1':u'2',u'input_2':u'5',u'bouton':u'add',u'output':u'7'},
{u'input_1':u'0',u'input_2':u'40',u'bouton':u'add',u'output':u'40'},
]
)
def test_feature_fr_from_string():
'Language: FR -> Feature.from_string'
lang = Language('fr')
feature = Feature.from_string(OUTLINED_FEATURE, language=lang)
assert_equals(
feature.name,
u'Faire plusieur choses en même temps'
)
assert_equals(
feature.description,
u"De façon à automatiser les tests\n"
u"En tant que fainéant\n"
u"J'utilise les plans de scénario"
)
(scenario, ) = feature.scenarios
assert_equals(
scenario.name,
'Ajouter 2 nombres'
)
assert_equals(
scenario.outlines,
[
{u'input_1':u'20',u'input_2':u'30',u'bouton':u'add',u'output':u'50'},
{u'input_1':u'2',u'input_2':u'5',u'bouton':u'add',u'output':u'7'},
{u'input_1':u'0',u'input_2':u'40',u'bouton':u'add',u'output':u'40'},
]
)
def test_feature_fr_from_string2():
'Language: FR -> Feature.from_string, alternate name'
lang = Language('fr')
feature = Feature.from_string(OUTLINED_FEATURE2, language=lang)
assert_equals(
feature.name,
u'Faire plusieur choses en même temps'
)
assert_equals(
feature.description,
u"De façon à automatiser les tests\n"
u"En tant que fainéant\n"
u"J'utilise les plans de scénario"
)
(scenario, ) = feature.scenarios
assert_equals(
scenario.name,
'Ajouter 2 nombres'
)
assert_equals(
scenario.outlines,
[
{u'input_1':u'20',u'input_2':u'30',u'bouton':u'add',u'output':u'50'},
{u'input_1':u'2',u'input_2':u'5',u'bouton':u'add',u'output':u'7'},
{u'input_1':u'0',u'input_2':u'40',u'bouton':u'add',u'output':u'40'},
]
)
| 8,600 | Python | .py | 224 | 31.919643 | 91 | 0.602319 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
1,699 | test_terrain.py | gabrielfalcao_lettuce/tests/unit/test_terrain.py | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falc√£o <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from mox import Mox
from nose.tools import assert_equals
from lettuce import step
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.terrain import world
from lettuce.core import Feature, TotalResult
from lettuce.registry import CALLBACK_REGISTRY
OUTLINE_FEATURE = '''
Feature: Outline hooks
Scenario Outline: Outlines
Given step1 of <outline>
And step2 of <outline>
Examples:
| outline |
| first_outline |
| second_outline |
'''
FEATURE1 = '''
Feature: Before and After callbacks all along lettuce
Scenario: Before and After steps
Given I append "during" to states
'''
FEATURE2 = '''
Feature: Before and After callbacks all along lettuce
Scenario: Before and After scenarios
Given I append "during" to states
Scenario: Again
Given I append "during" to states
'''
FEATURE3 = '''
Feature: Before and After callbacks all along lettuce
@tag1
Scenario: Before and After scenarios
Given I append "during" to states
@tag2
Scenario: Again
Given I append "during" to states
'''
def test_world():
"lettuce.terrain.world can be monkey patched at will"
def set_world():
world.was_set = True
def test_does_not_have():
from lettuce.terrain import world
assert not hasattr(world, 'was_set')
def test_does_have():
from lettuce.terrain import world
assert hasattr(world, 'was_set')
test_does_not_have()
set_world()
test_does_have()
def test_outline_hooks_if_test_failed():
"before.each_outline and after.each_outline decorators works in correct order even if test fails in fastfail"
@step("step1 of first_outline")
def step_def(step):
raise Exception("Failed")
world.scenario_names = set()
world.hooks_for_failing_test = []
@before.each_scenario
def before_scenario(scenario):
world.hooks_for_failing_test.append("before scenario {0}".format(scenario.name))
@after.each_scenario
def after_scenario(scenario):
world.hooks_for_failing_test.append("after scenario {0}".format(scenario.name))
@before.each_outline
def before_outline(scenario, outline):
world.scenario_names.add(scenario.name)
world.hooks_for_failing_test.append("before {0}".format(outline["outline"]))
@after.each_outline
def after_outline(scenario, outline):
world.scenario_names.add(scenario.name)
world.hooks_for_failing_test.append("after {0}".format(outline["outline"]))
feature = Feature.from_string(OUTLINE_FEATURE)
try:
feature.run(failfast=True)
except Exception:
pass
assert_equals(world.hooks_for_failing_test,
['before scenario Outlines',
'before first_outline',
'after first_outline',
'after scenario Outlines'])
def test_outline_hooks():
"terrain.before.each_outline and terrain.after.each_outline decorators"
world.scenario_names = set()
world.hooks = []
@before.each_scenario
def before_scenario(scenario):
world.hooks.append("before scenario {0}".format(scenario.name))
@after.each_scenario
def after_scenario(scenario):
world.hooks.append("after scenario {0}".format(scenario.name))
@before.each_outline
def before_outline(scenario, outline):
world.scenario_names.add(scenario.name)
world.hooks.append("before {0}".format(outline["outline"]))
@after.each_outline
def after_outline(scenario, outline):
world.scenario_names.add(scenario.name)
world.hooks.append("after {0}".format(outline["outline"]))
feature = Feature.from_string(OUTLINE_FEATURE)
feature.run()
assert_equals(world.hooks,
['before scenario Outlines',
'before first_outline',
'after first_outline',
'before second_outline',
'after second_outline',
'after scenario Outlines'])
def test_after_each_step_is_executed_before_each_step():
"terrain.before.each_step and terrain.after.each_step decorators"
world.step_states = []
@before.each_step
def set_state_to_before(step):
world.step_states.append('before')
expected = 'Given I append "during" to states'
if step.sentence != expected:
raise TypeError('%r != %r' % (step.sentence, expected))
@step('append "during" to states')
def append_during_to_step_states(step):
world.step_states.append("during")
@after.each_step
def set_state_to_after(step):
world.step_states.append('after')
expected = 'Given I append "during" to states'
if step.sentence != expected:
raise TypeError('%r != %r' % (step.sentence, expected))
feature = Feature.from_string(FEATURE1)
feature.run()
assert_equals(world.step_states, ['before', 'during', 'after'])
def test_after_each_scenario_is_executed_before_each_scenario():
"terrain.before.each_scenario and terrain.after.each_scenario decorators"
world.scenario_steps = []
@before.each_scenario
def set_state_to_before(scenario):
world.scenario_steps.append('before')
@step('append "during" to states')
def append_during_to_scenario_steps(step):
world.scenario_steps.append("during")
@after.each_scenario
def set_state_to_after(scenario):
world.scenario_steps.append('after')
feature = Feature.from_string(FEATURE2)
feature.run()
assert_equals(
world.scenario_steps,
['before', 'during', 'after', 'before', 'during', 'after'],
)
def test_after_each_feature_is_executed_before_each_feature():
"terrain.before.each_feature and terrain.after.each_feature decorators"
world.feature_steps = []
@before.each_feature
def set_state_to_before(feature):
world.feature_steps.append('before')
@step('append "during" to states')
def append_during_to_feature_steps(step):
world.feature_steps.append("during")
@after.each_feature
def set_state_to_after(feature):
world.feature_steps.append('after')
feature = Feature.from_string(FEATURE2)
feature.run()
assert_equals(
world.feature_steps,
['before', 'during', 'during', 'after'],
)
def test_feature_hooks_not_invoked_if_no_scenarios_run():
feature = Feature.from_string(FEATURE3)
world.feature_steps = []
feature.run(tags=['tag1'])
assert_equals(
world.feature_steps,
['before', 'during', 'after']
)
world.feature_steps = []
feature.run(tags=['tag3'])
assert_equals(
world.feature_steps,
[]
)
def test_after_each_all_is_executed_before_each_all():
"terrain.before.each_all and terrain.after.each_all decorators"
import lettuce
from lettuce.fs import FeatureLoader
world.all_steps = []
mox = Mox()
loader_mock = mox.CreateMock(FeatureLoader)
mox.StubOutWithMock(lettuce.sys, 'path')
mox.StubOutWithMock(lettuce, 'fs')
mox.StubOutWithMock(lettuce.fs, 'FileSystem')
mox.StubOutWithMock(lettuce, 'Feature')
lettuce.fs.FeatureLoader('some_basepath', None).AndReturn(loader_mock)
lettuce.sys.path.insert(0, 'some_basepath')
lettuce.sys.path.remove('some_basepath')
loader_mock.find_feature_files().AndReturn(['some_basepath/foo.feature'])
loader_mock.find_and_load_step_definitions()
lettuce.Feature.from_file('some_basepath/foo.feature'). \
AndReturn(Feature.from_string(FEATURE2))
mox.ReplayAll()
runner = lettuce.Runner('some_basepath')
CALLBACK_REGISTRY.clear()
@before.all
def set_state_to_before():
world.all_steps.append('before')
@step('append "during" to states')
def append_during_to_all_steps(step):
world.all_steps.append("during")
@after.all
def set_state_to_after(total):
world.all_steps.append('after')
isinstance(total, TotalResult)
runner.run()
mox.VerifyAll()
assert_equals(
world.all_steps,
['before', 'during', 'during', 'after'],
)
mox.UnsetStubs()
def test_world_should_be_able_to_absorb_functions():
u"world should be able to absorb functions"
assert not hasattr(world, 'function1')
@world.absorb
def function1():
return 'absorbed'
assert hasattr(world, 'function1')
assert callable(world.function1)
assert_equals(world.function1(), 'absorbed')
world.spew('function1')
assert not hasattr(world, 'function1')
def test_world_should_be_able_to_absorb_lambdas():
u"world should be able to absorb lambdas"
assert not hasattr(world, 'named_func')
world.absorb(lambda: 'absorbed', 'named_func')
assert hasattr(world, 'named_func')
assert callable(world.named_func)
assert_equals(world.named_func(), 'absorbed')
world.spew('named_func')
assert not hasattr(world, 'named_func')
def test_world_should_be_able_to_absorb_classs():
u"world should be able to absorb class"
assert not hasattr(world, 'MyClass')
if sys.version_info < (2, 6):
return
class MyClass:
pass
world.absorb(MyClass)
assert hasattr(world, 'MyClass')
assert_equals(world.MyClass, MyClass)
assert isinstance(world.MyClass(), MyClass)
world.spew('MyClass')
assert not hasattr(world, 'MyClass')
def test_hooks_should_be_still_manually_callable():
"terrain hooks should be still manually callable"
@before.all
def before_all():
pass
@before.harvest
def before_harvest():
pass
@before.each_app
def before_each_app():
pass
@before.each_step
def before_each_step():
pass
@before.each_scenario
def before_each_scenario():
pass
@before.each_feature
def before_each_feature():
pass
@before.handle_request
def before_handle_request():
pass
@before.outline
def before_outline():
pass
@after.all
def after_all():
pass
@after.harvest
def after_harvest():
pass
@after.each_app
def after_each_app():
pass
@after.each_step
def after_each_step():
pass
@after.each_scenario
def after_each_scenario():
pass
@after.each_feature
def after_each_feature():
pass
@after.handle_request
def after_handle_request():
pass
@after.outline
def after_outline():
pass
assert callable(before_all), \
'@before.all decorator should return the original function'
assert callable(before_handle_request), \
'@before.handle_request decorator should return the original function'
assert callable(before_harvest), \
'@before.harvest decorator should return the original function'
assert callable(before_each_feature), \
'@before.each_feature decorator should return the original function'
assert callable(before_outline), \
'@before.outline decorator should return the original function'
assert callable(before_each_scenario), \
'@before.each_scenario decorator should return the original function'
assert callable(before_each_step), \
'@before.each_step decorator should return the original function'
assert callable(after_all), \
'@after.all decorator should return the original function'
assert callable(after_handle_request), \
'@after.handle_request decorator should return the original function'
assert callable(after_harvest), \
'@after.harvest decorator should return the original function'
assert callable(after_each_feature), \
'@after.each_feature decorator should return the original function'
assert callable(after_outline), \
'@after.outline decorator should return the original function'
assert callable(after_each_scenario), \
'@after.each_scenario decorator should return the original function'
assert callable(after_each_step), \
'@after.each_step decorator should return the original function'
| 13,048 | Python | .py | 347 | 31.325648 | 113 | 0.68411 | gabrielfalcao/lettuce | 1,274 | 325 | 102 | GPL-3.0 | 9/5/2024, 5:08:58 PM (Europe/Amsterdam) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.