response
stringlengths 1
33.1k
| instruction
stringlengths 22
582k
|
---|---|
Set and retrieve the value of Response.status
On retrieval, it concatenates status_int and title.
When set to a str, it splits status_int and title apart.
When set to an integer, retrieves the correct title for that
response code from the RESPONSE_REASONS dict. | def _resp_status_property():
"""
Set and retrieve the value of Response.status
On retrieval, it concatenates status_int and title.
When set to a str, it splits status_int and title apart.
When set to an integer, retrieves the correct title for that
response code from the RESPONSE_REASONS dict.
"""
def getter(self):
return '%s %s' % (self.status_int, self.title)
def setter(self, value):
if isinstance(value, six.integer_types):
self.status_int = value
self.explanation = self.title = RESPONSE_REASONS[value][0]
else:
self.status_int = int(value.split(' ', 1)[0])
self.explanation = self.title = value.split(' ', 1)[1]
return property(getter, setter,
doc="Retrieve and set the Response status, e.g. '200 OK'") |
Set and retrieve the value of Response.body
If necessary, it will consume Response.app_iter to create a body.
On assignment, encodes unicode values to utf-8, and sets the content-length
to the length of the str. | def _resp_body_property():
"""
Set and retrieve the value of Response.body
If necessary, it will consume Response.app_iter to create a body.
On assignment, encodes unicode values to utf-8, and sets the content-length
to the length of the str.
"""
def getter(self):
if not self._body:
if not self._app_iter:
return b''
with closing_if_possible(self._app_iter):
self._body = b''.join(self._app_iter)
self._app_iter = None
return self._body
def setter(self, value):
if isinstance(value, six.text_type):
raise TypeError('WSGI responses must be bytes')
if isinstance(value, six.binary_type):
self.content_length = len(value)
close_if_possible(self._app_iter)
self._app_iter = None
self._body = value
return property(getter, setter,
doc="Retrieve and set the Response body str") |
Set and retrieve Response.etag
This may be broken for etag use cases other than Swift's.
Quotes strings when assigned and unquotes when read, for compatibility
with webob. | def _resp_etag_property():
"""
Set and retrieve Response.etag
This may be broken for etag use cases other than Swift's.
Quotes strings when assigned and unquotes when read, for compatibility
with webob.
"""
def getter(self):
etag = self.headers.get('etag', None)
if etag:
etag = etag.replace('"', '')
return etag
def setter(self, value):
if value is None:
self.headers['etag'] = None
else:
self.headers['etag'] = '"%s"' % value
return property(getter, setter,
doc="Retrieve and set the response Etag header") |
Set and retrieve Response.content_type
Strips off any charset when retrieved -- that is accessible
via Response.charset. | def _resp_content_type_property():
"""
Set and retrieve Response.content_type
Strips off any charset when retrieved -- that is accessible
via Response.charset.
"""
def getter(self):
if 'content-type' in self.headers:
return self.headers.get('content-type').split(';')[0]
def setter(self, value):
self.headers['content-type'] = value
return property(getter, setter,
doc="Retrieve and set the response Content-Type header") |
Set and retrieve Response.charset
On retrieval, separates the charset from the content-type.
On assignment, removes any existing charset from the content-type and
appends the new one. | def _resp_charset_property():
"""
Set and retrieve Response.charset
On retrieval, separates the charset from the content-type.
On assignment, removes any existing charset from the content-type and
appends the new one.
"""
def getter(self):
if '; charset=' in self.headers['content-type']:
return self.headers['content-type'].split('; charset=')[1]
def setter(self, value):
if 'content-type' in self.headers:
self.headers['content-type'] = self.headers['content-type'].split(
';')[0]
if value:
self.headers['content-type'] += '; charset=' + value
return property(getter, setter,
doc="Retrieve and set the response charset") |
Set and retrieve Response.app_iter
Mostly a pass-through to Response._app_iter; it's a property so it can zero
out an existing content-length on assignment. | def _resp_app_iter_property():
"""
Set and retrieve Response.app_iter
Mostly a pass-through to Response._app_iter; it's a property so it can zero
out an existing content-length on assignment.
"""
def getter(self):
return self._app_iter
def setter(self, value):
if isinstance(value, (list, tuple)):
for i, item in enumerate(value):
if not isinstance(item, bytes):
raise TypeError('WSGI responses must be bytes; '
'got %s for item %d' % (type(item), i))
self.content_length = sum(map(len, value))
elif value is not None:
self.content_length = None
self._body = None
close_if_possible(self._app_iter)
self._app_iter = value
return property(getter, setter,
doc="Retrieve and set the response app_iter") |
Set and retrieve "fancy" properties.
On retrieval, these properties return a class that takes the value of the
header as the only argument to their constructor.
For assignment, those classes should implement a __str__ that converts them
back to their header values.
:param header: name of the header, e.g. "Accept"
:param even_if_nonexistent: Return a value even if the header does not
exist. Classes using this should be prepared to accept None as a
parameter. | def _req_fancy_property(cls, header, even_if_nonexistent=False):
"""
Set and retrieve "fancy" properties.
On retrieval, these properties return a class that takes the value of the
header as the only argument to their constructor.
For assignment, those classes should implement a __str__ that converts them
back to their header values.
:param header: name of the header, e.g. "Accept"
:param even_if_nonexistent: Return a value even if the header does not
exist. Classes using this should be prepared to accept None as a
parameter.
"""
def getter(self):
try:
if header in self.headers or even_if_nonexistent:
return cls(self.headers.get(header))
except ValueError:
return None
def setter(self, value):
self.headers[header] = value
return property(getter, setter, doc=("Retrieve and set the %s "
"property in the WSGI environ, as a %s object") %
(header, cls.__name__)) |
Set and retrieve value of the environ_field entry in self.environ.
(Used by Request) | def _req_environ_property(environ_field, is_wsgi_string_field=True):
"""
Set and retrieve value of the environ_field entry in self.environ.
(Used by Request)
"""
def getter(self):
return self.environ.get(environ_field, None)
def setter(self, value):
if six.PY2:
if isinstance(value, six.text_type):
self.environ[environ_field] = value.encode('utf-8')
else:
self.environ[environ_field] = value
else:
if is_wsgi_string_field:
# Check that input is valid before setting
if isinstance(value, str):
value.encode('latin1').decode('utf-8')
if isinstance(value, bytes):
value = value.decode('latin1')
self.environ[environ_field] = value
return property(getter, setter, doc=("Get and set the %s property "
"in the WSGI environment") % environ_field) |
Set and retrieve the Request.body parameter. It consumes wsgi.input and
returns the results. On assignment, uses a WsgiBytesIO to create a new
wsgi.input. | def _req_body_property():
"""
Set and retrieve the Request.body parameter. It consumes wsgi.input and
returns the results. On assignment, uses a WsgiBytesIO to create a new
wsgi.input.
"""
def getter(self):
body = self.environ['wsgi.input'].read()
self.environ['wsgi.input'] = WsgiBytesIO(body)
return body
def setter(self, value):
if not isinstance(value, six.binary_type):
value = value.encode('utf8')
self.environ['wsgi.input'] = WsgiBytesIO(value)
self.environ['CONTENT_LENGTH'] = str(len(value))
return property(getter, setter, doc="Get and set the request body str") |
Retrieves the best guess that can be made for an absolute location up to
the path, for example: https://host.com:1234 | def _host_url_property():
"""
Retrieves the best guess that can be made for an absolute location up to
the path, for example: https://host.com:1234
"""
def getter(self):
if 'HTTP_HOST' in self.environ:
host = self.environ['HTTP_HOST']
else:
host = '%s:%s' % (self.environ['SERVER_NAME'],
self.environ['SERVER_PORT'])
scheme = self.environ.get('wsgi.url_scheme', 'http')
if scheme == 'http' and host.endswith(':80'):
host, port = host.rsplit(':', 1)
elif scheme == 'https' and host.endswith(':443'):
host, port = host.rsplit(':', 1)
return '%s://%s' % (scheme, host)
return property(getter, doc="Get url for request/response up to path") |
A decorator for translating functions which take a swob Request object and
return a Response object into WSGI callables. Also catches any raised
HTTPExceptions and treats them as a returned Response. | def wsgify(func):
"""
A decorator for translating functions which take a swob Request object and
return a Response object into WSGI callables. Also catches any raised
HTTPExceptions and treats them as a returned Response.
"""
@functools.wraps(func)
def _wsgify(*args):
env, start_response = args[-2:]
new_args = args[:-2] + (Request(env), )
try:
return func(*new_args)(env, start_response)
except HTTPException as err_resp:
return err_resp(env, start_response)
return _wsgify |
Wrap a function whos first argument is a paste.deploy style config uri,
such that you can pass it an un-adorned raw filesystem path (or config
string) and the config directive (either config:, config_dir:, or
config_str:) will be added automatically based on the type of entity
(either a file or directory, or if no such entity on the file system -
just a string) before passing it through to the paste.deploy function. | def wrap_conf_type(f):
"""
Wrap a function whos first argument is a paste.deploy style config uri,
such that you can pass it an un-adorned raw filesystem path (or config
string) and the config directive (either config:, config_dir:, or
config_str:) will be added automatically based on the type of entity
(either a file or directory, or if no such entity on the file system -
just a string) before passing it through to the paste.deploy function.
"""
def wrapper(conf_path, *args, **kwargs):
if os.path.isdir(conf_path):
conf_type = 'config_dir'
else:
conf_type = 'config'
conf_uri = '%s:%s' % (conf_type, conf_path)
return f(conf_uri, *args, **kwargs)
return wrapper |
Bind socket to bind ip:port in conf
:param conf: Configuration dict to read settings from
:returns: a socket object as returned from socket.listen or
ssl.wrap_socket if conf specifies cert_file | def get_socket(conf):
"""Bind socket to bind ip:port in conf
:param conf: Configuration dict to read settings from
:returns: a socket object as returned from socket.listen or
ssl.wrap_socket if conf specifies cert_file
"""
try:
bind_port = int(conf['bind_port'])
except (ValueError, KeyError, TypeError):
raise ConfigFilePortError()
bind_addr = (conf.get('bind_ip', '0.0.0.0'), bind_port)
address_family = [addr[0] for addr in socket.getaddrinfo(
bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
sock = None
bind_timeout = int(conf.get('bind_timeout', 30))
retry_until = time.time() + bind_timeout
warn_ssl = False
try:
keepidle = int(conf.get('keep_idle', 600))
if keepidle <= 0 or keepidle >= 2 ** 15 - 1:
raise ValueError()
except (ValueError, KeyError, TypeError):
raise ConfigFileError()
while not sock and time.time() < retry_until:
try:
sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
family=address_family)
if 'cert_file' in conf:
warn_ssl = True
sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
keyfile=conf['key_file'])
except socket.error as err:
if err.args[0] != errno.EADDRINUSE:
raise
sleep(0.1)
if not sock:
raise Exception('Could not bind to %(addr)s:%(port)s '
'after trying for %(timeout)s seconds' % {
'addr': bind_addr[0], 'port': bind_addr[1],
'timeout': bind_timeout})
# in my experience, sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepidle)
if warn_ssl:
ssl_warning_message = ('WARNING: SSL should only be enabled for '
'testing purposes. Use external SSL '
'termination for a production deployment.')
get_logger(conf).warning(ssl_warning_message)
print(ssl_warning_message)
return sock |
Loads a context from a config file, and if the context is a pipeline
then presents the app with the opportunity to modify the pipeline.
:param conf_file: path to a config file
:param global_conf: a dict of options to update the loaded config. Options
in ``global_conf`` will override those in ``conf_file`` except where
the ``conf_file`` option is preceded by ``set``.
:param allow_modify_pipeline: if True, and the context is a pipeline, and
the loaded app has a ``modify_wsgi_pipeline`` property, then that
property will be called before the pipeline is loaded.
:return: the loaded app | def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True):
"""
Loads a context from a config file, and if the context is a pipeline
then presents the app with the opportunity to modify the pipeline.
:param conf_file: path to a config file
:param global_conf: a dict of options to update the loaded config. Options
in ``global_conf`` will override those in ``conf_file`` except where
the ``conf_file`` option is preceded by ``set``.
:param allow_modify_pipeline: if True, and the context is a pipeline, and
the loaded app has a ``modify_wsgi_pipeline`` property, then that
property will be called before the pipeline is loaded.
:return: the loaded app
"""
global_conf = global_conf or {}
ctx = loadcontext(loadwsgi.APP, conf_file, global_conf=global_conf)
if ctx.object_type.name == 'pipeline':
# give app the opportunity to modify the pipeline context
ultimate_app = ctx.app_context.create()
func = getattr(ultimate_app, 'modify_wsgi_pipeline', None)
if func and allow_modify_pipeline:
func(PipelineWrapper(ctx))
filters = [c.create() for c in reversed(ctx.filter_contexts)]
pipeline = [ultimate_app]
request_logging_app = app = ultimate_app
for filter_app in filters:
app = filter_app(pipeline[0])
pipeline.insert(0, app)
if request_logging_app is ultimate_app and \
app.__class__.__name__ == 'ProxyLoggingMiddleware':
request_logging_app = filter_app(ultimate_app)
# Set some separate-pipeline attrs
request_logging_app._pipeline = [
request_logging_app, ultimate_app]
request_logging_app._pipeline_request_logging_app = \
request_logging_app
request_logging_app._pipeline_final_app = ultimate_app
for app in pipeline:
app._pipeline = pipeline
# For things like making (logged) backend requests for
# get_account_info and get_container_info
app._pipeline_request_logging_app = request_logging_app
# For getting proxy-server options like *_existence_skip_cache_pct
app._pipeline_final_app = ultimate_app
return pipeline[0]
return ctx.create() |
Read the app config section from a config file.
:param conf_file: path to a config file
:return: a dict | def load_app_config(conf_file):
"""
Read the app config section from a config file.
:param conf_file: path to a config file
:return: a dict
"""
app_conf = {}
try:
ctx = loadcontext(loadwsgi.APP, conf_file)
except LookupError:
pass
else:
app_conf.update(ctx.app_context.global_conf)
app_conf.update(ctx.app_context.local_conf)
return app_conf |
Runs the server according to some strategy. The default strategy runs a
specified number of workers in pre-fork model. The object-server (only)
may use a servers-per-port strategy if its config has a servers_per_port
setting with a value greater than zero.
:param conf_path: Path to paste.deploy style configuration file/directory
:param app_section: App name from conf file to load config from
:param allow_modify_pipeline: Boolean for whether the server should have
an opportunity to change its own pipeline.
Defaults to True
:param test_config: if False (the default) then load and validate the
config and if successful then continue to run the server; if True then
load and validate the config but do not run the server.
:returns: 0 if successful, nonzero otherwise | def run_wsgi(conf_path, app_section, *args, **kwargs):
"""
Runs the server according to some strategy. The default strategy runs a
specified number of workers in pre-fork model. The object-server (only)
may use a servers-per-port strategy if its config has a servers_per_port
setting with a value greater than zero.
:param conf_path: Path to paste.deploy style configuration file/directory
:param app_section: App name from conf file to load config from
:param allow_modify_pipeline: Boolean for whether the server should have
an opportunity to change its own pipeline.
Defaults to True
:param test_config: if False (the default) then load and validate the
config and if successful then continue to run the server; if True then
load and validate the config but do not run the server.
:returns: 0 if successful, nonzero otherwise
"""
try:
conf, logger, global_conf, strategy = check_config(
conf_path, app_section, *args, **kwargs)
except ConfigFileError as err:
print(err)
return 1
if kwargs.get('test_config'):
return 0
# Do some daemonization process hygene before we fork any children or run a
# server without forking.
clean_up_daemon_hygiene()
allow_modify_pipeline = kwargs.get('allow_modify_pipeline', True)
no_fork_sock = strategy.no_fork_sock()
if no_fork_sock:
run_server(conf, logger, no_fork_sock, global_conf=global_conf,
ready_callback=strategy.signal_ready,
allow_modify_pipeline=allow_modify_pipeline)
systemd_notify(logger, "STOPPING=1")
return 0
def stop_with_signal(signum, *args):
"""Set running flag to False and capture the signum"""
running_context[0] = False
running_context[1] = signum
# context to hold boolean running state and stop signum
running_context = [True, None]
signal.signal(signal.SIGTERM, stop_with_signal)
signal.signal(signal.SIGHUP, stop_with_signal)
signal.signal(signal.SIGUSR1, stop_with_signal)
while running_context[0]:
new_workers = {} # pid -> status pipe
for sock, sock_info in strategy.new_worker_socks():
read_fd, write_fd = os.pipe()
pid = os.fork()
if pid == 0:
os.close(read_fd)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
def shutdown_my_listen_sock(signum, *args):
greenio.shutdown_safe(sock)
signal.signal(signal.SIGHUP, shutdown_my_listen_sock)
signal.signal(signal.SIGUSR1, shutdown_my_listen_sock)
strategy.post_fork_hook()
def notify():
os.write(write_fd, b'ready')
os.close(write_fd)
run_server(conf, logger, sock, ready_callback=notify,
allow_modify_pipeline=allow_modify_pipeline)
strategy.log_sock_exit(sock, sock_info)
return 0
else:
os.close(write_fd)
new_workers[pid] = read_fd
strategy.register_worker_start(sock, sock_info, pid)
for pid, read_fd in new_workers.items():
worker_status = os.read(read_fd, 30)
os.close(read_fd)
if worker_status != b'ready':
raise Exception(
'worker %d did not start normally: %r' %
(pid, worker_status))
# TODO: signal_ready() as soon as we have at least one new worker for
# each port, instead of waiting for all of them
strategy.signal_ready()
# The strategy may need to pay attention to something in addition to
# child process exits (like new ports showing up in a ring).
#
# NOTE: a timeout value of None will just instantiate the Timeout
# object and not actually schedule it, which is equivalent to no
# timeout for the green_os.wait().
loop_timeout = strategy.loop_timeout()
with Timeout(loop_timeout, exception=False):
try:
try:
pid, status = green_os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
strategy.register_worker_exit(pid)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
if err.errno == errno.ECHILD:
# If there are no children at all (ECHILD), then
# there's nothing to actually wait on. We sleep
# for a little bit to avoid a tight CPU spin
# and still are able to catch any KeyboardInterrupt
# events that happen. The value of 0.01 matches the
# value in eventlet's waitpid().
sleep(0.01)
except KeyboardInterrupt:
logger.notice('User quit')
running_context[0] = False
break
if running_context[1] is not None:
try:
signame = SIGNUM_TO_NAME[running_context[1]]
except KeyError:
logger.error('Stopping with unexpected signal %r' %
running_context[1])
else:
logger.notice('%s received (%s)', signame, os.getpid())
if running_context[1] == signal.SIGTERM:
systemd_notify(logger, "STOPPING=1")
os.killpg(0, signal.SIGTERM)
elif running_context[1] == signal.SIGUSR1:
systemd_notify(logger, "RELOADING=1")
# set up a pipe, fork off a child to handle cleanup later,
# and rexec ourselves with an environment variable set which will
# indicate which fd (one of the pipe ends) to write a byte to
# to indicate listen socket setup is complete. That will signal
# the forked-off child to complete its listen socket shutdown.
#
# NOTE: all strategies will now require the parent process to retain
# superuser privileges so that the re'execd process can bind a new
# socket to the configured IP & port(s). We can't just reuse existing
# listen sockets because then the bind IP couldn't be changed.
#
# NOTE: we need to set all our listen sockets close-on-exec so the only
# open reference to those file descriptors will be in the forked-off
# child here who waits to shutdown the old server's listen sockets. If
# the re-exec'ed server's old listen sockets aren't closed-on-exec,
# then the old server can't actually ever exit.
strategy.set_close_on_exec_on_listen_sockets()
read_fd, write_fd = os.pipe()
orig_server_pid = os.getpid()
child_pid = os.fork()
if child_pid:
# parent; set env var for fds and reexec ourselves
os.close(read_fd)
os.putenv(NOTIFY_FD_ENV_KEY, str(write_fd))
myself = os.path.realpath(sys.argv[0])
logger.info("Old server PID=%d re'execing as: %r",
orig_server_pid, [myself] + list(sys.argv))
if hasattr(os, 'set_inheritable'):
# See https://www.python.org/dev/peps/pep-0446/
os.set_inheritable(write_fd, True)
os.execv(myself, sys.argv)
logger.error('Somehow lived past os.execv()?!')
exit('Somehow lived past os.execv()?!')
elif child_pid == 0:
# child
os.close(write_fd)
logger.info('Old server temporary child PID=%d waiting for '
"re-exec'ed PID=%d to signal readiness...",
os.getpid(), orig_server_pid)
try:
got_pid = os.read(read_fd, 30)
except Exception:
logger.warning('Unexpected exception while reading from '
'pipe:', exc_info=True)
else:
got_pid = got_pid.decode('ascii')
if got_pid:
logger.info('Old server temporary child PID=%d notified '
'to shutdown old listen sockets by PID=%s',
os.getpid(), got_pid)
else:
logger.warning('Old server temporary child PID=%d *NOT* '
'notified to shutdown old listen sockets; '
'the pipe just *died*.', os.getpid())
try:
os.close(read_fd)
except Exception:
pass
else:
# SIGHUP or, less likely, run in "once" mode
systemd_notify(logger, "STOPPING=1")
strategy.shutdown_sockets()
signal.signal(signal.SIGTERM, signal.SIG_IGN)
logger.notice('Exited (%s)', os.getpid())
return 0 |
Loads common settings from conf
Sets the logger
Loads the request processor
:param conf_path: Path to paste.deploy style configuration file/directory
:param app_section: App name from conf file to load config from
:returns: the loaded application entry point
:raises ConfigFileError: Exception is raised for config file error | def init_request_processor(conf_path, app_section, *args, **kwargs):
"""
Loads common settings from conf
Sets the logger
Loads the request processor
:param conf_path: Path to paste.deploy style configuration file/directory
:param app_section: App name from conf file to load config from
:returns: the loaded application entry point
:raises ConfigFileError: Exception is raised for config file error
"""
(conf, logger, log_name) = _initrp(conf_path, app_section, *args, **kwargs)
app = loadapp(conf_path, global_conf={'log_name': log_name})
return (app, conf, logger, log_name) |
Returns a new fresh WSGI environment.
:param env: The WSGI environment to base the new environment on.
:param method: The new REQUEST_METHOD or None to use the
original.
:param path: The new path_info or none to use the original. path
should NOT be quoted. When building a url, a Webob
Request (in accordance with wsgi spec) will quote
env['PATH_INFO']. url += quote(environ['PATH_INFO'])
:param query_string: The new query_string or none to use the original.
When building a url, a Webob Request will append
the query string directly to the url.
url += '?' + env['QUERY_STRING']
:param agent: The HTTP user agent to use; default 'Swift'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
'%(orig)s StaticWeb'. You also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:returns: Fresh WSGI environment. | def make_env(env, method=None, path=None, agent='Swift', query_string=None,
swift_source=None):
"""
Returns a new fresh WSGI environment.
:param env: The WSGI environment to base the new environment on.
:param method: The new REQUEST_METHOD or None to use the
original.
:param path: The new path_info or none to use the original. path
should NOT be quoted. When building a url, a Webob
Request (in accordance with wsgi spec) will quote
env['PATH_INFO']. url += quote(environ['PATH_INFO'])
:param query_string: The new query_string or none to use the original.
When building a url, a Webob Request will append
the query string directly to the url.
url += '?' + env['QUERY_STRING']
:param agent: The HTTP user agent to use; default 'Swift'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
'%(orig)s StaticWeb'. You also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:returns: Fresh WSGI environment.
"""
newenv = {}
for name in ('HTTP_USER_AGENT', 'HTTP_HOST', 'PATH_INFO',
'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD',
'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT',
'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD',
'SERVER_PROTOCOL', 'swift.cache', 'swift.source',
'swift.trans_id', 'swift.authorize_override',
'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID',
'HTTP_REFERER', 'swift.infocache',
'swift.shard_listing_history'):
if name in env:
newenv[name] = env[name]
if method:
newenv['REQUEST_METHOD'] = method
if path:
newenv['PATH_INFO'] = path
newenv['SCRIPT_NAME'] = ''
if query_string is not None:
newenv['QUERY_STRING'] = query_string
if agent:
newenv['HTTP_USER_AGENT'] = (
agent % {'orig': env.get('HTTP_USER_AGENT', '')}).strip()
elif agent == '' and 'HTTP_USER_AGENT' in newenv:
del newenv['HTTP_USER_AGENT']
if swift_source:
newenv['swift.source'] = swift_source
newenv['wsgi.input'] = BytesIO()
if 'SCRIPT_NAME' not in newenv:
newenv['SCRIPT_NAME'] = ''
return newenv |
Makes a new swob.Request based on the current env but with the
parameters specified.
:param env: The WSGI environment to base the new request on.
:param method: HTTP method of new request; default is from
the original env.
:param path: HTTP path of new request; default is from the
original env. path should be compatible with what you
would send to Request.blank. path should be quoted and it
can include a query string. for example:
'/a%20space?unicode_str%E8%AA%9E=y%20es'
:param body: HTTP body of new request; empty by default.
:param headers: Extra HTTP headers of new request; None by
default.
:param agent: The HTTP user agent to use; default 'Swift'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
'%(orig)s StaticWeb'. You also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:param make_env: make_subrequest calls this make_env to help build the
swob.Request.
:returns: Fresh swob.Request object. | def make_subrequest(env, method=None, path=None, body=None, headers=None,
agent='Swift', swift_source=None, make_env=make_env):
"""
Makes a new swob.Request based on the current env but with the
parameters specified.
:param env: The WSGI environment to base the new request on.
:param method: HTTP method of new request; default is from
the original env.
:param path: HTTP path of new request; default is from the
original env. path should be compatible with what you
would send to Request.blank. path should be quoted and it
can include a query string. for example:
'/a%20space?unicode_str%E8%AA%9E=y%20es'
:param body: HTTP body of new request; empty by default.
:param headers: Extra HTTP headers of new request; None by
default.
:param agent: The HTTP user agent to use; default 'Swift'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
'%(orig)s StaticWeb'. You also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:param make_env: make_subrequest calls this make_env to help build the
swob.Request.
:returns: Fresh swob.Request object.
"""
query_string = None
path = path or ''
if path and '?' in path:
path, query_string = path.split('?', 1)
newenv = make_env(env, method, path=wsgi_unquote(path), agent=agent,
query_string=query_string, swift_source=swift_source)
if not headers:
headers = {}
if body:
return Request.blank(path, environ=newenv, body=body, headers=headers)
else:
return Request.blank(path, environ=newenv, headers=headers) |
Same as :py:func:`make_env` but with preauthorization. | def make_pre_authed_env(env, method=None, path=None, agent='Swift',
query_string=None, swift_source=None):
"""Same as :py:func:`make_env` but with preauthorization."""
newenv = make_env(
env, method=method, path=path, agent=agent, query_string=query_string,
swift_source=swift_source)
newenv['swift.authorize'] = lambda req: None
newenv['swift.authorize_override'] = True
newenv['REMOTE_USER'] = '.wsgi.pre_authed'
return newenv |
Same as :py:func:`make_subrequest` but with preauthorization. | def make_pre_authed_request(env, method=None, path=None, body=None,
headers=None, agent='Swift', swift_source=None):
"""Same as :py:func:`make_subrequest` but with preauthorization."""
return make_subrequest(
env, method=method, path=path, body=body, headers=headers, agent=agent,
swift_source=swift_source, make_env=make_pre_authed_env) |
Returns a WSGI filter app for use with paste.deploy. | def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
register_swift_info('account_quotas')
def account_quota_filter(app):
return AccountQuotaMiddleware(app)
return account_quota_filter |
Returns a cleaned ACL header value, validating that it meets the formatting
requirements for standard Swift ACL strings.
The ACL format is::
[item[,item...]]
Each item can be a group name to give access to or a referrer designation
to grant or deny based on the HTTP Referer header.
The referrer designation format is::
.r:[-]value
The ``.r`` can also be ``.ref``, ``.referer``, or ``.referrer``; though it
will be shortened to just ``.r`` for decreased character count usage.
The value can be ``*`` to specify any referrer host is allowed access, a
specific host name like ``www.example.com``, or if it has a leading period
``.`` or leading ``*.`` it is a domain name specification, like
``.example.com`` or ``*.example.com``. The leading minus sign ``-``
indicates referrer hosts that should be denied access.
Referrer access is applied in the order they are specified. For example,
.r:.example.com,.r:-thief.example.com would allow all hosts ending with
.example.com except for the specific host thief.example.com.
Example valid ACLs::
.r:*
.r:*,.r:-.thief.com
.r:*,.r:.example.com,.r:-thief.example.com
.r:*,.r:-.thief.com,bobs_account,sues_account:sue
bobs_account,sues_account:sue
Example invalid ACLs::
.r:
.r:-
By default, allowing read access via .r will not allow listing objects in
the container -- just retrieving objects from the container. To turn on
listings, use the .rlistings directive.
Also, .r designations aren't allowed in headers whose names include the
word 'write'.
ACLs that are "messy" will be cleaned up. Examples:
====================== ======================
Original Cleaned
---------------------- ----------------------
``bob, sue`` ``bob,sue``
``bob , sue`` ``bob,sue``
``bob,,,sue`` ``bob,sue``
``.referrer : *`` ``.r:*``
``.ref:*.example.com`` ``.r:.example.com``
``.r:*, .rlistings`` ``.r:*,.rlistings``
====================== ======================
:param name: The name of the header being cleaned, such as X-Container-Read
or X-Container-Write.
:param value: The value of the header being cleaned.
:returns: The value, cleaned of extraneous formatting.
:raises ValueError: If the value does not meet the ACL formatting
requirements; the error message will indicate why. | def clean_acl(name, value):
"""
Returns a cleaned ACL header value, validating that it meets the formatting
requirements for standard Swift ACL strings.
The ACL format is::
[item[,item...]]
Each item can be a group name to give access to or a referrer designation
to grant or deny based on the HTTP Referer header.
The referrer designation format is::
.r:[-]value
The ``.r`` can also be ``.ref``, ``.referer``, or ``.referrer``; though it
will be shortened to just ``.r`` for decreased character count usage.
The value can be ``*`` to specify any referrer host is allowed access, a
specific host name like ``www.example.com``, or if it has a leading period
``.`` or leading ``*.`` it is a domain name specification, like
``.example.com`` or ``*.example.com``. The leading minus sign ``-``
indicates referrer hosts that should be denied access.
Referrer access is applied in the order they are specified. For example,
.r:.example.com,.r:-thief.example.com would allow all hosts ending with
.example.com except for the specific host thief.example.com.
Example valid ACLs::
.r:*
.r:*,.r:-.thief.com
.r:*,.r:.example.com,.r:-thief.example.com
.r:*,.r:-.thief.com,bobs_account,sues_account:sue
bobs_account,sues_account:sue
Example invalid ACLs::
.r:
.r:-
By default, allowing read access via .r will not allow listing objects in
the container -- just retrieving objects from the container. To turn on
listings, use the .rlistings directive.
Also, .r designations aren't allowed in headers whose names include the
word 'write'.
ACLs that are "messy" will be cleaned up. Examples:
====================== ======================
Original Cleaned
---------------------- ----------------------
``bob, sue`` ``bob,sue``
``bob , sue`` ``bob,sue``
``bob,,,sue`` ``bob,sue``
``.referrer : *`` ``.r:*``
``.ref:*.example.com`` ``.r:.example.com``
``.r:*, .rlistings`` ``.r:*,.rlistings``
====================== ======================
:param name: The name of the header being cleaned, such as X-Container-Read
or X-Container-Write.
:param value: The value of the header being cleaned.
:returns: The value, cleaned of extraneous formatting.
:raises ValueError: If the value does not meet the ACL formatting
requirements; the error message will indicate why.
"""
name = name.lower()
values = []
for raw_value in value.split(','):
raw_value = raw_value.strip()
if not raw_value:
continue
if ':' not in raw_value:
values.append(raw_value)
continue
first, second = (v.strip() for v in raw_value.split(':', 1))
if not first or not first.startswith('.'):
values.append(raw_value)
elif first in ('.r', '.ref', '.referer', '.referrer'):
if 'write' in name:
raise ValueError('Referrers not allowed in write ACL: '
'%s' % repr(raw_value))
negate = False
if second and second.startswith('-'):
negate = True
second = second[1:].strip()
if second and second != '*' and second.startswith('*'):
second = second[1:].strip()
if not second or second == '.':
raise ValueError('No host/domain value after referrer '
'designation in ACL: %s' % repr(raw_value))
values.append('.r:%s%s' % ('-' if negate else '', second))
else:
raise ValueError('Unknown designator %s in ACL: %s' %
(repr(first), repr(raw_value)))
return ','.join(values) |
Returns a standard Swift ACL string for the given inputs.
Caller is responsible for ensuring that :referrers: parameter is only given
if the ACL is being generated for X-Container-Read. (X-Container-Write
and the account ACL headers don't support referrers.)
:param groups: a list of groups (and/or members in most auth systems) to
grant access
:param referrers: a list of referrer designations (without the leading .r:)
:param header_name: (optional) header name of the ACL we're preparing, for
clean_acl; if None, returned ACL won't be cleaned
:returns: a Swift ACL string for use in X-Container-{Read,Write},
X-Account-Access-Control, etc. | def format_acl_v1(groups=None, referrers=None, header_name=None):
"""
Returns a standard Swift ACL string for the given inputs.
Caller is responsible for ensuring that :referrers: parameter is only given
if the ACL is being generated for X-Container-Read. (X-Container-Write
and the account ACL headers don't support referrers.)
:param groups: a list of groups (and/or members in most auth systems) to
grant access
:param referrers: a list of referrer designations (without the leading .r:)
:param header_name: (optional) header name of the ACL we're preparing, for
clean_acl; if None, returned ACL won't be cleaned
:returns: a Swift ACL string for use in X-Container-{Read,Write},
X-Account-Access-Control, etc.
"""
groups, referrers = groups or [], referrers or []
referrers = ['.r:%s' % r for r in referrers]
result = ','.join(groups + referrers)
return (clean_acl(header_name, result) if header_name else result) |
Returns a version-2 Swift ACL JSON string.
HTTP headers for Version 2 ACLs have the following form:
Header-Name: {"arbitrary":"json","encoded":"string"}
JSON will be forced ASCII (containing six-char \uNNNN sequences rather
than UTF-8; UTF-8 is valid JSON but clients vary in their support for
UTF-8 headers), and without extraneous whitespace.
Advantages over V1: forward compatibility (new keys don't cause parsing
exceptions); Unicode support; no reserved words (you can have a user
named .rlistings if you want).
:param acl_dict: dict of arbitrary data to put in the ACL; see specific
auth systems such as tempauth for supported values
:returns: a JSON string which encodes the ACL | def format_acl_v2(acl_dict):
r"""
Returns a version-2 Swift ACL JSON string.
HTTP headers for Version 2 ACLs have the following form:
Header-Name: {"arbitrary":"json","encoded":"string"}
JSON will be forced ASCII (containing six-char \uNNNN sequences rather
than UTF-8; UTF-8 is valid JSON but clients vary in their support for
UTF-8 headers), and without extraneous whitespace.
Advantages over V1: forward compatibility (new keys don't cause parsing
exceptions); Unicode support; no reserved words (you can have a user
named .rlistings if you want).
:param acl_dict: dict of arbitrary data to put in the ACL; see specific
auth systems such as tempauth for supported values
:returns: a JSON string which encodes the ACL
"""
return json.dumps(acl_dict, ensure_ascii=True, separators=(',', ':'),
sort_keys=True) |
Compatibility wrapper to help migrate ACL syntax from version 1 to 2.
Delegates to the appropriate version-specific format_acl method, defaulting
to version 1 for backward compatibility.
:param kwargs: keyword args appropriate for the selected ACL syntax version
(see :func:`format_acl_v1` or :func:`format_acl_v2`) | def format_acl(version=1, **kwargs):
"""
Compatibility wrapper to help migrate ACL syntax from version 1 to 2.
Delegates to the appropriate version-specific format_acl method, defaulting
to version 1 for backward compatibility.
:param kwargs: keyword args appropriate for the selected ACL syntax version
(see :func:`format_acl_v1` or :func:`format_acl_v2`)
"""
if version == 1:
return format_acl_v1(
groups=kwargs.get('groups'), referrers=kwargs.get('referrers'),
header_name=kwargs.get('header_name'))
elif version == 2:
return format_acl_v2(kwargs.get('acl_dict'))
raise ValueError("Invalid ACL version: %r" % version) |
Parses a standard Swift ACL string into a referrers list and groups list.
See :func:`clean_acl` for documentation of the standard Swift ACL format.
:param acl_string: The standard Swift ACL string to parse.
:returns: A tuple of (referrers, groups) where referrers is a list of
referrer designations (without the leading .r:) and groups is a
list of groups to allow access. | def parse_acl_v1(acl_string):
"""
Parses a standard Swift ACL string into a referrers list and groups list.
See :func:`clean_acl` for documentation of the standard Swift ACL format.
:param acl_string: The standard Swift ACL string to parse.
:returns: A tuple of (referrers, groups) where referrers is a list of
referrer designations (without the leading .r:) and groups is a
list of groups to allow access.
"""
referrers = []
groups = []
if acl_string:
for value in acl_string.split(','):
if value.startswith('.r:'):
referrers.append(value[len('.r:'):])
else:
groups.append(unquote(value))
return referrers, groups |
Parses a version-2 Swift ACL string and returns a dict of ACL info.
:param data: string containing the ACL data in JSON format
:returns: A dict (possibly empty) containing ACL info, e.g.:
{"groups": [...], "referrers": [...]}
:returns: None if data is None, is not valid JSON or does not parse
as a dict
:returns: empty dictionary if data is an empty string | def parse_acl_v2(data):
"""
Parses a version-2 Swift ACL string and returns a dict of ACL info.
:param data: string containing the ACL data in JSON format
:returns: A dict (possibly empty) containing ACL info, e.g.:
{"groups": [...], "referrers": [...]}
:returns: None if data is None, is not valid JSON or does not parse
as a dict
:returns: empty dictionary if data is an empty string
"""
if data is None:
return None
if data == '':
return {}
try:
result = json.loads(data)
return (result if type(result) is dict else None)
except ValueError:
return None |
Compatibility wrapper to help migrate ACL syntax from version 1 to 2.
Delegates to the appropriate version-specific parse_acl method, attempting
to determine the version from the types of args/kwargs.
:param args: positional args for the selected ACL syntax version
:param kwargs: keyword args for the selected ACL syntax version
(see :func:`parse_acl_v1` or :func:`parse_acl_v2`)
:returns: the return value of :func:`parse_acl_v1` or :func:`parse_acl_v2` | def parse_acl(*args, **kwargs):
"""
Compatibility wrapper to help migrate ACL syntax from version 1 to 2.
Delegates to the appropriate version-specific parse_acl method, attempting
to determine the version from the types of args/kwargs.
:param args: positional args for the selected ACL syntax version
:param kwargs: keyword args for the selected ACL syntax version
(see :func:`parse_acl_v1` or :func:`parse_acl_v2`)
:returns: the return value of :func:`parse_acl_v1` or :func:`parse_acl_v2`
"""
version = kwargs.pop('version', None)
if version in (1, None):
return parse_acl_v1(*args)
elif version == 2:
return parse_acl_v2(*args, **kwargs)
else:
raise ValueError('Unknown ACL version: parse_acl(%r, %r)' %
(args, kwargs)) |
Returns True if the referrer should be allowed based on the referrer_acl
list (as returned by :func:`parse_acl`).
See :func:`clean_acl` for documentation of the standard Swift ACL format.
:param referrer: The value of the HTTP Referer header.
:param referrer_acl: The list of referrer designations as returned by
:func:`parse_acl`.
:returns: True if the referrer should be allowed; False if not. | def referrer_allowed(referrer, referrer_acl):
"""
Returns True if the referrer should be allowed based on the referrer_acl
list (as returned by :func:`parse_acl`).
See :func:`clean_acl` for documentation of the standard Swift ACL format.
:param referrer: The value of the HTTP Referer header.
:param referrer_acl: The list of referrer designations as returned by
:func:`parse_acl`.
:returns: True if the referrer should be allowed; False if not.
"""
allow = False
if referrer_acl:
rhost = urlparse(referrer or '').hostname or 'unknown'
for mhost in referrer_acl:
if mhost.startswith('-'):
mhost = mhost[1:]
if mhost == rhost or (mhost.startswith('.') and
rhost.endswith(mhost)):
allow = False
elif mhost == '*' or mhost == rhost or \
(mhost.startswith('.') and rhost.endswith(mhost)):
allow = True
return allow |
Extract the account ACLs from the given account_info, and return the ACLs.
:param info: a dict of the form returned by get_account_info
:returns: None (no ACL system metadata is set), or a dict of the form::
{'admin': [...], 'read-write': [...], 'read-only': [...]}
:raises ValueError: on a syntactically invalid header | def acls_from_account_info(info):
"""
Extract the account ACLs from the given account_info, and return the ACLs.
:param info: a dict of the form returned by get_account_info
:returns: None (no ACL system metadata is set), or a dict of the form::
{'admin': [...], 'read-write': [...], 'read-only': [...]}
:raises ValueError: on a syntactically invalid header
"""
acl = parse_acl(
version=2, data=info.get('sysmeta', {}).get('core-access-control'))
if acl is None:
return None
admin_members = acl.get('admin', [])
readwrite_members = acl.get('read-write', [])
readonly_members = acl.get('read-only', [])
if not any((admin_members, readwrite_members, readonly_members)):
return None
acls = {
'admin': admin_members,
'read-write': readwrite_members,
'read-only': readonly_members,
}
if six.PY2:
for k in ('admin', 'read-write', 'read-only'):
acls[k] = [v.encode('utf8') for v in acls[k]]
return acls |
Returns a properly formatted response body according to format.
Handles json and xml, otherwise will return text/plain.
Note: xml response does not include xml declaration.
:params data_format: resulting format
:params data_dict: generated data about results.
:params error_list: list of quoted filenames that failed
:params root_tag: the tag name to use for root elements when returning XML;
e.g. 'extract' or 'delete' | def get_response_body(data_format, data_dict, error_list, root_tag):
"""
Returns a properly formatted response body according to format.
Handles json and xml, otherwise will return text/plain.
Note: xml response does not include xml declaration.
:params data_format: resulting format
:params data_dict: generated data about results.
:params error_list: list of quoted filenames that failed
:params root_tag: the tag name to use for root elements when returning XML;
e.g. 'extract' or 'delete'
"""
if data_format == 'application/json':
data_dict['Errors'] = error_list
return json.dumps(data_dict).encode('ascii')
if data_format and data_format.endswith('/xml'):
output = ['<', root_tag, '>\n']
for key in sorted(data_dict):
xml_key = key.replace(' ', '_').lower()
output.extend([
'<', xml_key, '>',
saxutils.escape(str(data_dict[key])),
'</', xml_key, '>\n',
])
output.append('<errors>\n')
for name, status in error_list:
output.extend([
'<object><name>', saxutils.escape(name), '</name><status>',
saxutils.escape(status), '</status></object>\n',
])
output.extend(['</errors>\n</', root_tag, '>\n'])
if six.PY2:
return ''.join(output)
return ''.join(output).encode('utf-8')
output = []
for key in sorted(data_dict):
output.append('%s: %s\n' % (key, data_dict[key]))
output.append('Errors:\n')
output.extend(
'%s, %s\n' % (name, status)
for name, status in error_list)
if six.PY2:
return ''.join(output)
return ''.join(output).encode('utf-8') |
Enforces that inner_iter yields exactly <nbytes> bytes before
exhaustion.
If inner_iter fails to do so, BadResponseLength is raised.
:param inner_iter: iterable of bytestrings
:param nbytes: number of bytes expected | def enforce_byte_count(inner_iter, nbytes):
"""
Enforces that inner_iter yields exactly <nbytes> bytes before
exhaustion.
If inner_iter fails to do so, BadResponseLength is raised.
:param inner_iter: iterable of bytestrings
:param nbytes: number of bytes expected
"""
try:
bytes_left = nbytes
for chunk in inner_iter:
if bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
raise BadResponseLength(
"Too many bytes; truncating after %d bytes "
"with at least %d surplus bytes remaining" % (
nbytes, len(chunk) - bytes_left))
if bytes_left:
raise BadResponseLength('Expected another %d bytes' % (
bytes_left,))
finally:
close_if_possible(inner_iter) |
Given a domain, returns its DNS CNAME mapping and DNS ttl.
:param domain: domain to query on
:param resolver: dns.resolver.Resolver() instance used for executing DNS
queries
:returns: (ttl, result) | def lookup_cname(domain, resolver): # pragma: no cover
"""
Given a domain, returns its DNS CNAME mapping and DNS ttl.
:param domain: domain to query on
:param resolver: dns.resolver.Resolver() instance used for executing DNS
queries
:returns: (ttl, result)
"""
try:
answer = resolver.query(domain, 'CNAME').rrset
ttl = answer.ttl
result = list(answer.items)[0].to_text()
result = result.rstrip('.')
return ttl, result
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
# As the memcache lib returns None when nothing is found in cache,
# returning false helps to distinguish between "nothing in cache"
# (None) and "nothing to cache" (False).
return 60, False
except (dns.exception.DNSException):
return 0, None |
Validate that the value from x-copy-from header is
well formatted. We assume the caller ensures that
x-copy-from header is present in req.headers.
:param req: HTTP request object
:returns: A tuple with container name and object name
:raise HTTPPreconditionFailed: if x-copy-from value
is not well formatted. | def _check_copy_from_header(req):
"""
Validate that the value from x-copy-from header is
well formatted. We assume the caller ensures that
x-copy-from header is present in req.headers.
:param req: HTTP request object
:returns: A tuple with container name and object name
:raise HTTPPreconditionFailed: if x-copy-from value
is not well formatted.
"""
return check_path_header(req, 'X-Copy-From', 2,
'X-Copy-From header must be of the form '
'<container name>/<object name>') |
Validate that the value from destination header is
well formatted. We assume the caller ensures that
destination header is present in req.headers.
:param req: HTTP request object
:returns: A tuple with container name and object name
:raise HTTPPreconditionFailed: if destination value
is not well formatted. | def _check_destination_header(req):
"""
Validate that the value from destination header is
well formatted. We assume the caller ensures that
destination header is present in req.headers.
:param req: HTTP request object
:returns: A tuple with container name and object name
:raise HTTPPreconditionFailed: if destination value
is not well formatted.
"""
return check_path_header(req, 'Destination', 2,
'Destination header must be of the form '
'<container name>/<object name>') |
Will copy desired headers from src to dest.
:params src: an instance of collections.Mapping
:params dest: an instance of collections.Mapping | def _copy_headers(src, dest):
"""
Will copy desired headers from src to dest.
:params src: an instance of collections.Mapping
:params dest: an instance of collections.Mapping
"""
for k, v in src.items():
if (is_sys_or_user_meta('object', k) or
is_object_transient_sysmeta(k) or
k.lower() == 'x-delete-at'):
dest[k] = v |
Returns the WSGI filter for use with paste.deploy. | def filter_factory(global_conf, **local_conf):
"""Returns the WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
logger = get_logger(conf, log_route='formpost')
allowed_digests, deprecated_digests = get_allowed_digests(
conf.get('allowed_digests', '').split(), logger)
info = {'allowed_digests': sorted(allowed_digests)}
if deprecated_digests:
info['deprecated_digests'] = sorted(deprecated_digests)
register_swift_info('formpost', **info)
conf.update(info)
return lambda app: FormPost(app, conf) |
Returns a WSGI filter app for use with paste.deploy. | def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return KeystoneAuth(app, conf)
return auth_filter |
Determine the content type to use for an account or container listing
response.
:param req: request object
:returns: content type as a string (e.g. text/plain, application/json)
:raises HTTPNotAcceptable: if the requested content type is not acceptable
:raises HTTPBadRequest: if the 'format' query param is provided and
not valid UTF-8 | def get_listing_content_type(req):
"""
Determine the content type to use for an account or container listing
response.
:param req: request object
:returns: content type as a string (e.g. text/plain, application/json)
:raises HTTPNotAcceptable: if the requested content type is not acceptable
:raises HTTPBadRequest: if the 'format' query param is provided and
not valid UTF-8
"""
query_format = get_param(req, 'format')
if query_format:
req.accept = FORMAT2CONTENT_TYPE.get(
query_format.lower(), FORMAT2CONTENT_TYPE['plain'])
try:
out_content_type = req.accept.best_match(
['text/plain', 'application/json', 'application/xml', 'text/xml'])
except ValueError:
raise HTTPBadRequest(request=req, body=b'Invalid Accept header')
if not out_content_type:
raise HTTPNotAcceptable(request=req)
return out_content_type |
Parses general parms for rate limits looking for things that
start with the provided name_prefix within the provided conf
and returns lists for both internal use and for /info
:param conf: conf dict to parse
:param name_prefix: prefix of config parms to look for
:param info: set to return extra stuff for /info registration | def interpret_conf_limits(conf, name_prefix, info=None):
"""
Parses general parms for rate limits looking for things that
start with the provided name_prefix within the provided conf
and returns lists for both internal use and for /info
:param conf: conf dict to parse
:param name_prefix: prefix of config parms to look for
:param info: set to return extra stuff for /info registration
"""
conf_limits = []
for conf_key in conf:
if conf_key.startswith(name_prefix):
cont_size = int(conf_key[len(name_prefix):])
rate = float(conf[conf_key])
conf_limits.append((cont_size, rate))
conf_limits.sort()
ratelimits = []
conf_limits_info = list(conf_limits)
while conf_limits:
cur_size, cur_rate = conf_limits.pop(0)
if conf_limits:
next_size, next_rate = conf_limits[0]
slope = (float(next_rate) - float(cur_rate)) \
/ (next_size - cur_size)
def new_scope(cur_size, slope, cur_rate):
# making new scope for variables
return lambda x: (x - cur_size) * slope + cur_rate
line_func = new_scope(cur_size, slope, cur_rate)
else:
line_func = lambda x: cur_rate
ratelimits.append((cur_size, cur_rate, line_func))
if info is None:
return ratelimits
else:
return ratelimits, conf_limits_info |
Returns number of requests allowed per second for given size. | def get_maxrate(ratelimits, size):
"""
Returns number of requests allowed per second for given size.
"""
last_func = None
if size:
size = int(size)
for ratesize, rate, func in ratelimits:
if size < ratesize:
break
last_func = func
if last_func:
return last_func(size)
return None |
paste.deploy app factory for creating WSGI proxy apps. | def filter_factory(global_conf, **local_conf):
"""
paste.deploy app factory for creating WSGI proxy apps.
"""
conf = global_conf.copy()
conf.update(local_conf)
account_ratelimit = float(conf.get('account_ratelimit', 0))
max_sleep_time_seconds = float(conf.get('max_sleep_time_seconds', 60))
container_ratelimits, cont_limit_info = interpret_conf_limits(
conf, 'container_ratelimit_', info=1)
container_listing_ratelimits, cont_list_limit_info = \
interpret_conf_limits(conf, 'container_listing_ratelimit_', info=1)
# not all limits are exposed (intentionally)
register_swift_info('ratelimit',
account_ratelimit=account_ratelimit,
max_sleep_time_seconds=max_sleep_time_seconds,
container_ratelimits=cont_limit_info,
container_listing_ratelimits=cont_list_limit_info)
def limit_filter(app):
return RateLimitMiddleware(app, conf)
return limit_filter |
paste.deploy app factory for creating WSGI proxy apps. | def filter_factory(global_conf, **local_conf):
"""
paste.deploy app factory for creating WSGI proxy apps.
"""
conf = global_conf.copy()
conf.update(local_conf)
if config_true_value(conf.get('read_only')):
register_swift_info('read_only')
def read_only_filter(app):
return ReadOnlyMiddleware(app, conf)
return read_only_filter |
Given a request body, parses it and returns a list of dictionaries.
The output structure is nearly the same as the input structure, but it
is not an exact copy. Given a valid object-backed input dictionary
``d_in``, its corresponding output dictionary ``d_out`` will be as follows:
* d_out['etag'] == d_in['etag']
* d_out['path'] == d_in['path']
* d_in['size_bytes'] can be a string ("12") or an integer (12), but
d_out['size_bytes'] is an integer.
* (optional) d_in['range'] is a string of the form "M-N", "M-", or
"-N", where M and N are non-negative integers. d_out['range'] is the
corresponding swob.Range object. If d_in does not have a key
'range', neither will d_out.
Inlined data dictionaries will have any extraneous padding stripped.
:raises: HTTPException on parse errors or semantic errors (e.g. bogus
JSON structure, syntactically invalid ranges)
:returns: a list of dictionaries on success | def parse_and_validate_input(req_body, req_path):
"""
Given a request body, parses it and returns a list of dictionaries.
The output structure is nearly the same as the input structure, but it
is not an exact copy. Given a valid object-backed input dictionary
``d_in``, its corresponding output dictionary ``d_out`` will be as follows:
* d_out['etag'] == d_in['etag']
* d_out['path'] == d_in['path']
* d_in['size_bytes'] can be a string ("12") or an integer (12), but
d_out['size_bytes'] is an integer.
* (optional) d_in['range'] is a string of the form "M-N", "M-", or
"-N", where M and N are non-negative integers. d_out['range'] is the
corresponding swob.Range object. If d_in does not have a key
'range', neither will d_out.
Inlined data dictionaries will have any extraneous padding stripped.
:raises: HTTPException on parse errors or semantic errors (e.g. bogus
JSON structure, syntactically invalid ranges)
:returns: a list of dictionaries on success
"""
try:
parsed_data = json.loads(req_body)
except ValueError:
raise HTTPBadRequest("Manifest must be valid JSON.\n")
if not isinstance(parsed_data, list):
raise HTTPBadRequest("Manifest must be a list.\n")
# If we got here, req_path refers to an object, so this won't ever raise
# ValueError.
vrs, account, _junk = split_path(req_path, 3, 3, True)
errors = []
for seg_index, seg_dict in enumerate(parsed_data):
if not isinstance(seg_dict, dict):
errors.append(b"Index %d: not a JSON object" % seg_index)
continue
for required in SLO_KEYS:
if required in seg_dict:
segment_type = required
break
else:
errors.append(
b"Index %d: expected keys to include one of %s"
% (seg_index,
b" or ".join(repr(required) for required in SLO_KEYS)))
continue
allowed_keys = SLO_KEYS[segment_type].union([segment_type])
extraneous_keys = [k for k in seg_dict if k not in allowed_keys]
if extraneous_keys:
errors.append(
b"Index %d: extraneous keys %s"
% (seg_index,
b", ".join(json.dumps(ek).encode('ascii')
for ek in sorted(extraneous_keys))))
continue
if segment_type == 'path':
if not isinstance(seg_dict['path'], six.string_types):
errors.append(b"Index %d: \"path\" must be a string" %
seg_index)
continue
if not (seg_dict.get('etag') is None or
isinstance(seg_dict['etag'], six.string_types)):
errors.append(b'Index %d: "etag" must be a string or null '
b'(if provided)' % seg_index)
continue
if '/' not in seg_dict['path'].strip('/'):
errors.append(
b"Index %d: path does not refer to an object. Path must "
b"be of the form /container/object." % seg_index)
continue
seg_size = seg_dict.get('size_bytes')
if seg_size is not None:
try:
seg_size = int(seg_size)
seg_dict['size_bytes'] = seg_size
except (TypeError, ValueError):
errors.append(b"Index %d: invalid size_bytes" % seg_index)
continue
if seg_size < 1 and seg_index != (len(parsed_data) - 1):
errors.append(b"Index %d: too small; each segment must be "
b"at least 1 byte."
% (seg_index,))
continue
obj_path = '/'.join(['', vrs, account,
quote(seg_dict['path'].lstrip('/'))])
if req_path == obj_path:
errors.append(
b"Index %d: manifest must not include itself as a segment"
% seg_index)
continue
if seg_dict.get('range'):
try:
seg_dict['range'] = Range('bytes=%s' % seg_dict['range'])
except ValueError:
errors.append(b"Index %d: invalid range" % seg_index)
continue
if len(seg_dict['range'].ranges) > 1:
errors.append(b"Index %d: multiple ranges "
b"(only one allowed)" % seg_index)
continue
# If the user *told* us the object's size, we can check range
# satisfiability right now. If they lied about the size, we'll
# fail that validation later.
if (seg_size is not None and 1 != len(
seg_dict['range'].ranges_for_length(seg_size))):
errors.append(b"Index %d: unsatisfiable range" % seg_index)
continue
elif segment_type == 'data':
# Validate that the supplied data is non-empty and base64-encoded
try:
data = strict_b64decode(seg_dict['data'])
except ValueError:
errors.append(
b"Index %d: data must be valid base64" % seg_index)
continue
if len(data) < 1:
errors.append(b"Index %d: too small; each segment must be "
b"at least 1 byte."
% (seg_index,))
continue
# re-encode to normalize padding
seg_dict['data'] = base64.b64encode(data).decode('ascii')
if parsed_data and all('data' in d for d in parsed_data):
errors.append(b"Inline data segments require at least one "
b"object-backed segment.")
if errors:
error_message = b"".join(e + b"\n" for e in errors)
raise HTTPBadRequest(error_message,
headers={"Content-Type": "text/plain"})
return parsed_data |
Decode any inlined data and update sub_slo segments bytes from content-type
when available; then annotate segment dicts in segments list with
'segment_length'.
N.B. raw_data segments don't have a bytes key and range-segments need to
calculate their length from their range key but afterwards all segments
dicts will have 'segment_length' representing the length of the segment. | def _annotate_segments(segments, logger=None):
"""
Decode any inlined data and update sub_slo segments bytes from content-type
when available; then annotate segment dicts in segments list with
'segment_length'.
N.B. raw_data segments don't have a bytes key and range-segments need to
calculate their length from their range key but afterwards all segments
dicts will have 'segment_length' representing the length of the segment.
"""
for seg_dict in segments:
if 'data' in seg_dict:
seg_dict['raw_data'] = base64.b64decode(seg_dict.pop('data'))
segment_length = len(seg_dict['raw_data'])
else:
if config_true_value(seg_dict.get('sub_slo')):
override_bytes_from_content_type(
seg_dict, logger=logger)
seg_range = seg_dict.get('range')
if seg_range is not None:
# The range is of the form N-M, where N and M are both
# positive decimal integers. We know this because this
# middleware is the only thing that creates the SLO
# manifests stored in the cluster.
range_start, range_end = [
int(x) for x in seg_range.split('-')]
segment_length = (range_end - range_start) + 1
else:
segment_length = int(seg_dict['bytes'])
seg_dict['segment_length'] = segment_length |
Helper function to calculate the byterange for a part_num response.
N.B. as a side-effect of calculating the single tuple representing the
byterange required for a part_num response this function will also mutate
the request's Range header so that swob knows to return 206.
:param req: the request object
:param segments: the list of seg_dicts
:param part_num: the part number of the object to return
:returns: a tuple representing the byterange | def calculate_byterange_for_part_num(req, segments, part_num):
"""
Helper function to calculate the byterange for a part_num response.
N.B. as a side-effect of calculating the single tuple representing the
byterange required for a part_num response this function will also mutate
the request's Range header so that swob knows to return 206.
:param req: the request object
:param segments: the list of seg_dicts
:param part_num: the part number of the object to return
:returns: a tuple representing the byterange
"""
start = 0
for seg in segments[:part_num - 1]:
start += seg['segment_length']
last = start + segments[part_num - 1]['segment_length']
# We need to mutate the request's Range header so that swob knows to
# handle these partial content requests correctly.
req.range = "bytes=%d-%d" % (start, last - 1)
return start, last - 1 |
Calculate the byteranges based on the request, segments, and part number.
N.B. as a side-effect of calculating the single tuple representing the
byterange required for a part_num response this function will also mutate
the request's Range header so that swob knows to return 206.
:param req: the request object
:param segments: the list of seg_dicts
:param resp_attrs: the slo response attributes
:param part_num: the part number of the object to return
:returns: a list of tuples representing byteranges | def calculate_byteranges(req, segments, resp_attrs, part_num):
"""
Calculate the byteranges based on the request, segments, and part number.
N.B. as a side-effect of calculating the single tuple representing the
byterange required for a part_num response this function will also mutate
the request's Range header so that swob knows to return 206.
:param req: the request object
:param segments: the list of seg_dicts
:param resp_attrs: the slo response attributes
:param part_num: the part number of the object to return
:returns: a list of tuples representing byteranges
"""
if req.range:
byteranges = [
# For some reason, swob.Range.ranges_for_length adds 1 to the
# last byte's position.
(start, end - 1) for start, end
in req.range.ranges_for_length(resp_attrs.slo_size)]
elif part_num:
byteranges = [
calculate_byterange_for_part_num(req, segments, part_num)]
else:
byteranges = [(0, resp_attrs.slo_size - 1)]
return byteranges |
Returns a Static Web WSGI filter for use with paste.deploy. | def filter_factory(global_conf, **local_conf):
"""Returns a Static Web WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info('staticweb')
def staticweb_filter(app):
return StaticWeb(app, conf)
return staticweb_filter |
Validate that the value from x-symlink-target header is well formatted
and that the x-symlink-target-etag header (if present) does not contain
problematic characters. We assume the caller ensures that
x-symlink-target header is present in req.headers.
:param req: HTTP request object
:returns: a tuple, the full versioned path to the object (as a WSGI string)
and the X-Symlink-Target-Etag header value which may be None
:raise: HTTPPreconditionFailed if x-symlink-target value
is not well formatted.
:raise: HTTPBadRequest if the x-symlink-target value points to the request
path.
:raise: HTTPBadRequest if the x-symlink-target-etag value contains
a semicolon, double-quote, or backslash. | def _validate_and_prep_request_headers(req):
"""
Validate that the value from x-symlink-target header is well formatted
and that the x-symlink-target-etag header (if present) does not contain
problematic characters. We assume the caller ensures that
x-symlink-target header is present in req.headers.
:param req: HTTP request object
:returns: a tuple, the full versioned path to the object (as a WSGI string)
and the X-Symlink-Target-Etag header value which may be None
:raise: HTTPPreconditionFailed if x-symlink-target value
is not well formatted.
:raise: HTTPBadRequest if the x-symlink-target value points to the request
path.
:raise: HTTPBadRequest if the x-symlink-target-etag value contains
a semicolon, double-quote, or backslash.
"""
# N.B. check_path_header doesn't assert the leading slash and
# copy middleware may accept the format. In the symlink, API
# says apparently to use "container/object" format so add the
# validation first, here.
error_body = 'X-Symlink-Target header must be of the form ' \
'<container name>/<object name>'
if wsgi_unquote(req.headers[TGT_OBJ_SYMLINK_HDR]).startswith('/'):
raise HTTPPreconditionFailed(
body=error_body,
request=req, content_type='text/plain')
# check container and object format
container, obj = check_path_header(
req, TGT_OBJ_SYMLINK_HDR, 2,
error_body)
req.headers[TGT_OBJ_SYMLINK_HDR] = wsgi_quote('%s/%s' % (container, obj))
# Check account format if it exists
account = check_account_format(
req, wsgi_unquote(req.headers[TGT_ACCT_SYMLINK_HDR])) \
if TGT_ACCT_SYMLINK_HDR in req.headers else None
# Extract request path
_junk, req_acc, req_cont, req_obj = req.split_path(4, 4, True)
if account:
req.headers[TGT_ACCT_SYMLINK_HDR] = wsgi_quote(account)
else:
account = req_acc
# Check if symlink targets the symlink itself or not
if (account, container, obj) == (req_acc, req_cont, req_obj):
raise HTTPBadRequest(
body='Symlink cannot target itself',
request=req, content_type='text/plain')
etag = normalize_etag(req.headers.get(TGT_ETAG_SYMLINK_HDR, None))
if etag and any(c in etag for c in ';"\\'):
# See cgi.parse_header for why the above chars are problematic
raise HTTPBadRequest(
body='Bad %s format' % TGT_ETAG_SYMLINK_HDR.title(),
request=req, content_type='text/plain')
if not (etag or req.headers.get('Content-Type')):
req.headers['Content-Type'] = 'application/symlink'
return '/v1/%s/%s/%s' % (account, container, obj), etag |
Helper function to translate from client-facing X-Symlink-* headers
to cluster-facing X-Object-Sysmeta-Symlink-* headers.
:param headers: request headers dict. Note that the headers dict
will be updated directly. | def symlink_usermeta_to_sysmeta(headers):
"""
Helper function to translate from client-facing X-Symlink-* headers
to cluster-facing X-Object-Sysmeta-Symlink-* headers.
:param headers: request headers dict. Note that the headers dict
will be updated directly.
"""
# To preseve url-encoded value in the symlink header, use raw value
for user_hdr, sysmeta_hdr in (
(TGT_OBJ_SYMLINK_HDR, TGT_OBJ_SYSMETA_SYMLINK_HDR),
(TGT_ACCT_SYMLINK_HDR, TGT_ACCT_SYSMETA_SYMLINK_HDR)):
if user_hdr in headers:
headers[sysmeta_hdr] = headers.pop(user_hdr) |
Helper function to translate from cluster-facing
X-Object-Sysmeta-Symlink-* headers to client-facing X-Symlink-* headers.
:param headers: request headers dict. Note that the headers dict
will be updated directly. | def symlink_sysmeta_to_usermeta(headers):
"""
Helper function to translate from cluster-facing
X-Object-Sysmeta-Symlink-* headers to client-facing X-Symlink-* headers.
:param headers: request headers dict. Note that the headers dict
will be updated directly.
"""
for user_hdr, sysmeta_hdr in (
(TGT_OBJ_SYMLINK_HDR, TGT_OBJ_SYSMETA_SYMLINK_HDR),
(TGT_ACCT_SYMLINK_HDR, TGT_ACCT_SYSMETA_SYMLINK_HDR),
(TGT_ETAG_SYMLINK_HDR, TGT_ETAG_SYSMETA_SYMLINK_HDR),
(TGT_BYTES_SYMLINK_HDR, TGT_BYTES_SYSMETA_SYMLINK_HDR)):
if sysmeta_hdr in headers:
headers[user_hdr] = headers.pop(sysmeta_hdr) |
Returns a WSGI filter app for use with paste.deploy. | def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info('tempauth', account_acls=True)
def auth_filter(app):
return TempAuth(app, conf)
return auth_filter |
Extracts the tempurl keys from metadata.
:param meta: account metadata
:returns: list of keys found (possibly empty if no keys set)
Example:
meta = get_account_info(...)['meta']
keys = get_tempurl_keys_from_metadata(meta) | def get_tempurl_keys_from_metadata(meta):
"""
Extracts the tempurl keys from metadata.
:param meta: account metadata
:returns: list of keys found (possibly empty if no keys set)
Example:
meta = get_account_info(...)['meta']
keys = get_tempurl_keys_from_metadata(meta)
"""
return [(get_valid_utf8_str(value) if six.PY2 else value)
for key, value in meta.items()
if key.lower() in ('temp-url-key', 'temp-url-key-2')] |
Returns the normalized expiration value as an int
If not None, the value is converted to an int if possible or 0
if not, and checked for expiration (returns 0 if expired). | def normalize_temp_url_expires(value):
"""
Returns the normalized expiration value as an int
If not None, the value is converted to an int if possible or 0
if not, and checked for expiration (returns 0 if expired).
"""
if value is None:
return value
try:
temp_url_expires = int(value)
except ValueError:
try:
temp_url_expires = timegm(strptime(
value, EXPIRES_ISO8601_FORMAT))
except ValueError:
temp_url_expires = 0
if temp_url_expires < time():
temp_url_expires = 0
return temp_url_expires |
Returns the provided temporary URL parameters (sig, expires, prefix,
temp_url_ip_range), if given and syntactically valid.
Either sig, expires or prefix could be None if not provided.
:param env: The WSGI environment for the request.
:returns: (sig, expires, prefix, filename, inline,
temp_url_ip_range) as described above. | def get_temp_url_info(env):
"""
Returns the provided temporary URL parameters (sig, expires, prefix,
temp_url_ip_range), if given and syntactically valid.
Either sig, expires or prefix could be None if not provided.
:param env: The WSGI environment for the request.
:returns: (sig, expires, prefix, filename, inline,
temp_url_ip_range) as described above.
"""
sig = expires = prefix = ip_range = filename = inline = None
qs = parse_qs(env.get('QUERY_STRING', ''), keep_blank_values=True)
if 'temp_url_ip_range' in qs:
ip_range = qs['temp_url_ip_range'][0]
if 'temp_url_sig' in qs:
sig = qs['temp_url_sig'][0]
if 'temp_url_expires' in qs:
expires = qs['temp_url_expires'][0]
if 'temp_url_prefix' in qs:
prefix = qs['temp_url_prefix'][0]
if 'filename' in qs:
filename = qs['filename'][0]
if 'inline' in qs:
inline = True
return (sig, expires, prefix, filename, inline, ip_range) |
Returns the WSGI filter for use with paste.deploy. | def filter_factory(global_conf, **local_conf):
"""Returns the WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
logger = get_logger(conf, log_route='tempurl')
defaults = {
'methods': 'GET HEAD PUT POST DELETE',
'incoming_remove_headers': DEFAULT_INCOMING_REMOVE_HEADERS,
'incoming_allow_headers': DEFAULT_INCOMING_ALLOW_HEADERS,
'outgoing_remove_headers': DEFAULT_OUTGOING_REMOVE_HEADERS,
'outgoing_allow_headers': DEFAULT_OUTGOING_ALLOW_HEADERS,
}
info_conf = {k: conf.get(k, v).split() for k, v in defaults.items()}
allowed_digests, deprecated_digests = get_allowed_digests(
conf.get('allowed_digests', '').split(), logger)
info_conf['allowed_digests'] = sorted(allowed_digests)
if deprecated_digests:
info_conf['deprecated_digests'] = sorted(deprecated_digests)
register_swift_info('tempurl', **info_conf)
conf.update(info_conf)
register_sensitive_param('temp_url_sig')
return lambda app: TempURL(app, conf, logger) |
Serialize crypto meta to a form suitable for including in a header value.
The crypto-meta is serialized as a json object. The iv and key values are
random bytes and as a result need to be base64 encoded before sending over
the wire. Base64 encoding returns a bytes object in py3, to future proof
the code, decode this data to produce a string, which is what the
json.dumps function expects.
:param crypto_meta: a dict containing crypto meta items
:returns: a string serialization of a crypto meta dict | def dump_crypto_meta(crypto_meta):
"""
Serialize crypto meta to a form suitable for including in a header value.
The crypto-meta is serialized as a json object. The iv and key values are
random bytes and as a result need to be base64 encoded before sending over
the wire. Base64 encoding returns a bytes object in py3, to future proof
the code, decode this data to produce a string, which is what the
json.dumps function expects.
:param crypto_meta: a dict containing crypto meta items
:returns: a string serialization of a crypto meta dict
"""
def b64_encode_meta(crypto_meta):
return {
name: (base64.b64encode(value).decode() if name in ('iv', 'key')
else b64_encode_meta(value) if isinstance(value, dict)
else value)
for name, value in crypto_meta.items()}
# use sort_keys=True to make serialized form predictable for testing
return urlparse.quote_plus(
json.dumps(b64_encode_meta(crypto_meta), sort_keys=True)) |
Build the crypto_meta from the json object.
Note that json.loads always produces unicode strings; to ensure the
resultant crypto_meta matches the original object:
* cast all keys to str (effectively a no-op on py3),
* base64 decode 'key' and 'iv' values to bytes, and
* encode remaining string values as UTF-8 on py2 (while leaving them
as native unicode strings on py3).
:param value: a string serialization of a crypto meta dict
:param b64decode: decode the 'key' and 'iv' values to bytes, default True
:returns: a dict containing crypto meta items
:raises EncryptionException: if an error occurs while parsing the
crypto meta | def load_crypto_meta(value, b64decode=True):
"""
Build the crypto_meta from the json object.
Note that json.loads always produces unicode strings; to ensure the
resultant crypto_meta matches the original object:
* cast all keys to str (effectively a no-op on py3),
* base64 decode 'key' and 'iv' values to bytes, and
* encode remaining string values as UTF-8 on py2 (while leaving them
as native unicode strings on py3).
:param value: a string serialization of a crypto meta dict
:param b64decode: decode the 'key' and 'iv' values to bytes, default True
:returns: a dict containing crypto meta items
:raises EncryptionException: if an error occurs while parsing the
crypto meta
"""
def b64_decode_meta(crypto_meta):
return {
str(name): (
base64.b64decode(val) if name in ('iv', 'key') and b64decode
else b64_decode_meta(val) if isinstance(val, dict)
else val.encode('utf8') if six.PY2 else val)
for name, val in crypto_meta.items()}
try:
if not isinstance(value, six.string_types):
raise ValueError('crypto meta not a string')
val = json.loads(urlparse.unquote_plus(value))
if not isinstance(val, dict):
raise ValueError('crypto meta not a Mapping')
return b64_decode_meta(val)
except (KeyError, ValueError, TypeError) as err:
msg = 'Bad crypto meta %r: %s' % (value, err)
raise EncryptionException(msg) |
Serialize and append crypto metadata to an encrypted value.
:param value: value to which serialized crypto meta will be appended.
:param crypto_meta: a dict of crypto meta
:return: a string of the form <value>; swift_meta=<serialized crypto meta> | def append_crypto_meta(value, crypto_meta):
"""
Serialize and append crypto metadata to an encrypted value.
:param value: value to which serialized crypto meta will be appended.
:param crypto_meta: a dict of crypto meta
:return: a string of the form <value>; swift_meta=<serialized crypto meta>
"""
if not isinstance(value, str):
raise ValueError
return '%s; swift_meta=%s' % (value, dump_crypto_meta(crypto_meta)) |
Extract and deserialize any crypto meta from the end of a value.
:param value: string that may have crypto meta at end
:return: a tuple of the form:
(<value without crypto meta>, <deserialized crypto meta> or None) | def extract_crypto_meta(value):
"""
Extract and deserialize any crypto meta from the end of a value.
:param value: string that may have crypto meta at end
:return: a tuple of the form:
(<value without crypto meta>, <deserialized crypto meta> or None)
"""
swift_meta = None
value, meta = parse_header(value)
if 'swift_meta' in meta:
swift_meta = load_crypto_meta(meta['swift_meta'])
return value, swift_meta |
Encrypt a header value using the supplied key.
:param crypto: a Crypto instance
:param value: value to encrypt
:param key: crypto key to use
:returns: a tuple of (encrypted value, crypto_meta) where crypto_meta is a
dict of form returned by
:py:func:`~swift.common.middleware.crypto.Crypto.get_crypto_meta`
:raises ValueError: if value is empty | def encrypt_header_val(crypto, value, key):
"""
Encrypt a header value using the supplied key.
:param crypto: a Crypto instance
:param value: value to encrypt
:param key: crypto key to use
:returns: a tuple of (encrypted value, crypto_meta) where crypto_meta is a
dict of form returned by
:py:func:`~swift.common.middleware.crypto.Crypto.get_crypto_meta`
:raises ValueError: if value is empty
"""
if not value:
raise ValueError('empty value is not acceptable')
crypto_meta = crypto.create_crypto_meta()
crypto_ctxt = crypto.create_encryption_ctxt(key, crypto_meta['iv'])
enc_val = bytes_to_wsgi(base64.b64encode(
crypto_ctxt.update(wsgi_to_bytes(value))))
return enc_val, crypto_meta |
Compute an HMAC-SHA256 using given key and etag.
:param key: The starting key for the hash.
:param etag: The etag to hash.
:returns: a Base64-encoded representation of the HMAC | def _hmac_etag(key, etag):
"""
Compute an HMAC-SHA256 using given key and etag.
:param key: The starting key for the hash.
:param etag: The etag to hash.
:returns: a Base64-encoded representation of the HMAC
"""
if not isinstance(etag, bytes):
etag = wsgi_to_bytes(etag)
result = hmac.new(key, etag, digestmod=hashlib.sha256).digest()
return base64.b64encode(result).decode() |
Provides a factory function for loading encryption middleware. | def filter_factory(global_conf, **local_conf):
"""Provides a factory function for loading encryption middleware."""
conf = global_conf.copy()
conf.update(local_conf)
enabled = not config_true_value(conf.get('disable_encryption', 'false'))
register_swift_info('encryption', admin=True, enabled=enabled)
def encryption_filter(app):
return Decrypter(Encrypter(app, conf), conf)
return encryption_filter |
Takes an S3 style ACL and returns a list of header/value pairs that
implement that ACL in Swift, or "NotImplemented" if there isn't a way to do
that yet. | def swift_acl_translate(acl, group='', user='', xml=False):
"""
Takes an S3 style ACL and returns a list of header/value pairs that
implement that ACL in Swift, or "NotImplemented" if there isn't a way to do
that yet.
"""
swift_acl = {}
swift_acl['public-read'] = [['X-Container-Read', '.r:*,.rlistings']]
# Swift does not support public write:
# https://answers.launchpad.net/swift/+question/169541
swift_acl['public-read-write'] = [['X-Container-Write', '.r:*'],
['X-Container-Read',
'.r:*,.rlistings']]
# TODO: if there's a way to get group and user, this should work for
# private:
# swift_acl['private'] = \
# [['HTTP_X_CONTAINER_WRITE', group + ':' + user], \
# ['HTTP_X_CONTAINER_READ', group + ':' + user]]
swift_acl['private'] = [['X-Container-Write', '.'],
['X-Container-Read', '.']]
# Swift doesn't have per-object ACLs, so this is best-effort
swift_acl['bucket-owner-full-control'] = swift_acl['private']
swift_acl['bucket-owner-read'] = swift_acl['private']
if xml:
# We are working with XML and need to parse it
try:
elem = fromstring(acl, 'AccessControlPolicy')
except (XMLSyntaxError, DocumentInvalid):
raise MalformedACLError()
acl = 'unknown'
for grant in elem.findall('./AccessControlList/Grant'):
permission = grant.find('./Permission').text
grantee = grant.find('./Grantee').get('{%s}type' % XMLNS_XSI)
if permission == "FULL_CONTROL" and grantee == 'CanonicalUser' and\
acl != 'public-read' and acl != 'public-read-write':
acl = 'private'
elif permission == "READ" and grantee == 'Group' and\
acl != 'public-read-write':
acl = 'public-read'
elif permission == "WRITE" and grantee == 'Group':
acl = 'public-read-write'
else:
acl = 'unsupported'
if acl in ('authenticated-read', 'log-delivery-write'):
raise S3NotImplemented()
elif acl not in swift_acl:
raise ACLError()
return swift_acl[acl] |
Handle the x-amz-acl header.
Note that this header currently used for only normal-acl
(not implemented) on s3acl.
TODO: add translation to swift acl like as x-container-read to s3acl | def handle_acl_header(req):
"""
Handle the x-amz-acl header.
Note that this header currently used for only normal-acl
(not implemented) on s3acl.
TODO: add translation to swift acl like as x-container-read to s3acl
"""
amz_acl = req.environ['HTTP_X_AMZ_ACL']
# Translate the Amazon ACL to something that can be
# implemented in Swift, 501 otherwise. Swift uses POST
# for ACLs, whereas S3 uses PUT.
del req.environ['HTTP_X_AMZ_ACL']
if req.query_string:
req.query_string = ''
try:
translated_acl = swift_acl_translate(amz_acl)
except ACLError:
raise InvalidArgument('x-amz-acl', amz_acl)
for header, acl in translated_acl:
req.headers[header] = acl |
Standard filter factory to use the middleware with paste.deploy | def filter_factory(global_conf, **local_conf):
"""Standard filter factory to use the middleware with paste.deploy"""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info(
's3api',
# TODO: make default values as variables
max_bucket_listing=int(conf.get('max_bucket_listing', 1000)),
max_parts_listing=int(conf.get('max_parts_listing', 1000)),
max_upload_part_num=int(conf.get('max_upload_part_num', 1000)),
max_multi_delete_objects=int(
conf.get('max_multi_delete_objects', 1000)),
allow_multipart_uploads=config_true_value(
conf.get('allow_multipart_uploads', True)),
min_segment_size=int(conf.get('min_segment_size', 5242880)),
s3_acl=config_true_value(conf.get('s3_acl', False)),
)
register_sensitive_header('authorization')
register_sensitive_param('Signature')
register_sensitive_param('X-Amz-Signature')
def s3api_filter(app):
return S3ApiMiddleware(ListingEtagMiddleware(app), conf)
return s3api_filter |
Set and retrieve the acl in self.headers | def _header_acl_property(resource):
"""
Set and retrieve the acl in self.headers
"""
def getter(self):
return getattr(self, '_%s' % resource)
def setter(self, value):
self.headers.update(encode_acl(resource, value))
setattr(self, '_%s' % resource, value)
def deleter(self):
self.headers[sysmeta_header(resource, 'acl')] = ''
return property(getter, setter, deleter,
doc='Get and set the %s acl property' % resource) |
Helper function to find a request class to use from Map | def get_request_class(env, s3_acl):
"""
Helper function to find a request class to use from Map
"""
if s3_acl:
request_classes = (S3AclRequest, SigV4S3AclRequest)
else:
request_classes = (S3Request, SigV4Request)
req = swob.Request(env)
if 'X-Amz-Credential' in req.params or \
req.headers.get('Authorization', '').startswith(
'AWS4-HMAC-SHA256 '):
# This is an Amazon SigV4 request
return request_classes[1]
else:
# The others using Amazon SigV2 class
return request_classes[0] |
Returns a WSGI filter app for use with paste.deploy. | def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return S3Token(app, conf)
return auth_filter |
Encode an ACL instance to Swift metadata.
Given a resource type and an ACL instance, this method returns HTTP
headers, which can be used for Swift metadata. | def encode_acl(resource, acl):
"""
Encode an ACL instance to Swift metadata.
Given a resource type and an ACL instance, this method returns HTTP
headers, which can be used for Swift metadata.
"""
header_value = {"Owner": acl.owner.id}
grants = []
for grant in acl.grants:
grant = {"Permission": grant.permission,
"Grantee": str(grant.grantee)}
grants.append(grant)
header_value.update({"Grant": grants})
headers = {}
key = sysmeta_header(resource, 'acl')
headers[key] = json.dumps(header_value, separators=(',', ':'))
return headers |
Decode Swift metadata to an ACL instance.
Given a resource type and HTTP headers, this method returns an ACL
instance. | def decode_acl(resource, headers, allow_no_owner):
"""
Decode Swift metadata to an ACL instance.
Given a resource type and HTTP headers, this method returns an ACL
instance.
"""
value = ''
key = sysmeta_header(resource, 'acl')
if key in headers:
value = headers[key]
if value == '':
# Fix me: In the case of value is empty or not dict instance,
# I want an instance of Owner as None.
# However, in the above process would occur error in reference
# to an instance variable of Owner.
return ACL(Owner(None, None), [], True, allow_no_owner)
try:
encode_value = json.loads(value)
if not isinstance(encode_value, dict):
return ACL(Owner(None, None), [], True, allow_no_owner)
id = None
name = None
grants = []
if 'Owner' in encode_value:
id = encode_value['Owner']
name = encode_value['Owner']
if 'Grant' in encode_value:
for grant in encode_value['Grant']:
grantee = None
# pylint: disable-msg=E1101
for group in Group.__subclasses__():
if group.__name__ == grant['Grantee']:
grantee = group()
if not grantee:
grantee = User(grant['Grantee'])
permission = grant['Permission']
grants.append(Grant(grantee, permission))
return ACL(Owner(id, name), grants, True, allow_no_owner)
except Exception as e:
raise InvalidSubresource((resource, 'acl', value), e) |
Convert a URI to one of the predefined groups. | def get_group_subclass_from_uri(uri):
"""
Convert a URI to one of the predefined groups.
"""
for group in Group.__subclasses__(): # pylint: disable-msg=E1101
if group.uri == uri:
return group
raise InvalidArgument('uri', uri, 'Invalid group uri') |
A set of predefined grants supported by AWS S3. | def canned_acl_grantees(bucket_owner, object_owner=None):
"""
A set of predefined grants supported by AWS S3.
"""
owner = object_owner or bucket_owner
return {
'private': [
('FULL_CONTROL', User(owner.name)),
],
'public-read': [
('READ', AllUsers()),
('FULL_CONTROL', User(owner.name)),
],
'public-read-write': [
('READ', AllUsers()),
('WRITE', AllUsers()),
('FULL_CONTROL', User(owner.name)),
],
'authenticated-read': [
('READ', AuthenticatedUsers()),
('FULL_CONTROL', User(owner.name)),
],
'bucket-owner-read': [
('READ', User(bucket_owner.name)),
('FULL_CONTROL', User(owner.name)),
],
'bucket-owner-full-control': [
('FULL_CONTROL', User(owner.name)),
('FULL_CONTROL', User(bucket_owner.name)),
],
'log-delivery-write': [
('WRITE', LogDelivery()),
('READ_ACP', LogDelivery()),
('FULL_CONTROL', User(owner.name)),
],
} |
Returns the system metadata prefix for given resource type. | def sysmeta_prefix(resource):
"""
Returns the system metadata prefix for given resource type.
"""
if resource.lower() == 'object':
return 'x-object-sysmeta-s3api-'
else:
return 'x-container-sysmeta-s3api-' |
Returns the system metadata header for given resource type and name. | def sysmeta_header(resource, name):
"""
Returns the system metadata header for given resource type and name.
"""
return sysmeta_prefix(resource) + name |
Validates the name of the bucket against S3 criteria,
http://docs.amazonwebservices.com/AmazonS3/latest/BucketRestrictions.html
True is valid, False is invalid. | def validate_bucket_name(name, dns_compliant_bucket_names):
"""
Validates the name of the bucket against S3 criteria,
http://docs.amazonwebservices.com/AmazonS3/latest/BucketRestrictions.html
True is valid, False is invalid.
"""
valid_chars = '-.a-z0-9'
if not dns_compliant_bucket_names:
valid_chars += 'A-Z_'
max_len = 63 if dns_compliant_bucket_names else 255
if len(name) < 3 or len(name) > max_len or not name[0].isalnum():
# Bucket names should be between 3 and 63 (or 255) characters long
# Bucket names must start with a letter or a number
return False
elif dns_compliant_bucket_names and (
'.-' in name or '-.' in name or '..' in name or
not name[-1].isalnum()):
# Bucket names cannot contain dashes next to periods
# Bucket names cannot contain two adjacent periods
# Bucket names must end with a letter or a number
return False
elif name.endswith('.'):
# Bucket names must not end with dot
return False
elif re.match(r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.)"
r"{3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",
name):
# Bucket names cannot be formatted as an IP Address
return False
elif not re.match("^[%s]*$" % valid_chars, name):
# Bucket names can contain lowercase letters, numbers, and hyphens.
return False
else:
return True |
mktime creates a float instance in epoch time really like as time.mktime
the difference from time.mktime is allowing to 2 formats string for the
argument for the S3 testing usage.
TODO: support
:param timestamp_str: a string of timestamp formatted as
(a) RFC2822 (e.g. date header)
(b) %Y-%m-%dT%H:%M:%S (e.g. copy result)
:param time_format: a string of format to parse in (b) process
:returns: a float instance in epoch time | def mktime(timestamp_str, time_format='%Y-%m-%dT%H:%M:%S'):
"""
mktime creates a float instance in epoch time really like as time.mktime
the difference from time.mktime is allowing to 2 formats string for the
argument for the S3 testing usage.
TODO: support
:param timestamp_str: a string of timestamp formatted as
(a) RFC2822 (e.g. date header)
(b) %Y-%m-%dT%H:%M:%S (e.g. copy result)
:param time_format: a string of format to parse in (b) process
:returns: a float instance in epoch time
"""
# time_tuple is the *remote* local time
time_tuple = email.utils.parsedate_tz(timestamp_str)
if time_tuple is None:
time_tuple = time.strptime(timestamp_str, time_format)
# add timezone info as utc (no time difference)
time_tuple += (0, )
# We prefer calendar.gmtime and a manual adjustment over
# email.utils.mktime_tz because older versions of Python (<2.7.4) may
# double-adjust for timezone in some situations (such when swift changes
# os.environ['TZ'] without calling time.tzset()).
epoch_time = calendar.timegm(time_tuple) - time_tuple[9]
return epoch_time |
Attempts to construct an S3 ACL based on what is found in the swift headers | def get_acl(account_name, headers):
"""
Attempts to construct an S3 ACL based on what is found in the swift headers
"""
elem = Element('AccessControlPolicy')
owner = SubElement(elem, 'Owner')
SubElement(owner, 'ID').text = account_name
SubElement(owner, 'DisplayName').text = account_name
access_control_list = SubElement(elem, 'AccessControlList')
# grant FULL_CONTROL to myself by default
grant = SubElement(access_control_list, 'Grant')
grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
grantee.set('{%s}type' % XMLNS_XSI, 'CanonicalUser')
SubElement(grantee, 'ID').text = account_name
SubElement(grantee, 'DisplayName').text = account_name
SubElement(grant, 'Permission').text = 'FULL_CONTROL'
referrers, _ = parse_acl(headers.get('x-container-read'))
if referrer_allowed('unknown', referrers):
# grant public-read access
grant = SubElement(access_control_list, 'Grant')
grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
grantee.set('{%s}type' % XMLNS_XSI, 'Group')
SubElement(grantee, 'URI').text = \
'http://acs.amazonaws.com/groups/global/AllUsers'
SubElement(grant, 'Permission').text = 'READ'
referrers, _ = parse_acl(headers.get('x-container-write'))
if referrer_allowed('unknown', referrers):
# grant public-write access
grant = SubElement(access_control_list, 'Grant')
grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
grantee.set('{%s}type' % XMLNS_XSI, 'Group')
SubElement(grantee, 'URI').text = \
'http://acs.amazonaws.com/groups/global/AllUsers'
SubElement(grant, 'Permission').text = 'WRITE'
body = tostring(elem)
return HTTPOk(body=body, content_type="text/plain") |
A decorator to ensure that the request is a bucket operation. If the
target resource is an object, this decorator updates the request by default
so that the controller handles it as a bucket operation. If 'err_resp' is
specified, this raises it on error instead. | def bucket_operation(func=None, err_resp=None, err_msg=None):
"""
A decorator to ensure that the request is a bucket operation. If the
target resource is an object, this decorator updates the request by default
so that the controller handles it as a bucket operation. If 'err_resp' is
specified, this raises it on error instead.
"""
def _bucket_operation(func):
@functools.wraps(func)
def wrapped(self, req):
if not req.is_bucket_request:
if err_resp:
raise err_resp(msg=err_msg)
self.logger.debug('A key is specified for bucket API.')
req.object_name = None
return func(self, req)
return wrapped
if func:
return _bucket_operation(func)
else:
return _bucket_operation |
A decorator to ensure that the request is an object operation. If the
target resource is not an object, this raises an error response. | def object_operation(func):
"""
A decorator to ensure that the request is an object operation. If the
target resource is not an object, this raises an error response.
"""
@functools.wraps(func)
def wrapped(self, req):
if not req.is_object_request:
raise InvalidRequest('A key must be specified')
return func(self, req)
return wrapped |
A decorator to ensure the container existence. | def check_container_existence(func):
"""
A decorator to ensure the container existence.
"""
@functools.wraps(func)
def check_container(self, req):
req.get_container_info(self.app)
return func(self, req)
return check_container |
Make a HEAD request for existing upload object metadata. Tries the upload
marker first, and then falls back to the manifest object.
:param req: an S3Request object.
:param app: the wsgi app.
:param upload_id: the upload id.
:returns: a tuple of (S3Response, boolean) where the boolean is True if the
response is from the upload marker and False otherwise.
:raises: NoSuchUpload if neither the marker nor the manifest were found. | def _get_upload_info(req, app, upload_id):
"""
Make a HEAD request for existing upload object metadata. Tries the upload
marker first, and then falls back to the manifest object.
:param req: an S3Request object.
:param app: the wsgi app.
:param upload_id: the upload id.
:returns: a tuple of (S3Response, boolean) where the boolean is True if the
response is from the upload marker and False otherwise.
:raises: NoSuchUpload if neither the marker nor the manifest were found.
"""
container = req.container_name + MULTIUPLOAD_SUFFIX
obj = '%s/%s' % (req.object_name, upload_id)
# XXX: if we leave the copy-source header, somewhere later we might
# drop in a ?version-id=... query string that's utterly inappropriate
# for the upload marker. Until we get around to fixing that, just pop
# it off for now...
copy_source = req.headers.pop('X-Amz-Copy-Source', None)
try:
resp = req.get_response(app, 'HEAD', container=container, obj=obj)
return resp, True
except NoSuchKey:
# ensure consistent path and policy are logged despite manifest HEAD
upload_marker_path = req.environ.get('s3api.backend_path')
policy_index = req.policy_index
try:
resp = req.get_response(app, 'HEAD')
if resp.sysmeta_headers.get(sysmeta_header(
'object', 'upload-id')) == upload_id:
return resp, False
except NoSuchKey:
pass
finally:
# Ops often find it more useful for us to log the upload marker
# path, so put it back
if upload_marker_path is not None:
req.environ['s3api.backend_path'] = upload_marker_path
if policy_index is not None:
req.policy_index = policy_index
raise NoSuchUpload(upload_id=upload_id)
finally:
# ...making sure to restore any copy-source before returning
if copy_source is not None:
req.headers['X-Amz-Copy-Source'] = copy_source |
Provides a factory function for loading versioning middleware. | def filter_factory(global_conf, **local_conf):
"""Provides a factory function for loading versioning middleware."""
conf = global_conf.copy()
conf.update(local_conf)
if config_true_value(conf.get('allow_versioned_writes')):
register_swift_info('versioned_writes', allowed_flags=(
CLIENT_VERSIONS_LOC, CLIENT_HISTORY_LOC))
allow_object_versioning = config_true_value(conf.get(
'allow_object_versioning'))
if allow_object_versioning:
register_swift_info('object_versioning')
def versioning_filter(app):
if allow_object_versioning:
if 'symlink' not in get_swift_info():
raise ValueError('object versioning requires symlinks')
app = ObjectVersioningMiddleware(app, conf)
return VersionedWritesMiddleware(app, conf)
return versioning_filter |
Pre-validation for all component ring builders that are to be included in
the composite ring. Checks that all component rings are valid with respect
to each other.
:param builders: a list of :class:`swift.common.ring.builder.RingBuilder`
instances
:raises ValueError: if the builders are invalid with respect to each other | def pre_validate_all_builders(builders):
"""
Pre-validation for all component ring builders that are to be included in
the composite ring. Checks that all component rings are valid with respect
to each other.
:param builders: a list of :class:`swift.common.ring.builder.RingBuilder`
instances
:raises ValueError: if the builders are invalid with respect to each other
"""
if len(builders) < 2:
raise ValueError('Two or more component builders are required.')
# all ring builders should be consistent for each MUST_MATCH_ATTRS
for attr in MUST_MATCH_ATTRS:
attr_dict = defaultdict(list)
for i, builder in enumerate(builders):
value = getattr(builder, attr, None)
attr_dict[value].append(i)
if len(attr_dict) > 1:
variations = ['%s=%s found at indexes %s' %
(attr, val, indexes)
for val, indexes in attr_dict.items()]
raise ValueError(
'All builders must have same value for %r.\n%s'
% (attr, '\n '.join(variations)))
# all ring builders should have int replica count and not have dirty mods
errors = []
for index, builder in enumerate(builders):
if int(builder.replicas) != builder.replicas:
errors.append(
'Non integer replica count %s found at index %s' %
(builder.replicas, index))
if builder.devs_changed:
errors.append(
'Builder needs rebalance to apply changes at index %s' %
index)
if errors:
raise ValueError(
'Problem with builders.\n%s' % ('\n '.join(errors)))
# check regions
regions_info = {}
for builder in builders:
regions_info[builder] = set(
dev['region'] for dev in builder._iter_devs())
for first_region_set, second_region_set in combinations(
regions_info.values(), 2):
inter = first_region_set & second_region_set
if inter:
raise ValueError('Same region found in different rings')
# check device uniqueness
check_for_dev_uniqueness(builders) |
Check that no device appears in more than one of the given list of
builders.
:param builders: a list of :class:`swift.common.ring.builder.RingBuilder`
instances
:raises ValueError: if the same device is found in more than one builder | def check_for_dev_uniqueness(builders):
"""
Check that no device appears in more than one of the given list of
builders.
:param builders: a list of :class:`swift.common.ring.builder.RingBuilder`
instances
:raises ValueError: if the same device is found in more than one builder
"""
builder2devs = []
for i, builder in enumerate(builders):
dev_set = set()
for dev in builder._iter_devs():
ip, port, device = (dev['ip'], dev['port'], dev['device'])
for j, (other_builder, devs) in enumerate(builder2devs):
if (ip, port, device) in devs:
raise ValueError(
'Duplicate ip/port/device combination %s/%s/%s found '
'in builders at indexes %s and %s' %
(ip, port, device, j, i)
)
dev_set.add((ip, port, device))
builder2devs.append((builder, dev_set)) |
Given a list of component ring builders, return a composite RingData
instance.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a new RingData instance built from the component builders
:raises ValueError: if the builders are invalid with respect to each other | def _make_composite_ring(builders):
"""
Given a list of component ring builders, return a composite RingData
instance.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a new RingData instance built from the component builders
:raises ValueError: if the builders are invalid with respect to each other
"""
composite_r2p2d = []
composite_devs = []
device_offset = 0
for builder in builders:
# copy all devs list and replica2part2dev table to be able
# to modify the id for each dev
devs = copy.deepcopy(builder.devs)
r2p2d = copy.deepcopy(builder._replica2part2dev)
for part2dev in r2p2d:
for part, dev in enumerate(part2dev):
part2dev[part] += device_offset
for dev in [d for d in devs if d]:
# note that some devs may not be referenced in r2p2d but update
# their dev id nonetheless
dev['id'] += device_offset
composite_r2p2d.extend(r2p2d)
composite_devs.extend(devs)
device_offset += len(builder.devs)
return RingData(composite_r2p2d, composite_devs, builders[0].part_shift) |
Given a list of component ring builders, perform validation on the list of
builders and return a composite RingData instance.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a new RingData instance built from the component builders
:raises ValueError: if the builders are invalid with respect to each other | def compose_rings(builders):
"""
Given a list of component ring builders, perform validation on the list of
builders and return a composite RingData instance.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a new RingData instance built from the component builders
:raises ValueError: if the builders are invalid with respect to each other
"""
pre_validate_all_builders(builders)
rd = _make_composite_ring(builders)
return rd |
Return a dict of selected builder attributes to save in composite meta. The
dict has keys ``version``, ``replicas`` and ``id``.
:param builder: a :class:`swift.common.ring.builder.RingBuilder`
instance
:return: a dict of component metadata | def _make_component_meta(builder):
"""
Return a dict of selected builder attributes to save in composite meta. The
dict has keys ``version``, ``replicas`` and ``id``.
:param builder: a :class:`swift.common.ring.builder.RingBuilder`
instance
:return: a dict of component metadata
"""
attrs = ['version', 'replicas', 'id']
metadata = dict((attr, getattr(builder, attr)) for attr in attrs)
return metadata |
Return a dict with key ``components`` that maps to a list of dicts, each
dict being of the form returned by :func:`_make_component_meta`.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a dict of composite metadata | def _make_composite_metadata(builders):
"""
Return a dict with key ``components`` that maps to a list of dicts, each
dict being of the form returned by :func:`_make_component_meta`.
:param builders: a list of
:class:`swift.common.ring.builder.RingBuilder` instances
:return: a dict of composite metadata
"""
component_meta = [_make_component_meta(builder) for builder in builders]
return {'components': component_meta} |
Check that the given new_component metadata describes the same builder as
the given old_component metadata. The new_component builder does not
necessarily need to be in the same state as when the old_component metadata
was created to satisfy this check e.g. it may have changed devs and been
rebalanced.
:param old_component: a dict of metadata describing a component builder
:param new_component: a dict of metadata describing a component builder
:raises ValueError: if the new_component is not the same as that described
by the old_component | def check_same_builder(old_component, new_component):
"""
Check that the given new_component metadata describes the same builder as
the given old_component metadata. The new_component builder does not
necessarily need to be in the same state as when the old_component metadata
was created to satisfy this check e.g. it may have changed devs and been
rebalanced.
:param old_component: a dict of metadata describing a component builder
:param new_component: a dict of metadata describing a component builder
:raises ValueError: if the new_component is not the same as that described
by the old_component
"""
for key in ['replicas', 'id']:
if old_component[key] != new_component[key]:
raise ValueError("Attribute mismatch for %s: %r != %r" %
(key, old_component[key], new_component[key])) |
Return True if the given builder has been modified with respect to its
state when the given component_meta was created.
:param old_component: a dict of metadata describing a component ring
:param new_component: a dict of metadata describing a component ring
:return: True if the builder has been modified, False otherwise.
:raises ValueError: if the version of the new_component is older than the
version of the existing component. | def is_builder_newer(old_component, new_component):
"""
Return True if the given builder has been modified with respect to its
state when the given component_meta was created.
:param old_component: a dict of metadata describing a component ring
:param new_component: a dict of metadata describing a component ring
:return: True if the builder has been modified, False otherwise.
:raises ValueError: if the version of the new_component is older than the
version of the existing component.
"""
if new_component['version'] < old_component['version']:
raise ValueError('Older builder version: %s < %s' %
(new_component['version'], old_component['version']))
return old_component['version'] < new_component['version'] |
Check that the given builders and their order are the same as that
used to build an existing composite ring. Return True if any of the given
builders has been modified with respect to its state when the given
component_meta was created.
:param old_composite_meta: a dict of the form returned by
:func:`_make_composite_meta`
:param new_composite_meta: a dict of the form returned by
:func:`_make_composite_meta`
:return: True if any of the components has been modified, False otherwise.
:raises Value Error: if proposed new components do not match any existing
components. | def check_against_existing(old_composite_meta, new_composite_meta):
"""
Check that the given builders and their order are the same as that
used to build an existing composite ring. Return True if any of the given
builders has been modified with respect to its state when the given
component_meta was created.
:param old_composite_meta: a dict of the form returned by
:func:`_make_composite_meta`
:param new_composite_meta: a dict of the form returned by
:func:`_make_composite_meta`
:return: True if any of the components has been modified, False otherwise.
:raises Value Error: if proposed new components do not match any existing
components.
"""
errors = []
newer = False
old_components = old_composite_meta['components']
new_components = new_composite_meta['components']
for i, old_component in enumerate(old_components):
try:
new_component = new_components[i]
except IndexError:
errors.append("Missing builder at index %d" % i)
continue
try:
# check we have same component builder in this position vs existing
check_same_builder(old_component, new_component)
newer |= is_builder_newer(old_component, new_component)
except ValueError as err:
errors.append("Invalid builder change at index %d: %s" % (i, err))
for j, new_component in enumerate(new_components[i + 1:], start=i + 1):
errors.append("Unexpected extra builder at index %d: %r" %
(j, new_component))
if errors:
raise ValueError('\n'.join(errors))
return newer |
Check that all builders in the given list have id's assigned and that no
id appears more than once in the list.
:param builders: a list instances of
:class:`swift.common.ring.builder.RingBuilder`
:raises: ValueError if any builder id is missing or repeated | def check_builder_ids(builders):
"""
Check that all builders in the given list have id's assigned and that no
id appears more than once in the list.
:param builders: a list instances of
:class:`swift.common.ring.builder.RingBuilder`
:raises: ValueError if any builder id is missing or repeated
"""
id2index = defaultdict(list)
errors = []
for i, builder in enumerate(builders):
try:
id2index[builder.id].append(str(i))
except AttributeError as err:
errors.append("Problem with builder at index %d: %s" % (i, err))
for builder_id, index in id2index.items():
if len(index) > 1:
errors.append("Builder id %r used at indexes %s" %
(builder_id, ', '.join(index)))
if errors:
raise ValueError('\n'.join(errors)) |
Returns a tuple of tiers for a given device in ascending order by
length.
:returns: tuple of tiers | def tiers_for_dev(dev):
"""
Returns a tuple of tiers for a given device in ascending order by
length.
:returns: tuple of tiers
"""
t1 = dev['region']
t2 = dev['zone']
t3 = dev['ip']
t4 = dev['id']
return ((t1,),
(t1, t2),
(t1, t2, t3),
(t1, t2, t3, t4)) |
Construct the tier tree from the zone layout.
The tier tree is a dictionary that maps tiers to their child tiers.
A synthetic root node of () is generated so that there's one tree,
not a forest.
Example:
region 1 -+---- zone 1 -+---- 192.168.101.1 -+---- device id 0
| | |
| | +---- device id 1
| | |
| | +---- device id 2
| |
| +---- 192.168.101.2 -+---- device id 3
| |
| +---- device id 4
| |
| +---- device id 5
|
+---- zone 2 -+---- 192.168.102.1 -+---- device id 6
| |
| +---- device id 7
| |
| +---- device id 8
|
+---- 192.168.102.2 -+---- device id 9
|
+---- device id 10
region 2 -+---- zone 1 -+---- 192.168.201.1 -+---- device id 12
| |
| +---- device id 13
| |
| +---- device id 14
|
+---- 192.168.201.2 -+---- device id 15
|
+---- device id 16
|
+---- device id 17
The tier tree would look like:
{
(): [(1,), (2,)],
(1,): [(1, 1), (1, 2)],
(2,): [(2, 1)],
(1, 1): [(1, 1, 192.168.101.1),
(1, 1, 192.168.101.2)],
(1, 2): [(1, 2, 192.168.102.1),
(1, 2, 192.168.102.2)],
(2, 1): [(2, 1, 192.168.201.1),
(2, 1, 192.168.201.2)],
(1, 1, 192.168.101.1): [(1, 1, 192.168.101.1, 0),
(1, 1, 192.168.101.1, 1),
(1, 1, 192.168.101.1, 2)],
(1, 1, 192.168.101.2): [(1, 1, 192.168.101.2, 3),
(1, 1, 192.168.101.2, 4),
(1, 1, 192.168.101.2, 5)],
(1, 2, 192.168.102.1): [(1, 2, 192.168.102.1, 6),
(1, 2, 192.168.102.1, 7),
(1, 2, 192.168.102.1, 8)],
(1, 2, 192.168.102.2): [(1, 2, 192.168.102.2, 9),
(1, 2, 192.168.102.2, 10)],
(2, 1, 192.168.201.1): [(2, 1, 192.168.201.1, 12),
(2, 1, 192.168.201.1, 13),
(2, 1, 192.168.201.1, 14)],
(2, 1, 192.168.201.2): [(2, 1, 192.168.201.2, 15),
(2, 1, 192.168.201.2, 16),
(2, 1, 192.168.201.2, 17)],
}
:devices: device dicts from which to generate the tree
:returns: tier tree | def build_tier_tree(devices):
"""
Construct the tier tree from the zone layout.
The tier tree is a dictionary that maps tiers to their child tiers.
A synthetic root node of () is generated so that there's one tree,
not a forest.
Example:
region 1 -+---- zone 1 -+---- 192.168.101.1 -+---- device id 0
| | |
| | +---- device id 1
| | |
| | +---- device id 2
| |
| +---- 192.168.101.2 -+---- device id 3
| |
| +---- device id 4
| |
| +---- device id 5
|
+---- zone 2 -+---- 192.168.102.1 -+---- device id 6
| |
| +---- device id 7
| |
| +---- device id 8
|
+---- 192.168.102.2 -+---- device id 9
|
+---- device id 10
region 2 -+---- zone 1 -+---- 192.168.201.1 -+---- device id 12
| |
| +---- device id 13
| |
| +---- device id 14
|
+---- 192.168.201.2 -+---- device id 15
|
+---- device id 16
|
+---- device id 17
The tier tree would look like:
{
(): [(1,), (2,)],
(1,): [(1, 1), (1, 2)],
(2,): [(2, 1)],
(1, 1): [(1, 1, 192.168.101.1),
(1, 1, 192.168.101.2)],
(1, 2): [(1, 2, 192.168.102.1),
(1, 2, 192.168.102.2)],
(2, 1): [(2, 1, 192.168.201.1),
(2, 1, 192.168.201.2)],
(1, 1, 192.168.101.1): [(1, 1, 192.168.101.1, 0),
(1, 1, 192.168.101.1, 1),
(1, 1, 192.168.101.1, 2)],
(1, 1, 192.168.101.2): [(1, 1, 192.168.101.2, 3),
(1, 1, 192.168.101.2, 4),
(1, 1, 192.168.101.2, 5)],
(1, 2, 192.168.102.1): [(1, 2, 192.168.102.1, 6),
(1, 2, 192.168.102.1, 7),
(1, 2, 192.168.102.1, 8)],
(1, 2, 192.168.102.2): [(1, 2, 192.168.102.2, 9),
(1, 2, 192.168.102.2, 10)],
(2, 1, 192.168.201.1): [(2, 1, 192.168.201.1, 12),
(2, 1, 192.168.201.1, 13),
(2, 1, 192.168.201.1, 14)],
(2, 1, 192.168.201.2): [(2, 1, 192.168.201.2, 15),
(2, 1, 192.168.201.2, 16),
(2, 1, 192.168.201.2, 17)],
}
:devices: device dicts from which to generate the tree
:returns: tier tree
"""
tier2children = defaultdict(set)
for dev in devices:
for tier in tiers_for_dev(dev):
if len(tier) > 1:
tier2children[tier[0:-1]].add(tier)
else:
tier2children[()].add(tier)
return tier2children |
Return normalized ip if the ip is a valid ip.
Otherwise raise ValueError Exception. The hostname is
normalized to all lower case. IPv6-addresses are converted to
lowercase and fully expanded. | def validate_and_normalize_ip(ip):
"""
Return normalized ip if the ip is a valid ip.
Otherwise raise ValueError Exception. The hostname is
normalized to all lower case. IPv6-addresses are converted to
lowercase and fully expanded.
"""
# first convert to lower case
new_ip = ip.lower()
if is_valid_ipv4(new_ip):
return new_ip
elif is_valid_ipv6(new_ip):
return expand_ipv6(new_ip)
else:
raise ValueError('Invalid ip %s' % ip) |
Return normalized address if the address is a valid ip or hostname.
Otherwise raise ValueError Exception. The hostname is
normalized to all lower case. IPv6-addresses are converted to
lowercase and fully expanded.
RFC1123 2.1 Host Names and Nubmers
DISCUSSION
This last requirement is not intended to specify the complete
syntactic form for entering a dotted-decimal host number;
that is considered to be a user-interface issue. For
example, a dotted-decimal number must be enclosed within
"[ ]" brackets for SMTP mail (see Section 5.2.17). This
notation could be made universal within a host system,
simplifying the syntactic checking for a dotted-decimal
number.
If a dotted-decimal number can be entered without such
identifying delimiters, then a full syntactic check must be
made, because a segment of a host domain name is now allowed
to begin with a digit and could legally be entirely numeric
(see Section 6.1.2.4). However, a valid host name can never
have the dotted-decimal form #.#.#.#, since at least the
highest-level component label will be alphabetic. | def validate_and_normalize_address(address):
"""
Return normalized address if the address is a valid ip or hostname.
Otherwise raise ValueError Exception. The hostname is
normalized to all lower case. IPv6-addresses are converted to
lowercase and fully expanded.
RFC1123 2.1 Host Names and Nubmers
DISCUSSION
This last requirement is not intended to specify the complete
syntactic form for entering a dotted-decimal host number;
that is considered to be a user-interface issue. For
example, a dotted-decimal number must be enclosed within
"[ ]" brackets for SMTP mail (see Section 5.2.17). This
notation could be made universal within a host system,
simplifying the syntactic checking for a dotted-decimal
number.
If a dotted-decimal number can be entered without such
identifying delimiters, then a full syntactic check must be
made, because a segment of a host domain name is now allowed
to begin with a digit and could legally be entirely numeric
(see Section 6.1.2.4). However, a valid host name can never
have the dotted-decimal form #.#.#.#, since at least the
highest-level component label will be alphabetic.
"""
new_address = address.lstrip('[').rstrip(']')
if address.startswith('[') and address.endswith(']'):
return validate_and_normalize_ip(new_address)
new_address = new_address.lower()
if is_valid_ipv4(new_address):
return new_address
elif is_valid_ipv6(new_address):
return expand_ipv6(new_address)
elif is_valid_hostname(new_address):
return new_address
else:
raise ValueError('Invalid address %s' % address) |
Return True if the provided hostname is a valid hostname | def is_valid_hostname(hostname):
"""
Return True if the provided hostname is a valid hostname
"""
if len(hostname) < 1 or len(hostname) > 255:
return False
if hostname.endswith('.'):
# strip exactly one dot from the right, if present
hostname = hostname[:-1]
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split(".")) |
Return True if the provided dev_ip and dev_port are among the IP
addresses specified in my_ips and my_port respectively.
To support accurate locality determination in the server-per-port
deployment, when my_port is None, only IP addresses are used for
determining locality (dev_port is ignored).
If dev_ip is a hostname then it is first translated to an IP
address before checking it against my_ips. | def is_local_device(my_ips, my_port, dev_ip, dev_port):
"""
Return True if the provided dev_ip and dev_port are among the IP
addresses specified in my_ips and my_port respectively.
To support accurate locality determination in the server-per-port
deployment, when my_port is None, only IP addresses are used for
determining locality (dev_port is ignored).
If dev_ip is a hostname then it is first translated to an IP
address before checking it against my_ips.
"""
candidate_ips = []
if not is_valid_ip(dev_ip) and is_valid_hostname(dev_ip):
try:
# get the ip for this host; use getaddrinfo so that
# it works for both ipv4 and ipv6 addresses
addrinfo = socket.getaddrinfo(dev_ip, dev_port)
for addr in addrinfo:
family = addr[0]
dev_ip = addr[4][0] # get the ip-address
if family == socket.AF_INET6:
dev_ip = expand_ipv6(dev_ip)
candidate_ips.append(dev_ip)
except socket.gaierror:
return False
else:
if is_valid_ipv6(dev_ip):
dev_ip = expand_ipv6(dev_ip)
candidate_ips = [dev_ip]
for dev_ip in candidate_ips:
if dev_ip in my_ips and (my_port is None or dev_port == my_port):
return True
return False |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.